@@ -77,10 +77,10 @@ def test_decagon_layer_03():
     for i in range(2):
         assert len(d_layer.next_layer_repr[i]) == 2
-        assert isinstance(d_layer.next_layer_repr[i], list)
+        assert isinstance(d_layer.next_layer_repr[i], torch.nn.ModuleList)
         assert isinstance(d_layer.next_layer_repr[i][0], Convolutions)
         assert isinstance(d_layer.next_layer_repr[i][0].node_type_column, int)
-        assert isinstance(d_layer.next_layer_repr[i][0].convolutions, list)
+        assert isinstance(d_layer.next_layer_repr[i][0].convolutions, torch.nn.ModuleList)
         assert all([
             isinstance(dgca, DropoutGraphConvActivation) \
                 for dgca in d_layer.next_layer_repr[i][0].convolutions
         ])
@@ -209,7 +209,28 @@ class Dummy4(torch.nn.Module):
         self.dummy_1 = torch.nn.ModuleList([ Dummy1() ])
+
+
+class Dummy5(torch.nn.Module):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.dummy_1 = [ torch.nn.ModuleList([ Dummy1() ]) ]
+
+
+class Dummy6(torch.nn.Module):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.dummy_1 = torch.nn.ModuleList([ torch.nn.ModuleList([ Dummy1() ]) ])
+
+
+class Dummy7(torch.nn.Module):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.dummy_1 = torch.nn.ModuleList([ torch.nn.ModuleList() ])
+        self.dummy_1[0].append(Dummy1())
+
+
 def test_module_nesting_01():
     if torch.cuda.device_count() == 0:
         pytest.skip('No CUDA support on this host')
     device = torch.device('cuda:0')
     dummy_2 = Dummy2()
     dummy_2 = dummy_2.to(device)
@@ -217,6 +238,8 @@ def test_module_nesting_01():
+
+
 def test_module_nesting_02():
     if torch.cuda.device_count() == 0:
         pytest.skip('No CUDA support on this host')
     device = torch.device('cuda:0')
     dummy_3 = Dummy3()
     dummy_3 = dummy_3.to(device)
@@ -224,7 +247,36 @@ def test_module_nesting_02():
 
 
+def test_module_nesting_03():
+    if torch.cuda.device_count() == 0:
+        pytest.skip('No CUDA support on this host')
+    device = torch.device('cuda:0')
+    dummy_4 = Dummy4()
+    dummy_4 = dummy_4.to(device)
+    assert dummy_4.dummy_1[0].whatever.device == device
+
+
+def test_module_nesting_04():
+    if torch.cuda.device_count() == 0:
+        pytest.skip('No CUDA support on this host')
+    device = torch.device('cuda:0')
+    dummy_5 = Dummy5()
+    dummy_5 = dummy_5.to(device)
+    assert dummy_5.dummy_1[0][0].whatever.device != device
+
+
+def test_module_nesting_05():
+    if torch.cuda.device_count() == 0:
+        pytest.skip('No CUDA support on this host')
+    device = torch.device('cuda:0')
+    dummy_6 = Dummy6()
+    dummy_6 = dummy_6.to(device)
+    assert dummy_6.dummy_1[0][0].whatever.device == device
+
+
+def test_module_nesting_06():
+    if torch.cuda.device_count() == 0:
+        pytest.skip('No CUDA support on this host')
+    device = torch.device('cuda:0')
+    dummy_7 = Dummy7()
+    dummy_7 = dummy_7.to(device)
+    assert dummy_7.dummy_1[0][0].whatever.device == device
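
Note on what the new tests pin down: torch.nn.Module.to() only reaches tensors that are registered on the module tree, and registration only happens through Module attributes and containers such as torch.nn.ModuleList. A plain Python list (Dummy5) breaks the chain, while a ModuleList nested inside a ModuleList (Dummy6) or one populated via append() (Dummy7) keeps it intact. This is presumably also why the first hunk tightens the decagon-layer assertions from list to torch.nn.ModuleList: next_layer_repr has to be built from registered containers or moving the layer to a device would silently leave its convolutions behind. Below is a minimal sketch of the same behavior that runs without CUDA; the Leaf/PlainListHolder/NestedListHolder names are illustrative, not from this patch, and a dtype cast stands in for the device move since both propagate through the same recursive machinery:

import torch


class Leaf(torch.nn.Module):
    """Stands in for Dummy1: a module owning a single parameter."""
    def __init__(self):
        super().__init__()
        self.whatever = torch.nn.Parameter(torch.rand(10))


class PlainListHolder(torch.nn.Module):
    """Like Dummy5: the outer container is a plain Python list,
    so the inner ModuleList is never registered as a submodule."""
    def __init__(self):
        super().__init__()
        self.dummy_1 = [ torch.nn.ModuleList([ Leaf() ]) ]


class NestedListHolder(torch.nn.Module):
    """Like Dummy6: ModuleList inside ModuleList, fully registered."""
    def __init__(self):
        super().__init__()
        self.dummy_1 = torch.nn.ModuleList([ torch.nn.ModuleList([ Leaf() ]) ])


# A dtype cast propagates through the same recursion as .to(device),
# so the difference is observable on a CPU-only host.
plain = PlainListHolder().to(torch.float64)
nested = NestedListHolder().to(torch.float64)

assert plain.dummy_1[0][0].whatever.dtype == torch.float32   # cast never arrived
assert nested.dummy_1[0][0].whatever.dtype == torch.float64  # cast propagated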