@@ -80,7 +80,10 @@ class DecagonLayer(torch.nn.Module):
                 repr_ = sum(repr_)
                 repr_ = torch.nn.functional.normalize(repr_, p=2, dim=1)
                 next_layer_repr[node_type_row].append(repr_)
-            next_layer_repr[node_type_row] = sum(next_layer_repr[node_type_row])
-            next_layer_repr[node_type_row] = self.layer_activation(next_layer_repr[node_type_row])
+            if len(next_layer_repr[node_type_row]) == 0:
+                next_layer_repr[node_type_row] = torch.zeros(self.output_dim[node_type_row])
+            else:
+                next_layer_repr[node_type_row] = sum(next_layer_repr[node_type_row])
+                next_layer_repr[node_type_row] = self.layer_activation(next_layer_repr[node_type_row])
         return next_layer_repr
@@ -16,7 +16,7 @@ class InputLayer(torch.nn.Module):
         output_dim = output_dim or \
             list(map(lambda a: a.count, data.node_types))
         if not isinstance(output_dim, list):
             output_dim = [output_dim,] * len(data.node_types)
@@ -72,7 +72,7 @@ class OneHotInputLayer(torch.nn.Module):
     def __repr__(self) -> str:
         s = ''
-        s += 'One-hot Icosagon input layer\n'
+        s += 'Icosagon one-hot input layer\n'
         s += ' # of node types: %d\n' % len(self.data.node_types)
         for nt in self.data.node_types:
            s += ' - %s (%d)\n' % (nt.name, nt.count)
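For reference, after the rename the one-hot layer's repr for a dataset with two node types would look roughly as follows (node-type names and counts are made up for illustration):

```
Icosagon one-hot input layer
 # of node types: 2
 - Gene (1000)
 - Drug (500)
```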
@@ -43,8 +43,47 @@ def test_decode_layer_02():
     seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
     pred_adj_matrices = seq(None)
     assert isinstance(pred_adj_matrices, dict)
     assert len(pred_adj_matrices) == 1
     assert isinstance(pred_adj_matrices[0, 0], list)
     assert len(pred_adj_matrices[0, 0]) == 1
+
+
+def test_decode_layer_03():
+    d = Data()
+    d.add_node_type('Dummy 1', 100)
+    d.add_node_type('Dummy 2', 100)
+    d.add_relation_type('Dummy Relation 1', 0, 1,
+        torch.rand((100, 100), dtype=torch.float32).round().to_sparse())
+
+    in_layer = OneHotInputLayer(d)
+    d_layer = DecagonLayer(in_layer.output_dim, 32, d)
+    dec_layer = DecodeLayer(input_dim=d_layer.output_dim, data=d, keep_prob=1.,
+        decoder_class={(0, 1): DEDICOMDecoder}, activation=lambda x: x)
+    seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
+
+    pred_adj_matrices = seq(None)
+    assert isinstance(pred_adj_matrices, dict)
+    assert len(pred_adj_matrices) == 2
+    assert isinstance(pred_adj_matrices[0, 1], list)
+    assert isinstance(pred_adj_matrices[1, 0], list)
+    assert len(pred_adj_matrices[0, 1]) == 1
+    assert len(pred_adj_matrices[1, 0]) == 1
+
+
+def test_decode_layer_04():
+    d = Data()
+    d.add_node_type('Dummy', 100)
+    assert len(d.relation_types[0, 0]) == 0
+
+    in_layer = OneHotInputLayer(d)
+    d_layer = DecagonLayer(in_layer.output_dim, 32, d)
+    dec_layer = DecodeLayer(input_dim=d_layer.output_dim, data=d, keep_prob=1.,
+        decoder_class=DEDICOMDecoder, activation=lambda x: x)
+    seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
+
+    pred_adj_matrices = seq(None)
+    assert isinstance(pred_adj_matrices, dict)
+    assert len(pred_adj_matrices) == 0
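Read together, the new tests exercise both ways of passing decoder_class: a per-edge-type dict in test_decode_layer_03 and a single class applied to every relation family in test_decode_layer_04 (where the result is empty because no relation types exist). A sketch of poking at the same pipeline interactively, assuming the same names imported by the test module (Data, OneHotInputLayer, DecagonLayer, DecodeLayer, DEDICOMDecoder) and a hypothetical 'Gene'/'Drug' dataset:

```python
d = Data()
d.add_node_type('Gene', 100)
d.add_node_type('Drug', 100)
d.add_relation_type('Targets', 0, 1,
    torch.rand((100, 100), dtype=torch.float32).round().to_sparse())

in_layer = OneHotInputLayer(d)
d_layer = DecagonLayer(in_layer.output_dim, 32, d)
dec_layer = DecodeLayer(input_dim=d_layer.output_dim, data=d, keep_prob=1.,
    decoder_class={(0, 1): DEDICOMDecoder}, activation=lambda x: x)

pred = torch.nn.Sequential(in_layer, d_layer, dec_layer)(None)
# Predictions come back keyed by (row, col) node-type pairs, in both orders,
# with one entry per relation type between that pair:
print(sorted(pred.keys()))    # expected, per test_decode_layer_03: [(0, 1), (1, 0)]
print(len(pred[0, 1]))        # expected: 1
```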
@@ -70,6 +70,13 @@ def test_input_layer_03():
     assert layer.node_reps[1].device == device
+
+
+def test_input_layer_04():
+    d = _some_data()
+    layer = InputLayer(d, 32)
+    s = repr(layer)
+    assert s.startswith('Icosagon input layer')


 def test_one_hot_input_layer_01():
     d = _some_data()
     layer = OneHotInputLayer(d)
@@ -104,3 +111,10 @@ def test_one_hot_input_layer_03():
     # assert layer.device.type == 'cuda:0'
     assert layer.node_reps[0].device == device
     assert layer.node_reps[1].device == device
+
+
+def test_one_hot_input_layer_04():
+    d = _some_data()
+    layer = OneHotInputLayer(d)
+    s = repr(layer)
+    assert s.startswith('Icosagon one-hot input layer')