IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

124 lines
4.8KB

  1. import torch
  2. from .convolve import DropoutGraphConvActivation
  3. from .data import Data
  4. from .trainprep import PreparedData
  5. from typing import List, \
  6. Union, \
  7. Callable
  8. from collections import defaultdict
  9. from dataclasses import dataclass
  10. class Convolutions(torch.nn.Module):
  11. node_type_column: int
  12. convolutions: torch.nn.ModuleList # [DropoutGraphConvActivation]
  13. def __init__(self, node_type_column: int,
  14. convolutions: torch.nn.ModuleList, **kwargs):
  15. super().__init__(**kwargs)
  16. self.node_type_column = node_type_column
  17. self.convolutions = convolutions
  18. class DecagonLayer(torch.nn.Module):
  19. def __init__(self,
  20. input_dim: List[int],
  21. output_dim: List[int],
  22. data: Union[Data, PreparedData],
  23. keep_prob: float = 1.,
  24. rel_activation: Callable[[torch.Tensor], torch.Tensor] = lambda x: x,
  25. layer_activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
  26. **kwargs):
  27. super().__init__(**kwargs)
  28. if not isinstance(input_dim, list):
  29. raise ValueError('input_dim must be a list')
  30. if not output_dim:
  31. raise ValueError('output_dim must be specified')
  32. if not isinstance(output_dim, list):
  33. output_dim = [output_dim] * len(data.node_types)
  34. if not isinstance(data, Data) and not isinstance(data, PreparedData):
  35. raise ValueError('data must be of type Data or PreparedData')
  36. self.input_dim = input_dim
  37. self.output_dim = output_dim
  38. self.data = data
  39. self.keep_prob = float(keep_prob)
  40. self.rel_activation = rel_activation
  41. self.layer_activation = layer_activation
  42. self.is_sparse = False
  43. self.next_layer_repr = None
  44. self.build()
  45. def build_fam_one_node_type(self, fam):
  46. convolutions = torch.nn.ModuleList()
  47. for r in fam.relation_types:
  48. conv = DropoutGraphConvActivation(self.input_dim[fam.node_type_column],
  49. self.output_dim[fam.node_type_row], r.adjacency_matrix,
  50. self.keep_prob, self.rel_activation)
  51. convolutions.append(conv)
  52. self.next_layer_repr[fam.node_type_row].append(
  53. Convolutions(fam.node_type_column, convolutions))
  54. def build_fam_two_node_types(self, fam) -> None:
  55. convolutions_row = torch.nn.ModuleList()
  56. convolutions_column = torch.nn.ModuleList()
  57. for r in fam.relation_types:
  58. if r.adjacency_matrix is not None:
  59. conv = DropoutGraphConvActivation(self.input_dim[fam.node_type_column],
  60. self.output_dim[fam.node_type_row], r.adjacency_matrix,
  61. self.keep_prob, self.rel_activation)
  62. convolutions_row.append(conv)
  63. if r.adjacency_matrix_backward is not None:
  64. conv = DropoutGraphConvActivation(self.input_dim[fam.node_type_row],
  65. self.output_dim[fam.node_type_column], r.adjacency_matrix_backward,
  66. self.keep_prob, self.rel_activation)
  67. convolutions_column.append(conv)
  68. self.next_layer_repr[fam.node_type_row].append(
  69. Convolutions(fam.node_type_column, convolutions_row))
  70. self.next_layer_repr[fam.node_type_column].append(
  71. Convolutions(fam.node_type_row, convolutions_column))
  72. def build_family(self, fam) -> None:
  73. if fam.node_type_row == fam.node_type_column:
  74. self.build_fam_one_node_type(fam)
  75. else:
  76. self.build_fam_two_node_types(fam)
  77. def build(self):
  78. self.next_layer_repr = torch.nn.ModuleList([
  79. torch.nn.ModuleList() for _ in range(len(self.data.node_types)) ])
  80. for fam in self.data.relation_families:
  81. self.build_family(fam)
  82. def __call__(self, prev_layer_repr):
  83. next_layer_repr = [ [] for _ in range(len(self.data.node_types)) ]
  84. n = len(self.data.node_types)
  85. for node_type_row in range(n):
  86. for convolutions in self.next_layer_repr[node_type_row]:
  87. repr_ = [ conv(prev_layer_repr[convolutions.node_type_column]) \
  88. for conv in convolutions.convolutions ]
  89. repr_ = sum(repr_)
  90. repr_ = torch.nn.functional.normalize(repr_, p=2, dim=1)
  91. next_layer_repr[node_type_row].append(repr_)
  92. if len(next_layer_repr[node_type_row]) == 0:
  93. next_layer_repr[node_type_row] = torch.zeros(self.output_dim[node_type_row])
  94. else:
  95. next_layer_repr[node_type_row] = sum(next_layer_repr[node_type_row])
  96. next_layer_repr[node_type_row] = self.layer_activation(next_layer_repr[node_type_row])
  97. return next_layer_repr