IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

119 lines
4.2KB

#
# This module implements a single layer of the Decagon
# model. This is going to be already quite complex, as
# we will be using all the graph convolutional building
# blocks.
#
# h_{i}^{(k+1)} = φ(∑_r ∑_{j∈N_{r}^{i}} c_{r}^{ij} * \
#     W_{r}^{(k)} h_{j}^{(k)} + c_{r}^{i} h_{i}^{(k)})
#
# N_{r}^{i} - set of neighbors of node i under relation r
# W_{r}^{(k)} - relation-type specific weight matrix
# h_{i}^{(k)} - hidden state of node i in layer k
# h_{i}^{(k)} ∈ R^{d(k)} where d(k) is the dimensionality
#     of the representation in the k-th layer
# φ - activation function
# c_{r}^{ij} - normalization constants
# c_{r}^{ij} = 1/sqrt(|N_{r}^{i}| |N_{r}^{j}|)
# c_{r}^{i} - normalization constants
# c_{r}^{i} = 1/|N_{r}^{i}|
#
  21. import torch
  22. from .convolve import SparseMultiDGCA
  23. from .data import Data
  24. from typing import List, \
  25. Union, \
  26. Callable
  27. class Layer(torch.nn.Module):
  28. def __init__(self, output_dim: Union[int, List[int]], **kwargs) -> None:
  29. super().__init__(**kwargs)
  30. self.output_dim = output_dim
  31. class InputLayer(Layer):
  32. def __init__(self, data: Data, output_dim: Union[int, List[int]]= None, **kwargs) -> None:
  33. output_dim = output_dim or \
  34. list(map(lambda a: a.count, data.node_types))
  35. if not isinstance(output_dim, list):
  36. output_dim = [output_dim,] * len(data.node_types)
  37. super().__init__(output_dim, **kwargs)
  38. self.data = data
  39. self.node_reps = None
  40. self.build()
  41. def build(self) -> None:
  42. self.node_reps = []
  43. for i, nt in enumerate(self.data.node_types):
  44. reps = torch.rand(nt.count, self.output_dim[i])
  45. reps = torch.nn.Parameter(reps)
  46. self.register_parameter('node_reps[%d]' % i, reps)
  47. self.node_reps.append(reps)
  48. def forward(self) -> List[torch.nn.Parameter]:
  49. return self.node_reps
  50. def __repr__(self) -> str:
  51. s = ''
  52. s += 'GNN input layer with output_dim: %s\n' % self.output_dim
  53. s += ' # of node types: %d\n' % len(self.data.node_types)
  54. for nt in self.data.node_types:
  55. s += ' - %s (%d)\n' % (nt.name, nt.count)
  56. return s.strip()
  57. class DecagonLayer(Layer):
  58. def __init__(self,
  59. data: Data,
  60. previous_layer: Layer,
  61. output_dim: Union[int, List[int]],
  62. keep_prob: float = 1.,
  63. rel_activation: Callable[[torch.Tensor], torch.Tensor] = lambda x: x,
  64. layer_activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
  65. **kwargs):
  66. if not isinstance(output_dim, list):
  67. output_dim = [ output_dim ] * len(data.node_types)
  68. super().__init__(output_dim, **kwargs)
  69. self.data = data
  70. self.previous_layer = previous_layer
  71. self.input_dim = previous_layer.output_dim
  72. self.keep_prob = keep_prob
  73. self.rel_activation = rel_activation
  74. self.layer_activation = layer_activation
  75. self.convolutions = None
  76. self.build()
  77. def build(self):
  78. self.convolutions = {}
  79. for key in self.data.relation_types.keys():
  80. adjacency_matrices = \
  81. self.data.get_adjacency_matrices(*key)
  82. self.convolutions[key] = SparseMultiDGCA(self.input_dim,
  83. self.output_dim, adjacency_matrices,
  84. self.keep_prob, self.rel_activation)
  85. # for node_type_row, node_type_col in enumerate(self.data.node_
  86. # if rt.node_type_row == i or rt.node_type_col == i:
  87. def __call__(self, prev_layer_repr):
  88. new_layer_repr = []
  89. for i, nt in enumerate(self.data.node_types):
  90. new_repr = []
  91. for key in self.data.relation_types.keys():
  92. nt_row, nt_col = key
  93. if nt_row != i and nt_col != i:
  94. continue
  95. if nt_row == i:
  96. x = prev_layer_repr[nt_col]
  97. else:
  98. x = prev_layer_repr[nt_row]
  99. conv = self.convolutions[key]
  100. new_repr.append(conv(x))
  101. new_repr = sum(new_repr)
  102. new_layer_repr.append(new_repr)
  103. return new_layer_repr