IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

112 lines
3.9KB

  1. #
  2. # This module implements a single layer of the Decagon
  3. # model. This is going to be already quite complex, as
  4. # we will be using all the graph convolutional building
  5. # blocks.
  6. #
  7. # h_{i}^(k+1) = ϕ(∑_r ∑_{j∈N_{r}^{i}} c_{r}^{ij} * \
  8. # W_{r}^(k) h_{j}^{k} + c_{r}^{i} h_{i}^(k))
  9. #
  10. # N_{r}^{i} - set of neighbors of node i under relation r
  11. # W_{r}^(k) - relation-type specific weight matrix
  12. # h_{i}^(k) - hidden state of node i in layer k
  13. # h_{i}^(k)∈R^{d(k)} where d(k) is the dimensionality
  14. # of the representation in k-th layer
  15. # ϕ - activation function
  16. # c_{r}^{ij} - normalization constants
  17. # c_{r}^{ij} = 1/sqrt(|N_{r}^{i}| |N_{r}^{j}|)
  18. # c_{r}^{i} - normalization constants
  19. # c_{r}^{i} = 1/|N_{r}^{i}|
  20. #
  21. import torch
  22. from .convolve import SparseMultiDGCA
  23. from .data import Data
  24. from typing import List, Union
  25. class Layer(torch.nn.Module):
  26. def __init__(self, output_dim: Union[int, List[int]], **kwargs) -> None:
  27. super().__init__(**kwargs)
  28. self.output_dim = output_dim
  29. class InputLayer(Layer):
  30. def __init__(self, data: Data, output_dim: Union[int, List[int]]= None, **kwargs) -> None:
  31. output_dim = output_dim or \
  32. list(map(lambda a: a.count, data.node_types))
  33. if not isinstance(output_dim, list):
  34. output_dim = [output_dim,] * len(data.node_types)
  35. super().__init__(output_dim, **kwargs)
  36. self.data = data
  37. self.node_reps = None
  38. self.build()
  39. def build(self) -> None:
  40. self.node_reps = []
  41. for i, nt in enumerate(self.data.node_types):
  42. reps = torch.rand(nt.count, self.output_dim[i])
  43. reps = torch.nn.Parameter(reps)
  44. self.register_parameter('node_reps[%d]' % i, reps)
  45. self.node_reps.append(reps)
  46. def forward(self) -> List[torch.nn.Parameter]:
  47. return self.node_reps
  48. def __repr__(self) -> str:
  49. s = ''
  50. s += 'GNN input layer with output_dim: %s\n' % self.output_dim
  51. s += ' # of node types: %d\n' % len(self.data.node_types)
  52. for nt in self.data.node_types:
  53. s += ' - %s (%d)\n' % (nt.name, nt.count)
  54. return s.strip()
  55. class DecagonLayer(Layer):
  56. def __init__(self, data: Data,
  57. input_dim, output_dim,
  58. keep_prob=1.,
  59. rel_activation=lambda x: x,
  60. layer_activation=torch.nn.functional.relu,
  61. **kwargs):
  62. super().__init__(output_dim, **kwargs)
  63. self.data = data
  64. self.input_dim = input_dim
  65. self.keep_prob = keep_prob
  66. self.rel_activation = rel_activation
  67. self.layer_activation = layer_activation
  68. self.convolutions = None
  69. self.build()
  70. def build(self):
  71. self.convolutions = {}
  72. for key in self.data.relation_types.keys():
  73. adjacency_matrices = \
  74. self.data.get_adjacency_matrices(*key)
  75. self.convolutions[key] = SparseMultiDGCA(self.input_dim,
  76. self.output_dim, adjacency_matrices,
  77. self.keep_prob, self.rel_activation)
  78. # for node_type_row, node_type_col in enumerate(self.data.node_
  79. # if rt.node_type_row == i or rt.node_type_col == i:
  80. def __call__(self, prev_layer_repr):
  81. new_layer_repr = []
  82. for i, nt in enumerate(self.data.node_types):
  83. new_repr = []
  84. for key in self.data.relation_types.keys():
  85. nt_row, nt_col = key
  86. if nt_row != i and nt_col != i:
  87. continue
  88. if nt_row == i:
  89. x = prev_layer_repr[nt_col]
  90. else:
  91. x = prev_layer_repr[nt_row]
  92. conv = self.convolutions[key]
  93. new_repr.append(conv(x))
  94. new_repr = sum(new_repr)
  95. new_layer_repr.append(new_repr)
  96. return new_layer_repr