import torch
from .convolve import DropoutGraphConvActivation
from .data import Data
from .trainprep import PreparedData
from typing import List, Union, Callable
from dataclasses import dataclass

@dataclass
class Convolutions(object):
    """The convolutions contributing to one target node type from one
    source (column) node type: one DropoutGraphConvActivation per relation."""
    node_type_column: int
    convolutions: List[DropoutGraphConvActivation]

class DecagonLayer(torch.nn.Module):
    """One graph-convolution layer over a heterogeneous (multi-relational)
    graph, mapping per-node-type input representations to output ones."""

    def __init__(self,
        input_dim: List[int],
        output_dim: List[int],
        data: Union[Data, PreparedData],
        keep_prob: float = 1.,
        rel_activation: Callable[[torch.Tensor], torch.Tensor] = lambda x: x,
        layer_activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
        **kwargs):

        super().__init__(**kwargs)

        if not isinstance(input_dim, list):
            raise ValueError('input_dim must be a list')

        if not output_dim:
            raise ValueError('output_dim must be specified')

        # A scalar output_dim is broadcast to all node types.
        if not isinstance(output_dim, list):
            output_dim = [output_dim] * len(data.node_types)

        if not isinstance(data, (Data, PreparedData)):
            raise ValueError('data must be of type Data or PreparedData')

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.data = data
        self.keep_prob = float(keep_prob)
        self.rel_activation = rel_activation
        self.layer_activation = layer_activation

        self.is_sparse = False
        self.next_layer_repr = None
        self.build()

    def build(self):
        # For every (row, column) node-type pair that has at least one
        # relation, create one convolution per relation and group them
        # by the target (row) node type.
        n = len(self.data.node_types)
        rel_types = self.data.relation_types
        self.next_layer_repr = [[] for _ in range(n)]

        for node_type_row in range(n):
            if node_type_row not in rel_types:
                continue

            for node_type_column in range(n):
                if node_type_column not in rel_types[node_type_row]:
                    continue

                rels = rel_types[node_type_row][node_type_column]
                if len(rels) == 0:
                    continue

                convolutions = []
                for r in rels:
                    conv = DropoutGraphConvActivation(self.input_dim[node_type_column],
                        self.output_dim[node_type_row], r.adjacency_matrix,
                        self.keep_prob, self.rel_activation)
                    convolutions.append(conv)

                self.next_layer_repr[node_type_row].append(
                    Convolutions(node_type_column, convolutions))

    def __call__(self, prev_layer_repr):
        # Note: defining __call__ directly (rather than forward()) bypasses
        # torch.nn.Module's hook machinery.
        n = len(self.data.node_types)
        next_layer_repr = [[] for _ in range(n)]

        for node_type_row in range(n):
            for convolutions in self.next_layer_repr[node_type_row]:
                # Sum the per-relation convolutions of one source node type,
                # then L2-normalize each node's representation.
                repr_ = [conv(prev_layer_repr[convolutions.node_type_column])
                    for conv in convolutions.convolutions]
                repr_ = sum(repr_)
                repr_ = torch.nn.functional.normalize(repr_, p=2, dim=1)
                next_layer_repr[node_type_row].append(repr_)
            # Combine the contributions from all source node types and
            # apply the layer activation.
            next_layer_repr[node_type_row] = sum(next_layer_repr[node_type_row])
            next_layer_repr[node_type_row] = self.layer_activation(next_layer_repr[node_type_row])

        return next_layer_repr
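
For context when reading the layer above, here is a minimal usage sketch. It is not part of this file: it assumes the project's Data class exposes add_node_type(name, count) and add_relation_type(name, node_type_row, node_type_column, adjacency_matrix) helpers that populate node_types and relation_types in the nested row -> column -> relations form that build() iterates over. Treat those helper names and signatures, and the module path, as assumptions rather than this file's guaranteed API.

import torch
from .data import Data
from .layer import DecagonLayer  # assumed module path for the file above

d = Data()
d.add_node_type('Gene', 100)  # toy sizes
d.add_node_type('Drug', 20)

# Toy within-type relations; real adjacency matrices would come from the
# graph (identity is used here only to keep the sketch self-contained).
d.add_relation_type('Gene-Gene', 0, 0, torch.eye(100))
d.add_relation_type('Drug-Drug', 1, 1, torch.eye(20))

layer = DecagonLayer(input_dim=[32, 32], output_dim=[64, 64], data=d)

# One feature matrix per node type, shaped (num_nodes, input_dim).
prev_layer_repr = [torch.rand(100, 32), torch.rand(20, 32)]
next_layer_repr = layer(prev_layer_repr)  # list of (num_nodes, output_dim) tensors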