@@ -23,7 +23,9 @@
 import torch
 from .convolve import SparseMultiDGCA
 from .data import Data
-from typing import List, Union
+from typing import List, \
+    Union, \
+    Callable
 
 
 class Layer(torch.nn.Module):
@@ -65,15 +67,20 @@ class InputLayer(Layer):
 class DecagonLayer(Layer):
-    def __init__(self, data: Data,
-        input_dim, output_dim,
-        keep_prob=1.,
-        rel_activation=lambda x: x,
-        layer_activation=torch.nn.functional.relu,
+    def __init__(self,
+        data: Data,
+        previous_layer: Layer,
+        output_dim: Union[int, List[int]],
+        keep_prob: float = 1.,
+        rel_activation: Callable[[torch.Tensor], torch.Tensor] = lambda x: x,
+        layer_activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
         **kwargs):
         if not isinstance(output_dim, list):
             output_dim = [ output_dim ] * len(data.node_types)
         super().__init__(output_dim, **kwargs)
         self.data = data
-        self.input_dim = input_dim
+        self.previous_layer = previous_layer
+        self.input_dim = previous_layer.output_dim
         self.keep_prob = keep_prob
         self.rel_activation = rel_activation
         self.layer_activation = layer_activation
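
With this change, DecagonLayer no longer takes an explicit input_dim: the layer is wired to the layer that feeds it and derives its input width from previous_layer.output_dim. A minimal usage sketch of the new constructor follows; the package import path, the InputLayer constructor signature, and how the Data object is populated are assumptions for illustration, not part of this patch:

    # Sketch only: assumes `d` is an already-populated Data instance and that
    # InputLayer(data, output_dim) matches the InputLayer defined earlier in this module.
    d = Data()                       # node types / relation matrices added elsewhere
    inp = InputLayer(d, 32)          # assumed signature; 32-dim input representations
    hidden = DecagonLayer(d, previous_layer=inp, output_dim=64, keep_prob=.5)

    # input_dim is now resolved inside __init__ rather than passed by the caller:
    # hidden.input_dim == inp.output_dim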