#
# Copyright (C) Stanislaw Adaszewski, 2020
# License: GPLv3
#
"""
This module implements the basic convolutional blocks of Decagon.
Just as a quick reminder, the basic convolution formula here is:
y = A * (x * W)
where:
W is a weight matrix
A is an adjacency matrix
x is a matrix of latent representations of a particular type of neighbors.
As we have x here twice, a trick is obviously necessary for this to work.
A must be previously normalized with:
c_{r}^{ij} = 1/sqrt(|N_{r}^{i}| |N_{r}^{j}|)
or
c_{r}^{i} = 1/|N_{r}^{i}|
Let's work through this step by step to convince ourselves that the
formula is correct.
x = [
    [0, 1, 0, 1],
    [1, 1, 1, 0],
    [0, 0, 0, 1]
]
W = [
    [0, 1],
    [1, 0],
    [0.5, 0.5],
    [0.25, 0.75]
]
A = [
    [0, 1, 0],
    [1, 0, 1],
    [0, 1, 0]
]
so the graph looks like this:
(0) -- (1) -- (2)
and therefore the representations in the next layer should be:
h_{0}^{k+1} = c_{r}^{0,1} * h_{1}^{k} * W + c_{r}^{0} * h_{0}^{k}
h_{1}^{k+1} = c_{r}^{0,1} * h_{0}^{k} * W + c_{r}^{2,1} * h_{2}^{k} * W +
    c_{r}^{1} * h_{1}^{k}
h_{2}^{k+1} = c_{r}^{2,1} * h_{1}^{k} * W + c_{r}^{2} * h_{2}^{k}
In the actual Decagon code we can see that the latter part, which propagates
the old representation directly, is gone. I will try to do the same for now.
So we have to only take care of:
h_{0}^{k+1} = c_{r}^{0,1} * h_{1}^{k} * W
h_{1}^{k+1} = c_{r}^{0,1} * h_{0}^{k} * W + c_{r}^{2,1} * h_{2}^{k} * W
h_{2}^{k+1} = c_{r}^{2,1} * h_{1}^{k} * W
If A is square, Decagon's EdgeMinibatchIterator preprocesses it as follows:
A = A + eye(len(A))
rowsum = A.sum(1)
deg_mat_inv_sqrt = diags(power(rowsum, -0.5))
A = dot(A, deg_mat_inv_sqrt)
A = A.transpose()
A = A.dot(deg_mat_inv_sqrt)
Let's see what this gives in our case:
A = A + eye(len(A))
[
    [1, 1, 0],
    [1, 1, 1],
    [0, 1, 1]
]
rowsum = A.sum(1)
[2, 3, 2]
deg_mat_inv_sqrt = diags(power(rowsum, -0.5))
[
    [1./sqrt(2), 0,  0],
    [0, 1./sqrt(3),  0],
    [0,  0, 1./sqrt(2)]
]
A = dot(A, deg_mat_inv_sqrt)
[
    [ 1/sqrt(2), 1/sqrt(3),         0 ],
    [ 1/sqrt(2), 1/sqrt(3), 1/sqrt(2) ],
    [         0, 1/sqrt(3), 1/sqrt(2) ]
]
A = A.transpose()
[
    [ 1/sqrt(2), 1/sqrt(2),         0 ],
    [ 1/sqrt(3), 1/sqrt(3), 1/sqrt(3) ],
    [         0, 1/sqrt(2), 1/sqrt(2) ]
]
A = A.dot(deg_mat_inv_sqrt)
[
    [ 1/sqrt(2) * 1/sqrt(2),   1/sqrt(2) * 1/sqrt(3),                       0 ],
    [ 1/sqrt(3) * 1/sqrt(2),   1/sqrt(3) * 1/sqrt(3),   1/sqrt(3) * 1/sqrt(2) ],
    [                     0,   1/sqrt(2) * 1/sqrt(3),   1/sqrt(2) * 1/sqrt(2) ],
]
thus:
[
    [0.5       , 0.40824829, 0.        ],
    [0.40824829, 0.33333333, 0.40824829],
    [0.        , 0.40824829, 0.5       ]
]
This checks out with the 1/sqrt(|N_{r}^{i}| |N_{r}^{j}|) formula.
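For reference, this preprocessing can be reproduced with scipy.sparse and
numpy (a minimal sketch, assuming scipy and numpy are available; it is not
part of this module):
    import numpy as np
    from scipy.sparse import coo_matrix, diags, eye
    A = coo_matrix(np.array([[0., 1, 0], [1, 0, 1], [0, 1, 0]]))
    A = A + eye(A.shape[0])
    rowsum = np.array(A.sum(1)).flatten()
    deg_mat_inv_sqrt = diags(np.power(rowsum, -0.5))
    A = A.dot(deg_mat_inv_sqrt).transpose().dot(deg_mat_inv_sqrt)
    print(A.todense())  # matches the matrix above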
Then, we get back to the main calculation, which consists of two steps:
y = x * W
y = A * y
First, y = x * W gives:
[
    [ 1.25, 0.75 ],
    [ 1.5 , 1.5  ],
    [ 0.25, 0.75 ]
]
then y = A * y gives:
[
    0.5 * [ 1.25, 0.75 ] + 0.40824829 * [ 1.5, 1.5 ],
    0.40824829 * [ 1.25, 0.75 ] + 0.33333333 * [ 1.5, 1.5 ] + 0.40824829 * [ 0.25, 0.75 ],
    0.40824829 * [ 1.5, 1.5 ] + 0.5 * [ 0.25, 0.75 ]
]
that is:
[
    [1.23737243, 0.98737244],
    [1.11237243, 1.11237243],
    [0.73737244, 0.98737244]
].
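The same result can be reproduced end to end with plain torch (again just a
sanity-check sketch, using the normalized A from above):
    import torch
    x = torch.tensor([[0., 1, 0, 1], [1, 1, 1, 0], [0, 0, 0, 1]])
    W = torch.tensor([[0., 1], [1, 0], [0.5, 0.5], [0.25, 0.75]])
    A = torch.tensor([[0.5, 0.40824829, 0.],
                      [0.40824829, 0.33333333, 0.40824829],
                      [0., 0.40824829, 0.5]])
    y = A @ (x @ W)
    # tensor([[1.2374, 0.9874],
    #         [1.1124, 1.1124],
    #         [0.7374, 0.9874]])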
All checks out nicely, good.
"""
import torch
from .dropout import dropout_sparse, dropout
from .weights import init_glorot
from typing import List, Callable
class SparseGraphConv(torch.nn.Module):
    """Convolution layer for sparse inputs."""
    def __init__(self, in_channels: int, out_channels: int,
        adjacency_matrix: torch.Tensor, **kwargs) -> None:
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = torch.nn.Parameter(init_glorot(in_channels, out_channels))
        self.adjacency_matrix = adjacency_matrix
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = torch.sparse.mm(x, self.weight)
        x = torch.sparse.mm(self.adjacency_matrix, x)
        return x
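
# A minimal usage sketch for SparseGraphConv (illustrative values only; the
# adjacency matrix is assumed to be pre-normalized as described in the module
# docstring):
#
#     adj = torch.eye(3).to_sparse()
#     conv = SparseGraphConv(4, 2, adj)
#     out = conv(torch.rand(3, 4).to_sparse())  # dense tensor of shape (3, 2)
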
class SparseDropoutGraphConvActivation(torch.nn.Module):
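    """Sparse dropout followed by a sparse graph convolution and an activation."""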
    def __init__(self, input_dim: int, output_dim: int,
        adjacency_matrix: torch.Tensor, keep_prob: float=1.,
        activation: Callable[[torch.Tensor], torch.Tensor]=torch.nn.functional.relu,
        **kwargs) -> None:
        super().__init__(**kwargs)
        self.sparse_graph_conv = SparseGraphConv(input_dim, output_dim, adjacency_matrix)
        self.keep_prob = keep_prob
        self.activation = activation
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = dropout_sparse(x, self.keep_prob)
        x = self.sparse_graph_conv(x)
        x = self.activation(x)
        return x
class SparseMultiDGCA(torch.nn.Module):
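    """Sum of per-modality SparseDropoutGraphConvActivation outputs, L2-normalized row-wise."""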
    def __init__(self, input_dim: List[int], output_dim: int,
        adjacency_matrices: List[torch.Tensor], keep_prob: float=1.,
        activation: Callable[[torch.Tensor], torch.Tensor]=torch.nn.functional.relu,
        **kwargs) -> None:
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.adjacency_matrices = adjacency_matrices
        self.keep_prob = keep_prob
        self.activation = activation
        self.sparse_dgca = None
        self.build()
    def build(self):
        if len(self.input_dim) != len(self.adjacency_matrices):
            raise ValueError('input_dim must have the same length as adjacency_matrices')
        self.sparse_dgca = torch.nn.ModuleList()
        for input_dim, adj_mat in zip(self.input_dim, self.adjacency_matrices):
            self.sparse_dgca.append(SparseDropoutGraphConvActivation(input_dim, self.output_dim, adj_mat, self.keep_prob, self.activation))
    def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
        if not isinstance(x, list):
            raise ValueError('x must be a list of tensors')
        out = torch.zeros(len(x[0]), self.output_dim, dtype=x[0].dtype)
        for i, f in enumerate(self.sparse_dgca):
            out += f(x[i])
        out = torch.nn.functional.normalize(out, p=2, dim=1)
        return out
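
# A minimal usage sketch for SparseMultiDGCA (hypothetical shapes; one
# adjacency matrix and one sparse feature matrix per input modality, all
# mapping onto the same 3 output nodes):
#
#     adjs = [torch.eye(3).to_sparse(), torch.eye(3).to_sparse()]
#     xs = [torch.rand(3, 4).to_sparse(), torch.rand(3, 5).to_sparse()]
#     multi = SparseMultiDGCA([4, 5], 2, adjs)
#     out = multi(xs)  # dense tensor of shape (3, 2), rows L2-normalized
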
class GraphConv(torch.nn.Module):
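    """Convolution layer for dense inputs."""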
    def __init__(self, in_channels: int, out_channels: int,
        adjacency_matrix: torch.Tensor, **kwargs) -> None:
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = torch.nn.Parameter(init_glorot(in_channels, out_channels))
        self.adjacency_matrix = adjacency_matrix
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = torch.mm(x, self.weight)
        x = torch.mm(self.adjacency_matrix, x)
        return x
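
# The dense variant can reproduce the worked example from the module docstring
# if the Glorot-initialized weight is overridden by hand (a sanity-check
# sketch, not something the layer does on its own):
#
#     A = torch.tensor([[0.5, 0.40824829, 0.],
#                       [0.40824829, 0.33333333, 0.40824829],
#                       [0., 0.40824829, 0.5]])
#     conv = GraphConv(4, 2, A)
#     conv.weight.data = torch.tensor([[0., 1], [1, 0], [0.5, 0.5], [0.25, 0.75]])
#     x = torch.tensor([[0., 1, 0, 1], [1, 1, 1, 0], [0, 0, 0, 1]])
#     conv(x)  # ~ [[1.2374, 0.9874], [1.1124, 1.1124], [0.7374, 0.9874]]
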
class DropoutGraphConvActivation(torch.nn.Module):
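    """Dropout followed by a dense graph convolution and an activation."""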
    def __init__(self, input_dim: int, output_dim: int,
        adjacency_matrix: torch.Tensor, keep_prob: float=1.,
        activation: Callable[[torch.Tensor], torch.Tensor]=torch.nn.functional.relu,
        **kwargs) -> None:
        super().__init__(**kwargs)
        self.graph_conv = GraphConv(input_dim, output_dim, adjacency_matrix)
        self.keep_prob = keep_prob
        self.activation = activation
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = dropout(x, keep_prob=self.keep_prob)
        x = self.graph_conv(x)
        x = self.activation(x)
        return x
class MultiDGCA(torch.nn.Module):
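    """Dense counterpart of SparseMultiDGCA: summed per-modality outputs, L2-normalized row-wise."""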
    def __init__(self, input_dim: List[int], output_dim: int,
        adjacency_matrices: List[torch.Tensor], keep_prob: float=1.,
        activation: Callable[[torch.Tensor], torch.Tensor]=torch.nn.functional.relu,
        **kwargs) -> None:
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.adjacency_matrices = adjacency_matrices
        self.keep_prob = keep_prob
        self.activation = activation
        self.dgca = None
        self.build()
    def build(self):
        if len(self.input_dim) != len(self.adjacency_matrices):
            raise ValueError('input_dim must have the same length as adjacency_matrices')
        self.dgca = torch.nn.ModuleList()
        for input_dim, adj_mat in zip(self.input_dim, self.adjacency_matrices):
            self.dgca.append(DropoutGraphConvActivation(input_dim, self.output_dim, adj_mat, self.keep_prob, self.activation))
    def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
        if not isinstance(x, list):
            raise ValueError('x must be a list of tensors')
        out = torch.zeros(len(x[0]), self.output_dim, dtype=x[0].dtype)
        for i, f in enumerate(self.dgca):
            out += f(x[i])
        out = torch.nn.functional.normalize(out, p=2, dim=1)
        return out