
Add test_dropout_graph_conv_activation().

master
Stanislaw Adaszewski, 4 years ago
Commit 8f41021d69
3 changed files with 67 additions and 3 deletions
  1. src/decagon_pytorch/convolve.py (+3 −2)
  2. src/decagon_pytorch/dropout.py (+13 −0)
  3. tests/decagon_pytorch/test_convolve.py (+51 −1)

src/decagon_pytorch/convolve.py (+3 −2)

@@ -1,5 +1,6 @@
 import torch
-from .dropout import dropout_sparse
+from .dropout import dropout_sparse, \
+    dropout
 from .weights import init_glorot
@@ -79,7 +80,7 @@ class DropoutGraphConvActivation(torch.nn.Module):
         self.activation = activation

     def forward(self, x):
-        x = torch.nn.functional.dropout(x, 1.-self.keep_prob)
+        x = dropout(x, keep_prob=self.keep_prob)
        x = self.graph_conv(x)
        x = self.activation(x)
        return x
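Note: forward() now routes dense inputs through the repo's own dropout() instead of torch.nn.functional.dropout, so the dense layer applies the same hand-rolled inverted dropout as the sparse path. A minimal usage sketch, assuming decagon_pytorch is importable and inferring the (input_dim, output_dim, adjacency_matrix) constructor signature from the tests below:

import torch
import decagon_pytorch.convolve

torch.random.manual_seed(0)
adj_mat = torch.eye(10)   # toy 10-node graph with self-loops only
layer = decagon_pytorch.convolve.DropoutGraphConvActivation(
    10, 10, adj_mat, keep_prob=0.5)
out = layer(torch.rand(10, 10))   # dropout -> graph convolution -> activation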


src/decagon_pytorch/dropout.py (+13 −0)

@@ -16,3 +16,16 @@ def dropout_sparse(x, keep_prob):
     x = torch.sparse_coo_tensor(i, v, size=size)
     return x * (1./keep_prob)
+
+
+def dropout(x, keep_prob):
+    """Dropout for dense tensors.
+    """
+    shape = x.shape
+    x = torch.flatten(x)
+    n = keep_prob + torch.rand(len(x))
+    n = (1. - torch.floor(n)).to(torch.bool)
+    x[n] = 0
+    x = torch.reshape(x, shape)
+    # x = torch.nn.functional.dropout(x, p=1.-keep_prob)
+    return x * (1./keep_prob)
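The new dense dropout() mirrors dropout_sparse(): keep_prob + torch.rand(n) floors to 1 with probability keep_prob, so the boolean mask n marks the entries to zero, and the survivors are rescaled by 1/keep_prob (inverted dropout, which keeps each entry's expected value unchanged). A minimal sketch of the behavior, assuming decagon_pytorch is importable:

import torch
from decagon_pytorch.dropout import dropout

torch.random.manual_seed(0)
x = torch.ones(4, 4)
y = dropout(x, keep_prob=0.5)
# On average half the entries are zeroed; the rest become 1/0.5 == 2.0,
# so the expected value of every entry stays 1.0.
print(y)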

tests/decagon_pytorch/test_convolve.py (+51 −1)

@@ -16,6 +16,8 @@ def prepare_data():
         adj_mat[adj_mat < .5] = 0
         adj_mat = np.ceil(adj_mat)
         adjacency_matrices.append(adj_mat)
+    print('latent:', latent)
+    print('adjacency_matrices[0]:', adjacency_matrices[0])
     return latent, adjacency_matrices
@@ -51,6 +53,18 @@ def graph_conv_torch():
     return latent


+def dropout_graph_conv_activation_torch(keep_prob=1.):
+    torch.random.manual_seed(0)
+    latent, adjacency_matrices = prepare_data()
+    latent = torch.tensor(latent)
+    adj_mat = adjacency_matrices[0]
+    adj_mat = torch.tensor(adj_mat)
+    conv = decagon_pytorch.convolve.DropoutGraphConvActivation(10, 10,
+        adj_mat, keep_prob=keep_prob)
+    latent = conv(latent)
+    return latent
+
+
 def sparse_graph_conv_torch():
     torch.random.manual_seed(0)
     latent, adjacency_matrices = prepare_data()
@@ -120,7 +134,7 @@ def test_sparse_graph_conv():
     assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session = tf.Session()))


-def test_sparse_dropout_grap_conv_activation():
+def test_sparse_dropout_graph_conv_activation():
     for i in range(11):
         keep_prob = i/10. + np.finfo(np.float32).eps
@@ -163,3 +177,39 @@ def test_graph_conv():
     latent_sparse = sparse_graph_conv_torch()

     assert np.all(latent_dense.detach().numpy() == latent_sparse.detach().numpy())
+
+
+def setup_function(fun):
+    if fun == test_dropout_graph_conv_activation:
+        setup_function.old_dropout = decagon_pytorch.convolve.dropout, \
+            decagon_pytorch.convolve.dropout_sparse
+        decagon_pytorch.convolve.dropout = lambda x, keep_prob: x
+        decagon_pytorch.convolve.dropout_sparse = lambda x, keep_prob: x
+
+
+def teardown_function(fun):
+    if fun == test_dropout_graph_conv_activation:
+        decagon_pytorch.convolve.dropout, \
+            decagon_pytorch.convolve.dropout_sparse = \
+            setup_function.old_dropout
+
+
+def test_dropout_graph_conv_activation():
+    for i in range(11):
+        keep_prob = i/10.
+        if keep_prob == 0:
+            keep_prob += np.finfo(np.float32).eps
+        print('keep_prob:', keep_prob)
+
+        latent_dense = dropout_graph_conv_activation_torch(keep_prob)
+        latent_dense = latent_dense.detach().numpy()
+        print('latent_dense:', latent_dense)
+
+        latent_sparse = sparse_dropout_graph_conv_activation_torch(keep_prob)
+        latent_sparse = latent_sparse.detach().numpy()
+        print('latent_sparse:', latent_sparse)
+
+        nonzero = (latent_dense != 0) & (latent_sparse != 0)
+
+        assert np.all(latent_dense[nonzero] == latent_sparse[nonzero])
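setup_function() and teardown_function() are pytest hooks that run around every test in the module; the guard on fun restricts the stubbing to test_dropout_graph_conv_activation, where both dropout variants are replaced with the identity so the dense and sparse layers can be compared deterministically on their jointly nonzero entries. The same stubbing could also be done with pytest's built-in monkeypatch fixture, which restores the originals automatically; a minimal sketch (test body elided):

import decagon_pytorch.convolve

def test_dropout_graph_conv_activation(monkeypatch):
    monkeypatch.setattr(decagon_pytorch.convolve, 'dropout',
                        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve, 'dropout_sparse',
                        lambda x, keep_prob: x)
    # ... same loop and assertions as above ...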
