@@ -16,6 +16,8 @@ def prepare_data():
         adj_mat[adj_mat < .5] = 0
         adj_mat = np.ceil(adj_mat)
         adjacency_matrices.append(adj_mat)
+    print('latent:', latent)
+    print('adjacency_matrices[0]:', adjacency_matrices[0])
     return latent, adjacency_matrices
 
 
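For orientation: the context lines build random 0/1 adjacency matrices by zeroing entries below 0.5 and ceiling the rest to 1; the two `+` lines only add debug output. A minimal sketch of how the whole helper plausibly reads after this hunk — the seed value and the matrix shapes are assumptions, not shown in the diff:

```python
import numpy as np

def prepare_data(n_nodes=10, n_latent=10, n_matrices=5):
    np.random.seed(0)  # assumed: deterministic inputs for the TF/torch comparison tests
    latent = np.random.random((n_nodes, n_latent)).astype(np.float32)
    adjacency_matrices = []
    for _ in range(n_matrices):
        adj_mat = np.random.rand(n_nodes, n_nodes)
        adj_mat[adj_mat < .5] = 0   # zero out roughly half the entries...
        adj_mat = np.ceil(adj_mat)  # ...and round the rest up to 1: a random 0/1 adjacency matrix
        adjacency_matrices.append(adj_mat)
    print('latent:', latent)
    print('adjacency_matrices[0]:', adjacency_matrices[0])
    return latent, adjacency_matrices
```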
@@ -51,6 +53,18 @@ def graph_conv_torch():
     return latent
 
 
+def dropout_graph_conv_activation_torch(keep_prob=1.):
+    torch.random.manual_seed(0)
+    latent, adjacency_matrices = prepare_data()
+    latent = torch.tensor(latent)
+    adj_mat = adjacency_matrices[0]
+    adj_mat = torch.tensor(adj_mat)
+    conv = decagon_pytorch.convolve.DropoutGraphConvActivation(10, 10,
+        adj_mat, keep_prob=keep_prob)
+    latent = conv(latent)
+    return latent
+
+
 def sparse_graph_conv_torch():
     torch.random.manual_seed(0)
     latent, adjacency_matrices = prepare_data()
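The new helper mirrors `graph_conv_torch()` but routes the features through `DropoutGraphConvActivation`, with `keep_prob=1.` as the default so that dropout is a no-op in the deterministic comparisons. Judging by the name, the layer composes dropout, a graph convolution, and a nonlinearity, roughly as below — this is an inferred sketch, not the `decagon_pytorch` source, and the activation choice and dropout placement are assumptions:

```python
import torch

def dropout_graph_conv_activation(x, adj_mat, weight, keep_prob=1.,
                                  activation=torch.sigmoid):
    x = torch.nn.functional.dropout(x, p=1. - keep_prob)  # dropout on input features
    x = torch.mm(x, weight)                               # linear transform: X W
    x = torch.mm(adj_mat, x)                              # neighborhood aggregation: A X W
    return activation(x)
```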
@@ -120,7 +134,7 @@ def test_sparse_graph_conv():
     assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session = tf.Session()))
 
 
-def test_sparse_dropout_grap_conv_activation():
+def test_sparse_dropout_graph_conv_activation():
     for i in range(11):
         keep_prob = i/10. + np.finfo(np.float32).eps
 
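Besides renaming the test (fixing the "grap" typo), note why the context line nudges `keep_prob` by machine epsilon: inverted dropout rescales surviving activations by `1/keep_prob`, so `keep_prob == 0` would divide by zero. A standalone illustration of the failure mode — this is generic inverted dropout, not `decagon_pytorch` code:

```python
import numpy as np

def dropout(x, keep_prob):
    mask = np.random.rand(*x.shape) < keep_prob
    return x * mask / keep_prob  # 1/keep_prob blows up at keep_prob == 0

keep_prob = 0. + np.finfo(np.float32).eps  # smallest safe value, as in the test
```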
@@ -163,3 +177,39 @@ def test_graph_conv():
     latent_sparse = sparse_graph_conv_torch()
 
     assert np.all(latent_dense.detach().numpy() == latent_sparse.detach().numpy())
+
+
+def setup_function(fun):
+    if fun == test_dropout_graph_conv_activation:
+        setup_function.old_dropout = decagon_pytorch.convolve.dropout, \
+            decagon_pytorch.convolve.dropout_sparse
+
+        decagon_pytorch.convolve.dropout = lambda x, keep_prob: x
+        decagon_pytorch.convolve.dropout_sparse = lambda x, keep_prob: x
+
+
+def teardown_function(fun):
+    if fun == test_dropout_graph_conv_activation:
+        decagon_pytorch.convolve.dropout, \
+            decagon_pytorch.convolve.dropout_sparse = \
+                setup_function.old_dropout
+
+
+def test_dropout_graph_conv_activation():
+    for i in range(11):
+        keep_prob = i/10.
+        if keep_prob == 0:
+            keep_prob += np.finfo(np.float32).eps
+        print('keep_prob:', keep_prob)
+
+        latent_dense = dropout_graph_conv_activation_torch(keep_prob)
+        latent_dense = latent_dense.detach().numpy()
+        print('latent_dense:', latent_dense)
+
+        latent_sparse = sparse_dropout_graph_conv_activation_torch(keep_prob)
+        latent_sparse = latent_sparse.detach().numpy()
+        print('latent_sparse:', latent_sparse)
+
+        nonzero = (latent_dense != 0) & (latent_sparse != 0)
+
+        assert np.all(latent_dense[nonzero] == latent_sparse[nonzero])
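`setup_function()` and `teardown_function()` are pytest's per-test module hooks: here they swap the library's `dropout` and `dropout_sparse` for identity functions so the dense and sparse paths see identical, deterministic inputs, then restore the originals (stashed as an attribute on `setup_function`). The same neutralization could be written with pytest's built-in `monkeypatch` fixture, which restores the originals automatically — an alternative sketch, not part of this commit:

```python
import decagon_pytorch.convolve

def test_dropout_graph_conv_activation_monkeypatched(monkeypatch):
    # Replace both dropout variants with identity functions for this test only;
    # monkeypatch undoes the setattr calls on teardown.
    monkeypatch.setattr(decagon_pytorch.convolve, 'dropout',
                        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve, 'dropout_sparse',
                        lambda x, keep_prob: x)
    # ... then run the dense/sparse comparison as in test_dropout_graph_conv_activation()
```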