import decagon_pytorch.convolve
import decagon.deep.layers
import torch
import tensorflow as tf
import numpy as np
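
# The tests below cross-check the PyTorch implementation (decagon_pytorch.convolve) against
# equivalent TensorFlow graph-convolution code. The TF side relies on graph-mode constructs
# (tf.Session, .eval(), tf.sparse_retain), so it presumably targets TensorFlow 1.x.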


def prepare_data():
    np.random.seed(0)

    latent = np.random.random((5, 10)).astype(np.float32)
    latent[latent < .5] = 0
    latent = np.ceil(latent)

    adjacency_matrices = []
    for _ in range(5):
        adj_mat = np.random.random((len(latent),) * 2).astype(np.float32)
        adj_mat[adj_mat < .5] = 0
        adj_mat = np.ceil(adj_mat)
        adjacency_matrices.append(adj_mat)

    print('latent:', latent)
    print('adjacency_matrices[0]:', adjacency_matrices[0])

    return latent, adjacency_matrices
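
# prepare_data() above yields a binary (0/1) feature matrix `latent` of shape (5, 10) and
# five binary (5, 5) adjacency matrices: thresholding uniform noise at 0.5 and taking
# np.ceil leaves roughly half of the entries equal to 1.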


def dense_to_sparse_tf(x):
    a, b = np.where(x)
    indices = np.array([a, b]).T
    values = x[a, b]
    return tf.sparse.SparseTensor(indices, values, x.shape)
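
# A small, optional sanity check for the converter above (not part of the original tests):
# the SparseTensor built from the np.where() coordinates should densify back to the input
# array. It assumes the same TF 1.x graph-mode setup (tf.Session) used throughout this file.
def _example_dense_to_sparse_roundtrip():
    x = np.eye(3, dtype=np.float32)
    sp = dense_to_sparse_tf(x)
    with tf.Session() as sess:
        assert np.all(sess.run(tf.sparse_tensor_to_dense(sp)) == x)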


def dropout_sparse_tf(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    random_tensor += tf.convert_to_tensor(torch.rand(noise_shape).detach().numpy())
    # tf.convert_to_tensor(np.random.random(noise_shape))
    # tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)
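
# How the mask above works: keep_prob + U[0, 1) floors to 1 with probability keep_prob and
# to 0 otherwise, so tf.floor() yields an independent Bernoulli(keep_prob) draw for every
# nonzero element, tf.sparse_retain keeps exactly those elements, and the 1/keep_prob
# rescaling (inverted dropout) keeps the expected value unchanged. A NumPy-only sketch of
# the same trick (illustrative only, not used by the tests):
def _example_bernoulli_mask(keep_prob=0.8, n=10000):
    mask = np.floor(keep_prob + np.random.random(n)).astype(bool)
    return mask.mean()   # close to keep_prob for large n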


def dense_graph_conv_torch():
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent)
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat)
    conv = decagon_pytorch.convolve.DenseGraphConv(10, 10,
        adj_mat)
    latent = conv(latent)
    return latent


def dense_dropout_graph_conv_activation_torch(keep_prob=1.):
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent)
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat)
    conv = decagon_pytorch.convolve.DenseDropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent


def sparse_graph_conv_torch():
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    print('latent.dtype:', latent.dtype)
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    print('adj_mat.dtype:', adj_mat.dtype,
        'latent.dtype:', latent.dtype)
    conv = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        adj_mat)
    latent = conv(latent)
    return latent


def sparse_graph_conv_tf():
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        torch.tensor(adjacency_matrices[0]).to_sparse())
    weight = tf.constant(conv_torch.weight.detach().numpy())
    latent = dense_to_sparse_tf(latent)
    adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
    latent = tf.sparse_tensor_dense_matmul(latent, weight)
    latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
    return latent
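
# sparse_graph_conv_torch() and sparse_graph_conv_tf() above are intended to compute the same
# un-activated graph convolution, latent = A @ (X @ W), with the TF branch reusing the weight
# matrix of the torch layer; test_sparse_graph_conv() below checks the two outputs for equality.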


def sparse_dropout_graph_conv_activation_torch(keep_prob=1.):
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    conv = decagon_pytorch.convolve.SparseDropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent


def sparse_dropout_graph_conv_activation_tf(keep_prob=1.):
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        torch.tensor(adjacency_matrices[0]).to_sparse())
    weight = tf.constant(conv_torch.weight.detach().numpy())
    nonzero_feat = np.sum(latent > 0)
    latent = dense_to_sparse_tf(latent)
    latent = dropout_sparse_tf(latent, keep_prob,
        nonzero_feat)
    adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
    latent = tf.sparse_tensor_dense_matmul(latent, weight)
    latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
    latent = tf.nn.relu(latent)
    return latent
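
# The TF reference above mirrors its torch counterpart: relu(A @ (dropout(X) @ W)). Note that
# dropout_sparse_tf() draws its random mask through torch.rand, so with the same torch seed
# both implementations should drop the same elements and remain comparable element-wise.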


def test_sparse_graph_conv():
    latent_torch = sparse_graph_conv_torch()
    latent_tf = sparse_graph_conv_tf()
    assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session=tf.Session()))


def test_sparse_dropout_graph_conv_activation():
    for i in range(11):
        keep_prob = i/10. + np.finfo(np.float32).eps

        latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob)
        latent_tf = sparse_dropout_graph_conv_activation_tf(keep_prob)

        latent_torch = latent_torch.detach().numpy()
        latent_tf = latent_tf.eval(session=tf.Session())

        print('latent_torch:', latent_torch)
        print('latent_tf:', latent_tf)

        # compare within a small absolute tolerance
        assert np.all(np.abs(latent_torch - latent_tf) < .000001)


def test_sparse_multi_dgca():
    latent_torch = None
    latent_tf = []

    for i in range(11):
        keep_prob = i/10. + np.finfo(np.float32).eps

        latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob) \
            if latent_torch is None \
            else latent_torch + sparse_dropout_graph_conv_activation_torch(keep_prob)

        latent_tf.append(sparse_dropout_graph_conv_activation_tf(keep_prob))

    latent_torch = torch.nn.functional.normalize(latent_torch, p=2, dim=1)
    latent_tf = tf.add_n(latent_tf)
    latent_tf = tf.nn.l2_normalize(latent_tf, dim=1)

    latent_torch = latent_torch.detach().numpy()
    latent_tf = latent_tf.eval(session=tf.Session())

    # compare within a small absolute tolerance
    assert np.all(np.abs(latent_torch - latent_tf) < .000001)
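
# test_sparse_multi_dgca() above accumulates the activation outputs over all keep_prob values
# and then L2-normalizes each row, via torch.nn.functional.normalize(p=2, dim=1) on the torch
# side and tf.add_n followed by tf.nn.l2_normalize(dim=1) on the TF side, before comparing the
# two results within a 1e-6 tolerance.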


def test_graph_conv():
    latent_dense = dense_graph_conv_torch()
    latent_sparse = sparse_graph_conv_torch()
    assert np.all(latent_dense.detach().numpy() == latent_sparse.detach().numpy())


# def setup_function(fun):
#     if fun == test_dropout_graph_conv_activation or \
#         fun == test_multi_dgca:
#         print('Disabling dropout for testing...')
#         setup_function.old_dropout = decagon_pytorch.convolve.dropout, \
#             decagon_pytorch.convolve.dropout_sparse
#
#         decagon_pytorch.convolve.dropout = lambda x, keep_prob: x
#         decagon_pytorch.convolve.dropout_sparse = lambda x, keep_prob: x
#
#
# def teardown_function(fun):
#     print('Re-enabling dropout...')
#     if fun == test_dropout_graph_conv_activation or \
#         fun == test_multi_dgca:
#         decagon_pytorch.convolve.dropout, \
#             decagon_pytorch.convolve.dropout_sparse = \
#             setup_function.old_dropout


def flexible_dropout_graph_conv_activation_torch(keep_prob=1.):
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    conv = decagon_pytorch.convolve.DropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent


def _disable_dropout(monkeypatch):
    monkeypatch.setattr(decagon_pytorch.convolve.dense, 'dropout',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.sparse, 'dropout_sparse',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.universal, 'dropout',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.universal, 'dropout_sparse',
        lambda x, keep_prob: x)
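
# The monkeypatching above replaces every dropout entry point with the identity function.
# This is presumably needed because the dense and sparse code paths draw their random masks
# differently, so with dropout active their outputs would not match even under the same seed;
# the equality checks in the tests below only make sense with dropout switched off.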


def test_dropout_graph_conv_activation(monkeypatch):
    _disable_dropout(monkeypatch)

    for i in range(11):
        keep_prob = i/10.
        if keep_prob == 0:
            keep_prob += np.finfo(np.float32).eps
        print('keep_prob:', keep_prob)

        latent_dense = dense_dropout_graph_conv_activation_torch(keep_prob)
        latent_dense = latent_dense.detach().numpy()
        print('latent_dense:', latent_dense)

        latent_sparse = sparse_dropout_graph_conv_activation_torch(keep_prob)
        latent_sparse = latent_sparse.detach().numpy()
        print('latent_sparse:', latent_sparse)

        latent_flex = flexible_dropout_graph_conv_activation_torch(keep_prob)
        latent_flex = latent_flex.detach().numpy()
        print('latent_flex:', latent_flex)

        nonzero = (latent_dense != 0) & (latent_sparse != 0)
        assert np.all(latent_dense[nonzero] == latent_sparse[nonzero])

        nonzero = (latent_dense != 0) & (latent_flex != 0)
        assert np.all(latent_dense[nonzero] == latent_flex[nonzero])

        nonzero = (latent_sparse != 0) & (latent_flex != 0)
        assert np.all(latent_sparse[nonzero] == latent_flex[nonzero])


def test_multi_dgca(monkeypatch):
    _disable_dropout(monkeypatch)

    keep_prob = .5

    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()

    latent_sparse = torch.tensor(latent).to_sparse()
    latent = torch.tensor(latent)
    assert np.all(latent_sparse.to_dense().numpy() == latent.numpy())

    adjacency_matrices_sparse = [ torch.tensor(a).to_sparse() for a in adjacency_matrices ]
    adjacency_matrices = [ torch.tensor(a) for a in adjacency_matrices ]
    for i in range(len(adjacency_matrices)):
        assert np.all(adjacency_matrices[i].numpy() == adjacency_matrices_sparse[i].to_dense().numpy())

    torch.random.manual_seed(0)
    multi_sparse = decagon_pytorch.convolve.SparseMultiDGCA([10,] * len(adjacency_matrices), 10,
        adjacency_matrices_sparse, keep_prob=keep_prob)

    torch.random.manual_seed(0)
    multi = decagon_pytorch.convolve.DenseMultiDGCA([10,] * len(adjacency_matrices), 10,
        adjacency_matrices, keep_prob=keep_prob)

    print('len(adjacency_matrices):', len(adjacency_matrices))
    print('len(multi_sparse.sparse_dgca):', len(multi_sparse.sparse_dgca))
    print('len(multi.dgca):', len(multi.dgca))

    for i in range(len(adjacency_matrices)):
        assert np.all(multi_sparse.sparse_dgca[i].sparse_graph_conv.weight.detach().numpy() ==
            multi.dgca[i].graph_conv.weight.detach().numpy())

    # torch.random.manual_seed(0)
    latent_sparse = multi_sparse([latent_sparse,] * len(adjacency_matrices))

    # torch.random.manual_seed(0)
    latent = multi([latent,] * len(adjacency_matrices))

    assert np.all(latent_sparse.detach().numpy() == latent.detach().numpy())
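
# These comparisons are written as pytest tests; running pytest with the -s flag makes the
# print() diagnostics emitted above visible.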