IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!
Nevar pievienot vairāk kā 25 tēmas. Tēmai ir jāsākas ar burtu vai ciparu, tā var saturēt domu zīmes ('-') un var būt līdz 35 simboliem gara.

166 rindas
5.5KB

  1. import decagon_pytorch.convolve
  2. import decagon.deep.layers
  3. import torch
  4. import tensorflow as tf
  5. import numpy as np
  6. def prepare_data():
  7. np.random.seed(0)
  8. latent = np.random.random((5, 10)).astype(np.float32)
  9. latent[latent < .5] = 0
  10. latent = np.ceil(latent)
  11. adjacency_matrices = []
  12. for _ in range(5):
  13. adj_mat = np.random.random((len(latent),) * 2).astype(np.float32)
  14. adj_mat[adj_mat < .5] = 0
  15. adj_mat = np.ceil(adj_mat)
  16. adjacency_matrices.append(adj_mat)
  17. return latent, adjacency_matrices
  18. def dense_to_sparse_tf(x):
  19. a, b = np.where(x)
  20. indices = np.array([a, b]).T
  21. values = x[a, b]
  22. return tf.sparse.SparseTensor(indices, values, x.shape)
  23. def dropout_sparse_tf(x, keep_prob, num_nonzero_elems):
  24. """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
  25. """
  26. noise_shape = [num_nonzero_elems]
  27. random_tensor = keep_prob
  28. random_tensor += tf.convert_to_tensor(torch.rand(noise_shape).detach().numpy())
  29. # tf.convert_to_tensor(np.random.random(noise_shape))
  30. # tf.random_uniform(noise_shape)
  31. dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
  32. pre_out = tf.sparse_retain(x, dropout_mask)
  33. return pre_out * (1./keep_prob)
  34. def graph_conv_torch():
  35. torch.random.manual_seed(0)
  36. latent, adjacency_matrices = prepare_data()
  37. latent = torch.tensor(latent)
  38. adj_mat = adjacency_matrices[0]
  39. adj_mat = torch.tensor(adj_mat)
  40. conv = decagon_pytorch.convolve.GraphConv(10, 10,
  41. adj_mat)
  42. latent = conv(latent)
  43. return latent
  44. def sparse_graph_conv_torch():
  45. torch.random.manual_seed(0)
  46. latent, adjacency_matrices = prepare_data()
  47. print('latent.dtype:', latent.dtype)
  48. latent = torch.tensor(latent).to_sparse()
  49. adj_mat = adjacency_matrices[0]
  50. adj_mat = torch.tensor(adj_mat).to_sparse()
  51. print('adj_mat.dtype:', adj_mat.dtype,
  52. 'latent.dtype:', latent.dtype)
  53. conv = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  54. adj_mat)
  55. latent = conv(latent)
  56. return latent
  57. def sparse_graph_conv_tf():
  58. torch.random.manual_seed(0)
  59. latent, adjacency_matrices = prepare_data()
  60. conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  61. torch.tensor(adjacency_matrices[0]).to_sparse())
  62. weight = tf.constant(conv_torch.weight.detach().numpy())
  63. latent = dense_to_sparse_tf(latent)
  64. adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
  65. latent = tf.sparse_tensor_dense_matmul(latent, weight)
  66. latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
  67. return latent
  68. def sparse_dropout_graph_conv_activation_torch(keep_prob=1.):
  69. torch.random.manual_seed(0)
  70. latent, adjacency_matrices = prepare_data()
  71. latent = torch.tensor(latent).to_sparse()
  72. adj_mat = adjacency_matrices[0]
  73. adj_mat = torch.tensor(adj_mat).to_sparse()
  74. conv = decagon_pytorch.convolve.SparseDropoutGraphConvActivation(10, 10,
  75. adj_mat, keep_prob=keep_prob)
  76. latent = conv(latent)
  77. return latent
  78. def sparse_dropout_graph_conv_activation_tf(keep_prob=1.):
  79. torch.random.manual_seed(0)
  80. latent, adjacency_matrices = prepare_data()
  81. conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  82. torch.tensor(adjacency_matrices[0]).to_sparse())
  83. weight = tf.constant(conv_torch.weight.detach().numpy())
  84. nonzero_feat = np.sum(latent > 0)
  85. latent = dense_to_sparse_tf(latent)
  86. latent = dropout_sparse_tf(latent, keep_prob,
  87. nonzero_feat)
  88. adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
  89. latent = tf.sparse_tensor_dense_matmul(latent, weight)
  90. latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
  91. latent = tf.nn.relu(latent)
  92. return latent
  93. def test_sparse_graph_conv():
  94. latent_torch = sparse_graph_conv_torch()
  95. latent_tf = sparse_graph_conv_tf()
  96. assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session = tf.Session()))
  97. def test_sparse_dropout_grap_conv_activation():
  98. for i in range(11):
  99. keep_prob = i/10. + np.finfo(np.float32).eps
  100. latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob)
  101. latent_tf = sparse_dropout_graph_conv_activation_tf(keep_prob)
  102. latent_torch = latent_torch.detach().numpy()
  103. latent_tf = latent_tf.eval(session = tf.Session())
  104. print('latent_torch:', latent_torch)
  105. print('latent_tf:', latent_tf)
  106. assert np.all(latent_torch - latent_tf < .000001)
  107. def test_sparse_multi_dgca():
  108. latent_torch = None
  109. latent_tf = []
  110. for i in range(11):
  111. keep_prob = i/10. + np.finfo(np.float32).eps
  112. latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob) \
  113. if latent_torch is None \
  114. else latent_torch + sparse_dropout_graph_conv_activation_torch(keep_prob)
  115. latent_tf.append(sparse_dropout_graph_conv_activation_tf(keep_prob))
  116. latent_torch = torch.nn.functional.normalize(latent_torch, p=2, dim=1)
  117. latent_tf = tf.add_n(latent_tf)
  118. latent_tf = tf.nn.l2_normalize(latent_tf, dim=1)
  119. latent_torch = latent_torch.detach().numpy()
  120. latent_tf = latent_tf.eval(session = tf.Session())
  121. assert np.all(latent_torch - latent_tf < .000001)
  122. def test_graph_conv():
  123. latent_dense = graph_conv_torch()
  124. latent_sparse = sparse_graph_conv_torch()
  125. assert np.all(latent_dense.detach().numpy() == latent_sparse.detach().numpy())