IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!
Você não pode selecionar mais de 25 tópicos. Os tópicos devem começar com uma letra ou um número, podem incluir traços ('-') e podem ter até 35 caracteres.

147 linhas
5.0KB

  1. import decagon_pytorch.convolve
  2. import decagon.deep.layers
  3. import torch
  4. import tensorflow as tf
  5. import numpy as np
  6. def prepare_data():
  7. np.random.seed(0)
  8. latent = np.random.random((5, 10)).astype(np.float32)
  9. latent[latent < .5] = 0
  10. latent = np.ceil(latent)
  11. adjacency_matrices = []
  12. for _ in range(5):
  13. adj_mat = np.random.random((len(latent),) * 2).astype(np.float32)
  14. adj_mat[adj_mat < .5] = 0
  15. adj_mat = np.ceil(adj_mat)
  16. adjacency_matrices.append(adj_mat)
  17. return latent, adjacency_matrices
  18. def dense_to_sparse_tf(x):
  19. a, b = np.where(x)
  20. indices = np.array([a, b]).T
  21. values = x[a, b]
  22. return tf.sparse.SparseTensor(indices, values, x.shape)
  23. def dropout_sparse_tf(x, keep_prob, num_nonzero_elems):
  24. """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
  25. """
  26. noise_shape = [num_nonzero_elems]
  27. random_tensor = keep_prob
  28. random_tensor += tf.convert_to_tensor(torch.rand(noise_shape).detach().numpy())
  29. # tf.convert_to_tensor(np.random.random(noise_shape))
  30. # tf.random_uniform(noise_shape)
  31. dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
  32. pre_out = tf.sparse_retain(x, dropout_mask)
  33. return pre_out * (1./keep_prob)
  34. def sparse_graph_conv_torch():
  35. torch.random.manual_seed(0)
  36. latent, adjacency_matrices = prepare_data()
  37. print('latent.dtype:', latent.dtype)
  38. latent = torch.tensor(latent).to_sparse()
  39. adj_mat = adjacency_matrices[0]
  40. adj_mat = torch.tensor(adj_mat).to_sparse()
  41. print('adj_mat.dtype:', adj_mat.dtype,
  42. 'latent.dtype:', latent.dtype)
  43. conv = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  44. adj_mat)
  45. latent = conv(latent)
  46. return latent
  47. def sparse_graph_conv_tf():
  48. torch.random.manual_seed(0)
  49. latent, adjacency_matrices = prepare_data()
  50. conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  51. torch.tensor(adjacency_matrices[0]).to_sparse())
  52. weight = tf.constant(conv_torch.weight.detach().numpy())
  53. latent = dense_to_sparse_tf(latent)
  54. adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
  55. latent = tf.sparse_tensor_dense_matmul(latent, weight)
  56. latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
  57. return latent
  58. def sparse_dropout_graph_conv_activation_torch(keep_prob=1.):
  59. torch.random.manual_seed(0)
  60. latent, adjacency_matrices = prepare_data()
  61. latent = torch.tensor(latent).to_sparse()
  62. adj_mat = adjacency_matrices[0]
  63. adj_mat = torch.tensor(adj_mat).to_sparse()
  64. conv = decagon_pytorch.convolve.SparseDropoutGraphConvActivation(10, 10,
  65. adj_mat, keep_prob=keep_prob)
  66. latent = conv(latent)
  67. return latent
  68. def sparse_dropout_graph_conv_activation_tf(keep_prob=1.):
  69. torch.random.manual_seed(0)
  70. latent, adjacency_matrices = prepare_data()
  71. conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
  72. torch.tensor(adjacency_matrices[0]).to_sparse())
  73. weight = tf.constant(conv_torch.weight.detach().numpy())
  74. nonzero_feat = np.sum(latent > 0)
  75. latent = dense_to_sparse_tf(latent)
  76. latent = dropout_sparse_tf(latent, keep_prob,
  77. nonzero_feat)
  78. adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
  79. latent = tf.sparse_tensor_dense_matmul(latent, weight)
  80. latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
  81. latent = tf.nn.relu(latent)
  82. return latent
  83. def test_sparse_graph_conv():
  84. latent_torch = sparse_graph_conv_torch()
  85. latent_tf = sparse_graph_conv_tf()
  86. assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session = tf.Session()))
  87. def test_sparse_dropout_grap_conv_activation():
  88. for i in range(11):
  89. keep_prob = i/10. + np.finfo(np.float32).eps
  90. latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob)
  91. latent_tf = sparse_dropout_graph_conv_activation_tf(keep_prob)
  92. latent_torch = latent_torch.detach().numpy()
  93. latent_tf = latent_tf.eval(session = tf.Session())
  94. print('latent_torch:', latent_torch)
  95. print('latent_tf:', latent_tf)
  96. assert np.all(latent_torch - latent_tf < .000001)
  97. def test_sparse_multi_dgca():
  98. latent_torch = None
  99. latent_tf = []
  100. for i in range(11):
  101. keep_prob = i/10. + np.finfo(np.float32).eps
  102. latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob) \
  103. if latent_torch is None \
  104. else latent_torch + sparse_dropout_graph_conv_activation_torch(keep_prob)
  105. latent_tf.append(sparse_dropout_graph_conv_activation_tf(keep_prob))
  106. latent_torch = torch.nn.functional.normalize(latent_torch, p=2, dim=1)
  107. latent_tf = tf.add_n(latent_tf)
  108. latent_tf = tf.nn.l2_normalize(latent_tf, dim=1)
  109. latent_torch = latent_torch.detach().numpy()
  110. latent_tf = latent_tf.eval(session = tf.Session())
  111. assert np.all(latent_torch - latent_tf < .000001)