#
# Copyright (C) Stanislaw Adaszewski, 2020
# License: GPLv3
#


import numpy as np
import torch
import torch.utils.data
from typing import List, \
    Union, \
    Tuple
from .data import Data, \
    EdgeType
from .cumcount import cumcount
import time
import multiprocessing
import multiprocessing.pool
from itertools import product, \
    repeat
from functools import reduce
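

# fixed_unigram_candidate_sampler_slow() is the straightforward reference
# implementation of negative sampling: for every row i of true_classes it
# draws num_repeats[i] candidates from the unigram distribution (optionally
# raised to the distortion power), with that row's true classes zeroed out
# of the weights, using one WeightedRandomSampler per row on a thread pool.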
def fixed_unigram_candidate_sampler_slow(
        true_classes: torch.Tensor,
        num_repeats: torch.Tensor,
        unigrams: torch.Tensor,
        distortion: float = 1.) -> torch.Tensor:

    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)
    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
            (true_classes >= 0).sum(dim=1) < \
            num_repeats):
        raise ValueError('Not enough classes to choose from')

    res = []

    if distortion != 1.:
        unigrams = unigrams.to(torch.float64)
        unigrams = unigrams ** distortion

    def fun(i):
        if i and i % 100 == 0:
            print(i)
        if num_repeats[i] == 0:
            return []
        pos = torch.flatten(true_classes[i, :])
        pos = pos[pos >= 0]
        w = unigrams.clone().detach()
        w[pos] = 0
        sampler = torch.utils.data.WeightedRandomSampler(w,
            num_repeats[i].item(), replacement=False)
        res = list(sampler)
        return res

    with multiprocessing.pool.ThreadPool() as p:
        res = p.map(fun, range(len(num_repeats)))

    res = reduce(list.__add__, res, [])

    return torch.tensor(res)
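

# fixed_unigram_candidate_sampler() is the vectorised variant used below: it
# samples candidates for all outstanding positions at once and then rejects
# any candidate that collides with one of its row's true classes; the
# rejected positions stay in `indices` and are redrawn on the next pass of
# the while loop until every position holds a valid negative sample.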
def fixed_unigram_candidate_sampler(
        true_classes: torch.Tensor,
        num_repeats: torch.Tensor,
        unigrams: torch.Tensor,
        distortion: float = 1.) -> torch.Tensor:

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
            (true_classes >= 0).sum(dim=1) < \
            num_repeats):
        raise ValueError('Not enough classes to choose from')

    num_rows = true_classes.shape[0]
    print('true_classes.shape:', true_classes.shape)
    # unigrams = np.array(unigrams)
    if distortion != 1.:
        unigrams = unigrams.to(torch.float64) ** distortion
    print('unigrams:', unigrams)

    indices = torch.arange(num_rows)
    indices = torch.repeat_interleave(indices, num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    num_samples = len(indices)
    result = torch.zeros(num_samples, dtype=torch.long)
    print('num_rows:', num_rows, 'num_samples:', num_samples)

    while len(indices) > 0:
        print('len(indices):', len(indices))
        print('indices:', indices)
        sampler = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(sampler))
        candidates = candidates.view(len(indices), 1)
        print('candidates:', candidates)
        print('true_classes:', true_classes[indices[:, 1], :])
        result[indices[:, 0]] = candidates.transpose(0, 1)
        print('result:', result)
        mask = (candidates == true_classes[indices[:, 1], :])
        mask = mask.sum(1).to(torch.bool)
        # append_true_classes = torch.full(( len(true_classes), ), -1)
        # append_true_classes[~mask] = torch.flatten(candidates)[~mask]
        # true_classes = torch.cat([
        #     append_true_classes.view(-1, 1),
        #     true_classes
        # ], dim=1)
        print('mask:', mask)
        indices = indices[mask]
        # result[indices] = 0

    return result
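

# get_edges_and_degrees() returns the positive edge list of an adjacency
# matrix together with per-column degree counts, handling both sparse COO
# and dense tensors.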
def get_edges_and_degrees(adj_mat: torch.Tensor) -> \
        Tuple[torch.Tensor, torch.Tensor]:

    if adj_mat.is_sparse:
        adj_mat = adj_mat.coalesce()
        degrees = torch.zeros(adj_mat.shape[1], dtype=torch.int64,
            device=adj_mat.device)
        degrees = degrees.index_add(0, adj_mat.indices()[1],
            torch.ones(adj_mat.indices().shape[1], dtype=torch.int64,
                device=adj_mat.device))
        edges_pos = adj_mat.indices().transpose(0, 1)
    else:
        degrees = adj_mat.sum(0)
        edges_pos = torch.nonzero(adj_mat, as_tuple=False)

    return edges_pos, degrees
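

# get_true_classes() builds, for a sparse adjacency matrix, a per-row matrix
# of positive (true) column indices padded with -1, plus the per-row count
# of positives; cumcount() gives each entry its slot within its row.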
def get_true_classes(adj_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    indices = adj_mat.indices()
    row_count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    # print('indices[0]:', indices[0], count[indices[0]])
    row_count = row_count.index_add(0, indices[0],
        torch.ones(indices.shape[1], dtype=torch.long))
    # print('count:', count)
    max_true_classes = torch.max(row_count).item()
    # print('max_true_classes:', max_true_classes)
    true_classes = torch.full((adj_mat.shape[0], max_true_classes),
        -1, dtype=torch.long)

    # inv = torch.unique(indices[0], return_inverse=True)
    # indices = indices.copy()
    # true_classes[indices[0], 0] = indices[1]

    t = time.time()
    cc = cumcount(indices[0].cpu().numpy())
    print('cumcount() took:', time.time() - t)
    cc = torch.tensor(cc)

    t = time.time()
    true_classes[indices[0], cc] = indices[1]
    print('assignment took:', time.time() - t)

    ''' count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    for i in range(indices.shape[1]):
        # print('looping...')
        row = indices[0, i]
        col = indices[1, i]
        # print('row:', row, 'col:', col, 'count[row]:', count[row])
        true_classes[row, count[row]] = col
        count[row] += 1 '''

    # t = time.time()
    # true_classes = torch.repeat_interleave(true_classes, row_count, dim=0)
    # print('repeat_interleave() took:', time.time() - t)

    return true_classes, row_count
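

# negative_sample_adj_mat() produces a sparse adjacency matrix of negative
# (non-existent) edges matching the per-row positive count of the input,
# sampling non-neighbours with probability proportional to degree ** 0.75;
# remove_diagonal additionally excludes self-loops from the candidates.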
def negative_sample_adj_mat(adj_mat: torch.Tensor,
        remove_diagonal: bool = False) -> torch.Tensor:

    if not isinstance(adj_mat, torch.Tensor):
        raise ValueError('adj_mat must be a torch.Tensor, got: %s' % adj_mat.__class__.__name__)

    edges_pos, degrees = get_edges_and_degrees(adj_mat)
    degrees = degrees.to(torch.float32) + 1.0 / torch.numel(adj_mat)

    true_classes, row_count = get_true_classes(adj_mat)
    if remove_diagonal:
        true_classes = torch.cat([ torch.arange(len(adj_mat)).view(-1, 1),
            true_classes ], dim=1)
    # true_classes = edges_pos[:, 1].view(-1, 1)
    # print('true_classes:', true_classes)

    neg_neighbors = fixed_unigram_candidate_sampler(
        true_classes, row_count, degrees, 0.75).to(adj_mat.device)
    print('neg_neighbors:', neg_neighbors)

    pos_vertices = torch.repeat_interleave(torch.arange(len(adj_mat)),
        row_count)

    edges_neg = torch.cat([ pos_vertices.view(-1, 1),
        neg_neighbors.view(-1, 1) ], 1)

    adj_mat_neg = torch.sparse_coo_tensor(indices=edges_neg.transpose(0, 1),
        values=torch.ones(len(edges_neg)), size=adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)
    adj_mat_neg = adj_mat_neg.coalesce()

    indices = adj_mat_neg.indices()
    adj_mat_neg = torch.sparse_coo_tensor(indices,
        torch.ones(indices.shape[1]), adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)
    adj_mat_neg = adj_mat_neg.coalesce()

    return adj_mat_neg
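

# negative_sample_data() applies negative_sample_adj_mat() to every
# adjacency matrix of every edge type in a Data object and returns a new
# Data object holding the negatively-sampled edges.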
def negative_sample_data(data: Data) -> Data:
    new_edge_types = {}
    res = Data(target_value=0)
    for vt in data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)
    for key, et in data.edge_types.items():
        print('key:', key)
        adjacency_matrices_neg = []
        for adj_mat in et.adjacency_matrices:
            remove_diagonal = True \
                if et.vertex_type_row == et.vertex_type_column \
                else False
            adj_mat_neg = negative_sample_adj_mat(adj_mat, remove_diagonal)
            adjacency_matrices_neg.append(adj_mat_neg)
        res.add_edge_type(et.name,
            et.vertex_type_row, et.vertex_type_column,
            adjacency_matrices_neg, et.decoder_factory)
        # new_et = EdgeType(et.name, et.vertex_type_row,
        #     et.vertex_type_column, adjacency_matrices_neg,
        #     et.decoder_factory, et.total_connectivity)
        # new_edge_types[key] = new_et
    # res = Data(data.vertex_types, new_edge_types)
    return res
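

# merge_data() combines the positive and the negative Data objects into a
# single container that keeps both adjacency-matrix sets per edge type; the
# PosNegData class it instantiates is presumably defined elsewhere in the
# package, as it does not appear among the imports above.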
def merge_data(pos_data: Data, neg_data: Data) -> Data:
    assert isinstance(pos_data, Data)
    assert isinstance(neg_data, Data)

    res = PosNegData()

    for vt in pos_data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)

    for key, pos_et in pos_data.edge_types.items():
        neg_et = neg_data.edge_types[key]
        res.add_edge_type(pos_et.name,
            pos_et.vertex_type_row, pos_et.vertex_type_column,
            pos_et.adjacency_matrices, neg_et.adjacency_matrices,
            pos_et.decoder_factory)