#
# Copyright (C) Stanislaw Adaszewski, 2020
# License: GPLv3
#

import numpy as np
import torch
import torch.utils.data
from typing import List, \
    Union, \
    Tuple
from .data import Data, \
    EdgeType
from .cumcount import cumcount
import time
import multiprocessing
import multiprocessing.pool
from itertools import product, \
    repeat
from functools import reduce


def fixed_unigram_candidate_sampler(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:
    """For every row i of true_classes, draw num_repeats[i] classes with
    probability proportional to unigrams ** distortion, avoiding the classes
    already listed in that row (padded with -1) and avoiding duplicate draws
    within the row."""

    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)
    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    # Apply the distortion to the sampling weights, mirroring the slow variant below.
    if distortion != 1.:
        unigrams = unigrams.to(torch.float64) ** distortion

    true_class_count = true_classes.shape[1] - (true_classes == -1).sum(dim=1)
    # Reserve room for the classes sampled in this call, so that accepted
    # candidates can be excluded from subsequent retry rounds.
    true_classes = torch.cat([
        true_classes,
        torch.full(( len(true_classes), torch.max(num_repeats) ), -1,
            dtype=true_classes.dtype)
    ], dim=1)

    # indices[:, 0] is the position in the flat result, indices[:, 1] the source row.
    indices = torch.repeat_interleave(torch.arange(len(true_classes)), num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    result = torch.zeros(len(indices), dtype=torch.long)

    while len(indices) > 0:
        candidates = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(candidates)).view(-1, 1)

        # Sort by candidate value, then stably by source row, so that cumcount()
        # can detect repeated draws within the same row.
        inner_order = torch.argsort(candidates[:, 0])
        indices_np = indices[inner_order].detach().cpu().numpy()
        outer_order = np.argsort(indices_np[:, 1], kind='stable')
        outer_order = torch.tensor(outer_order, device=inner_order.device)

        candidates = candidates[inner_order][outer_order]
        indices = indices[inner_order][outer_order]

        # Reject candidates that hit a true class of their row...
        mask = (true_classes[indices[:, 1]] == candidates).sum(dim=1).to(torch.bool)

        # ...and duplicate draws of the same candidate within the same row.
        can_cum = cumcount(candidates[:, 0])
        ind_cum = cumcount(indices[:, 1])
        repeated = (can_cum > 0) & (ind_cum > 0)
        mask = mask | repeated

        # Record accepted candidates as additional "true" classes so they are
        # not drawn again for the same row in a later round.
        updated = indices[~mask]
        if len(updated) > 0:
            ofs = true_class_count[updated[:, 1]] + \
                cumcount(updated[:, 1])
            true_classes[updated[:, 1], ofs] = candidates[~mask].flatten()
            true_class_count[updated[:, 1]] = ofs + 1

        result[indices[:, 0]] = candidates.flatten()
        # Retry only the rejected positions.
        indices = indices[mask]

    return result
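
# Illustrative usage (example values, not part of the original module): draw two
# negative classes for each of three rows out of five candidate classes. Rows of
# true_classes are padded with -1, as required by the checks above.
#
#   true_classes = torch.tensor([[1, -1], [0, 2], [3, -1]])
#   num_repeats = torch.tensor([2, 2, 2])
#   unigrams = torch.ones(5)
#   neg = fixed_unigram_candidate_sampler(true_classes, num_repeats, unigrams)
#   # neg has 6 entries; none equals a true class of its row and no row
#   # receives the same class twice.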


def fixed_unigram_candidate_sampler_slow(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:
    """Reference implementation: samples the rows one by one in a thread pool
    instead of vectorising the rejection loop."""

    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)
    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    res = []

    if distortion != 1.:
        unigrams = unigrams.to(torch.float64)
        unigrams = unigrams ** distortion

    def fun(i):
        if i and i % 100 == 0:
            print(i)
        if num_repeats[i] == 0:
            return []
        pos = torch.flatten(true_classes[i, :])
        pos = pos[pos >= 0]
        # Zero out the weights of the true classes, then sample without replacement.
        w = unigrams.clone().detach()
        w[pos] = 0
        sampler = torch.utils.data.WeightedRandomSampler(w,
            num_repeats[i].item(), replacement=False)
        res = list(sampler)
        return res

    with multiprocessing.pool.ThreadPool() as p:
        res = p.map(fun, range(len(num_repeats)))
        res = reduce(list.__add__, res, [])

    return torch.tensor(res)


def fixed_unigram_candidate_sampler_old(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    num_rows = true_classes.shape[0]
    print('true_classes.shape:', true_classes.shape)
    # unigrams = np.array(unigrams)
    if distortion != 1.:
        unigrams = unigrams.to(torch.float64) ** distortion
    print('unigrams:', unigrams)

    indices = torch.arange(num_rows)
    indices = torch.repeat_interleave(indices, num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    num_samples = len(indices)
    result = torch.zeros(num_samples, dtype=torch.long)
    print('num_rows:', num_rows, 'num_samples:', num_samples)

    while len(indices) > 0:
        print('len(indices):', len(indices))
        print('indices:', indices)
        sampler = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(sampler))
        candidates = candidates.view(len(indices), 1)
        print('candidates:', candidates)
        print('true_classes:', true_classes[indices[:, 1], :])
        result[indices[:, 0]] = candidates.transpose(0, 1)
        print('result:', result)
        mask = (candidates == true_classes[indices[:, 1], :])
        mask = mask.sum(1).to(torch.bool)
        # append_true_classes = torch.full(( len(true_classes), ), -1)
        # append_true_classes[~mask] = torch.flatten(candidates)[~mask]
        # true_classes = torch.cat([
        #     append_true_classes.view(-1, 1),
        #     true_classes
        # ], dim=1)
        print('mask:', mask)
        indices = indices[mask]
        # result[indices] = 0

    return result


def get_edges_and_degrees(adj_mat: torch.Tensor) -> \
    Tuple[torch.Tensor, torch.Tensor]:

    if adj_mat.is_sparse:
        adj_mat = adj_mat.coalesce()
        degrees = torch.zeros(adj_mat.shape[1], dtype=torch.int64,
            device=adj_mat.device)
        degrees = degrees.index_add(0, adj_mat.indices()[1],
            torch.ones(adj_mat.indices().shape[1], dtype=torch.int64,
            device=adj_mat.device))
        edges_pos = adj_mat.indices().transpose(0, 1)
    else:
        degrees = adj_mat.sum(0)
        edges_pos = torch.nonzero(adj_mat, as_tuple=False)

    return edges_pos, degrees
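
# Illustrative example (made-up values, not part of the original file):
#
#   adj = torch.tensor([[0., 1.], [1., 1.]]).to_sparse()
#   edges, degrees = get_edges_and_degrees(adj)
#   # edges == [[0, 1], [1, 0], [1, 1]]  -- one row per non-zero entry
#   # degrees == [1, 2]                  -- per-column degree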


def get_true_classes(adj_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    indices = adj_mat.indices()
    row_count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    #print('indices[0]:', indices[0], count[indices[0]])
    row_count = row_count.index_add(0, indices[0],
        torch.ones(indices.shape[1], dtype=torch.long))
    #print('count:', count)
    max_true_classes = torch.max(row_count).item()
    #print('max_true_classes:', max_true_classes)
    true_classes = torch.full((adj_mat.shape[0], max_true_classes),
        -1, dtype=torch.long)

    # inv = torch.unique(indices[0], return_inverse=True)
    # indices = indices.copy()
    # true_classes[indices[0], 0] = indices[1]

    t = time.time()
    cc = cumcount(indices[0])
    print('cumcount() took:', time.time() - t)
    # cc = torch.tensor(cc)

    t = time.time()
    true_classes[indices[0], cc] = indices[1]
    print('assignment took:', time.time() - t)

    ''' count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    for i in range(indices.shape[1]):
        # print('looping...')
        row = indices[0, i]
        col = indices[1, i]
        #print('row:', row, 'col:', col, 'count[row]:', count[row])
        true_classes[row, count[row]] = col
        count[row] += 1 '''

    # t = time.time()
    # true_classes = torch.repeat_interleave(true_classes, row_count, dim=0)
    # print('repeat_interleave() took:', time.time() - t)

    return true_classes, row_count
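
# Illustrative example (made-up values, not part of the original file):
#
#   adj = torch.tensor([[0., 1., 1.],
#                       [0., 0., 1.],
#                       [0., 0., 0.]]).to_sparse()
#   true_classes, row_count = get_true_classes(adj)
#   # true_classes == [[1, 2], [2, -1], [-1, -1]]  -- neighbours per row, padded with -1
#   # row_count == [2, 1, 0]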


def negative_sample_adj_mat(adj_mat: torch.Tensor,
    remove_diagonal: bool=False) -> torch.Tensor:
    """Build a sparse adjacency matrix of negative (non-existent) edges, with
    one negative edge sampled for every positive edge of adj_mat. Candidate
    columns are drawn with probability proportional to degree ** 0.75."""

    if not isinstance(adj_mat, torch.Tensor):
        raise ValueError('adj_mat must be a torch.Tensor, got: %s' % adj_mat.__class__.__name__)

    edges_pos, degrees = get_edges_and_degrees(adj_mat)
    # Small additive constant so that zero-degree vertices remain sampleable.
    degrees = degrees.to(torch.float32) + 1.0 / torch.numel(adj_mat)

    true_classes, row_count = get_true_classes(adj_mat)
    if remove_diagonal:
        true_classes = torch.cat([ torch.arange(len(adj_mat)).view(-1, 1),
            true_classes ], dim=1)
    # true_classes = edges_pos[:, 1].view(-1, 1)
    # print('true_classes:', true_classes)

    neg_neighbors = fixed_unigram_candidate_sampler(
        true_classes, row_count, degrees, 0.75).to(adj_mat.device)
    print('neg_neighbors:', neg_neighbors)

    pos_vertices = torch.repeat_interleave(torch.arange(len(adj_mat)),
        row_count)

    edges_neg = torch.cat([ pos_vertices.view(-1, 1),
        neg_neighbors.view(-1, 1) ], 1)

    adj_mat_neg = torch.sparse_coo_tensor(indices = edges_neg.transpose(0, 1),
        values=torch.ones(len(edges_neg)), size=adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)

    adj_mat_neg = adj_mat_neg.coalesce()
    indices = adj_mat_neg.indices()
    adj_mat_neg = torch.sparse_coo_tensor(indices,
        torch.ones(indices.shape[1]), adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)
    adj_mat_neg = adj_mat_neg.coalesce()

    return adj_mat_neg
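
# Illustrative usage (made-up adjacency matrix, not part of the original file):
# the input must be a sparse tensor, since get_true_classes() relies on
# adj_mat.indices().
#
#   adj = torch.tensor([
#       [0., 1., 1., 0., 0.],
#       [1., 0., 0., 0., 0.],
#       [0., 0., 0., 1., 0.],
#       [0., 0., 0., 0., 1.],
#       [0., 0., 0., 0., 0.],
#   ]).to_sparse()
#   adj_neg = negative_sample_adj_mat(adj, remove_diagonal=True)
#   # adj_neg is a sparse matrix of the same shape, holding one sampled
#   # non-edge per positive edge of adj.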


def negative_sample_data(data: Data) -> Data:
    new_edge_types = {}
    res = Data(target_value=0)
    for vt in data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)
    for key, et in data.edge_types.items():
        print('key:', key)
        adjacency_matrices_neg = []
        for adj_mat in et.adjacency_matrices:
            remove_diagonal = (et.vertex_type_row == et.vertex_type_column)
            adj_mat_neg = negative_sample_adj_mat(adj_mat, remove_diagonal)
            adjacency_matrices_neg.append(adj_mat_neg)
        res.add_edge_type(et.name,
            et.vertex_type_row, et.vertex_type_column,
            adjacency_matrices_neg, et.decoder_factory)
        #new_et = EdgeType(et.name, et.vertex_type_row,
        #    et.vertex_type_column, adjacency_matrices_neg,
        #    et.decoder_factory, et.total_connectivity)
        #new_edge_types[key] = new_et
    #res = Data(data.vertex_types, new_edge_types)
    return res


def merge_data(pos_data: Data, neg_data: Data) -> Data:
    assert isinstance(pos_data, Data)
    assert isinstance(neg_data, Data)

    res = PosNegData()

    for vt in pos_data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)

    for key, pos_et in pos_data.edge_types.items():
        neg_et = neg_data.edge_types[key]
        res.add_edge_type(pos_et.name,
            pos_et.vertex_type_row, pos_et.vertex_type_column,
            pos_et.adjacency_matrices, neg_et.adjacency_matrices,
            pos_et.decoder_factory)