#
# Copyright (C) Stanislaw Adaszewski, 2020
# License: GPLv3
#

import numpy as np
import torch
import torch.utils.data
from typing import List, \
    Union, \
    Tuple
from .data import Data, \
    EdgeType
from .cumcount import cumcount
import time
import multiprocessing
import multiprocessing.pool
from itertools import product, \
    repeat
from functools import reduce
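
# Negative-sampling utilities: unigram candidate samplers that draw corrupted
# neighbours while avoiding each row's true (positive) classes, plus helpers
# that build negative adjacency matrices and a negatively-sampled Data object
# from their positive counterparts.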


def fixed_unigram_candidate_sampler_new(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:

    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)

    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    true_class_count = true_classes.shape[1] - (true_classes == -1).sum(dim=1)

    true_classes = torch.cat([
        true_classes,
        torch.full(( len(true_classes), torch.max(num_repeats) ), -1,
            dtype=true_classes.dtype)
    ], dim=1)

    indices = torch.repeat_interleave(torch.arange(len(true_classes)), num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    result = torch.zeros(len(indices), dtype=torch.long)

    while len(indices) > 0:
        print(len(indices))

        candidates = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(candidates)).view(-1, 1)

        inner_order = torch.argsort(candidates[:, 0])
        indices_np = indices[inner_order].detach().cpu().numpy()
        outer_order = np.argsort(indices_np[:, 1], kind='stable')
        outer_order = torch.tensor(outer_order, device=inner_order.device)

        candidates = candidates[inner_order][outer_order]
        indices = indices[inner_order][outer_order]

        mask = (true_classes[indices[:, 1]] == candidates).sum(dim=1).to(torch.bool)

        can_cum = cumcount(candidates[:, 0])
        ind_cum = cumcount(indices[:, 1])
        repeated = (can_cum > 0) & (ind_cum > 0)
        # TODO: this is wrong, still requires work

        mask = mask | repeated

        updated = indices[~mask]
        if len(updated) > 0:
            ofs = true_class_count[updated[:, 1]] + \
                cumcount(updated[:, 1])
            true_classes[updated[:, 1], ofs] = candidates[~mask].transpose(0, 1)
            true_class_count[updated[:, 1]] = ofs + 1

        result[indices[:, 0]] = candidates.transpose(0, 1)
        indices = indices[mask]

    return result


def fixed_unigram_candidate_sampler_slow(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:

    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)

    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    res = []

    if distortion != 1.:
        unigrams = unigrams.to(torch.float64)
        unigrams = unigrams ** distortion

    def fun(i):
        if i and i % 100 == 0:
            print(i)
        if num_repeats[i] == 0:
            return []
        pos = torch.flatten(true_classes[i, :])
        pos = pos[pos >= 0]
        w = unigrams.clone().detach()
        w[pos] = 0
        sampler = torch.utils.data.WeightedRandomSampler(w,
            num_repeats[i].item(), replacement=False)
        res = list(sampler)
        return res

    with multiprocessing.pool.ThreadPool() as p:
        res = p.map(fun, range(len(num_repeats)))

    res = reduce(list.__add__, res, [])

    return torch.tensor(res)
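
# Illustrative usage sketch (not part of the original module; the tensor values
# below are made up): draw one negative class per row, excluding each row's
# true classes (rows are padded with -1).
#
#   true_classes = torch.tensor([[1, -1], [0, 2], [3, -1]])
#   num_repeats = torch.tensor([1, 1, 1])
#   unigrams = torch.ones(4)  # uniform weights over 4 candidate classes
#   neg = fixed_unigram_candidate_sampler_slow(true_classes, num_repeats, unigrams)
#   # neg has shape (3,); neg[i] never appears among true_classes[i], because the
#   # per-row sampler zeroes the weights of the true classes before drawing.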


def fixed_unigram_candidate_sampler(
    true_classes: torch.Tensor,
    num_repeats: torch.Tensor,
    unigrams: torch.Tensor,
    distortion: float = 1.) -> torch.Tensor:

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
        (true_classes >= 0).sum(dim=1) < \
        num_repeats):
        raise ValueError('Not enough classes to choose from')

    num_rows = true_classes.shape[0]
    print('true_classes.shape:', true_classes.shape)
    # unigrams = np.array(unigrams)
    if distortion != 1.:
        unigrams = unigrams.to(torch.float64) ** distortion
    print('unigrams:', unigrams)

    indices = torch.arange(num_rows)
    indices = torch.repeat_interleave(indices, num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    num_samples = len(indices)
    result = torch.zeros(num_samples, dtype=torch.long)
    print('num_rows:', num_rows, 'num_samples:', num_samples)

    while len(indices) > 0:
        print('len(indices):', len(indices))
        print('indices:', indices)

        sampler = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(sampler))
        candidates = candidates.view(len(indices), 1)
        print('candidates:', candidates)
        print('true_classes:', true_classes[indices[:, 1], :])

        result[indices[:, 0]] = candidates.transpose(0, 1)
        print('result:', result)

        mask = (candidates == true_classes[indices[:, 1], :])
        mask = mask.sum(1).to(torch.bool)

        # append_true_classes = torch.full(( len(true_classes), ), -1)
        # append_true_classes[~mask] = torch.flatten(candidates)[~mask]
        # true_classes = torch.cat([
        #     append_true_classes.view(-1, 1),
        #     true_classes
        # ], dim=1)

        print('mask:', mask)
        indices = indices[mask]
        # result[indices] = 0

    return result
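
# Illustrative usage sketch (not part of the original module; values are made
# up): this is the rejection-sampling variant, so candidates that collide with
# a row's true classes are simply re-drawn in the next iteration of the loop.
# Unlike the _slow variant, nothing prevents two negatives drawn for the same
# row from being equal.
#
#   true_classes = torch.tensor([[1, -1], [0, 2], [3, -1]])
#   num_repeats = torch.tensor([1, 1, 1])
#   degrees = torch.tensor([4., 3., 2., 1.])  # unigram weights, e.g. node degrees
#   neg = fixed_unigram_candidate_sampler(true_classes, num_repeats, degrees, 0.75)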


def get_edges_and_degrees(adj_mat: torch.Tensor) -> \
    Tuple[torch.Tensor, torch.Tensor]:

    if adj_mat.is_sparse:
        adj_mat = adj_mat.coalesce()
        degrees = torch.zeros(adj_mat.shape[1], dtype=torch.int64,
            device=adj_mat.device)
        degrees = degrees.index_add(0, adj_mat.indices()[1],
            torch.ones(adj_mat.indices().shape[1], dtype=torch.int64,
                device=adj_mat.device))
        edges_pos = adj_mat.indices().transpose(0, 1)
    else:
        degrees = adj_mat.sum(0)
        edges_pos = torch.nonzero(adj_mat, as_tuple=False)

    return edges_pos, degrees
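
# Illustrative usage sketch (not part of the original module): for a sparse COO
# adjacency matrix the function returns the positive edge list and the
# per-column degree counts.
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 1, 2], [1, 2, 1]]),
#       torch.ones(3), (3, 3))
#   edges_pos, degrees = get_edges_and_degrees(adj)
#   # edges_pos: tensor([[0, 1], [1, 2], [2, 1]]), degrees: tensor([0, 2, 1])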


def get_true_classes(adj_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    indices = adj_mat.indices()

    row_count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    # print('indices[0]:', indices[0], count[indices[0]])
    row_count = row_count.index_add(0, indices[0],
        torch.ones(indices.shape[1], dtype=torch.long))
    # print('count:', count)
    max_true_classes = torch.max(row_count).item()
    # print('max_true_classes:', max_true_classes)
    true_classes = torch.full((adj_mat.shape[0], max_true_classes),
        -1, dtype=torch.long)

    # inv = torch.unique(indices[0], return_inverse=True)
    # indices = indices.copy()
    # true_classes[indices[0], 0] = indices[1]

    t = time.time()
    cc = cumcount(indices[0])
    print('cumcount() took:', time.time() - t)
    # cc = torch.tensor(cc)

    t = time.time()
    true_classes[indices[0], cc] = indices[1]
    print('assignment took:', time.time() - t)

    ''' count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    for i in range(indices.shape[1]):
        # print('looping...')
        row = indices[0, i]
        col = indices[1, i]
        # print('row:', row, 'col:', col, 'count[row]:', count[row])
        true_classes[row, count[row]] = col
        count[row] += 1 '''

    # t = time.time()
    # true_classes = torch.repeat_interleave(true_classes, row_count, dim=0)
    # print('repeat_interleave() took:', time.time() - t)

    return true_classes, row_count
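
# Illustrative usage sketch (not part of the original module; this assumes that
# cumcount() numbers repeated values 0, 1, 2, ... within each run of equal
# values): the result lists every row's existing ("true") neighbours, padded
# with -1 up to the maximum row degree.
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 0, 2], [1, 2, 0]]),
#       torch.ones(3), (3, 3)).coalesce()
#   true_classes, row_count = get_true_classes(adj)
#   # true_classes: tensor([[1, 2], [-1, -1], [0, -1]]), row_count: tensor([2, 0, 1])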


def negative_sample_adj_mat(adj_mat: torch.Tensor,
    remove_diagonal: bool=False) -> torch.Tensor:

    if not isinstance(adj_mat, torch.Tensor):
        raise ValueError('adj_mat must be a torch.Tensor, got: %s' % adj_mat.__class__.__name__)

    edges_pos, degrees = get_edges_and_degrees(adj_mat)
    degrees = degrees.to(torch.float32) + 1.0 / torch.numel(adj_mat)

    true_classes, row_count = get_true_classes(adj_mat)
    if remove_diagonal:
        true_classes = torch.cat([ torch.arange(len(adj_mat)).view(-1, 1),
            true_classes ], dim=1)
    # true_classes = edges_pos[:, 1].view(-1, 1)
    # print('true_classes:', true_classes)

    neg_neighbors = fixed_unigram_candidate_sampler(
        true_classes, row_count, degrees, 0.75).to(adj_mat.device)

    print('neg_neighbors:', neg_neighbors)

    pos_vertices = torch.repeat_interleave(torch.arange(len(adj_mat)),
        row_count)

    edges_neg = torch.cat([ pos_vertices.view(-1, 1),
        neg_neighbors.view(-1, 1) ], 1)

    adj_mat_neg = torch.sparse_coo_tensor(indices=edges_neg.transpose(0, 1),
        values=torch.ones(len(edges_neg)), size=adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)

    adj_mat_neg = adj_mat_neg.coalesce()
    indices = adj_mat_neg.indices()
    adj_mat_neg = torch.sparse_coo_tensor(indices,
        torch.ones(indices.shape[1]), adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)
    adj_mat_neg = adj_mat_neg.coalesce()

    return adj_mat_neg
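
# Illustrative usage sketch (not part of the original module): given a coalesced
# positive sparse adjacency matrix, the function draws degree-weighted negative
# neighbours for every positive edge and packs them into a sparse matrix of the
# same shape whose entries avoid the positive ones (and, optionally, the
# diagonal).
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 1, 2], [1, 2, 0]]),
#       torch.ones(3), (3, 3)).coalesce()
#   adj_neg = negative_sample_adj_mat(adj, remove_diagonal=True)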


def negative_sample_data(data: Data) -> Data:
    new_edge_types = {}
    res = Data(target_value=0)

    for vt in data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)

    for key, et in data.edge_types.items():
        print('key:', key)
        adjacency_matrices_neg = []
        for adj_mat in et.adjacency_matrices:
            remove_diagonal = True \
                if et.vertex_type_row == et.vertex_type_column \
                else False
            adj_mat_neg = negative_sample_adj_mat(adj_mat, remove_diagonal)
            adjacency_matrices_neg.append(adj_mat_neg)
        res.add_edge_type(et.name,
            et.vertex_type_row, et.vertex_type_column,
            adjacency_matrices_neg, et.decoder_factory)
        # new_et = EdgeType(et.name, et.vertex_type_row,
        #     et.vertex_type_column, adjacency_matrices_neg,
        #     et.decoder_factory, et.total_connectivity)
        # new_edge_types[key] = new_et

    # res = Data(data.vertex_types, new_edge_types)
    return res
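
# Illustrative note (not part of the original module): negative_sample_data()
# copies the vertex types of its input and replaces every edge type's adjacency
# matrices with negatively-sampled ones, so a typical call is simply:
#
#   data_neg = negative_sample_data(data)  # `data` is a populated Data object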


def merge_data(pos_data: Data, neg_data: Data) -> Data:
    assert isinstance(pos_data, Data)
    assert isinstance(neg_data, Data)

    res = PosNegData()

    for vt in pos_data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)

    for key, pos_et in pos_data.edge_types.items():
        neg_et = neg_data.edge_types[key]
        res.add_edge_type(pos_et.name,
            pos_et.vertex_type_row, pos_et.vertex_type_column,
            pos_et.adjacency_matrices, neg_et.adjacency_matrices,
            pos_et.decoder_factory)