IF YOU WOULD LIKE TO GET AN ACCOUNT, please write an email to s dot adaszewski at gmail dot com. User accounts are meant only to report issues and/or generate pull requests. This is a purpose-specific Git hosting for ADARED projects. Thank you for your understanding!

335 lines
12 KB

#
# Copyright (C) Stanislaw Adaszewski, 2020
# License: GPLv3
#

import numpy as np
import torch
import torch.utils.data
from typing import List, \
    Union, \
    Tuple
from .data import Data, \
    EdgeType
from .cumcount import cumcount
import time
import multiprocessing
import multiprocessing.pool
from itertools import product, \
    repeat
from functools import reduce

def fixed_unigram_candidate_sampler(
        true_classes: torch.Tensor,
        num_repeats: torch.Tensor,
        unigrams: torch.Tensor,
        distortion: float = 1.) -> torch.Tensor:

    # Vectorized negative sampler: draws num_repeats[i] candidates for row i
    # from the `unigrams` weights, rejects candidates that already occur in
    # true_classes[i] (positives, padded with -1) and resamples the rejected
    # slots until every slot is filled.
    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)
    # Note: `distortion` is only validated here; the slow reference
    # implementation below applies it as unigrams ** distortion.
    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
            (true_classes >= 0).sum(dim=1) < \
            num_repeats):
        raise ValueError('Not enough classes to choose from')

    true_class_count = true_classes.shape[1] - (true_classes == -1).sum(dim=1)
    true_classes = torch.cat([
        true_classes,
        torch.full(( len(true_classes), torch.max(num_repeats) ), -1,
            dtype=true_classes.dtype)
    ], dim=1)

    indices = torch.repeat_interleave(torch.arange(len(true_classes)), num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    result = torch.zeros(len(indices), dtype=torch.long)

    while len(indices) > 0:
        candidates = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(candidates)).view(-1, 1)

        # Sort the draws by target row and, within each row, by candidate value.
        inner_order = torch.argsort(candidates[:, 0])
        indices_np = indices[inner_order].detach().cpu().numpy()
        outer_order = np.argsort(indices_np[:, 1], kind='stable')
        outer_order = torch.tensor(outer_order, device=inner_order.device)

        candidates = candidates[inner_order][outer_order]
        indices = indices[inner_order][outer_order]

        # Reject candidates that collide with known positives...
        mask = (true_classes[indices[:, 1]] == candidates).sum(dim=1).to(torch.bool)

        # ...and possible duplicates drawn in this round (conservative check).
        can_cum = cumcount(candidates[:, 0])
        ind_cum = cumcount(indices[:, 1])
        repeated = (can_cum > 0) & (ind_cum > 0)

        mask = mask | repeated

        # Accepted candidates become additional "true" classes of their row so
        # that later rounds cannot draw them again for the same row.
        updated = indices[~mask]
        ofs = true_class_count[updated[:, 1]] + \
            cumcount(updated[:, 1])
        true_classes[updated[:, 1], ofs] = candidates[~mask].transpose(0, 1)
        true_class_count[updated[:, 1]] = ofs + 1

        result[indices[:, 0]] = candidates.transpose(0, 1)
        indices = indices[mask]

    return result

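# Illustrative usage sketch (hypothetical toy values, added for exposition):
# drawing one negative class per row of a small 3-class problem. `true_classes`
# lists the forbidden (positive) classes per row, padded with -1; `num_repeats`
# gives the number of negatives per row; `unigrams` holds the sampling weights.
#
#   true_classes = torch.tensor([[1, -1], [0, 2], [1, -1]])
#   num_repeats = torch.tensor([1, 1, 1])
#   unigrams = torch.tensor([1., 1., 1.])
#   neg = fixed_unigram_candidate_sampler(true_classes, num_repeats, unigrams)
#   # neg[i] is a class index that does not occur in true_classes[i]
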
def fixed_unigram_candidate_sampler_slow(
        true_classes: torch.Tensor,
        num_repeats: torch.Tensor,
        unigrams: torch.Tensor,
        distortion: float = 1.) -> torch.Tensor:

    # Slow reference implementation: every row is sampled independently
    # (without replacement) on a thread pool, with the weights of the known
    # positives zeroed out.
    assert isinstance(true_classes, torch.Tensor)
    assert isinstance(num_repeats, torch.Tensor)
    assert isinstance(unigrams, torch.Tensor)
    distortion = float(distortion)

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
            (true_classes >= 0).sum(dim=1) < \
            num_repeats):
        raise ValueError('Not enough classes to choose from')

    res = []

    if distortion != 1.:
        unigrams = unigrams.to(torch.float64)
        unigrams = unigrams ** distortion

    def fun(i):
        if i and i % 100 == 0:
            print(i)
        if num_repeats[i] == 0:
            return []
        pos = torch.flatten(true_classes[i, :])
        pos = pos[pos >= 0]
        w = unigrams.clone().detach()
        w[pos] = 0
        sampler = torch.utils.data.WeightedRandomSampler(w,
            num_repeats[i].item(), replacement=False)
        res = list(sampler)
        return res

    with multiprocessing.pool.ThreadPool() as p:
        res = p.map(fun, range(len(num_repeats)))
        res = reduce(list.__add__, res, [])

    return torch.tensor(res)

def fixed_unigram_candidate_sampler_old(
        true_classes: torch.Tensor,
        num_repeats: torch.Tensor,
        unigrams: torch.Tensor,
        distortion: float = 1.) -> torch.Tensor:

    if len(true_classes.shape) != 2:
        raise ValueError('true_classes must be a 2D matrix with shape (num_samples, num_true)')

    if len(num_repeats.shape) != 1:
        raise ValueError('num_repeats must be 1D')

    if torch.any((unigrams > 0).sum() - \
            (true_classes >= 0).sum(dim=1) < \
            num_repeats):
        raise ValueError('Not enough classes to choose from')

    num_rows = true_classes.shape[0]
    print('true_classes.shape:', true_classes.shape)
    # unigrams = np.array(unigrams)
    if distortion != 1.:
        unigrams = unigrams.to(torch.float64) ** distortion
    print('unigrams:', unigrams)

    indices = torch.arange(num_rows)
    indices = torch.repeat_interleave(indices, num_repeats)
    indices = torch.cat([ torch.arange(len(indices)).view(-1, 1),
        indices.view(-1, 1) ], dim=1)

    num_samples = len(indices)
    result = torch.zeros(num_samples, dtype=torch.long)
    print('num_rows:', num_rows, 'num_samples:', num_samples)

    while len(indices) > 0:
        print('len(indices):', len(indices))
        print('indices:', indices)

        sampler = torch.utils.data.WeightedRandomSampler(unigrams, len(indices))
        candidates = torch.tensor(list(sampler))
        candidates = candidates.view(len(indices), 1)
        print('candidates:', candidates)

        print('true_classes:', true_classes[indices[:, 1], :])
        result[indices[:, 0]] = candidates.transpose(0, 1)
        print('result:', result)

        mask = (candidates == true_classes[indices[:, 1], :])
        mask = mask.sum(1).to(torch.bool)

        # append_true_classes = torch.full(( len(true_classes), ), -1)
        # append_true_classes[~mask] = torch.flatten(candidates)[~mask]
        # true_classes = torch.cat([
        #     append_true_classes.view(-1, 1),
        #     true_classes
        # ], dim=1)

        print('mask:', mask)
        indices = indices[mask]
        # result[indices] = 0

    return result

def get_edges_and_degrees(adj_mat: torch.Tensor) -> \
        Tuple[torch.Tensor, torch.Tensor]:

    # Returns the positive edge list and the per-column degree vector of
    # adj_mat, handling both sparse and dense adjacency matrices.
    if adj_mat.is_sparse:
        adj_mat = adj_mat.coalesce()
        degrees = torch.zeros(adj_mat.shape[1], dtype=torch.int64,
            device=adj_mat.device)
        degrees = degrees.index_add(0, adj_mat.indices()[1],
            torch.ones(adj_mat.indices().shape[1], dtype=torch.int64,
                device=adj_mat.device))
        edges_pos = adj_mat.indices().transpose(0, 1)
    else:
        degrees = adj_mat.sum(0)
        edges_pos = torch.nonzero(adj_mat, as_tuple=False)
    return edges_pos, degrees

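# Illustrative usage sketch (hypothetical toy values, added for exposition):
# for a small sparse adjacency matrix, get_edges_and_degrees() returns the
# positive edge list and the per-column degrees later used as unigram weights.
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 2]]),
#       torch.ones(2), (3, 3))
#   edges_pos, degrees = get_edges_and_degrees(adj)
#   # edges_pos -> [[0, 1], [1, 2]], degrees -> [0, 1, 1]
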
def get_true_classes(adj_mat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    # For a (coalesced) sparse adjacency matrix, builds the -1-padded list of
    # positive column indices per row and the per-row positive count.
    indices = adj_mat.indices()
    row_count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    #print('indices[0]:', indices[0], count[indices[0]])
    row_count = row_count.index_add(0, indices[0],
        torch.ones(indices.shape[1], dtype=torch.long))
    #print('count:', count)
    max_true_classes = torch.max(row_count).item()
    #print('max_true_classes:', max_true_classes)
    true_classes = torch.full((adj_mat.shape[0], max_true_classes),
        -1, dtype=torch.long)

    # inv = torch.unique(indices[0], return_inverse=True)
    # indices = indices.copy()
    # true_classes[indices[0], 0] = indices[1]

    t = time.time()
    cc = cumcount(indices[0])
    print('cumcount() took:', time.time() - t)
    # cc = torch.tensor(cc)

    t = time.time()
    true_classes[indices[0], cc] = indices[1]
    print('assignment took:', time.time() - t)

    ''' count = torch.zeros(adj_mat.shape[0], dtype=torch.long)
    for i in range(indices.shape[1]):
        # print('looping...')
        row = indices[0, i]
        col = indices[1, i]
        #print('row:', row, 'col:', col, 'count[row]:', count[row])
        true_classes[row, count[row]] = col
        count[row] += 1 '''

    # t = time.time()
    # true_classes = torch.repeat_interleave(true_classes, row_count, dim=0)
    # print('repeat_interleave() took:', time.time() - t)

    return true_classes, row_count

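# Illustrative usage sketch (hypothetical toy values, added for exposition):
# for the same kind of toy matrix, get_true_classes() yields the padded
# positive column indices per row and the per-row positive counts.
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 2]]),
#       torch.ones(2), (3, 3)).coalesce()
#   true_classes, row_count = get_true_classes(adj)
#   # true_classes -> [[1], [2], [-1]], row_count -> [1, 1, 0]
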
def negative_sample_adj_mat(adj_mat: torch.Tensor,
        remove_diagonal: bool=False) -> torch.Tensor:

    # For every positive edge of adj_mat, draws a negative neighbor for its
    # source vertex using degree-based unigram weights, and returns the
    # sampled non-edges as a sparse adjacency matrix of the same shape.
    if not isinstance(adj_mat, torch.Tensor):
        raise ValueError('adj_mat must be a torch.Tensor, got: %s' % adj_mat.__class__.__name__)

    edges_pos, degrees = get_edges_and_degrees(adj_mat)
    degrees = degrees.to(torch.float32) + 1.0 / torch.numel(adj_mat)

    true_classes, row_count = get_true_classes(adj_mat)
    if remove_diagonal:
        # For relations within a single vertex type, forbid self-loops as negatives.
        true_classes = torch.cat([ torch.arange(len(adj_mat)).view(-1, 1),
            true_classes ], dim=1)
    # true_classes = edges_pos[:, 1].view(-1, 1)
    # print('true_classes:', true_classes)

    neg_neighbors = fixed_unigram_candidate_sampler(
        true_classes, row_count, degrees, 0.75).to(adj_mat.device)

    print('neg_neighbors:', neg_neighbors)

    pos_vertices = torch.repeat_interleave(torch.arange(len(adj_mat)),
        row_count)

    edges_neg = torch.cat([ pos_vertices.view(-1, 1),
        neg_neighbors.view(-1, 1) ], 1)

    adj_mat_neg = torch.sparse_coo_tensor(indices=edges_neg.transpose(0, 1),
        values=torch.ones(len(edges_neg)), size=adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)

    adj_mat_neg = adj_mat_neg.coalesce()
    indices = adj_mat_neg.indices()
    adj_mat_neg = torch.sparse_coo_tensor(indices,
        torch.ones(indices.shape[1]), adj_mat.shape,
        dtype=adj_mat.dtype, device=adj_mat.device)
    adj_mat_neg = adj_mat_neg.coalesce()

    return adj_mat_neg

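# Illustrative usage sketch (hypothetical toy values, added for exposition):
# sampling a negative counterpart of a small sparse adjacency matrix; the
# result is a sparse matrix of the same shape whose entries avoid the
# positive edges.
#
#   adj = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 2]]),
#       torch.ones(2), (3, 3)).coalesce()
#   adj_neg = negative_sample_adj_mat(adj)
#   # adj_neg holds one sampled negative neighbor per positive edge
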
def negative_sample_data(data: Data) -> Data:
    # Builds a new Data object in which every adjacency matrix is replaced by
    # a negatively-sampled counterpart; vertex types, edge-type names and
    # decoder factories are carried over unchanged.
    new_edge_types = {}
    res = Data(target_value=0)
    for vt in data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)
    for key, et in data.edge_types.items():
        print('key:', key)
        adjacency_matrices_neg = []
        for adj_mat in et.adjacency_matrices:
            # Relations within a single vertex type must not sample the diagonal.
            remove_diagonal = True \
                if et.vertex_type_row == et.vertex_type_column \
                else False
            adj_mat_neg = negative_sample_adj_mat(adj_mat, remove_diagonal)
            adjacency_matrices_neg.append(adj_mat_neg)
        res.add_edge_type(et.name,
            et.vertex_type_row, et.vertex_type_column,
            adjacency_matrices_neg, et.decoder_factory)
        #new_et = EdgeType(et.name, et.vertex_type_row,
        #    et.vertex_type_column, adjacency_matrices_neg,
        #    et.decoder_factory, et.total_connectivity)
        #new_edge_types[key] = new_et
    #res = Data(data.vertex_types, new_edge_types)
    return res

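# Illustrative usage sketch (hypothetical `train_data`, added for exposition):
# negative_sample_data() produces a Data object mirroring `train_data` with
# every adjacency matrix replaced by sampled negatives; merge_data() below
# then pairs the positive and negative matrices per edge type.
#
#   neg_data = negative_sample_data(train_data)
#   pos_neg = merge_data(train_data, neg_data)
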
def merge_data(pos_data: Data, neg_data: Data) -> Data:
    # Pairs the positive and negative adjacency matrices of matching edge
    # types in a single container. PosNegData is assumed to be provided by
    # the surrounding package (alongside Data).
    assert isinstance(pos_data, Data)
    assert isinstance(neg_data, Data)

    res = PosNegData()

    for vt in pos_data.vertex_types:
        res.add_vertex_type(vt.name, vt.count)

    for key, pos_et in pos_data.edge_types.items():
        neg_et = neg_data.edge_types[key]
        res.add_edge_type(pos_et.name,
            pos_et.vertex_type_row, pos_et.vertex_type_column,
            pos_et.adjacency_matrices, neg_et.adjacency_matrices,
            pos_et.decoder_factory)