#include <torch/extension.h>

#include <algorithm>
#include <stdexcept>
#include <tuple>
#include <vector>

#include "dispatch.h"
#include "torch_stablesort_cpu.h"

template<bool descending, typename T>
struct stable_sort_impl {
    std::vector<torch::Tensor> operator()(
        torch::Tensor input,
        int dim,
        torch::optional<std::tuple<torch::Tensor, torch::Tensor>> out
    ) const {
        if (input.is_sparse())
            throw std::runtime_error("Sparse tensors are not supported");

        if (input.device().type() != torch::DeviceType::CPU)
            throw std::runtime_error("Only CPU tensors are supported");

        if (out != torch::nullopt)
            throw std::runtime_error("out argument is not supported");

        // Move the sort dimension to the innermost position so that, after
        // flattening, every row is an independent sequence to sort.
        auto in = (dim != -1) ?
            torch::transpose(input, dim, -1) :
            input;

        // Copy the sizes (.vec()) so they remain valid after `in` is
        // reassigned to the flattened view below.
        auto in_sizes = in.sizes().vec();

        // Flatten to 2D: one row per independent sort problem.
        in = in.view({ -1, in.size(-1) }).contiguous();
        auto in_outer_stride = in.stride(-2);
        auto in_inner_stride = in.stride(-1);
        auto pin = static_cast<T*>(in.data_ptr());

        // x receives the sorted values, y the corresponding source indices.
        auto x = in.clone();
        auto x_outer_stride = x.stride(-2);
        auto x_inner_stride = x.stride(-1);
        auto n_cols = x.size(1);
        auto n_rows = x.size(0);
        auto px = static_cast<T*>(x.data_ptr());

        auto y = torch::empty({ n_rows, n_cols },
            torch::TensorOptions().dtype(torch::kInt64));
        auto y_outer_stride = y.stride(-2);
        auto y_inner_stride = y.stride(-1);
        auto py = static_cast<int64_t*>(y.data_ptr());

        // Rows are sorted independently, so the loop parallelizes trivially.
        #pragma omp parallel for
        for (decltype(n_rows) i = 0; i < n_rows; i++) {
            // Argsort the row; std::stable_sort preserves the relative order
            // of equal elements.
            std::vector<int64_t> indices(n_cols);
            for (decltype(n_cols) k = 0; k < n_cols; k++) {
                indices[k] = k;
            }

            std::stable_sort(std::begin(indices), std::end(indices),
                [pin, i, in_outer_stride, in_inner_stride](const auto &a, const auto &b) {
                    auto va = pin[i * in_outer_stride + a * in_inner_stride];
                    auto vb = pin[i * in_outer_stride + b * in_inner_stride];
                    if constexpr(descending)
                        return (vb < va);
                    else
                        return (va < vb);
                });

            // Scatter the sorted values and their source indices into the outputs.
            for (decltype(n_cols) k = 0; k < n_cols; k++) {
                py[i * y_outer_stride + k * y_inner_stride] = indices[k];
                px[i * x_outer_stride + k * x_inner_stride] =
                    pin[i * in_outer_stride + indices[k] * in_inner_stride];
            }
        }

        // Restore the original shape and dimension order.
        x = x.view(in_sizes);
        y = y.view(in_sizes);

        x = (dim == -1) ?
            x :
            torch::transpose(x, dim, -1).contiguous();

        y = (dim == -1) ?
            y :
            torch::transpose(y, dim, -1).contiguous();

        return { x, y };
    }
};

template <typename T>
struct stable_sort_impl_desc: stable_sort_impl<true, T> {};

template <typename T>
struct stable_sort_impl_asc: stable_sort_impl<false, T> {};

std::vector<torch::Tensor> dispatch_cpu(torch::Tensor input,
    int dim,
    bool descending,
    torch::optional<std::tuple<torch::Tensor, torch::Tensor>> out) {

    if (descending)
        return dispatch<stable_sort_impl_desc, std::vector<torch::Tensor>>(
            input, dim, out);
    else
        return dispatch<stable_sort_impl_asc, std::vector<torch::Tensor>>(
            input, dim, out);
}
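
The dispatch.h helper included at the top is not shown on this page. For orientation only, here is a minimal sketch of a scalar-type dispatcher that would be compatible with the dispatch<stable_sort_impl_desc, std::vector<torch::Tensor>>(input, dim, out) calls above, built on ATen's AT_DISPATCH_ALL_TYPES macro. The repository's actual header may be implemented differently.

// Hypothetical sketch only, not the repository's actual dispatch.h.
#pragma once

#include <torch/extension.h>
#include <utility>

// Instantiates F<scalar_t> for the runtime dtype of `input` and forwards the
// remaining arguments to its call operator.
template<template<typename T> class F, typename R, typename... Ts>
R dispatch(torch::Tensor input, Ts&& ...args) {
    R result;
    AT_DISPATCH_ALL_TYPES(input.scalar_type(), "dispatch", [&] {
        result = F<scalar_t>()(input, std::forward<Ts>(args)...);
    });
    return result;
}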
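
This translation unit also does not contain the Python binding. As a sketch under the usual PyTorch C++ extension conventions (the exposed name stable_sort_cpu is an assumption, not taken from the repository), dispatch_cpu could be made callable from Python like this:

// Hypothetical binding sketch; the repository's own binding code may differ.
#include <torch/extension.h>
#include <pybind11/stl.h>

// Forward declaration of the function defined in torch_stablesort_cpu.cpp.
std::vector<torch::Tensor> dispatch_cpu(torch::Tensor input,
    int dim,
    bool descending,
    torch::optional<std::tuple<torch::Tensor, torch::Tensor>> out);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    // Returns a (values, indices) pair like torch.sort(), but with a
    // guaranteed stable order for equal elements.
    m.def("stable_sort_cpu", &dispatch_cpu,
          "Stable sort along one dimension (CPU)");
}

The compiled module could then be built and loaded with torch.utils.cpp_extension.load and called as, for example, module.stable_sort_cpu(t, -1, False, None).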