#pragma once

#include <torch/extension.h>

#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>

#include <cassert>
#include <stdexcept>
#include <tuple>
#include <vector>

#include "dispatch.h"
#include "torch_stablesort_cuda.h"
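
// Stable sort of a dense CUDA tensor along an arbitrary dimension.
// The tensor is flattened to 2-D with the sort dimension last, every row is
// sorted in place with thrust::stable_sort_by_key, and a companion index
// tensor is permuted together with the values, so the result mirrors
// torch::sort(): { sorted values, original indices }.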
template<bool descending, typename T>
struct stable_sort_impl_cuda {
    std::vector<torch::Tensor> operator()(
        torch::Tensor input,
        int dim,
        torch::optional<std::tuple<torch::Tensor, torch::Tensor>> out
    ) const {

        if (input.is_sparse())
            throw std::runtime_error("Sparse tensors are not supported");

        if (input.device().type() != torch::DeviceType::CUDA)
            throw std::runtime_error("Only CUDA tensors are supported");

        if (out != torch::nullopt)
            throw std::runtime_error("out argument is not supported");

        // Work on a copy of the input and move the sort dimension to the
        // end, so every row of the flattened 2-D view below is one
        // independent sequence to sort.
        auto x = input.clone();

        if (dim != -1)
            x = torch::transpose(x, dim, -1);

        // Copy the sizes: sizes() is only a view into the tensor's metadata
        // and would dangle once x is re-assigned below.
        auto x_sizes = x.sizes().vec();

        // The transpose may leave x non-contiguous, so make it contiguous
        // before flattening with view().
        x = x.contiguous().view({ -1, x.size(-1) });

        auto x_outer_stride = x.stride(-2);
        auto x_inner_stride = x.stride(-1);
        auto n_cols = x.size(1);
        auto n_rows = x.size(0);
        auto px = x.data_ptr<T>();

        assert(x_inner_stride == 1);
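
        // Build the index tensor: one row [0, 1, ..., n_cols - 1] per row
        // of x. These indices are permuted alongside the values during the
        // sort and become the second returned tensor.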
        auto y = torch::repeat_interleave(
            torch::arange(0, n_cols, 1, torch::TensorOptions()
                .dtype(torch::kInt32)
                .device(x.device())).view({ 1, -1 }),
            n_rows,
            /* dim = */ 0);

        auto y_outer_stride = y.stride(-2);
        auto y_inner_stride = y.stride(-1);
        auto py = y.data_ptr<int32_t>();

        assert(y_inner_stride == 1);
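
        // Sort every row independently. thrust::stable_sort_by_key reorders
        // the values (keys), applies the same permutation to the index row,
        // and keeps the original relative order of equal values.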
        for (decltype(n_rows) i = 0; i < n_rows; i++) {
            auto ind_beg = thrust::device_pointer_cast(py + i * y_outer_stride);

            auto val_beg = thrust::device_pointer_cast(px + i * x_outer_stride);
            auto val_end = thrust::device_pointer_cast(px + i * x_outer_stride +
                n_cols * x_inner_stride);

            if constexpr(descending)
                thrust::stable_sort_by_key(thrust::device, val_beg, val_end, ind_beg,
                    thrust::greater<T>());
            else
                thrust::stable_sort_by_key(thrust::device, val_beg, val_end, ind_beg);
        }

        // Undo the flattening and, if necessary, move the sort dimension
        // back to its original position.
        x = x.view(x_sizes);
        y = y.view(x_sizes);

        x = (dim == -1) ?
            x :
            torch::transpose(x, dim, -1).contiguous();

        y = (dim == -1) ?
            y :
            torch::transpose(y, dim, -1).contiguous();

        return { x, y };
    }
};
template <typename T>
struct stable_sort_impl_desc_cuda: stable_sort_impl_cuda<true, T> {};

template <typename T>
struct stable_sort_impl_asc_cuda: stable_sort_impl_cuda<false, T> {};

std::vector<torch::Tensor> dispatch_cuda(torch::Tensor input,
    int dim,
    bool descending,
    torch::optional<std::tuple<torch::Tensor, torch::Tensor>> out) {

    if (descending)
        return dispatch<stable_sort_impl_desc_cuda, std::vector<torch::Tensor>>(
            input, dim, out);
    else
        return dispatch<stable_sort_impl_asc_cuda, std::vector<torch::Tensor>>(
            input, dim, out);
}
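
For context, here is a minimal sketch of how this header could be wired up as a PyTorch C++ extension. The binding file name, the exposed function name stable_sort_cuda, and the assumption that dispatch_cuda is declared in torch_stablesort_cuda.h are guesses, since the companion header and build files are not part of this listing.

// torch_stablesort_binding.cpp -- hypothetical binding file, not part of this header.
#include <torch/extension.h>
#include "torch_stablesort_cuda.h"   // assumed to declare dispatch_cuda()

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    // Exposes the CUDA stable sort as stable_sort_cuda(input, dim, descending),
    // returning a list [sorted_values, indices].
    m.def("stable_sort_cuda",
        [](torch::Tensor input, int dim, bool descending) {
            return dispatch_cuda(input, dim, descending, torch::nullopt);
        },
        "Stable sort of a dense CUDA tensor along dim");
}

Built with torch.utils.cpp_extension, this would be called from Python roughly as values, indices = torch_stablesort.stable_sort_cuda(x, -1, False).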