You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_topk_op.py 5.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138
  1. # Copyright 2020-21 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import pytest
  17. import mindspore.context as context
  18. from mindspore import Tensor
  19. from mindspore.ops import operations as P
  20. @pytest.mark.level0
  21. @pytest.mark.platform_x86_gpu_training
  22. @pytest.mark.env_onecard
  23. def test_topk_small_2d():
  24. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  25. x_np = np.random.rand(3, 4).astype(np.float32)
  26. k = 4
  27. ms_output = P.TopK(True)(Tensor(x_np), k)
  28. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  29. assert np.allclose(ms_output[0].asnumpy(), np_output)
  30. x_np = np.random.rand(3, 4).astype(np.float32)
  31. k = 4
  32. ms_output = P.TopK(False)(Tensor(x_np), k)
  33. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  34. assert np.allclose(ms_output[0].asnumpy(), np_output)
  35. @pytest.mark.level0
  36. @pytest.mark.platform_x86_gpu_training
  37. @pytest.mark.env_onecard
  38. def test_topk_3d():
  39. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  40. x_np = np.random.rand(2, 256, 128).astype(np.float32)
  41. k = 4
  42. ms_output = P.TopK(True)(Tensor(x_np), k)
  43. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  44. assert np.allclose(ms_output[0].asnumpy(), np_output)
  45. x_np = np.random.rand(2, 3, 4).astype(np.float32)
  46. k = 2
  47. ms_output = P.TopK(True)(Tensor(x_np), k)
  48. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  49. assert np.allclose(ms_output[0].asnumpy(), np_output)
  50. @pytest.mark.level0
  51. @pytest.mark.platform_x86_gpu_training
  52. @pytest.mark.env_onecard
  53. def test_topk_big_2d():
  54. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  55. x_np = np.random.rand(512, 1024).astype(np.float32)
  56. k = 512
  57. ms_output = P.TopK(True)(Tensor(x_np), k)
  58. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  59. assert np.allclose(ms_output[0].asnumpy(), np_output)
  60. # sorted elements num greater than max thread per block
  61. x_np = np.random.rand(128, 2048).astype(np.float32)
  62. k = 1
  63. ms_output = P.TopK(True)(Tensor(x_np), k)
  64. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  65. assert np.allclose(ms_output[0].asnumpy(), np_output)
  66. x_np = np.random.rand(32, 2048).astype(np.float32)
  67. k = 2048
  68. ms_output = P.TopK(True)(Tensor(x_np), k)
  69. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  70. assert np.allclose(ms_output[0].asnumpy(), np_output)
  71. # sorted elements num greater than max share memory per block
  72. x_np = np.random.rand(16, 40960).astype(np.float32)
  73. k = 1
  74. ms_output = P.TopK(True)(Tensor(x_np), k)
  75. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  76. assert np.allclose(ms_output[0].asnumpy(), np_output)
  77. @pytest.mark.level0
  78. @pytest.mark.platform_x86_gpu_training
  79. @pytest.mark.env_onecard
  80. def test_topk_big_k():
  81. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  82. x_np = np.random.rand(8, 40960).astype(np.float32)
  83. k = 4096
  84. ms_output = P.TopK(True)(Tensor(x_np), k)
  85. np_output = np.sort(x_np, axis=-1)[..., ::-1][..., 0:k]
  86. assert np.allclose(ms_output[0].asnumpy(), np_output)
  87. @pytest.mark.level0
  88. @pytest.mark.platform_x86_gpu_training
  89. @pytest.mark.env_onecard
  90. def test_topk_1d():
  91. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  92. x_np = np.random.rand(12).astype(np.float32)
  93. k = 4
  94. ms_output = P.TopK(True)(Tensor(x_np), k)
  95. np_output = np.sort(x_np)[::-1][0:k]
  96. assert np.allclose(ms_output[0].asnumpy(), np_output)
  97. x_np = np.random.rand(1200).astype(np.float32)
  98. k = 256
  99. ms_output = P.TopK(True)(Tensor(x_np), k)
  100. np_output = np.sort(x_np)[::-1][0:k]
  101. assert np.allclose(ms_output[0].asnumpy(), np_output)
  102. x_np = np.random.rand(250000).astype(np.float32)
  103. k = 2000
  104. ms_output = P.TopK(True)(Tensor(x_np), k)
  105. np_output = np.sort(x_np)[::-1][0:k]
  106. assert np.allclose(ms_output[0].asnumpy(), np_output)
  107. x_np = np.random.rand(10240).astype(np.float32)
  108. k = 4096
  109. ms_output = P.TopK(True)(Tensor(x_np), k)
  110. np_output = np.sort(x_np)[::-1][0:k]
  111. assert np.allclose(ms_output[0].asnumpy(), np_output)
  112. x_np = np.random.rand(720).astype(np.float32)
  113. k = 720
  114. ms_output = P.TopK(True)(Tensor(x_np), k)
  115. np_output = np.sort(x_np)[::-1][0:k]
  116. assert np.allclose(ms_output[0].asnumpy()[:k], np_output)