
test_pack_op.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter


class StackNet(nn.Cell):
    """Stacks a zero-filled parameter and an arange parameter along axis 2."""

    def __init__(self, nptype):
        super(StackNet, self).__init__()
        self.stack = P.Stack(axis=2)
        self.data_np = np.array([0] * 16).astype(nptype)
        self.data_np = np.reshape(self.data_np, (2, 2, 2, 2))
        self.x1 = Parameter(initializer(
            Tensor(self.data_np), [2, 2, 2, 2]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(16).reshape(2, 2, 2, 2).astype(nptype)), [2, 2, 2, 2]), name='x2')

    @ms_function
    def construct(self):
        return self.stack((self.x1, self.x2))


def stack(nptype):
    """Graph mode: run StackNet on GPU and compare against the expected result."""
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    stack_ = StackNet(nptype)
    output = stack_()
    expect = np.array([[[[[0, 0],
                          [0, 0]],
                         [[0, 1],
                          [2, 3]]],
                        [[[0, 0],
                          [0, 0]],
                         [[4, 5],
                          [6, 7]]]],
                       [[[[0, 0],
                          [0, 0]],
                         [[8, 9],
                          [10, 11]]],
                        [[[0, 0],
                          [0, 0]],
                         [[12, 13],
                          [14, 15]]]]]).astype(nptype)
    assert (output.asnumpy() == expect).all()


def stack_pynative(nptype):
    """PyNative mode: call P.Stack directly on GPU and compare against the same expected result."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    x1 = np.array([0] * 16).astype(nptype)
    x1 = np.reshape(x1, (2, 2, 2, 2))
    x1 = Tensor(x1)
    x2 = Tensor(np.arange(16).reshape(2, 2, 2, 2).astype(nptype))
    expect = np.array([[[[[0, 0],
                          [0, 0]],
                         [[0, 1],
                          [2, 3]]],
                        [[[0, 0],
                          [0, 0]],
                         [[4, 5],
                          [6, 7]]]],
                       [[[[0, 0],
                          [0, 0]],
                         [[8, 9],
                          [10, 11]]],
                        [[[0, 0],
                          [0, 0]],
                         [[12, 13],
                          [14, 15]]]]]).astype(nptype)
    output = P.Stack(axis=2)((x1, x2))
    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_float32():
    stack(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_float16():
    stack(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_int32():
    stack(np.int32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_int16():
    stack(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_uint8():
    stack(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_graph_bool():
    stack(np.bool_)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_float32():
    stack_pynative(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_float16():
    stack_pynative(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_int32():
    stack_pynative(np.int32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_int16():
    stack_pynative(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_uint8():
    stack_pynative(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_stack_pynative_bool():
    stack_pynative(np.bool_)
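
For reference (not part of the test file): the hard-coded expect arrays above are just the two inputs stacked along the new axis 2, so they can be reproduced with NumPy alone. A minimal sketch, using float32 as an arbitrary example dtype:

# Sketch only: reproduces the expected array used by the tests with plain NumPy.
import numpy as np

x1 = np.zeros((2, 2, 2, 2), dtype=np.float32)               # same zero-filled input as the tests
x2 = np.arange(16).reshape(2, 2, 2, 2).astype(np.float32)   # same arange input as the tests
expect = np.stack((x1, x2), axis=2)   # shape (2, 2, 2, 2, 2), identical to the literal in the tests
print(expect.shape)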