
test_concatv2_op.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
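# Tests for the GPU Concat operator (P.Concat): concatenation along axes 1, 2
# and 3, with two, three and four inputs, across several dtypes.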
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
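# Concatenates a (2, 2, 1) parameter and a (2, 2, 2) parameter along axis 2.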
class ConcatV32(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV32, self).__init__()
        self.cat = P.Concat(axis=2)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 1).reshape(2, 2, 1).astype(nptype)), [2, 2, 1]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2).reshape(2, 2, 2).astype(nptype)), [2, 2, 2]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))
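# Runs ConcatV32 on GPU and checks the output against the expected concatenation.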
def axis32(nptype):
    context.set_context(device_target='GPU')
    cat = ConcatV32(nptype)
    output = cat()
    expect = np.array([[[0., 0., 1.],
                        [1., 2., 3.]],
                       [[2., 4., 5.],
                        [3., 6., 7.]]]).astype(nptype)
    print(output)
    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_float32():
    axis32(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_int16():
    axis32(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_uint8():
    axis32(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_bool():
    axis32(np.bool)
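# Concatenates a (2, 2, 2, 2) parameter and a (2, 2, 2, 3) parameter along axis 3.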
class ConcatV43(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV43, self).__init__()
        self.cat = P.Concat(axis=3)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype)), [2, 2, 2, 2]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3).astype(nptype)), [2, 2, 2, 3]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))
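# Runs ConcatV43 on GPU and checks the output against the expected concatenation.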
def axis43(nptype):
    context.set_context(device_target='GPU')
    cat = ConcatV43(nptype)
    output = cat()
    expect = np.array([[[[0., 1., 0., 1., 2.],
                         [2., 3., 3., 4., 5.]],
                        [[4., 5., 6., 7., 8.],
                         [6., 7., 9., 10., 11.]]],
                       [[[8., 9., 12., 13., 14.],
                         [10., 11., 15., 16., 17.]],
                        [[12., 13., 18., 19., 20.],
                         [14., 15., 21., 22., 23.]]]]).astype(nptype)
    assert (output.asnumpy() == expect).all()
    print(output)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_float32():
    axis43(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_int16():
    axis43(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_uint8():
    axis43(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_bool():
    axis43(np.bool)
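# Concatenates a (2, 2) parameter and a (2, 3) parameter along axis 1.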
class ConcatV21(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV21, self).__init__()
        self.cat = P.Concat(axis=1)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2).reshape(2, 2).astype(nptype)), [2, 2]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 3).reshape(2, 3).astype(nptype)), [2, 3]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))
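# Runs ConcatV21 and checks the output against the expected concatenation.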
def axis21(nptype):
    cat = ConcatV21(nptype)
    output = cat()
    expect = np.array([[0., 1., 0., 1., 2.],
                       [2., 3., 3., 4., 5.]]).astype(nptype)
    assert (output.asnumpy() == expect).all()
    print(output)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_float32():
    axis21(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_int16():
    axis21(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_uint8():
    axis21(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_bool():
    axis21(np.bool)
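# Concatenates three inputs along axis 1.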
class Concat3INet(nn.Cell):
    def __init__(self):
        super(Concat3INet, self).__init__()
        self.cat = P.Concat(axis=1)

    def construct(self, x1, x2, x3):
        return self.cat((x1, x2, x3))
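# Concatenates three random (32, C, 224, 224) inputs along axis 1 and compares
# the result against np.concatenate.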
def concat_3i(nptype):
    cat = Concat3INet()

    x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
    x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
    x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
    output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms)

    error = np.ones(shape=output_np.shape) * 10e-6
    diff = output_ms.asnumpy() - output_np
    assert np.all(diff < error)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_float32():
    concat_3i(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_int16():
    concat_3i(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_uint8():
    concat_3i(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_bool():
    cat = Concat3INet()

    x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
    x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
    x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
    output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms)

    assert (output_ms.asnumpy() == output_np).all()
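# Concatenates four inputs along axis 1.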
class Concat4INet(nn.Cell):
    def __init__(self):
        super(Concat4INet, self).__init__()
        self.cat = P.Concat(axis=1)

    def construct(self, x1, x2, x3, x4):
        return self.cat((x1, x2, x3, x4))
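# Concatenates four random (32, C, 224, 224) inputs along axis 1 and compares
# the result against np.concatenate.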
def concat_4i(nptype):
    cat = Concat4INet()

    x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
    x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
    x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
    x4_np = np.random.randn(32, 5, 224, 224).astype(nptype)
    output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    x4_ms = Tensor(x4_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)

    error = np.ones(shape=output_np.shape) * 10e-6
    diff = output_ms.asnumpy() - output_np
    assert np.all(diff < error)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_float32():
    concat_4i(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_int16():
    concat_4i(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_uint8():
    concat_4i(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_bool():
    cat = Concat4INet()

    x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
    x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
    x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
    x4_np = np.random.choice([True, False], (32, 5, 224, 224)).astype(np.bool)
    output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    x4_ms = Tensor(x4_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)

    assert (output_ms.asnumpy() == output_np).all()