You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_concatv2_op.py 10 kB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import pytest
  17. import mindspore.context as context
  18. import mindspore.nn as nn
  19. from mindspore import Tensor
  20. from mindspore.common.api import ms_function
  21. from mindspore.common.initializer import initializer
  22. from mindspore.common.parameter import Parameter
  23. from mindspore.ops import operations as P
  24. class ConcatV32(nn.Cell):
  25. def __init__(self, nptype):
  26. super(ConcatV32, self).__init__()
  27. self.cat = P.Concat(axis=2)
  28. self.x1 = Parameter(initializer(
  29. Tensor(np.arange(2 * 2 * 1).reshape(2, 2, 1).astype(nptype)), [2, 2, 1]), name='x1')
  30. self.x2 = Parameter(initializer(
  31. Tensor(np.arange(2 * 2 * 2).reshape(2, 2, 2).astype(nptype)), [2, 2, 2]), name='x2')
  32. @ms_function
  33. def construct(self):
  34. return self.cat((self.x1, self.x2))
  35. def axis32(nptype):
  36. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  37. cat = ConcatV32(nptype)
  38. output = cat()
  39. expect = np.array([[[0., 0., 1.],
  40. [1., 2., 3.]],
  41. [[2., 4., 5.],
  42. [3., 6., 7.]]]).astype(nptype)
  43. assert (output.asnumpy() == expect).all()
  44. @pytest.mark.level0
  45. @pytest.mark.platform_x86_gpu_training
  46. @pytest.mark.env_onecard
  47. def test_axis32_float64():
  48. axis32(np.float64)
  49. @pytest.mark.level0
  50. @pytest.mark.platform_x86_gpu_training
  51. @pytest.mark.env_onecard
  52. def test_axis32_float32():
  53. axis32(np.float32)
  54. @pytest.mark.level1
  55. @pytest.mark.platform_x86_gpu_training
  56. @pytest.mark.env_onecard
  57. def test_axis32_int16():
  58. axis32(np.int16)
  59. @pytest.mark.level1
  60. @pytest.mark.platform_x86_gpu_training
  61. @pytest.mark.env_onecard
  62. def test_axis32_uint8():
  63. axis32(np.uint8)
  64. @pytest.mark.level1
  65. @pytest.mark.platform_x86_gpu_training
  66. @pytest.mark.env_onecard
  67. def test_axis32_bool():
  68. axis32(np.bool)
  69. class ConcatV43(nn.Cell):
  70. def __init__(self, nptype):
  71. super(ConcatV43, self).__init__()
  72. self.cat = P.Concat(axis=3)
  73. self.x1 = Parameter(initializer(
  74. Tensor(np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype)), [2, 2, 2, 2]), name='x1')
  75. self.x2 = Parameter(initializer(
  76. Tensor(np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3).astype(nptype)), [2, 2, 2, 3]), name='x2')
  77. @ms_function
  78. def construct(self):
  79. return self.cat((self.x1, self.x2))
  80. def axis43(nptype):
  81. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  82. cat = ConcatV43(nptype)
  83. output = cat()
  84. expect = np.array([[[[0., 1., 0., 1., 2.],
  85. [2., 3., 3., 4., 5.]],
  86. [[4., 5., 6., 7., 8.],
  87. [6., 7., 9., 10., 11.]]],
  88. [[[8., 9., 12., 13., 14.],
  89. [10., 11., 15., 16., 17.]],
  90. [[12., 13., 18., 19., 20.],
  91. [14., 15., 21., 22., 23.]]]]).astype(nptype)
  92. assert (output.asnumpy() == expect).all()
  93. @pytest.mark.level0
  94. @pytest.mark.platform_x86_gpu_training
  95. @pytest.mark.env_onecard
  96. def test_axis43_float64():
  97. axis43(np.float64)
  98. @pytest.mark.level0
  99. @pytest.mark.platform_x86_gpu_training
  100. @pytest.mark.env_onecard
  101. def test_axis43_float32():
  102. axis43(np.float32)
  103. @pytest.mark.level1
  104. @pytest.mark.platform_x86_gpu_training
  105. @pytest.mark.env_onecard
  106. def test_axis43_int16():
  107. axis43(np.int16)
  108. @pytest.mark.level0
  109. @pytest.mark.platform_x86_gpu_training
  110. @pytest.mark.env_onecard
  111. def test_axis43_uint8():
  112. axis43(np.uint8)
  113. @pytest.mark.level1
  114. @pytest.mark.platform_x86_gpu_training
  115. @pytest.mark.env_onecard
  116. def test_axis43_bool():
  117. axis43(np.bool)
  118. class ConcatV21(nn.Cell):
  119. def __init__(self, nptype):
  120. super(ConcatV21, self).__init__()
  121. self.cat = P.Concat(axis=1)
  122. self.x1 = Parameter(initializer(
  123. Tensor(np.arange(2 * 2).reshape(2, 2).astype(nptype)), [2, 2]), name='x1')
  124. self.x2 = Parameter(initializer(
  125. Tensor(np.arange(2 * 3).reshape(2, 3).astype(nptype)), [2, 3]), name='x2')
  126. @ms_function
  127. def construct(self):
  128. return self.cat((self.x1, self.x2))
  129. def axis21(nptype):
  130. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  131. cat = ConcatV21(nptype)
  132. output = cat()
  133. expect = np.array([[0., 1., 0., 1., 2.],
  134. [2., 3., 3., 4., 5.]]).astype(nptype)
  135. assert (output.asnumpy() == expect).all()
  136. @pytest.mark.level0
  137. @pytest.mark.platform_x86_gpu_training
  138. @pytest.mark.env_onecard
  139. def test_axis21_float64():
  140. axis21(np.float64)
  141. @pytest.mark.level0
  142. @pytest.mark.platform_x86_gpu_training
  143. @pytest.mark.env_onecard
  144. def test_axis21_float32():
  145. axis21(np.float32)
  146. @pytest.mark.level1
  147. @pytest.mark.platform_x86_gpu_training
  148. @pytest.mark.env_onecard
  149. def test_axis21_int16():
  150. axis21(np.int16)
  151. @pytest.mark.level0
  152. @pytest.mark.platform_x86_gpu_training
  153. @pytest.mark.env_onecard
  154. def test_axis21_uint8():
  155. axis21(np.uint8)
  156. @pytest.mark.level1
  157. @pytest.mark.platform_x86_gpu_training
  158. @pytest.mark.env_onecard
  159. def test_axis21_bool():
  160. axis21(np.bool)
  161. class Concat3INet(nn.Cell):
  162. def __init__(self):
  163. super(Concat3INet, self).__init__()
  164. self.cat = P.Concat(axis=1)
  165. def construct(self, x1, x2, x3):
  166. return self.cat((x1, x2, x3))
  167. def concat_3i(nptype):
  168. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  169. cat = Concat3INet()
  170. x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
  171. x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
  172. x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
  173. output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)
  174. x1_ms = Tensor(x1_np)
  175. x2_ms = Tensor(x2_np)
  176. x3_ms = Tensor(x3_np)
  177. output_ms = cat(x1_ms, x2_ms, x3_ms)
  178. error = np.ones(shape=output_np.shape) * 10e-6
  179. diff = output_ms.asnumpy() - output_np
  180. assert np.all(diff < error)
  181. @pytest.mark.level0
  182. @pytest.mark.platform_x86_gpu_training
  183. @pytest.mark.env_onecard
  184. def test_concat_3i_float64():
  185. concat_3i(np.float64)
  186. @pytest.mark.level0
  187. @pytest.mark.platform_x86_gpu_training
  188. @pytest.mark.env_onecard
  189. def test_concat_3i_float32():
  190. concat_3i(np.float32)
  191. @pytest.mark.level1
  192. @pytest.mark.platform_x86_gpu_training
  193. @pytest.mark.env_onecard
  194. def test_concat_3i_int16():
  195. concat_3i(np.int16)
  196. @pytest.mark.level1
  197. @pytest.mark.platform_x86_gpu_training
  198. @pytest.mark.env_onecard
  199. def test_concat_3i_uint8():
  200. concat_3i(np.uint8)
  201. @pytest.mark.level1
  202. @pytest.mark.platform_x86_gpu_training
  203. @pytest.mark.env_onecard
  204. def test_concat_3i_bool():
  205. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  206. cat = Concat3INet()
  207. x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
  208. x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
  209. x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
  210. output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)
  211. x1_ms = Tensor(x1_np)
  212. x2_ms = Tensor(x2_np)
  213. x3_ms = Tensor(x3_np)
  214. output_ms = cat(x1_ms, x2_ms, x3_ms)
  215. assert (output_ms.asnumpy() == output_np).all()
  216. class Concat4INet(nn.Cell):
  217. def __init__(self):
  218. super(Concat4INet, self).__init__()
  219. self.cat = P.Concat(axis=1)
  220. def construct(self, x1, x2, x3, x4):
  221. return self.cat((x1, x2, x3, x4))
  222. def concat_4i(nptype):
  223. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  224. cat = Concat4INet()
  225. x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
  226. x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
  227. x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
  228. x4_np = np.random.randn(32, 5, 224, 224).astype(nptype)
  229. output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)
  230. x1_ms = Tensor(x1_np)
  231. x2_ms = Tensor(x2_np)
  232. x3_ms = Tensor(x3_np)
  233. x4_ms = Tensor(x4_np)
  234. output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)
  235. error = np.ones(shape=output_np.shape) * 10e-6
  236. diff = output_ms.asnumpy() - output_np
  237. assert np.all(diff < error)
  238. @pytest.mark.level0
  239. @pytest.mark.platform_x86_gpu_training
  240. @pytest.mark.env_onecard
  241. def test_concat_4i_float64():
  242. concat_4i(np.float64)
  243. @pytest.mark.level0
  244. @pytest.mark.platform_x86_gpu_training
  245. @pytest.mark.env_onecard
  246. def test_concat_4i_float32():
  247. concat_4i(np.float32)
  248. @pytest.mark.level1
  249. @pytest.mark.platform_x86_gpu_training
  250. @pytest.mark.env_onecard
  251. def test_concat_4i_int16():
  252. concat_4i(np.int16)
  253. @pytest.mark.level1
  254. @pytest.mark.platform_x86_gpu_training
  255. @pytest.mark.env_onecard
  256. def test_concat_4i_uint8():
  257. concat_4i(np.uint8)
  258. @pytest.mark.level1
  259. @pytest.mark.platform_x86_gpu_training
  260. @pytest.mark.env_onecard
  261. def test_concat_4i_bool():
  262. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  263. cat = Concat4INet()
  264. x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
  265. x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
  266. x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
  267. x4_np = np.random.choice([True, False], (32, 5, 224, 224)).astype(np.bool)
  268. output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)
  269. x1_ms = Tensor(x1_np)
  270. x2_ms = Tensor(x2_np)
  271. x3_ms = Tensor(x3_np)
  272. x4_ms = Tensor(x4_np)
  273. output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)
  274. assert (output_ms.asnumpy() == output_np).all()