You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_concatv2_op.py 9.7 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import pytest
  17. import mindspore.context as context
  18. import mindspore.nn as nn
  19. from mindspore import Tensor
  20. from mindspore.common.api import ms_function
  21. from mindspore.common.initializer import initializer
  22. from mindspore.common.parameter import Parameter
  23. from mindspore.ops import operations as P
  24. class ConcatV32(nn.Cell):
  25. def __init__(self, nptype):
  26. super(ConcatV32, self).__init__()
  27. self.cat = P.Concat(axis=2)
  28. self.x1 = Parameter(initializer(
  29. Tensor(np.arange(2 * 2 * 1).reshape(2, 2, 1).astype(nptype)), [2, 2, 1]), name='x1')
  30. self.x2 = Parameter(initializer(
  31. Tensor(np.arange(2 * 2 * 2).reshape(2, 2, 2).astype(nptype)), [2, 2, 2]), name='x2')
  32. @ms_function
  33. def construct(self):
  34. return self.cat((self.x1, self.x2))
  35. def axis32(nptype):
  36. context.set_context(device_target='GPU')
  37. cat = ConcatV32(nptype)
  38. output = cat()
  39. expect = np.array([[[0., 0., 1.],
  40. [1., 2., 3.]],
  41. [[2., 4., 5.],
  42. [3., 6., 7.]]]).astype(nptype)
  43. assert (output.asnumpy() == expect).all()
  44. @pytest.mark.level0
  45. @pytest.mark.platform_x86_gpu_training
  46. @pytest.mark.env_onecard
  47. def test_axis32_float64():
  48. axis32(np.float64)
  49. @pytest.mark.level0
  50. @pytest.mark.platform_x86_gpu_training
  51. @pytest.mark.env_onecard
  52. def test_axis32_float32():
  53. axis32(np.float32)
  54. @pytest.mark.level0
  55. @pytest.mark.platform_x86_gpu_training
  56. @pytest.mark.env_onecard
  57. def test_axis32_int16():
  58. axis32(np.int16)
  59. @pytest.mark.level0
  60. @pytest.mark.platform_x86_gpu_training
  61. @pytest.mark.env_onecard
  62. def test_axis32_uint8():
  63. axis32(np.uint8)
  64. @pytest.mark.level0
  65. @pytest.mark.platform_x86_gpu_training
  66. @pytest.mark.env_onecard
  67. def test_axis32_bool():
  68. axis32(np.bool)
  69. class ConcatV43(nn.Cell):
  70. def __init__(self, nptype):
  71. super(ConcatV43, self).__init__()
  72. self.cat = P.Concat(axis=3)
  73. self.x1 = Parameter(initializer(
  74. Tensor(np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype)), [2, 2, 2, 2]), name='x1')
  75. self.x2 = Parameter(initializer(
  76. Tensor(np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3).astype(nptype)), [2, 2, 2, 3]), name='x2')
  77. @ms_function
  78. def construct(self):
  79. return self.cat((self.x1, self.x2))
  80. def axis43(nptype):
  81. context.set_context(device_target='GPU')
  82. cat = ConcatV43(nptype)
  83. output = cat()
  84. expect = np.array([[[[0., 1., 0., 1., 2.],
  85. [2., 3., 3., 4., 5.]],
  86. [[4., 5., 6., 7., 8.],
  87. [6., 7., 9., 10., 11.]]],
  88. [[[8., 9., 12., 13., 14.],
  89. [10., 11., 15., 16., 17.]],
  90. [[12., 13., 18., 19., 20.],
  91. [14., 15., 21., 22., 23.]]]]).astype(nptype)
  92. assert (output.asnumpy() == expect).all()
  93. @pytest.mark.level0
  94. @pytest.mark.platform_x86_gpu_training
  95. @pytest.mark.env_onecard
  96. def test_axis43_float64():
  97. axis43(np.float64)
  98. @pytest.mark.level0
  99. @pytest.mark.platform_x86_gpu_training
  100. @pytest.mark.env_onecard
  101. def test_axis43_float32():
  102. axis43(np.float32)
  103. @pytest.mark.level0
  104. @pytest.mark.platform_x86_gpu_training
  105. @pytest.mark.env_onecard
  106. def test_axis43_int16():
  107. axis43(np.int16)
  108. @pytest.mark.level0
  109. @pytest.mark.platform_x86_gpu_training
  110. @pytest.mark.env_onecard
  111. def test_axis43_uint8():
  112. axis43(np.uint8)
  113. @pytest.mark.level0
  114. @pytest.mark.platform_x86_gpu_training
  115. @pytest.mark.env_onecard
  116. def test_axis43_bool():
  117. axis43(np.bool)
  118. class ConcatV21(nn.Cell):
  119. def __init__(self, nptype):
  120. super(ConcatV21, self).__init__()
  121. self.cat = P.Concat(axis=1)
  122. self.x1 = Parameter(initializer(
  123. Tensor(np.arange(2 * 2).reshape(2, 2).astype(nptype)), [2, 2]), name='x1')
  124. self.x2 = Parameter(initializer(
  125. Tensor(np.arange(2 * 3).reshape(2, 3).astype(nptype)), [2, 3]), name='x2')
  126. @ms_function
  127. def construct(self):
  128. return self.cat((self.x1, self.x2))
  129. def axis21(nptype):
  130. cat = ConcatV21(nptype)
  131. output = cat()
  132. expect = np.array([[0., 1., 0., 1., 2.],
  133. [2., 3., 3., 4., 5.]]).astype(nptype)
  134. assert (output.asnumpy() == expect).all()
  135. @pytest.mark.level0
  136. @pytest.mark.platform_x86_gpu_training
  137. @pytest.mark.env_onecard
  138. def test_axis21_float64():
  139. axis21(np.float64)
  140. @pytest.mark.level0
  141. @pytest.mark.platform_x86_gpu_training
  142. @pytest.mark.env_onecard
  143. def test_axis21_float32():
  144. axis21(np.float32)
  145. @pytest.mark.level0
  146. @pytest.mark.platform_x86_gpu_training
  147. @pytest.mark.env_onecard
  148. def test_axis21_int16():
  149. axis21(np.int16)
  150. @pytest.mark.level0
  151. @pytest.mark.platform_x86_gpu_training
  152. @pytest.mark.env_onecard
  153. def test_axis21_uint8():
  154. axis21(np.uint8)
  155. @pytest.mark.level0
  156. @pytest.mark.platform_x86_gpu_training
  157. @pytest.mark.env_onecard
  158. def test_axis21_bool():
  159. axis21(np.bool)
  160. class Concat3INet(nn.Cell):
  161. def __init__(self):
  162. super(Concat3INet, self).__init__()
  163. self.cat = P.Concat(axis=1)
  164. def construct(self, x1, x2, x3):
  165. return self.cat((x1, x2, x3))
  166. def concat_3i(nptype):
  167. cat = Concat3INet()
  168. x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
  169. x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
  170. x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
  171. output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)
  172. x1_ms = Tensor(x1_np)
  173. x2_ms = Tensor(x2_np)
  174. x3_ms = Tensor(x3_np)
  175. output_ms = cat(x1_ms, x2_ms, x3_ms)
  176. error = np.ones(shape=output_np.shape) * 10e-6
  177. diff = output_ms.asnumpy() - output_np
  178. assert np.all(diff < error)
  179. @pytest.mark.level0
  180. @pytest.mark.platform_x86_gpu_training
  181. @pytest.mark.env_onecard
  182. def test_concat_3i_float64():
  183. concat_3i(np.float64)
  184. @pytest.mark.level0
  185. @pytest.mark.platform_x86_gpu_training
  186. @pytest.mark.env_onecard
  187. def test_concat_3i_float32():
  188. concat_3i(np.float32)
  189. @pytest.mark.level0
  190. @pytest.mark.platform_x86_gpu_training
  191. @pytest.mark.env_onecard
  192. def test_concat_3i_int16():
  193. concat_3i(np.int16)
  194. @pytest.mark.level0
  195. @pytest.mark.platform_x86_gpu_training
  196. @pytest.mark.env_onecard
  197. def test_concat_3i_uint8():
  198. concat_3i(np.uint8)
  199. @pytest.mark.level0
  200. @pytest.mark.platform_x86_gpu_training
  201. @pytest.mark.env_onecard
  202. def test_concat_3i_bool():
  203. cat = Concat3INet()
  204. x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
  205. x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
  206. x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
  207. output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)
  208. x1_ms = Tensor(x1_np)
  209. x2_ms = Tensor(x2_np)
  210. x3_ms = Tensor(x3_np)
  211. output_ms = cat(x1_ms, x2_ms, x3_ms)
  212. assert (output_ms.asnumpy() == output_np).all()
  213. class Concat4INet(nn.Cell):
  214. def __init__(self):
  215. super(Concat4INet, self).__init__()
  216. self.cat = P.Concat(axis=1)
  217. def construct(self, x1, x2, x3, x4):
  218. return self.cat((x1, x2, x3, x4))
  219. def concat_4i(nptype):
  220. cat = Concat4INet()
  221. x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
  222. x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
  223. x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
  224. x4_np = np.random.randn(32, 5, 224, 224).astype(nptype)
  225. output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)
  226. x1_ms = Tensor(x1_np)
  227. x2_ms = Tensor(x2_np)
  228. x3_ms = Tensor(x3_np)
  229. x4_ms = Tensor(x4_np)
  230. output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)
  231. error = np.ones(shape=output_np.shape) * 10e-6
  232. diff = output_ms.asnumpy() - output_np
  233. assert np.all(diff < error)
  234. @pytest.mark.level0
  235. @pytest.mark.platform_x86_gpu_training
  236. @pytest.mark.env_onecard
  237. def test_concat_4i_float64():
  238. concat_4i(np.float64)
  239. @pytest.mark.level0
  240. @pytest.mark.platform_x86_gpu_training
  241. @pytest.mark.env_onecard
  242. def test_concat_4i_float32():
  243. concat_4i(np.float32)
  244. @pytest.mark.level0
  245. @pytest.mark.platform_x86_gpu_training
  246. @pytest.mark.env_onecard
  247. def test_concat_4i_int16():
  248. concat_4i(np.int16)
  249. @pytest.mark.level0
  250. @pytest.mark.platform_x86_gpu_training
  251. @pytest.mark.env_onecard
  252. def test_concat_4i_uint8():
  253. concat_4i(np.uint8)
  254. @pytest.mark.level0
  255. @pytest.mark.platform_x86_gpu_training
  256. @pytest.mark.env_onecard
  257. def test_concat_4i_bool():
  258. cat = Concat4INet()
  259. x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool)
  260. x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool)
  261. x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool)
  262. x4_np = np.random.choice([True, False], (32, 5, 224, 224)).astype(np.bool)
  263. output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)
  264. x1_ms = Tensor(x1_np)
  265. x2_ms = Tensor(x2_np)
  266. x3_ms = Tensor(x3_np)
  267. x4_ms = Tensor(x4_np)
  268. output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)
  269. assert (output_ms.asnumpy() == output_np).all()