You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_tensor_getitem.py 45 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """ test_tensor_slice """
  16. import numpy as np
  17. import pytest
  18. from mindspore import Tensor
  19. from mindspore import Parameter
  20. from mindspore import context
  21. from mindspore import dtype as mstype
  22. from mindspore.nn import Cell
  23. from mindspore.common.parameter import ParameterTuple
  24. from mindspore.ops import composite as C
# Gradient helper: differentiates w.r.t. a ParameterTuple and takes an
# explicit sens (output-gradient) input.  Declared here for the whole module.
grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)


def setup_module():
    """pytest module-level setup hook: run every test in PyNative mode."""
    context.set_context(mode=context.PYNATIVE_MODE)
class NetWorkSlicePositive(Cell):
    """Slice a 3-D tensor with positive start/stop/step and add a constant.

    The constants' shapes match the slice results for a (6, 8, 10) input
    (see test_slice_positive).
    """

    def __init__(self):
        super(NetWorkSlicePositive, self).__init__()
        self.tensor_ret0 = Tensor(np.ones([1, 2, 3], np.int32))
        self.tensor_ret1 = Tensor(np.ones([4, 8, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))
        self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))

    def construct(self, tensor):
        # Explicit start:stop:step on every axis.
        ret0 = tensor[3:4:1, 1:5:2, 3:6:1] + self.tensor_ret0
        # Negative start that still resolves to a forward slice.
        ret1 = tensor[-6:4:1, 0:8:1, ::1] + self.tensor_ret1
        # Full-range slices (identity).
        ret2 = tensor[::, ::, ::] + self.tensor_ret2
        # Step-only slice on the first axis.
        ret3 = tensor[::2] + self.tensor_ret3
        return ret0, ret1, ret2, ret3
  41. @pytest.mark.level1
  42. @pytest.mark.platform_arm_ascend_training
  43. @pytest.mark.platform_x86_ascend_training
  44. @pytest.mark.platform_x86_gpu_training
  45. @pytest.mark.env_onecard
  46. def test_slice_positive():
  47. net = NetWorkSlicePositive()
  48. input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
  49. input_0 = Tensor(input_np)
  50. output0, output1, output2, output3 = net(input_0)
  51. assert np.all(output0.asnumpy() == input_np[3:4:1, 1:5:2, 3:6:1] + np.ones([1, 2, 3]))
  52. assert np.all(output1.asnumpy() == input_np[-6:4:1, 0:8:1, ::1] + np.ones([4, 8, 10]))
  53. assert np.all(output2.asnumpy() == input_np[::, ::, ::] + np.ones([6, 8, 10]))
  54. assert np.all(output3.asnumpy() == input_np[::2] + np.ones([3, 8, 10]))
class NetWorkSliceEllipsis(Cell):
    """Index a 4-D tensor with ellipsis, None and a bool scalar.

    Constant shapes match a (6, 7, 8, 9) input (see test_slice_ellipsis).
    """

    def __init__(self):
        super(NetWorkSliceEllipsis, self).__init__()
        self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))
        self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))
        self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))

    def construct(self, tensor):
        # Ellipsis expands to full slices of the middle axes.
        ret0 = tensor[0:4:2, ..., 1] + self.tensor_ret0
        # Bare ellipsis is the identity index.
        ret1 = tensor[...] + self.tensor_ret1
        # None and True both prepend a new leading axis of size 1.
        ret2 = tensor[None] + self.tensor_ret2
        ret3 = tensor[True] + self.tensor_ret2
        return ret0, ret1, ret2, ret3
  67. @pytest.mark.level1
  68. @pytest.mark.platform_arm_ascend_training
  69. @pytest.mark.platform_x86_ascend_training
  70. @pytest.mark.platform_x86_gpu_training
  71. @pytest.mark.env_onecard
  72. def test_slice_ellipsis():
  73. net = NetWorkSliceEllipsis()
  74. input_np = np.arange(6*7*8*9).reshape(6, 7, 8, 9).astype(np.int32)
  75. input_0 = Tensor(input_np)
  76. output0, output1, output2, output3 = net(input_0)
  77. assert np.all(output0.asnumpy() == input_np[0:4:2, ..., 1] + np.ones([2, 7, 8]))
  78. assert np.all(output1.asnumpy() == input_np[...] + np.ones([6, 7, 8, 9]))
  79. assert np.all(output2.asnumpy() == input_np[None] + np.ones([6, 7, 8, 9]))
  80. assert np.all(output3.asnumpy() == input_np[True] + np.ones([1, 6, 7, 8, 9]))
class NetWorkReduceDimension(Cell):
    """Mix integer indices with slices so that axes are reduced away.

    Constant shapes match a (6, 8, 10) input (see test_reduce_dimension).
    """

    def __init__(self):
        super(NetWorkReduceDimension, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([3, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))
        # Scalar constant for the fully-indexed (0-D) case.
        self.tensor_ret3 = Tensor(np.array(8, np.int32))
        self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))

    def construct(self, tensor):
        # int in the middle axis drops that axis.
        ret1 = tensor[::2, 1, ::1] + self.tensor_ret1
        # int in the last axis drops the last axis.
        ret2 = tensor[::, ::, 0] + self.tensor_ret2
        # All axes indexed -> scalar.
        ret3 = tensor[3, 2, 5] + self.tensor_ret3
        # Single leading int drops the first axis.
        ret4 = tensor[1] + self.tensor_ret4
        return ret1, ret2, ret3, ret4
  94. @pytest.mark.level1
  95. @pytest.mark.platform_arm_ascend_training
  96. @pytest.mark.platform_x86_ascend_training
  97. @pytest.mark.platform_x86_gpu_training
  98. @pytest.mark.env_onecard
  99. def test_reduce_dimension():
  100. net = NetWorkReduceDimension()
  101. input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
  102. input_0 = Tensor(input_np)
  103. output1, output2, output3, output4 = net(input_0)
  104. assert np.all(output1.asnumpy() == input_np[::2, 1, ::1] + np.ones([3, 10]))
  105. assert np.all(output2.asnumpy() == input_np[::, ::, 0] + np.ones([6, 8]))
  106. assert np.all(output3.asnumpy() == input_np[3, 2, 5] + np.array(8, np.int32))
  107. assert np.all(output4.asnumpy() == input_np[1] + np.ones([8, 10]))
# NOTE(review): these pytest marks decorate a Cell subclass, not a test
# function — pytest ignores marks on non-test classes; confirm intended.
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
class NetWorkSliceStep(Cell):
    """Slice a 3-D tensor with negative and strided steps.

    Constant shapes match a (6, 8, 10) input (see test_step_negative).
    """

    def __init__(self):
        super(NetWorkSliceStep, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([6, 5, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([3, 5, 5], np.int32))

    def construct(self, tensor):
        # Negative step reverses the last axis.
        ret1 = tensor[::1, -5::, ::-1] + self.tensor_ret1
        # Stride 2 on the first and last axes.
        ret2 = tensor[::2, -5::, ::2] + self.tensor_ret2
        return ret1, ret2
  122. @pytest.mark.level0
  123. # ascend op stridedslice has bug, and has not been fixed.
  124. @pytest.mark.platform_x86_gpu_training
  125. @pytest.mark.env_onecard
  126. def test_step_negative():
  127. net = NetWorkSliceStep()
  128. input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
  129. input_0 = Tensor(input_np)
  130. output1, output2 = net(input_0)
  131. assert np.all(output1.asnumpy() == input_np[::1, -5::, ::-1] + np.ones([6, 5, 10]))
  132. assert np.all(output2.asnumpy() == input_np[::2, -5::, ::2] + np.ones([3, 5, 5]))
class TensorGetItemByThreeTensors(Cell):
    """Fancy-index a tensor with one, two and three index tensors.

    Constant shapes match the broadcast index shapes used in
    test_getitem_by_tensors: index_0 (3,4,5), index_1 (4,5), index_2 (5,3,4,5).
    """

    def __init__(self):
        super(TensorGetItemByThreeTensors, self).__init__()
        self.const0 = Tensor(np.ones((4, 5, 8, 10)), mstype.int32)
        self.const1 = Tensor(np.ones((3, 4, 5, 10)), mstype.int32)
        self.const2 = Tensor(np.ones((5, 3, 4, 5)), mstype.int32)

    def construct(self, x, index_0, index_1, index_2):
        # One index tensor: remaining axes are kept.
        ret0 = x[index_0] + self.const0
        # Two index tensors broadcast together.
        ret1 = x[index_0, index_1] + self.const1
        # Three index tensors fully index the input.
        ret2 = x[index_0, index_1, index_2] + self.const2
        return ret0, ret1, ret2
  144. @pytest.mark.level1
  145. @pytest.mark.platform_arm_ascend_training
  146. @pytest.mark.platform_x86_ascend_training
  147. @pytest.mark.platform_x86_gpu_training
  148. @pytest.mark.env_onecard
  149. def test_getitem_by_tensors():
  150. """This testcase may encounter a sync stream error occasionally"""
  151. net = TensorGetItemByThreeTensors()
  152. input_x = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)
  153. index_0 = np.random.randint(6, size=(3, 4, 5)).astype(np.int32)
  154. index_1 = np.random.randint(6, size=(4, 5)).astype(np.int32)
  155. index_2 = np.random.randint(6, size=(5, 3, 4, 5)).astype(np.int32)
  156. input_x_ms = Tensor(input_x)
  157. index_0_ms = Tensor(index_0)
  158. index_1_ms = Tensor(index_1)
  159. input_2_ms = Tensor(index_2)
  160. output0, output1, output2 = net(input_x_ms, index_0_ms, index_1_ms, input_2_ms)
  161. assert np.all(output0.asnumpy() == input_x[index_0] + np.ones([4, 5, 8, 10]))
  162. assert np.all(output1.asnumpy() == input_x[index_0, index_1] + np.ones([3, 4, 5, 10]))
  163. assert np.all(output2.asnumpy() == input_x[index_0, index_1, index_2] + np.ones([5, 3, 4, 5]))
class TensorGetItemByMixedTensorsBasicCase(Cell):
    """Getitem combining index tensors with slices, ints and ellipsis.

    c0..c5 are the constants added to each of the six indexing results;
    their shapes must match those results (see test_getitem_by_mixed_tensors).
    """

    def __init__(self, c0, c1, c2, c3, c4, c5):
        super(TensorGetItemByMixedTensorsBasicCase, self).__init__()
        self.const0 = Tensor(c0)
        self.const1 = Tensor(c1)
        self.const2 = Tensor(c2)
        self.const3 = Tensor(c3)
        self.const4 = Tensor(c4)
        self.const5 = Tensor(c5)

    def construct(self, tensor, index_0, index_1):
        ret0 = tensor[index_0, index_1, 0:3] + self.const0  # tensors + slice
        ret1 = tensor[0:3, index_0, ...] + self.const1      # slice + tensor + ellipsis
        ret2 = tensor[0, index_0, index_1] + self.const2    # int + tensors
        ret3 = tensor[..., index_0, 0:3] + self.const3      # ellipsis + tensor + slice
        ret4 = tensor[0:2, index_0, index_1] + self.const4  # slice + tensors
        ret5 = tensor[..., index_0, index_1] + self.const5  # ellipsis + tensors
        return ret0, ret1, ret2, ret3, ret4, ret5
  181. @pytest.mark.level1
  182. @pytest.mark.platform_arm_ascend_training
  183. @pytest.mark.platform_x86_ascend_training
  184. @pytest.mark.platform_x86_gpu_training
  185. @pytest.mark.env_onecard
  186. def test_getitem_by_mixed_tensors():
  187. const0 = np.ones((3, 4, 5, 3), np.float32)
  188. const1 = np.ones((3, 3, 4, 5, 5), np.float32)
  189. const2 = np.ones((3, 4, 5), np.float32)
  190. const3 = np.ones((3, 3, 4, 5, 3), np.float32)
  191. const4 = np.ones((2, 3, 4, 5), np.float32)
  192. const5 = np.ones((3, 3, 4, 5), np.float32)
  193. net = TensorGetItemByMixedTensorsBasicCase(const0, const1, const2, const3, const4, const5)
  194. input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
  195. input_ms = Tensor(input_np, mstype.float32)
  196. index_np_0 = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
  197. index_np_1 = np.random.randint(4, size=(4, 5)).astype(np.int32)
  198. index_0 = Tensor(index_np_0, mstype.int32)
  199. index_1 = Tensor(index_np_1, mstype.int32)
  200. out0, out1, out2, out3, out4, out5 = net(input_ms, index_0, index_1)
  201. assert np.all(out0.asnumpy() == (input_np[index_np_0, index_np_1, 0:3] + const0))
  202. assert np.all(out1.asnumpy() == (input_np[0:3, index_np_0, ...] + const1))
  203. assert np.all(out2.asnumpy() == (input_np[0, index_np_0, index_np_1] + const2))
  204. assert np.all(out3.asnumpy() == (input_np[..., index_np_0, 0:3] + const3))
  205. assert np.all(out4.asnumpy() == (input_np[0:2, index_np_0, index_np_1] + const4))
  206. assert np.all(out5.asnumpy() == (input_np[..., index_np_0, index_np_1] + const5))
class TensorItemByNone(Cell):
    """Call Tensor.item() with no index (valid only for 1-element tensors)."""

    def construct(self, tensor):
        ret = tensor.item()
        return ret
  211. @pytest.mark.level1
  212. @pytest.mark.platform_arm_ascend_training
  213. @pytest.mark.platform_x86_ascend_training
  214. @pytest.mark.platform_x86_gpu_training
  215. @pytest.mark.env_onecard
  216. def test_item_by_none():
  217. net = TensorItemByNone()
  218. input_1d_np = np.ndarray([1]).astype(np.float32)
  219. input_1d_ms = Tensor(input_1d_np, mstype.float32)
  220. input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
  221. input_3d_ms = Tensor(input_3d_np, mstype.float32)
  222. output_ms = net(input_1d_ms)
  223. assert np.all(output_ms.asnumpy() == input_1d_np.item())
  224. with pytest.raises(ValueError):
  225. net(input_3d_ms)
class TensorItemByItem(Cell):
    """Call Tensor.item(index) with a caller-supplied flat or tuple index."""

    def construct(self, tensor, index):
        ret = tensor.item(index)
        return ret
  230. @pytest.mark.level1
  231. @pytest.mark.platform_arm_ascend_training
  232. @pytest.mark.platform_x86_ascend_training
  233. @pytest.mark.platform_x86_gpu_training
  234. @pytest.mark.env_onecard
  235. def test_item_by_int():
  236. net = TensorItemByItem()
  237. input_1d_np = np.ndarray([1]).astype(np.float32)
  238. input_1d_ms = Tensor(input_1d_np, mstype.float32)
  239. input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
  240. input_3d_ms = Tensor(input_3d_np, mstype.float32)
  241. index_np_1, index_np_2, index_np_3, index_np_4 = 0, 1.0, 30, 60
  242. output_1d_ms = net(input_1d_ms, index_np_1)
  243. output_3d_ms_1 = net(input_3d_ms, index_np_1)
  244. output_3d_ms_2 = net(input_3d_ms, index_np_3)
  245. assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
  246. assert np.all(output_3d_ms_1.asnumpy() == input_3d_np.item(index_np_1))
  247. assert np.all(output_3d_ms_2.asnumpy() == input_3d_np.item(index_np_3))
  248. with pytest.raises(TypeError):
  249. net(input_1d_ms, index_np_2)
  250. with pytest.raises(IndexError):
  251. net(input_1d_ms, index_np_3)
  252. with pytest.raises(TypeError):
  253. net(input_3d_ms, index_np_2)
  254. with pytest.raises(IndexError):
  255. net(input_3d_ms, index_np_4)
  256. @pytest.mark.level1
  257. @pytest.mark.platform_arm_ascend_training
  258. @pytest.mark.platform_x86_ascend_training
  259. @pytest.mark.platform_x86_gpu_training
  260. @pytest.mark.env_onecard
  261. def test_item_by_tuple():
  262. net = TensorItemByItem()
  263. input_1d_np = np.ndarray([1]).astype(np.float32)
  264. input_1d_ms = Tensor(input_1d_np, mstype.float32)
  265. input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
  266. input_3d_ms = Tensor(input_3d_np, mstype.float32)
  267. index_np_1 = (0,)
  268. index_np_2 = (1, 2)
  269. index_np_3 = (1, 2, 3)
  270. index_np_4 = (3, 4, 4)
  271. index_np_5 = (1, 2, 3, 4)
  272. output_1d_ms = net(input_1d_ms, index_np_1)
  273. output_3d_ms = net(input_3d_ms, index_np_3)
  274. assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
  275. assert np.all(output_3d_ms.asnumpy() == input_3d_np.item(index_np_3))
  276. with pytest.raises(ValueError):
  277. net(input_1d_ms, index_np_2)
  278. with pytest.raises(ValueError):
  279. net(input_3d_ms, index_np_2)
  280. with pytest.raises(IndexError):
  281. net(input_3d_ms, index_np_4)
  282. with pytest.raises(ValueError):
  283. net(input_3d_ms, index_np_5)
class TensorSetItemByMixedTensors_0(Cell):
    """Setitem on a Parameter via slice + two index tensors, then add a const.

    Note: index_2 is accepted but unused by the indexing expression.
    """

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_0, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)),
                                      mstype.float32),
                               name="x")
        # Scalar fill value written into the indexed region.
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[0:2, index_0, index_1] = self.value
        ret = self.param + self.const
        return ret
  296. @pytest.mark.level1
  297. @pytest.mark.platform_arm_ascend_training
  298. @pytest.mark.platform_x86_ascend_training
  299. @pytest.mark.platform_x86_gpu_training
  300. @pytest.mark.env_onecard
  301. def test_setitem_by_mixed_tensors_0():
  302. value = 88.0
  303. net = TensorSetItemByMixedTensors_0(value)
  304. index_0 = np.random.randint(3, size=(3, 4, 5))
  305. index_1 = np.random.randint(4, size=(4, 5))
  306. index_2 = np.random.randint(3, size=(2, 1, 4, 5))
  307. index_0_ms = Tensor(index_0, mstype.int32)
  308. index_1_ms = Tensor(index_1, mstype.int32)
  309. index_2_ms = Tensor(index_2, mstype.int32)
  310. input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
  311. const = np.ones((3, 4, 5), np.float32)
  312. out = net(index_0_ms, index_1_ms, index_2_ms)
  313. input_np[0:2, index_0, index_1] = value
  314. assert np.all(out.asnumpy() == (input_np + const))
# NOTE(review): these pytest marks decorate a Cell subclass, not a test
# function — pytest ignores marks on non-test classes; confirm intended.
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
class TensorSetItemByMixedTensors_1(Cell):
    """Setitem on a Parameter via slice + index tensor + ellipsis.

    Note: index_1 and index_2 are accepted but unused by the indexing
    expression.
    """

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_1, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float32),
                               name="x")
        # Scalar fill value written into the indexed region.
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[0:2, index_0, ...] = self.value
        ret = self.param + self.const
        return ret
  331. @pytest.mark.level1
  332. @pytest.mark.platform_arm_ascend_training
  333. @pytest.mark.platform_x86_ascend_training
  334. @pytest.mark.platform_x86_gpu_training
  335. @pytest.mark.env_onecard
  336. def test_setitem_by_mixed_tensors_1():
  337. value = 88.0
  338. net = TensorSetItemByMixedTensors_1(value)
  339. index_0 = np.random.randint(3, size=(3, 4, 5))
  340. index_1 = np.random.randint(4, size=(4, 5))
  341. index_2 = np.random.randint(3, size=(2, 1, 4, 5))
  342. index_0_ms = Tensor(index_0, mstype.int32)
  343. index_1_ms = Tensor(index_1, mstype.int32)
  344. index_2_ms = Tensor(index_2, mstype.int32)
  345. input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
  346. const = np.ones((3, 4, 5), np.float32)
  347. out = net(index_0_ms, index_1_ms, index_2_ms)
  348. input_np[0:2, index_0, ...] = value
  349. assert np.all(out.asnumpy() == (input_np + const))
# NOTE(review): these pytest marks decorate a Cell subclass, not a test
# function — pytest ignores marks on non-test classes; confirm intended.
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
class TensorSetItemByMixedTensors_2(Cell):
    """Setitem on a float16 Parameter via ellipsis + index tensor + int.

    Note: index_1 and index_2 are accepted but unused by the indexing
    expression.  This variant exercises float16 storage (the others use
    float32).
    """

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_2, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float16))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float16),
                               name="x")
        # Scalar fill value written into the indexed region.
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[..., index_0, 1] = self.value
        ret = self.param + self.const
        return ret
  366. @pytest.mark.level1
  367. @pytest.mark.platform_arm_ascend_training
  368. @pytest.mark.platform_x86_ascend_training
  369. @pytest.mark.platform_x86_gpu_training
  370. @pytest.mark.env_onecard
  371. def test_setitem_by_mixed_tensors_2():
  372. value = 88.0
  373. net = TensorSetItemByMixedTensors_2(value)
  374. index_0 = np.random.randint(3, size=(3, 4, 5))
  375. index_1 = np.random.randint(4, size=(4, 5))
  376. index_2 = np.random.randint(3, size=(2, 1, 4, 5))
  377. index_0_ms = Tensor(index_0, mstype.int32)
  378. index_1_ms = Tensor(index_1, mstype.int32)
  379. index_2_ms = Tensor(index_2, mstype.int32)
  380. input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
  381. const = np.ones((3, 4, 5), np.float32)
  382. out = net(index_0_ms, index_1_ms, index_2_ms)
  383. input_np[..., index_0, 1] = value
  384. assert np.all(out.asnumpy() == (input_np + const))
class TensorGetItemByMixedTensorsIndexError(Cell):
    """Getitem with a list in the index tuple — expected to raise IndexError."""

    def construct(self, x, index_0, index_1):
        ret = x[index_0, index_1, 0:3, ..., 0:5, [1, 2, 3, 4]]
        return ret
  389. @pytest.mark.level1
  390. @pytest.mark.platform_arm_ascend_training
  391. @pytest.mark.platform_x86_ascend_training
  392. @pytest.mark.platform_x86_gpu_training
  393. @pytest.mark.env_onecard
  394. def test_getitem_by_mixed_tensor_exception():
  395. input_ms = Tensor(np.arange(3 * 4 * 5 * 6 * 7 * 8 * 9).reshape((3, 4, 5, 6, 7, 8, 9)), mstype.int32)
  396. index_0 = Tensor(np.random.randint(3, size=(3, 4, 5)), mstype.int32)
  397. index_1 = Tensor(np.random.randint(4, size=(3, 4, 5)), mstype.int32)
  398. net1 = TensorGetItemByMixedTensorsIndexError()
  399. with pytest.raises(IndexError):
  400. net1(input_ms, index_0, index_1)
class TensorSetItemByOneTensorWithNumber(Cell):
    """Setitem on a (6, 7, 8) Parameter with one index tensor and a scalar."""

    def __init__(self, value):
        super(TensorSetItemByOneTensorWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        # Scalar fill value written into the indexed rows.
        self.value = value

    def construct(self, index):
        self.param[index] = self.value
        ret = self.param + self.const
        return ret
  411. @pytest.mark.level1
  412. @pytest.mark.platform_arm_ascend_training
  413. @pytest.mark.platform_x86_ascend_training
  414. @pytest.mark.platform_x86_gpu_training
  415. @pytest.mark.env_onecard
  416. def test_setitem_one_tensor_with_number():
  417. value = 0.0
  418. net = TensorSetItemByOneTensorWithNumber(value)
  419. index_np = np.random.randint(4, size=(5, 4))
  420. index = Tensor(index_np, mstype.int32)
  421. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))
  422. const = np.ones((6, 7, 8)).astype(np.float32)
  423. out = net(index)
  424. input_data[index_np] = value
  425. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTensor(Cell):
    """Setitem on a (6, 7, 8) Parameter with one index tensor and a value tensor."""

    def __init__(self):
        super(TensorSetItemByOneTensorWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index, value):
        self.param[index] = value
        ret = self.param + self.const
        return ret
  435. @pytest.mark.level1
  436. @pytest.mark.platform_arm_ascend_training
  437. @pytest.mark.platform_x86_ascend_training
  438. @pytest.mark.platform_x86_gpu_training
  439. @pytest.mark.env_onecard
  440. def test_setitem_by_one_tensor_with_tensor():
  441. net = TensorSetItemByOneTensorWithTensor()
  442. index_np = np.random.randint(4, size=(5, 4))
  443. index = Tensor(index_np, mstype.int32)
  444. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))
  445. const = np.ones((6, 7, 8)).astype(np.float32)
  446. value = np.zeros((4, 7, 8)).astype(np.float32)
  447. value_ms = Tensor(value, mstype.float32)
  448. out = net(index, value_ms)
  449. input_data[index_np] = value
  450. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTupleOfNumber(Cell):
    """Setitem on a (6, 7, 8) Parameter with one index tensor and a number tuple."""

    def __init__(self, value):
        super(TensorSetItemByOneTensorWithTupleOfNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        # Tuple of scalars; broadcasts along the last axis of the target.
        self.value = value

    def construct(self, index):
        self.param[index] = self.value
        ret = self.param + self.const
        return ret
  461. @pytest.mark.level1
  462. @pytest.mark.platform_arm_ascend_training
  463. @pytest.mark.platform_x86_ascend_training
  464. @pytest.mark.platform_x86_gpu_training
  465. @pytest.mark.env_onecard
  466. def test_setitem_by_one_tensor_with_tuple_number():
  467. value = (0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7)
  468. net = TensorSetItemByOneTensorWithTupleOfNumber(value)
  469. input_np = np.random.randint(5, size=(5, 4))
  470. input_ms = Tensor(input_np, mstype.int32)
  471. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
  472. const = np.ones((6, 7, 8)).astype(np.float32)
  473. out = net(input_ms)
  474. input_data[input_np] = value
  475. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByOneTensorWithTupleOfTensor(Cell):
    """Setitem on a (6, 3, 8) Parameter with one index tensor and a tuple of tensors."""

    def __init__(self):
        super(TensorSetItemByOneTensorWithTupleOfTensor, self).__init__()
        self.const = Tensor(np.ones((6, 3, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 3 * 8).reshape((6, 3, 8)), mstype.float32), name="x")

    def construct(self, index, value_0, value_1, value_2):
        # The three (8,) values stack to a (3, 8) block per indexed row.
        self.param[index] = (value_0, value_1, value_2)
        ret = self.param + self.const
        return ret
  485. @pytest.mark.level1
  486. @pytest.mark.platform_arm_ascend_training
  487. @pytest.mark.platform_x86_ascend_training
  488. @pytest.mark.platform_x86_gpu_training
  489. @pytest.mark.env_onecard
  490. def test_setitem_by_one_tensor_with_tuple_tensors():
  491. net = TensorSetItemByOneTensorWithTupleOfTensor()
  492. input_np = np.random.randint(6, size=(5, 4)).astype(np.int32)
  493. input_ms = Tensor(input_np, mstype.int32)
  494. input_data = np.arange(6 * 3 * 8).reshape((6, 3, 8)).astype(np.float32)
  495. value_0_np = np.zeros((8,), np.float32)
  496. value_1_np = np.ones((8,), np.float32)
  497. value_2_np = np.ones((8,), np.float32)*2
  498. value_0 = Tensor(value_0_np)
  499. value_1 = Tensor(value_1_np)
  500. value_2 = Tensor(value_2_np)
  501. const = np.ones((6, 3, 8)).astype(np.float32)
  502. out = net(input_ms, value_0, value_1, value_2)
  503. input_data[input_np] = (value_0_np, value_1_np, value_2_np)
  504. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithNumber(Cell):
    """Setitem on a (6, 7, 8) Parameter with three index tensors and a scalar."""

    def __init__(self, value):
        super(TensorSetItemByTensorsWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        # Scalar fill value written at the broadcast index positions.
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[index_0, index_1, index_2] = self.value
        ret = self.param + self.const
        return ret
  515. @pytest.mark.level1
  516. @pytest.mark.platform_arm_ascend_training
  517. @pytest.mark.platform_x86_ascend_training
  518. @pytest.mark.platform_x86_gpu_training
  519. @pytest.mark.env_onecard
  520. @pytest.mark.level0
  521. def test_setitem_by_tensors_with_number():
  522. value = 0.0
  523. net = TensorSetItemByTensorsWithNumber(value)
  524. index_0 = np.random.randint(6, size=(3, 4, 5))
  525. index_1 = np.random.randint(7, size=(4, 5))
  526. index_2 = np.random.randint(8, size=(5, 3, 4, 5))
  527. index_0_ms = Tensor(index_0, mstype.int32)
  528. index_1_ms = Tensor(index_1, mstype.int32)
  529. index_2_ms = Tensor(index_2, mstype.int32)
  530. out = net(index_0_ms, index_1_ms, index_2_ms)
  531. const = np.ones((6, 7, 8)).astype(np.float32)
  532. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
  533. input_data[index_0, index_1, index_2] = value
  534. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTensor(Cell):
    """Setitem on a (6, 7, 8) Parameter with three index tensors and a value tensor."""

    def __init__(self):
        super(TensorSetItemByTensorsWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, value):
        # value broadcasts over the broadcast shape of the three indices.
        self.param[index_0, index_1, index_2] = value
        ret = self.param + self.const
        return ret
  544. @pytest.mark.level0
  545. @pytest.mark.platform_arm_ascend_training
  546. @pytest.mark.platform_x86_ascend_training
  547. @pytest.mark.platform_x86_gpu_training
  548. @pytest.mark.env_onecard
  549. def test_setitem_by_tensors_with_tensor():
  550. net = TensorSetItemByTensorsWithTensor()
  551. index_0 = np.random.randint(6, size=(3, 4, 5))
  552. index_1 = np.random.randint(7, size=(4, 5))
  553. index_2 = np.random.randint(8, size=(5, 3, 4, 5))
  554. value = np.zeros((4, 5)).astype(np.float32)
  555. index_0_ms = Tensor(index_0, mstype.int32)
  556. index_1_ms = Tensor(index_1, mstype.int32)
  557. index_2_ms = Tensor(index_2, mstype.int32)
  558. value_ms = Tensor(value, mstype.float32)
  559. out = net(index_0_ms, index_1_ms, index_2_ms, value_ms)
  560. const = np.ones((6, 7, 8)).astype(np.float32)
  561. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
  562. input_data[index_0, index_1, index_2] = value
  563. assert np.all(out.asnumpy() == (input_data + const))
class TensorSetItemByTensorsWithTensorNumberError(Cell):
    """Setitem with four index tensors on a 3-D Parameter — expected to raise IndexError."""

    def __init__(self):
        super(TensorSetItemByTensorsWithTensorNumberError, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, index_3, value):
        # Four indices on a 3-D tensor: one too many.
        self.param[index_0, index_1, index_2, index_3] = value
        ret = self.param + self.const
        return ret
  573. @pytest.mark.level1
  574. @pytest.mark.platform_arm_ascend_training
  575. @pytest.mark.platform_x86_ascend_training
  576. @pytest.mark.platform_x86_gpu_training
  577. @pytest.mark.env_onecard
  578. def test_setitem_by_tensors_with_tensor_error():
  579. index_0 = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)
  580. index_1 = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)
  581. index_2 = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)
  582. index_3 = Tensor(np.random.randint(8, size=(1, 3, 4, 5)), mstype.int32)
  583. value = Tensor(np.zeros((2, 5)), mstype.float32)
  584. net = TensorSetItemByTensorsWithTensorNumberError()
  585. with pytest.raises(IndexError):
  586. net(index_0, index_1, index_2, index_3, value)
  587. class TensorSetItemByTensorsWithTupleOfNumber(Cell):
  588. def __init__(self, value):
  589. super(TensorSetItemByTensorsWithTupleOfNumber, self).__init__()
  590. self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
  591. self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
  592. self.value = value
  593. def construct(self, index_0, index_1, index_2):
  594. self.param[index_0, index_1, index_2] = self.value
  595. ret = self.param + self.const
  596. return ret
  597. @pytest.mark.level1
  598. @pytest.mark.platform_arm_ascend_training
  599. @pytest.mark.platform_x86_ascend_training
  600. # GPU op has bug, and has not been fixed.
  601. @pytest.mark.env_onecard
  602. def test_setitem_by_tensors_with_tuple_of_number():
  603. value = (0.0, 1.1, 2.2, 3.3, 4.4)
  604. net = TensorSetItemByTensorsWithTupleOfNumber(value)
  605. index_0 = np.random.randint(6, size=(3, 4, 5))
  606. index_1 = np.random.randint(7, size=(4, 5))
  607. index_2 = np.random.randint(8, size=(5, 3, 4, 5))
  608. index_0_ms = Tensor(index_0, mstype.int32)
  609. index_1_ms = Tensor(index_1, mstype.int32)
  610. index_2_ms = Tensor(index_2, mstype.int32)
  611. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
  612. input_data[index_0, index_1, index_2] = value
  613. const = np.ones((6, 7, 8)).astype(np.float32)
  614. out = net(index_0_ms, index_1_ms, index_2_ms)
  615. assert np.all(out.asnumpy() == (input_data + const))
  616. class TensorSetItemByTensorsWithTupleOfTensor(Cell):
  617. def __init__(self):
  618. super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()
  619. self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
  620. self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
  621. def construct(self, index_0, index_1, index_2, value_0, value_1, value_2):
  622. self.param[index_0, index_1, index_2] = (value_0, value_1, value_2)
  623. ret = self.param + self.const
  624. return ret
  625. @pytest.mark.level1
  626. @pytest.mark.platform_arm_ascend_training
  627. @pytest.mark.platform_x86_ascend_training
  628. # GPU op has bug, and has not been fixed.
  629. @pytest.mark.env_onecard
  630. def test_setitem_by_tensors_with_tuple_of_tensor():
  631. value_0 = np.zeros((4, 5))
  632. value_1 = np.ones((4, 5))
  633. value_2 = np.ones((4, 5)) * 2
  634. value_0_ms = Tensor(value_0, mstype.float32)
  635. value_1_ms = Tensor(value_1, mstype.float32)
  636. value_2_ms = Tensor(value_2, mstype.float32)
  637. net = TensorSetItemByTensorsWithTupleOfTensor()
  638. index_0 = np.random.randint(6, size=(3, 4, 5))
  639. index_1 = np.random.randint(7, size=(4, 5))
  640. index_2 = np.random.randint(8, size=(5, 3, 4, 5))
  641. index_0_ms = Tensor(index_0, mstype.int32)
  642. index_1_ms = Tensor(index_1, mstype.int32)
  643. index_2_ms = Tensor(index_2, mstype.int32)
  644. input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
  645. input_data[index_0, index_1, index_2] = (value_0, value_1, value_2)
  646. const = np.ones((6, 7, 8)).astype(np.float32)
  647. out = net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms, value_2_ms)
  648. assert np.all(out.asnumpy() == (input_data + const))
  649. class TensorSetItemByTensorsWithTupleOfTensorNumberError(Cell):
  650. def __init__(self):
  651. super(TensorSetItemByTensorsWithTupleOfTensorNumberError, self).__init__()
  652. self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
  653. self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
  654. def construct(self, index_0, index_1, index_2, value_0, value_1):
  655. self.param[index_0, index_1, index_2] = (value_0, value_1)
  656. ret = self.param + self.const
  657. return ret
  658. @pytest.mark.level1
  659. @pytest.mark.platform_arm_ascend_training
  660. @pytest.mark.platform_x86_ascend_training
  661. @pytest.mark.platform_x86_gpu_training
  662. @pytest.mark.env_onecard
  663. def test_setitem_by_tensor_with_tuple_of_tensor_error():
  664. net = TensorSetItemByTensorsWithTupleOfTensorNumberError()
  665. index_0_ms = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)
  666. index_1_ms = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)
  667. index_2_ms = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)
  668. value_0 = np.zeros((4, 5))
  669. value_1 = np.ones((4, 5))
  670. value_0_ms = Tensor(value_0, mstype.float32)
  671. value_1_ms = Tensor(value_1, mstype.float32)
  672. with pytest.raises(ValueError):
  673. net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms)
  674. @pytest.mark.level1
  675. @pytest.mark.platform_arm_ascend_training
  676. @pytest.mark.platform_x86_ascend_training
  677. @pytest.mark.platform_x86_gpu_training
  678. @pytest.mark.env_onecard
  679. def test_setitem_grad():
  680. class Net(Cell):
  681. def __init__(self):
  682. super(Net, self).__init__()
  683. self.weight = Parameter(
  684. Tensor(np.ones([4, 4, 5]), dtype=mstype.float32), "b1", requires_grad=True)
  685. def construct(self, a, b):
  686. a[1:3:1, ::] = b
  687. c = a + self.weight
  688. return c
  689. class GradNet(Cell):
  690. def __init__(self, net):
  691. super(GradNet, self).__init__()
  692. self.net = net
  693. self.weights = ParameterTuple(net.trainable_params())
  694. def construct(self, x, y, sens):
  695. return grad_by_list_with_sens(self.net, self.weights)(x, y, sens)
  696. net = GradNet(Net())
  697. x = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
  698. y = Tensor(np.array([3]).astype(np.float32), mstype.float32)
  699. sens = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
  700. net(x, y, sens)
class TensorAssignWithSliceError1(Cell):
    """Multi-dim slice assignment with a negative step over a forward range."""

    def construct(self, a, b):
        # 1:3:-1 selects an empty region; numpy treats this as a no-op,
        # which is why the error test using this cell is currently disabled.
        a[1:3:-1, ::] = b
        return a
class TensorAssignWithSliceError2(Cell):
    """1-D slice assignment with a negative step over a forward range."""

    def construct(self, a, b):
        # 1:3:-1 selects an empty region; numpy treats this as a no-op,
        # which is why the error test using this cell is currently disabled.
        a[1:3:-1] = b
        return a
  709. class TensorAssignWithSlice2(Cell):
  710. def construct(self, a, b, ck):
  711. a[1:5] = b
  712. a[3:4] = 5
  713. a[-1:1:-1] = b
  714. a[-1:3:-1] = 5
  715. a[::] = b
  716. a[::] = 9
  717. z = a + ck
  718. return z
  719. class TensorAssignWithSlice(Cell):
  720. def __init__(self):
  721. super(TensorAssignWithSlice, self).__init__()
  722. self.c = 2.0
  723. def construct(self, a, b, ck):
  724. a[1:3, ::] = b
  725. a[2:3:, 3:] = b
  726. a[::] = b
  727. a[::] = self.c
  728. a[::, ::] = b
  729. a[::, ::] = self.c
  730. a[2:3:, 0:, 4:1:-1] = b
  731. a[2:3:, 0:, 4:1:-1] = self.c
  732. z = a + ck
  733. return z
  734. @pytest.mark.level1
  735. @pytest.mark.platform_arm_ascend_training
  736. @pytest.mark.platform_x86_ascend_training
  737. @pytest.mark.platform_x86_gpu_training
  738. @pytest.mark.env_onecard
  739. def test_tensor_assign_slice_value_1():
  740. net = TensorAssignWithSlice()
  741. a = np.arange(60).reshape(3, 4, 5)
  742. b = np.array([1]).astype(np.float32) # Tensor([1], dtype=mstype.float32)
  743. ck = np.arange(60).reshape(3, 4, 5)
  744. ta = Tensor(a, dtype=mstype.float32)
  745. tb = Tensor(b, dtype=mstype.float32)
  746. tck = Tensor(ck, dtype=mstype.float32)
  747. out = net(ta, tb, tck)
  748. a[1:3, ::] = b
  749. a[2:3:, 3:] = b
  750. a[::] = b
  751. a[::] = 2.0
  752. a[::, ::] = b
  753. a[::, ::] = 2.0
  754. a[2:3:, 0:, 4:1:-1] = b
  755. a[2:3:, 0:, 4:1:-1] = 2.0
  756. z = a + ck
  757. assert np.all(z == out.asnumpy())
  758. @pytest.mark.level1
  759. @pytest.mark.platform_arm_ascend_training
  760. @pytest.mark.platform_x86_ascend_training
  761. @pytest.mark.platform_x86_gpu_training
  762. @pytest.mark.env_onecard
  763. def test_tensor_assign_slice_value_2():
  764. net2 = TensorAssignWithSlice2()
  765. a = np.array([1, 2, 3, 4, 5, 6, 7, 8])
  766. ck = np.array([1, 2, 3, 4, 5, 6, 7, 8])
  767. b = np.array([1]).astype(np.float32) # Tensor([1], dtype=mstype.float32)
  768. tb = Tensor(b, dtype=mstype.float32)
  769. ta = Tensor(a, dtype=mstype.float32)
  770. tck = Tensor(ck, dtype=mstype.float32)
  771. out = net2(ta, tb, tck)
  772. a[1:5] = b
  773. a[3:4] = 5
  774. a[-1:1:-1] = b
  775. a[-1:3:-1] = 5
  776. a[::] = b
  777. a[::] = 9
  778. z = a + ck
  779. assert np.all(z == out.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_exception():
    """Exception cases for tensor slice/index assignment.

    Checks that size-mismatched or empty value tensors, and out-of-range
    integer indices, raise ValueError / IndexError respectively. `net` is
    rebound several times below, so the cases must run in this order.
    """
    net = TensorAssignWithSlice()
    net2 = TensorAssignWithSlice2()
    # The test case is no longer appropriate since x[1:3:-1] = np.array(2) does
    # not incur an error in numpy, which leaves the original array unchanged after
    # the assign operation.
    # net_e1 = TensorAssignWithSliceError1()
    # net_e2 = TensorAssignWithSliceError2()
    a = np.arange(60).reshape(3, 4, 5)
    ck = np.arange(60).reshape(3, 4, 5)
    b = Tensor([1], dtype=mstype.float32)
    Ta = Tensor(a, dtype=mstype.float32)
    Tck = Tensor(ck, dtype=mstype.float32)
    Ta4d = Tensor(a.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Ta4d_ck = Tensor(ck.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Tb = Tensor([1, 3], dtype=mstype.float32)  # wrong-size value tensor
    Tc = Tensor([], dtype=mstype.float32)      # empty value tensor
    t = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    tck = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    # Error for A[Slice] = Number
    # 1. A[Slice] = Number, Slice error
    # with pytest.raises(ValueError):
    #     net_e2(t, 2)
    # Error for A[Slice] = U, U is a Tensor
    # 1. A[Slice] = U, u.size is error
    with pytest.raises(ValueError):
        net2(t, Tb, tck)
    # 2. A[Slice] = U, U is empty
    with pytest.raises(ValueError):
        net2(t, Tc, tck)
    # 3. A[Slice] = U, U.size error
    # NOTE(review): identical call to case 1 above — looks like a copy-paste
    # duplicate; verify whether a different value tensor was intended.
    with pytest.raises(ValueError):
        net2(t, Tb, tck)
    # Error for A[Tuple(Slice...)] = Tensor
    # 1. A[Tuple(Slice...)] = U, U is empty
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Tuple(Slice...)] = U, U.size error
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 3. A[Tuple(Slice...)] = U, Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, b)
    # Error for A[Tuple(Slice...)] = Number
    # 1. A[Tuple(Slice...)] = Number, Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, 2)
    net = TensorAssignWithInteger()
    # Error for A[Number] = scalar/Tensor
    # 1. A[Number] = U, U is a Tensor, u.size not match
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Number] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)
    # Error for A[(n,m)] = scalar/Tensor
    # 1. A[(n,m)] = U, U is a tensor. u.size not match
    net = TensorAssignWithTupleInteger()
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 2. A[(n,m)] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)
    # Error for A[...] = U or A[1:, ...] = u
    # 1. A[...] = scalar/tensor
    net = TensorAssignWithEllipsis()
    net(Ta, Ta4d)  # valid case: must not raise
    with pytest.raises(ValueError):
        net(Ta, Tc)
    with pytest.raises(ValueError):
        net(Ta, Tb)
    # 2. A[::, 1:, ...] = scalar/tensor
    net = TensorAssignWithTupleEllipsis()
    net(Ta, b)  # valid case: must not raise
    with pytest.raises(ValueError):
        net(Ta, Tb)
class TensorAssignWithTupleEllipsis2(Cell):
    """Assigns through a (slice, ellipsis, slice) index tuple."""

    def construct(self, a, b):
        a[1:, ..., ::] = b
        return a
class TensorAssignWithTupleEllipsis(Cell):
    """Assigns a scalar and then a tensor through (slice, ellipsis) indices."""

    def construct(self, a, b):
        a[:2, ...] = 1.0
        a[1:, ...] = b
        return a
class TensorAssignWithEllipsis(Cell):
    """Assigns a scalar and then a tensor through a bare ellipsis index."""

    def construct(self, a, b):
        a[...] = 1
        a[...] = b
        return a
  879. class TensorAssignWithInteger(Cell):
  880. def construct(self, a, b, ck):
  881. a[1] = 1
  882. a[0] = b
  883. z = a + ck
  884. return z
  885. class TensorAssignWithTupleInteger(Cell):
  886. def construct(self, a, b, ck):
  887. a[(1)] = 1
  888. a[(1)] = b
  889. a[(1, 1)] = b
  890. a[(1, 1)] = 1
  891. z = a + ck
  892. return z
  893. class TensorAssignWithBoolTensorIndex(Cell):
  894. def __init__(self):
  895. super(TensorAssignWithBoolTensorIndex, self).__init__()
  896. self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
  897. self.u_scalar = 5
  898. def construct(self, a, b, c, u_tensor):
  899. a[c] = self.u_scalar
  900. a[b] = u_tensor
  901. z = a + self.t
  902. return z
class TensorAssignWithBoolTensorIndexError(Cell):
    """Chained boolean indexing (a[b][c]) on assignment; callers expect IndexError."""

    def construct(self, a, b, c, u_tensor):
        a[b][c] = u_tensor
        return a
  907. class TensorAssignWithBoolTensorIndex2(Cell):
  908. def __init__(self):
  909. super(TensorAssignWithBoolTensorIndex2, self).__init__()
  910. self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
  911. self.u_scalar = 5
  912. def construct(self, a, u_tensor):
  913. a[a > 8] = u_tensor
  914. a[a >= 6] = self.u_scalar
  915. a[a < 3] = self.u_scalar
  916. a[a <= 5] = u_tensor
  917. a[a == 5] = self.u_scalar
  918. z = a + self.t
  919. return z
class TensorAssignWithBoolTensorIndex2Error(Cell):
    """Chained comparison-mask indexing on assignment; callers expect IndexError."""

    def construct(self, a, u_tensor):
        a[a > 8][a > 5] = u_tensor
        return a
  924. @pytest.mark.level1
  925. @pytest.mark.platform_arm_ascend_training
  926. @pytest.mark.platform_x86_ascend_training
  927. @pytest.mark.platform_x86_gpu_training
  928. @pytest.mark.env_onecard
  929. def test_tensor_assign_bool_index_0():
  930. a = np.arange(60).reshape(3, 4, 5)
  931. b = a > 5
  932. c = a < 3
  933. Ta = Tensor(a, dtype=mstype.float32)
  934. Tb = Tensor(b)
  935. Tc = Tensor(c)
  936. u_tensor = Tensor([1], dtype=mstype.float32)
  937. net1 = TensorAssignWithBoolTensorIndex()
  938. out = net1(Ta, Tb, Tc, u_tensor)
  939. res = np.arange(60).reshape(3, 4, 5)
  940. res[c] = 5
  941. res[b] = 1
  942. res = res + np.ones([3, 4, 5])
  943. assert np.all(out.asnumpy() == res)
  944. @pytest.mark.level1
  945. @pytest.mark.platform_arm_ascend_training
  946. @pytest.mark.platform_x86_ascend_training
  947. @pytest.mark.platform_x86_gpu_training
  948. @pytest.mark.env_onecard
  949. def test_tensor_assign_bool_index_1():
  950. a = np.arange(60).reshape(3, 4, 5)
  951. Ta = Tensor(a, dtype=mstype.float32)
  952. u_tensor = Tensor([1], dtype=mstype.float32)
  953. net2 = TensorAssignWithBoolTensorIndex2()
  954. out = net2(Ta, u_tensor)
  955. res = np.arange(60).reshape(3, 4, 5)
  956. res[res > 8] = 1
  957. res[res >= 6] = 5
  958. res[res < 3] = 5
  959. res[res <= 5] = 1
  960. res[res == 5] = 5
  961. res = res + np.ones([3, 4, 5])
  962. assert np.all(out.asnumpy() == res)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_exception():
    """Error cases for boolean-mask assignment: shape-mismatched masks, non-bool
    index tensors, size-mismatched value tensors, and chained mask indexing."""
    a = np.arange(60).reshape(3, 4, 5)
    b = a > 5
    c = a < 3
    Ta = Tensor(a, dtype=mstype.float32)
    Tb = Tensor(b)
    Tc = Tensor(c)
    Td = Tensor([True, True])  # mask with the wrong shape
    u_tensor = Tensor([1], dtype=mstype.float32)
    u_tensor_error = Tensor([1, 2], dtype=mstype.float32)  # value with the wrong size
    u_scalar = 5
    net1 = TensorAssignWithBoolTensorIndex()
    net2 = TensorAssignWithBoolTensorIndex2()
    # Mask shape does not match `a`.
    with pytest.raises(ValueError):
        net1(Ta, Td, Tc, u_tensor)
    # Float tensor used where a bool mask is expected.
    with pytest.raises(IndexError):
        net1(Ta, u_tensor, Tc, u_tensor)
    with pytest.raises(ValueError):
        net1(Ta, Tb, Td, u_tensor)
    with pytest.raises(IndexError):
        net1(Ta, Tb, Ta, u_tensor)
    # Value tensor size does not match the masked region.
    with pytest.raises(ValueError):
        net1(Ta, Tb, Tc, u_tensor_error)
    # net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)
    with pytest.raises(ValueError):
        net2(Ta, u_tensor_error)
    # Chained indexing a[b][c] = ... is rejected outright.
    net3 = TensorAssignWithBoolTensorIndexError()
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_tensor)
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_scalar)
    net4 = TensorAssignWithBoolTensorIndex2Error()
    with pytest.raises(IndexError):
        net4(Ta, u_tensor)
    with pytest.raises(IndexError):
        net4(Ta, u_scalar)
  1004. @pytest.mark.level0
  1005. @pytest.mark.platform_arm_ascend_training
  1006. @pytest.mark.platform_x86_ascend_training
  1007. @pytest.mark.platform_x86_gpu_training
  1008. @pytest.mark.env_onecard
  1009. def test_tensor_slice_reduce_out_of_bounds_neg():
  1010. class NetWork(Cell):
  1011. def __init__(self):
  1012. super(NetWork, self).__init__()
  1013. self.tensor_ret = Tensor(np.array(9, np.int32))
  1014. def construct(self, tensor):
  1015. ret = tensor[-7, 3, 4]
  1016. return ret
  1017. input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
  1018. net = NetWork()
  1019. with pytest.raises(IndexError) as ex:
  1020. net(input_tensor)
  1021. assert "'begin[0]' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
  1022. "but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': -7." in str(ex.value)
  1023. @pytest.mark.level1
  1024. @pytest.mark.platform_arm_ascend_training
  1025. @pytest.mark.platform_x86_ascend_training
  1026. @pytest.mark.platform_x86_gpu_training
  1027. @pytest.mark.env_onecard
  1028. def test_tensor_slice_reduce_out_of_bounds_positive():
  1029. class NetWork(Cell):
  1030. def __init__(self):
  1031. super(NetWork, self).__init__()
  1032. self.tensor_ret = Tensor(np.array(9, np.int32))
  1033. def construct(self, tensor):
  1034. ret = tensor[6, 3, 4]
  1035. return ret
  1036. input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
  1037. net = NetWork()
  1038. with pytest.raises(IndexError) as ex:
  1039. net(input_tensor)
  1040. assert "'begin[0]' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
  1041. "but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': 6." in str(ex.value)
  1042. @pytest.mark.level0
  1043. @pytest.mark.platform_arm_ascend_training
  1044. @pytest.mark.platform_x86_ascend_training
  1045. @pytest.mark.platform_x86_gpu_training
  1046. @pytest.mark.env_onecard
  1047. def test_tensor_range():
  1048. a = np.arange(4*5*6).reshape(4, 5, 6).astype(np.float32)
  1049. ta = Tensor(a, mstype.float32)
  1050. ms_out = []
  1051. for item in ta:
  1052. ms_out.append(item)
  1053. np_out = []
  1054. for item in a:
  1055. np_out.append(item)
  1056. for i, elem in enumerate(ms_out):
  1057. assert np.all(elem.asnumpy() == np_out[i])