You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_tensor_check.py 14 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """ test control ops """
  16. import os
  17. import numpy as np
  18. import pytest
  19. import mindspore as ms
  20. from mindspore import Tensor
  21. from mindspore import context
  22. from mindspore import nn
  23. from mindspore.common import dtype as mstype
  24. from mindspore.ops import composite as C
  25. from mindspore.ops import operations as P
  26. from mindspore.common.parameter import Parameter
  27. context.set_context(mode=context.GRAPH_MODE)
  28. grad_by_list = C.GradOperation(get_by_list=True)
  29. grad_all = C.GradOperation(get_all=True)
  30. grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)
  31. def if_compile_test(x_init, y_init):
  32. """
  33. Feature: if compile test.
  34. Description: if compile test
  35. Expectation: compile done without error.
  36. """
  37. class Net(nn.Cell):
  38. def __init__(self):
  39. """"""
  40. super(Net, self).__init__()
  41. self.square = P.Square()
  42. self.add = P.Add()
  43. self.value = Tensor(3, dtype=ms.float32)
  44. self.switch = P.GeSwitch()
  45. self.merge = P.Merge()
  46. self.less = P.Less()
  47. def construct(self, x, y):
  48. cond = self.less(x, y)
  49. ret = self.value
  50. if cond:
  51. ret = self.add(x, ret)
  52. ret = self.add(y, ret)
  53. else:
  54. ret = self.square(self.value)
  55. return ret
  56. x = Tensor(x_init, dtype=ms.float32)
  57. y = Tensor(y_init, dtype=ms.float32)
  58. net = Net()
  59. output = net(x, y)
  60. return output
  61. def test_if_nested_compile():
  62. """
  63. Feature: if nested compile test.
  64. Description: if nested compile test
  65. Expectation: compile done without error.
  66. """
  67. class Net(nn.Cell):
  68. def __init__(self, auto_prefix=True):
  69. """"""
  70. super().__init__(auto_prefix=auto_prefix)
  71. self.squre = P.Square()
  72. self.value = Tensor(3, dtype=ms.float32)
  73. def construct(self, x, y):
  74. res = self.value
  75. if x <= y:
  76. res = x + res
  77. res = y + res
  78. else:
  79. if x == y:
  80. res = self.squre(self.value * y)
  81. else:
  82. res = self.squre(self.value)
  83. return res
  84. x = Tensor(1.0, dtype=ms.float32)
  85. y = Tensor(2.0, dtype=ms.float32)
  86. net = Net()
  87. net(x, y)
  88. def test_if_inside_for():
  89. """
  90. Feature: if inside test.
  91. Description: if inside test
  92. Expectation: compile done without error.
  93. """
  94. class Net(nn.Cell):
  95. def __init__(self, auto_prefix=True):
  96. """"""
  97. super().__init__(auto_prefix=auto_prefix)
  98. self.squre = P.Square()
  99. self.value = Tensor(3, dtype=ms.float32)
  100. self.count = 4
  101. def construct(self, x, y):
  102. res = 0
  103. for i in range(self.count):
  104. if i == x:
  105. res = res + x
  106. else:
  107. res = res - y
  108. return res
  109. c1 = Tensor(1, dtype=ms.int32)
  110. c2 = Tensor(1, dtype=ms.int32)
  111. net = Net()
  112. net(c1, c2)
  113. def test_while_with_weight_in_condition():
  114. """
  115. Feature: while with weight in condition test.
  116. Description: while with weight in condition test
  117. Expectation: compile done without error.
  118. """
  119. class Net(nn.Cell):
  120. def __init__(self):
  121. """"""
  122. super(Net, self).__init__()
  123. self.loop = Parameter(Tensor(1, dtype=ms.float32), name="loop")
  124. def construct(self, x):
  125. while self.loop < 5:
  126. self.loop += 1
  127. x += 1
  128. return x
  129. net = Net()
  130. x = Tensor(-1, dtype=ms.float32)
  131. grad_all(net)(x)
  132. def test_while_add():
  133. """
  134. Feature: while add test.
  135. Description: while add test
  136. Expectation: compile done without error.
  137. """
  138. class Net(nn.Cell):
  139. def __init__(self, data):
  140. """"""
  141. super(Net, self).__init__()
  142. self.start = Tensor(0, dtype=mstype.int32)
  143. self.end = Tensor(2, dtype=mstype.int32)
  144. self.out = Tensor(np.zeros([2, 3], dtype=np.float32))
  145. self.add = P.Add()
  146. def construct(self, inputs):
  147. idx = self.start
  148. end = self.end
  149. out = self.out
  150. while idx < end:
  151. xi = inputs[idx, :, :]
  152. out = self.add(out, xi)
  153. idx = idx + 1
  154. return out
  155. x = Tensor(np.arange(10 * 2 * 3).reshape(10, 2, 3).astype(np.float32))
  156. net = Net(x)
  157. net(x)
  158. def test_tensor_all_construct_lack_branch():
  159. """
  160. Feature: tensor all construct lack test.
  161. Description: tensor all construct lack test
  162. Expectation: compile done without error.
  163. """
  164. class NetConditionLackBranch(nn.Cell):
  165. def __init__(self):
  166. """"""
  167. super(NetConditionLackBranch, self).__init__()
  168. self.logicaland = P.LogicalAnd()
  169. self.logicalor = P.LogicalOr()
  170. def construct(self, input1, input2):
  171. if input1.all():
  172. return self.logicaland(input1, input2)
  173. while input1.any():
  174. return self.logicalor(input1, input2)
  175. # NOTICE: here missing return statement, default return None
  176. input_np_1 = np.random.choice([True], size=(2, 3, 4, 5))
  177. input_tensor_1 = Tensor(input_np_1)
  178. input_np_2 = np.random.choice([True, False], size=(2, 3, 4, 5))
  179. input_tensor_2 = Tensor(input_np_2)
  180. net = NetConditionLackBranch()
  181. with pytest.raises(Exception):
  182. net(input_tensor_1, input_tensor_2)
  183. def test_parser_switch_layer_func_primitive():
  184. """
  185. Feature: parser switch layer func primitive test.
  186. Description: parser switch layer func primitive test
  187. Expectation: compile done without error.
  188. """
  189. class FinalNet(nn.Cell):
  190. def __init__(self, funcs):
  191. """"""
  192. super().__init__()
  193. self.funcs = funcs
  194. def construct(self, i, input1):
  195. x = self.funcs[i](input1)
  196. return x
  197. func1 = P.ReLU()
  198. func2 = P.Softmax()
  199. funcs = (func1, func2)
  200. net = FinalNet(funcs)
  201. input1 = Tensor(np.random.randn(2, 3, 4, 5).astype(np.float32))
  202. i = Tensor(1, mstype.int32)
  203. with pytest.raises(ValueError):
  204. net(i, input1)
  205. def test_large_for_loop():
  206. """
  207. Feature: large for loop test.
  208. Description: large for loop test
  209. Expectation: compile done without error.
  210. """
  211. class Net(nn.Cell):
  212. def __init__(self):
  213. """"""
  214. super(Net, self).__init__()
  215. self.flatten = P.ReLU() # nn.Flatten()
  216. def construct(self, x):
  217. for elem in range(1, 1900):
  218. x = self.flatten(x + elem)
  219. return x
  220. t = Tensor(np.ones([2, 3], dtype=np.float32))
  221. net = Net()
  222. os.environ['MS_DEV_RECURSIVE_EVAL'] = '1'
  223. old_max_call_depth = context.get_context('max_call_depth')
  224. context.set_context(max_call_depth=60)
  225. with pytest.raises(RuntimeError) as err:
  226. net(t)
  227. context.set_context(max_call_depth=old_max_call_depth)
  228. os.environ['MS_DEV_RECURSIVE_EVAL'] = '0'
  229. assert 'Exceed function call depth limit 60' in str(err.value)
  230. def test_large_for_loop_with_continue_break():
  231. """
  232. Feature: large for loop with continue break test.
  233. Description: large for loop with continue break test
  234. Expectation: compile done without error.
  235. """
  236. class Net(nn.Cell):
  237. def __init__(self):
  238. """"""
  239. super(Net, self).__init__()
  240. self.flatten = P.ReLU() # nn.Flatten()
  241. def construct(self, x):
  242. idx = 0
  243. for elem1 in range(200):
  244. idx = idx + 1
  245. if idx < 10:
  246. x = x + 0.5
  247. continue
  248. if idx > 500:
  249. break
  250. x = self.flatten(x + elem1)
  251. return x
  252. os.environ['MS_DEV_RECURSIVE_EVAL'] = '1'
  253. old_max_call_depth = context.get_context('max_call_depth')
  254. context.set_context(max_call_depth=2000)
  255. t = Tensor(np.ones([2, 3], dtype=np.float32))
  256. net = Net()
  257. net(t)
  258. os.environ['MS_DEV_RECURSIVE_EVAL'] = '0'
  259. context.set_context(max_call_depth=old_max_call_depth)
  260. def test_recursive_call():
  261. """
  262. Feature: recursive call test.
  263. Description: recursive call test
  264. Expectation: compile done without error.
  265. """
  266. class Net(nn.Cell):
  267. """ Net definition """
  268. def __init__(self):
  269. """"""
  270. super(Net, self).__init__()
  271. self.fc = nn.Dense(10, 10) # padding=0
  272. # self.net2 = Net2()
  273. def construct(self, x):
  274. net2 = Net2()
  275. x = net2(x)
  276. out = self.fc(x)
  277. return out
  278. class Net2(nn.Cell):
  279. def __init__(self):
  280. super(Net2, self).__init__()
  281. self.net = Net()
  282. self.fc = nn.Dense(10, 10)
  283. def construct(self, x):
  284. x = self.net(x)
  285. out = self.fc(x)
  286. return out
  287. context.set_context(mode=context.GRAPH_MODE)
  288. os.environ['MS_DEV_RECURSIVE_EVAL'] = '1'
  289. old_max_call_depth = context.get_context('max_call_depth')
  290. context.set_context(max_call_depth=80)
  291. input_data = Tensor(np.identity(10).astype(np.float32))
  292. net = Net2()
  293. with pytest.raises(RuntimeError):
  294. net(input_data)
  295. os.environ['MS_DEV_RECURSIVE_EVAL'] = '0'
  296. context.set_context(max_call_depth=old_max_call_depth)
  297. def test_pow():
  298. """
  299. Feature: pow test.
  300. Description: pow test
  301. Expectation: compile done without error.
  302. """
  303. input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
  304. power = Tensor(np.array(3.0, np.int64))
  305. testpow = P.Pow()
  306. expect = np.array([[8, 8], [27, 27]])
  307. result = testpow(input_tensor, power)
  308. assert np.all(result.asnumpy() == expect)
  309. def test_pow1():
  310. """
  311. Feature: pow one test.
  312. Description: pow one test
  313. Expectation: compile done without error.
  314. """
  315. input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
  316. power = Tensor(np.array(3.0, np.int64))
  317. testpow = P.Pow()
  318. expect = np.array([[8, 8], [8, 8]])
  319. result = testpow(input_tensor, power)
  320. assert np.all(result.asnumpy() == expect)
  321. def test_pow2():
  322. """
  323. Feature: pow two test.
  324. Description: pow two test
  325. Expectation: compile done without error.
  326. """
  327. input_tensor = Tensor(np.array([[1, 1], [2, 2]]))
  328. power = Tensor(np.array(3.0, np.int64))
  329. testpow = P.Pow()
  330. expect = np.array([[1, 1], [8, 8]])
  331. result = testpow(input_tensor, power)
  332. assert np.all(result.asnumpy() == expect)
  333. def test_pow3():
  334. """
  335. Feature: pow three test.
  336. Description: pow three test
  337. Expectation: compile done without error.
  338. """
  339. input_tensor = Tensor(np.array([[2, 2], [1, 1]]))
  340. power = Tensor(np.array(3.0, np.int64))
  341. testpow = P.Pow()
  342. expect = np.array([[8, 8], [1, 1]])
  343. result = testpow(input_tensor, power)
  344. assert np.all(result.asnumpy() == expect)
  345. def test_exp():
  346. """
  347. Feature: exp test.
  348. Description: exp test
  349. Expectation: compile done without error.
  350. """
  351. input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
  352. testexp = P.Exp()
  353. result = testexp(input_tensor)
  354. expect = np.exp(np.array([[2, 2], [3, 3]]))
  355. assert np.all(result.asnumpy() == expect)
  356. def test_exp1():
  357. """
  358. Feature: exp one test.
  359. Description: exp one test
  360. Expectation: compile done without error.
  361. """
  362. input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
  363. testexp = P.Exp()
  364. result = testexp(input_tensor)
  365. expect = np.exp(np.array([[2, 2], [3, 3]]))
  366. assert np.all(result.asnumpy() == expect)
  367. def test_realdiv():
  368. """
  369. Feature: realdiv test.
  370. Description: realdiv test
  371. Expectation: compile done without error.
  372. """
  373. x = Tensor(2048.0)
  374. y = Tensor(128.0)
  375. div = P.RealDiv()
  376. result = div(x, y)
  377. x = x.asnumpy()
  378. y = y.asnumpy()
  379. expect = x / y
  380. assert np.all(result.asnumpy() == expect)
  381. def test_realdiv1():
  382. """
  383. Feature: realdiv one test.
  384. Description: realdiv one test
  385. Expectation: compile done without error.
  386. """
  387. x = Tensor(256.0)
  388. y = Tensor(128.0)
  389. div = P.RealDiv()
  390. result = div(x, y)
  391. x = x.asnumpy()
  392. y = y.asnumpy()
  393. expect = x / y
  394. assert np.all(result.asnumpy() == expect)
  395. def test_eye():
  396. """
  397. Feature: eye test.
  398. Description: eye test
  399. Expectation: compile done without error.
  400. """
  401. x = np.arange(3)
  402. expect = np.ones_like(x)
  403. expect = np.diag(expect)
  404. eye = P.Eye()
  405. eye_output = eye(3, 3, ms.float32)
  406. assert np.all(eye_output.asnumpy() == expect)
  407. def test_sub():
  408. """
  409. Feature: sub test.
  410. Description: sub test
  411. Expectation: compile done without error.
  412. """
  413. input_x = Tensor(np.ones(shape=[3]))
  414. input_y = Tensor(np.zeros(shape=[3]))
  415. sub = P.Sub()
  416. result = sub(input_x, input_y)
  417. expect = np.ones(shape=[3])
  418. assert np.all(result.asnumpy() == expect)
  419. def test_square():
  420. """
  421. Feature: square test.
  422. Description: square test
  423. Expectation: compile done without error.
  424. """
  425. input_tensor = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
  426. square = P.Square()
  427. result = square(input_tensor)
  428. expect = np.array([[1, 4, 9], [16, 25, 36]])
  429. assert np.all(result.asnumpy() == expect)
  430. def test_sqrt():
  431. """
  432. Feature: sqrt test.
  433. Description: sqrt test
  434. Expectation: compile done without error.
  435. """
  436. input_tensor = Tensor(np.array([[4, 4], [9, 9]]))
  437. sqrt = P.Sqrt()
  438. expect = np.array([[2, 2], [3, 3]])
  439. result = sqrt(input_tensor)
  440. assert np.all(result.asnumpy() == expect)