
test_framstruct.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_framstruct """
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.utils.check_gradient import (
    ms_function, check_jacobian, Tensor, NNGradChecker,
    OperationGradChecker, check_gradient)

context.set_context(mode=context.PYNATIVE_MODE)


def setup_module(module):
    context.set_context(mode=context.PYNATIVE_MODE)


grad_all = C.GradOperation(get_all=True)
grad_by_list = C.GradOperation(get_by_list=True)
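# grad_all(fn)(*inputs) returns a tuple with the gradient of fn with respect
# to every positional input; grad_by_list(fn, params)(*inputs) instead returns
# gradients with respect to the Parameters collected in a ParameterTuple.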


@ms_function
def while_upper_bound(upper):
    rval = 2
    while rval < upper:
        rval = rval * rval
    return rval


def test_while_upper_bound():
    res = while_upper_bound(10)
    assert res == 16


@ms_function
def while_lower_bound(lower):
    """ t_while """
    rval = lower
    while rval < 100:
        rval = rval * rval
    return rval


def test_while_lower_bound():
    res = while_lower_bound(2)
    assert res == 256
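# Traced by hand: while_upper_bound(10) squares 2 -> 4 -> 16 and stops once
# 16 >= 10; while_lower_bound(2) squares 2 -> 4 -> 16 -> 256 and stops once
# 256 >= 100.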


@ms_function
def dynamic_make_tuple(x, lower, upper):
    out = ()
    i = lower
    while i < upper:
        out = out + (x,)
        i = i + 1
    return out


def test_dynamic_make_tuple():
    assert dynamic_make_tuple(2, 1, 5) == (2, 2, 2, 2)


def test_make_tuple():
    # Statically and recursively creating a static type is valid in MindSpore.
    @ms_function
    def make_tuple(x):
        out = ()
        for i in range(3):
            out = out + (x,)
        return out

    res = make_tuple(5)
    assert res == (5, 5, 5)


@ms_function
def add(x, y):
    """ add """
    return x + y


def mul(x, y):
    """ mul """
    return x * y


def add_mul(x, y):
    """ add_mul """
    return (x + y) * y


def mainf(x, y):
    """ mainf """
    return grad_all(mul)(x, y)


def grad_add_mul(x, y):
    """ grad_add_mul """
    return grad_all(add_mul)(x, y)


@ms_function
def sub(x, y):
    """ sub """
    return x - y


# pylint: disable=using-constant-test
@ms_function
def if_always_true(x):
    """ if_always_true """
    if True:
        return x
    else:
        return 0


def test_add():
    """ test_add """
    res = add(2.5, 3)
    assert res == 5.5


def test_sub():
    """ test_sub """
    res = sub(3.5, 3)
    assert res == 0.5


@non_graph_engine
def test_if_always_true():
    """ test_if_always_true """
    res = if_always_true(1)
    assert res == 1


@non_graph_engine
def test_f():
    """ test_f """
    res = mainf(Tensor(3, dtype=ms.int32), Tensor(2, dtype=ms.int32))
    assert res == (2, 3)


@non_graph_engine
def test_grad_add_mul():
    """ test_grad_add_mul """
    res = grad_add_mul(Tensor(3, dtype=ms.int32), Tensor(2, dtype=ms.int32))
    assert res == (2, 7)
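# Hand-checked gradients for the two tests above: for mul(x, y) = x * y at
# (3, 2), d/dx = y = 2 and d/dy = x = 3, giving (2, 3); for
# add_mul(x, y) = (x + y) * y at (3, 2), d/dx = y = 2 and
# d/dy = x + 2 * y = 7, giving (2, 7).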


def f(x):
    if x > 0:
        return f(x - 1)
    return x


@ms_function
def list_subscript():
    """ list_subscript """
    x = [1, 2, 3]
    return x[0] * x[1]


def test_list_subscript():
    """ test_list_subscript """
    res = list_subscript()
    assert res == 2


@ms_function
def ms_infer_for(xs, y):
    """ ms_infer_for """
    rval = y
    for x in xs:
        rval = rval + x
    return rval


def test_infer_for():
    """ test_infer_for """
    t = (1, 2, 3)
    y = 4
    res = ms_infer_for(t, y)
    assert res == 10


@ms_function
def if_construct(a, b):
    z = a
    if a > b:
        z = a + b
    else:
        z = a * b
    if z > b:
        return z - a
    else:
        return a - b


def test_if_construct():
    """ test_if_construct """
    res = if_construct(3, 6)
    assert res == 15


@ms_function
def if_scalar(a, b):
    """ if_abstract """
    if a:
        return a
    return b


def test_if_scalar1():
    """ test_if_abstract """
    res = if_scalar(3, 6)
    assert res == 3


def test_if_scalar2():
    """ test_if_abstract """
    res = if_scalar(0, 6)
    assert res == 6


@ms_function
def if_tensor(a, b):
    c = a
    if a < b:
        c = a + a
        if c < b:
            c = a + c
        else:
            c = a + b
    else:
        c = b + b
    out = c + c
    return out


def test_if_tensor():
    res = if_tensor(Tensor(np.ones([1]).astype(np.int32)), Tensor(np.ones([1]).astype(np.int32)))
    assert res == Tensor(np.ones([1]).astype(np.int32) * 4)


def rec(x):
    """ rec """
    if x > 0:
        return rec(x - 1)
    return x


def test_me_rec():
    """ test_me_rec """
    res = rec(10)
    assert res == 0


def t2_while(x, y):
    out = y - x
    i = 0
    while i < 10:
        out = mul(x, y)
        i = i + 1
    return out


def test_while2():
    res = t2_while(2, 3)
    assert res == 6


def if_test(a, b):
    """ if_test """
    if a > b:
        return 3 * a
    return 2 * b


def grad_if(x, y):
    """ grad_if """
    return grad_all(if_test)(x, y)


def test_grad_if():
    """ test_grad_if """
    assert grad_if(Tensor(5, dtype=ms.int32), Tensor(4, dtype=ms.int32)) == (3, 0)
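# With a = 5 > b = 4 the taken branch is 3 * a, so d/da = 3 and d/db = 0,
# matching the expected (3, 0).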


class ConvNet(nn.Cell):
    def __init__(self):
        super(ConvNet, self).__init__()
        out_channel = 16
        kernel_size = 3
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1,
                             pad_mode="pad",
                             pad=0,
                             stride=1,
                             dilation=2,
                             group=1)
        self.w = Parameter(Tensor(np.ones([16, 16, 3, 3]).astype(np.float32)), name='w')

    def construct(self, x):
        return self.conv(x, self.w)


conv = ConvNet()
c1 = Tensor([2], mstype.float32)
c2 = Tensor([10], mstype.float32)
c3 = Tensor([1], mstype.float32)


@ms_function
def t1_while(x, y, z):
    out = x
    i = c1
    while i < c2:
        out = out + conv(z)
        i = i + c3
    out = out + out
    return out


def test_while_net():
    y = Tensor(np.ones([1, 3, 3, 4]).astype(np.float32))
    x = Tensor(np.ones([1, 16, 12, 12]).astype(np.float32))
    z = Tensor(np.ones([1, 16, 16, 16]).astype(np.float32))
    res = t1_while(x, y, z)
    assert np.all(res.asnumpy() == np.ones([1, 16, 12, 12]).astype(np.float32) * 2306.0)
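# Where 2306 comes from: with all-ones input and weights, each conv output
# element sums 16 channels * 3 * 3 taps = 144; the loop runs for i = 2..9
# (8 iterations), so out = 1 + 8 * 144 = 1153, then doubled to 2306.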


@ms_function
def if_while(a, b, x, z):
    c = a
    i = c1
    out = x
    if a < b:
        c = a + a
        while i < c2:
            out = out + conv(z)
            i = i + c3
    else:
        c = b + b
        out = c + c
    return out


def test_if_while():
    x = Tensor(np.random.randn(1, 16, 12, 12).astype(np.float32))
    z = Tensor(np.random.randn(1, 16, 16, 16).astype(np.float32))
    res = if_while(Tensor(np.ones([1]).astype(np.float32)), Tensor(np.ones([1]).astype(np.float32)), x, z)
    assert np.all(res.asnumpy() == np.ones([64, 10]).astype(np.float32) * 4.0)


def _while(x):
    """ _while """
    ret = x * x
    i = 2
    while i <= 3:
        ret = ret * i
        i = i + 1
    return ret


def grad_while(x):
    """ grad_while """
    return grad_all(_while)(x)


def test_grad_while():
    """ test_grad_while """
    assert grad_while(Tensor(5, dtype=ms.int32)) == (60,)
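# _while computes x * x * 2 * 3 = 6 * x**2, so the gradient is 12 * x,
# which is 60 at x = 5.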


@ms_function
def factorial(n):
    """ factorial """
    if n == 0:
        return 1
    return n * factorial(n - 1)


def test_factorial():
    res = factorial(3)
    assert res == 6


@ms_function
def factorial2(n):
    """ factorial """
    if n != 0:
        return n * factorial2(n - 1)
    elif n == 1:
        return 1 * factorial2(n - 1)
    else:
        return 1


def test_factorial2():
    res = factorial2(3)
    assert res == 6


@ms_function
def foo(n):
    if n <= 1:
        if n == 1:
            return foo(n - 1)
        else:
            return 1
    else:
        return foo(n - 1)


def test_foo():
    res = foo(5)
    assert res == 1


@ms_function
def double_nested_loop(x):
    i = 0
    s = 0
    while i < x:
        j = 0
        i = i + 1
        while j < 3:
            j = j + 1
            s = s + j
    return s


def test_nested_loop():
    res = double_nested_loop(3)
    assert res == 18


@ms_function
def double_nested_loop2(x):
    s = 0
    for i in range(x):
        for j in range(3):
            s = s + j
    return s


def test_nested_loop2():
    # Each outer pass of the for-based variant adds 0 + 1 + 2 = 3.
    res = double_nested_loop2(1)
    assert res == 3


def _for(x):
    """ _for """
    ret = x * x
    for i in (2, 3):
        ret = ret * i
    return ret


@ms_function
def grad_for(x):
    """ grad_for """
    return grad_all(_for)(x)


@ms_function
def try_tail(x):
    """ try_tail """
    return C.tail(x)


@non_graph_engine
def test_tail():
    """ test_tail """
    try_tail((0, 1, 2, 3))


@ms_function
def zero_like_tensor(x):
    """ zero_like_tensor """
    return C.zeros_like(x)


def test_zeros():
    """ test_zeros """
    x = Tensor(np.ones([2, 3]).astype(np.int32))
    res = zero_like_tensor(x)
    assert np.all(res.asnumpy() == np.zeros([2, 3]).astype(np.int32))


@ms_function
def arithmetic_simplify_01(x, y):
    """ arithmetic_simplify_01 """
    return C.zeros_like(x) * y


def test_arithmetic_simplify_01():
    """ test_arithmetic_simplify_01 """
    x = Tensor(np.ones([2, 3]).astype(np.int32))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_01(x, y)
    expect = np.zeros([2, 3]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_02(x, y):
    """ arithmetic_simplify_02 """
    return C.ones_like(x) * y


def test_arithmetic_simplify_02():
    """ test_arithmetic_simplify_02 """
    x = Tensor(np.ones([2, 3]).astype(np.int32))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_02(x, y)
    expect = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_03(x, y):
    """ arithmetic_simplify_03 """
    return x * C.ones_like(y)


def test_arithmetic_simplify_03():
    """ test_arithmetic_simplify_03 """
    x = Tensor(np.ones([2, 3]).astype(np.int32))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_03(x, y)
    expect = np.ones([2, 3]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_04(x):
    """ arithmetic_simplify_04 """
    return x + 0


def test_arithmetic_simplify_04():
    """ test_arithmetic_simplify_04 """
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_04(x)
    expect = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_05(x):
    """ arithmetic_simplify_05 """
    return x * 1


def test_arithmetic_simplify_05():
    """ test_arithmetic_simplify_05 """
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_05(x)
    expect = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_06(x):
    """ arithmetic_simplify_06 """
    return x * 2 * 5


def test_arithmetic_simplify_06():
    """ test_arithmetic_simplify_06 """
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_06(x)
    expect = np.array([[10, 20, 30], [40, 50, 60]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_07(x):
    """ arithmetic_simplify_07 """
    return (x + 1) * 2 * 5


def test_arithmetic_simplify_07():
    """ test_arithmetic_simplify_07 """
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    res = arithmetic_simplify_07(x)
    expect = np.array([[20, 30, 40], [50, 60, 70]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


@ms_function
def arithmetic_simplify_08(x, y):
    """ arithmetic_simplify_08 """
    return 1 * x * 1 * 1 + 1 * 0 * 1 + 0 + y * 1


def test_arithmetic_simplify_08():
    """ test_arithmetic_simplify_08 """
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
    y = Tensor(np.ones([2, 3]).astype(np.int32))
    res = arithmetic_simplify_08(x, y)
    expect = np.array([[2, 3, 4], [5, 6, 7]]).astype(np.int32)
    assert np.all(res.asnumpy() == expect)


def test_GradCheckerPrimitive():
    """ test_GradCheckerPrimitive """
    matmul = P.MatMul()

    def prim_f(x, y):
        return matmul(x, y)

    check_gradient(prim_f, Tensor(np.array([[0.65, 0.8, 0.8]], np.float32)),
                   Tensor(np.array([[0.1], [0.2], [-.1]], np.float32)),
                   grad_checker_class=OperationGradChecker, sampling_times=2)
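# The check_gradient / check_jacobian helpers used by these checker tests come
# from the in-tree mindspore_test_framework: as exercised here, they compare
# autodiff gradients against finite-difference estimates, with delta setting
# the numerical step size, max_error the allowed deviation, and
# sampling_times / input_selector / output_selector restricting which entries
# get checked.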


def test_NNGradChecker():
    """ test_NNGradChecker """
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.dense = nn.Dense(10, 10)

        def construct(self, x):
            out = self.dense(x)
            return out

    check_gradient(Net(), Tensor(np.random.rand(1, 10).astype(np.float32)),
                   delta=1e-3,
                   max_error=1e-3,
                   grad_checker_class=NNGradChecker, sampling_times=3)


def test_OperationGradChecker():
    """ test_OperationGradChecker """
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.matmul = P.MatMul()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, x, y):
            x = x * self.z
            out = self.matmul(x, y)
            return out

    check_gradient(Net(), Tensor(np.array([[0.65, 0.8, 0.8]], np.float32)),
                   Tensor(np.array([[0.1], [0.2], [-.1]], np.float32)), grad_checker_class=OperationGradChecker,
                   input_selector=[1], sampling_times=2)


def test_OperationJacobianChecker():
    """ test_OperationJacobianChecker """
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.matmul = P.MatMul()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, x, y):
            x = x * self.z
            out = self.matmul(x, y)
            return x, out

    check_jacobian(Net(), Tensor(np.array([[0.65, 0.8, 0.8], [0.1, 0.2, 0.3]], np.float32)),
                   Tensor(np.array([[0.1, 0.3], [0.2, 0.2], [-.1, 0.4]], np.float32)),
                   grad_checker_class=OperationGradChecker, input_selector=[0],
                   output_selector=[0])


def test_NNJacobianChecker():
    """ test_NNJacobianChecker """
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.dense = nn.Dense(10, 10)

        def construct(self, x):
            out = self.dense(x)
            return out, x

    check_jacobian(Net(), Tensor(np.random.rand(1, 10).astype(np.float32)),
                   delta=1e-3,
                   max_error=1e-7,
                   grad_checker_class=NNGradChecker,
                   input_selector=[1],
                   output_selector=[0])


def multi_outputs(x, y):
    z = x + y
    return 2 * z, 2 * z


@ms_function
def while_sp(x, y, z):
    out = x
    i = c3
    while i < c2:
        out = mul(x, out)
        i = i + c3
    return out


def test_while_sp():
    y = Tensor(np.ones([1, 3]).astype(np.float32))
    z = Tensor(np.ones([1, 3]).astype(np.float32))
    x = Tensor(np.ones([1, 3]).astype(np.float32) * 2.0)
    res = while_sp(x, y, z)
    assert np.all(res.asnumpy() == np.ones([1, 3]).astype(np.float32) * 1024.0)
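# while_sp multiplies out by x once per iteration for i = 1..9 (nine
# iterations), so out = x ** 10; with x = 2 that is 1024.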


def grad_refactor_simple_1(x, y):
    """ add """
    return x * x + 2 * y


def test_grad_refactor_simple_1():
    assert grad_all(grad_refactor_simple_1)(Tensor(2, dtype=ms.int32), Tensor(1, dtype=ms.int32)) == (4, 2)


def grad_refactor_simple_2(x, y, z):
    """ add """
    return x * y + z + x * y * z + x + x * y


def test_grad_refactor_simple_2():
    x = Tensor(2, dtype=ms.int32)
    y = Tensor(3, dtype=ms.int32)
    z = Tensor(0, dtype=ms.int32)
    assert grad_all(grad_refactor_simple_2)(x, y, z) == (7, 4, 7)
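# Hand check at (x, y, z) = (2, 3, 0): d/dx = y + y * z + 1 + y = 7,
# d/dy = x + x * z + x = 4, d/dz = 1 + x * y = 7.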


def grad_refactor_1(a, b):
    """ if_test """
    def inner(x, y):
        return x * y

    return inner(a, b)


def test_grad_refactor_1():
    assert grad_all(grad_refactor_1)(Tensor(2, dtype=ms.int32), Tensor(3, dtype=ms.int32)) == (3, 2)


def grad_refactor_2(a, b):
    """ if_test """
    def inner(x):
        return x * b

    return inner(b) * inner(a)


def test_grad_refactor_2():
    assert grad_all(grad_refactor_2)(Tensor(2, dtype=ms.int32), Tensor(3, dtype=ms.int32)) == (27, 54)
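# grad_refactor_2 computes (b * b) * (a * b) = a * b ** 3, so at (2, 3)
# d/da = b ** 3 = 27 and d/db = 3 * a * b ** 2 = 54.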


def grad_refactor_3(a):
    """ if_test """
    if a > 3:
        return 0
    return 3 * a


def grad_refactor_4(a):
    """ if_test """
    if a > 3:
        return 3 * a
    return 0


def test_grad_refactor_4():
    assert grad_all(grad_refactor_4)(Tensor(4, dtype=ms.int32)) == (3,)


def grad_refactor_5(a):
    """ if_test """
    if a > 3:
        return 1
    return a


def grad_refactor_6(a, b):
    """ if_test """
    if a > b:
        return 3 * a + b
    return 2 * b * a


def test_grad_refactor_6():
    assert grad_all(grad_refactor_6)(Tensor(3, dtype=ms.int32), Tensor(2, dtype=ms.int32)) == (3, 1)


def grad_refactor_while(x):
    """ grad_refactor_while """
    rval = x
    while rval < 4:
        rval = rval * rval
    return rval


def grad_refactor__while_1(x):
    """ _while """
    ret = x * x
    i = 2
    while i <= 3:
        ret = ret * i
        i = i + 1
    return ret


def test_grad_refactor_10():
    """ test_grad_while """
    assert grad_all(grad_refactor__while_1)(Tensor(5, dtype=ms.int32)) == (60,)


def test_grad_refactor_11():
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x, y):
            return x * y * y

    net = Net()
    grad_all(net)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.ones([2]).astype(np.float32)))


def test_grad_refactor_12():
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, x, y):
            return x * self.z * y

    net = Net()
    grad_all(net)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.zeros([2]).astype(np.float32)))


def test_grad_refactor_13():
    class Net(nn.Cell):
        """ Net definition """

        def __init__(self):
            super(Net, self).__init__()
            self.z = Parameter(Tensor(np.ones([2]).astype(np.float32)), name='z')

        def construct(self, x, y):
            return x * self.z * y

    net = Net()
    weights = ParameterTuple(net.trainable_params())
    grad_by_list(net, weights)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.zeros([2]).astype(np.float32)))


def grad_refactor_14(a, b):
    """ if_test """
    def inner1(x):
        return x * b

    def inner2(x):
        return a * b

    def inner3(x):
        if x > 2:
            return a
        return b

    return inner1(b) + inner2(a) + inner3(a)


# pylint: disable=using-constant-test
class IfDeferInline(nn.Cell):
    def __init__(self, mul_size):
        super().__init__()
        self.mul_weight = Tensor(np.full(mul_size, 0.6, dtype=np.float32))
        self.mul = P.Mul()

    def construct(self, inputs):
        x = self.mul(inputs, self.mul_weight)
        if True:
            x = x
        return x


def test_grad_if_defer_inline():
    """ test_grad_if_defer_inline """
    network = IfDeferInline([128, 96])
    network.add_flags(defer_inline=False)
    inp = Tensor(np.ones([128, 96]).astype(np.float32))
    grads = grad_all(network)(inp)
    assert np.all(grads[0].asnumpy() == np.full([128, 96], 0.6, dtype=np.float32))


def test_dict_const():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.res = {'1': 10}

        def construct(self):
            return self.res

    Net()()
  680. Net()()