# test_auto_monad.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. import os
  16. import re
  17. import time
  18. import pytest
  19. import numpy as np
  20. import mindspore as ms
  21. import mindspore.ops.operations as P
  22. import mindspore.nn as nn
  23. from mindspore.nn import Cell
  24. from mindspore.nn import ReLU, BatchNorm2d, Conv2d, Dense, PReLU, ParameterUpdate
  25. from mindspore.nn import Momentum, SoftmaxCrossEntropyWithLogits
  26. from mindspore import context, Tensor
  27. from mindspore.common.parameter import Parameter
  28. from mindspore.common.initializer import initializer
  29. from mindspore.ops.primitive import constexpr
  30. from capture import Capture, capture, check_output
  31. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  32. @pytest.fixture(name="pynative_save_graphs")
  33. def _pynative_save_graphs():
  34. context.set_context(mode=context.PYNATIVE_MODE, save_graphs=True)
  35. yield
  36. context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
  37. clean_all_ir_files('./')
  38. @pytest.fixture(name="with_save_graphs")
  39. def _with_save_graphs():
  40. context.set_context(save_graphs=True)
  41. yield
  42. context.set_context(save_graphs=False)
  43. clean_all_ir_files('./')
  44. def test_print():
  45. class Print(Cell):
  46. def __init__(self):
  47. super().__init__()
  48. self.print = P.Print()
  49. def construct(self, x, y):
  50. self.print("input_x:", x, "input_y:", y)
  51. return x
  52. cap = Capture()
  53. with capture(cap):
  54. input_x = Tensor(3, dtype=ms.int32)
  55. input_y = Tensor(4, dtype=ms.int32)
  56. net = Print()
  57. net(input_x, input_y)
  58. time.sleep(0.1)
  59. patterns = {'input_x:\nTensor(shape=[], dtype=Int32, value=3)\n'
  60. 'input_y:\nTensor(shape=[], dtype=Int32, value=4)'}
  61. check_output(cap.output, patterns)
  62. def test_print_add():
  63. class Print_Add(Cell):
  64. def __init__(self):
  65. super().__init__()
  66. self.print = P.Print()
  67. self.add = P.Add()
  68. def construct(self, x, y):
  69. x = self.add(x, y)
  70. self.print("input_x:", x, "input_y:", y)
  71. return x
  72. cap = Capture()
  73. with capture(cap):
  74. input_x = Tensor(3, dtype=ms.int32)
  75. input_y = Tensor(4, dtype=ms.int32)
  76. expect = Tensor(7, dtype=ms.int32)
  77. net = Print_Add()
  78. out = net(input_x, input_y)
  79. time.sleep(0.1)
  80. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  81. patterns = {'input_x:\nTensor(shape=[], dtype=Int32, value=7)\n'
  82. 'input_y:\nTensor(shape=[], dtype=Int32, value=4)'}
  83. check_output(cap.output, patterns)
  84. def test_print_assign():
  85. class Print_Assign(Cell):
  86. def __init__(self):
  87. super().__init__()
  88. self.print = P.Print()
  89. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  90. def construct(self, x):
  91. self.print("before:", self.para)
  92. self.para = x
  93. self.print("after:", self.para)
  94. return self.para
  95. cap = Capture()
  96. with capture(cap):
  97. input_x = Tensor(3, dtype=ms.int32)
  98. expect = Tensor(3, dtype=ms.int32)
  99. net = Print_Assign()
  100. out = net(input_x)
  101. time.sleep(0.1)
  102. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  103. patterns = {'before:\nTensor(shape=[], dtype=Int32, value=1)',
  104. 'after:\nTensor(shape=[], dtype=Int32, value=3)'}
  105. check_output(cap.output, patterns)
  106. def test_print_assign_add():
  107. class Print_Assign_Add(Cell):
  108. def __init__(self):
  109. super().__init__()
  110. self.print = P.Print()
  111. self.add = P.Add()
  112. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  113. def construct(self, x, y):
  114. self.print("before:", self.para)
  115. self.para = x
  116. self.print("after:", self.para)
  117. x = self.add(self.para, y)
  118. return x
  119. cap = Capture()
  120. with capture(cap):
  121. input_x = Tensor(3, dtype=ms.int32)
  122. input_y = Tensor(4, dtype=ms.int32)
  123. expect = Tensor(7, dtype=ms.int32)
  124. net = Print_Assign_Add()
  125. out = net(input_x, input_y)
  126. time.sleep(0.1)
  127. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  128. patterns = {'before:\nTensor(shape=[], dtype=Int32, value=1)',
  129. 'after:\nTensor(shape=[], dtype=Int32, value=3)'}
  130. check_output(cap.output, patterns)
  131. def test_print_while():
  132. class Print_While(Cell):
  133. def __init__(self):
  134. super().__init__()
  135. self.print = P.Print()
  136. def construct(self, x, y):
  137. self.print("input_x before:", x, "input_y before:", y)
  138. while x < y:
  139. self.print("input_x after:", x, "input_y after:", y)
  140. x = x + 1
  141. return x
  142. cap = Capture()
  143. with capture(cap):
  144. input_x = Tensor(1, dtype=ms.int32)
  145. input_y = Tensor(4, dtype=ms.int32)
  146. expect = Tensor(4, dtype=ms.int32)
  147. net = Print_While()
  148. out = net(input_x, input_y)
  149. time.sleep(0.1)
  150. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  151. patterns = {'input_x before:\nTensor(shape=[], dtype=Int32, value=1)\n'
  152. 'input_y before:\nTensor(shape=[], dtype=Int32, value=4)',
  153. 'input_x after:\nTensor(shape=[], dtype=Int32, value=1)\n'
  154. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)',
  155. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  156. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)',
  157. 'input_x after:\nTensor(shape=[], dtype=Int32, value=3)\n'
  158. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)'}
  159. check_output(cap.output, patterns)
  160. def test_print_if():
  161. class Print_If(Cell):
  162. def __init__(self):
  163. super().__init__()
  164. self.print = P.Print()
  165. def construct(self, x, y):
  166. self.print("input_x before:", x, "input_y before:", y)
  167. if x < y:
  168. self.print("input_x after:", x, "input_y after:", y)
  169. x = x + 1
  170. return x
  171. cap = Capture()
  172. with capture(cap):
  173. input_x = Tensor(3, dtype=ms.int32)
  174. input_y = Tensor(4, dtype=ms.int32)
  175. expect = Tensor(4, dtype=ms.int32)
  176. net = Print_If()
  177. out = net(input_x, input_y)
  178. time.sleep(0.1)
  179. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  180. patterns = {'input_x before:\nTensor(shape=[], dtype=Int32, value=3)\n'
  181. 'input_y before:\nTensor(shape=[], dtype=Int32, value=4)',
  182. 'input_x after:\nTensor(shape=[], dtype=Int32, value=3)\n'
  183. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)'}
  184. check_output(cap.output, patterns)
  185. def test_print_assign_while():
  186. class Print_Assign_While(Cell):
  187. def __init__(self):
  188. super().__init__()
  189. self.print = P.Print()
  190. self.para = Parameter(Tensor(0, dtype=ms.int32), name='para')
  191. def construct(self, x, y):
  192. self.print("input_x before:", x, "input_y before:",
  193. y, "para before:", self.para)
  194. while x < y:
  195. self.para = x
  196. x = self.para + 1
  197. self.print("input_x after:", x, "input_y after:",
  198. y, "para after:", self.para)
  199. return x
  200. cap = Capture()
  201. with capture(cap):
  202. input_x = Tensor(1, dtype=ms.int32)
  203. input_y = Tensor(4, dtype=ms.int32)
  204. expect = Tensor(4, dtype=ms.int32)
  205. net = Print_Assign_While()
  206. out = net(input_x, input_y)
  207. time.sleep(0.1)
  208. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  209. patterns = {
  210. 'input_x before:\nTensor(shape=[], dtype=Int32, value=1)\n'
  211. 'input_y before:\nTensor(shape=[], dtype=Int32, value=4)\n'
  212. 'para before:\nTensor(shape=[], dtype=Int32, value=0)',
  213. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  214. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  215. 'para after:\nTensor(shape=[], dtype=Int32, value=1)',
  216. 'input_x after:\nTensor(shape=[], dtype=Int32, value=3)\n'
  217. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  218. 'para after:\nTensor(shape=[], dtype=Int32, value=2)',
  219. 'input_x after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  220. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  221. 'para after:\nTensor(shape=[], dtype=Int32, value=3)'}
  222. check_output(cap.output, patterns)
  223. def test_print_assign_if():
  224. class Print_Assign_If(Cell):
  225. def __init__(self):
  226. super().__init__()
  227. self.print = P.Print()
  228. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  229. def construct(self, x, y):
  230. self.print("input_x before:", x, "input_y before:",
  231. y, "para before:", self.para)
  232. self.para = x
  233. if x < y:
  234. x = self.para + 1
  235. self.print("input_x after:", x, "input_y after:",
  236. y, "para after:", self.para)
  237. return x
  238. cap = Capture()
  239. with capture(cap):
  240. input_x = Tensor(3, dtype=ms.int32)
  241. input_y = Tensor(4, dtype=ms.int32)
  242. expect = Tensor(4, dtype=ms.int32)
  243. net = Print_Assign_If()
  244. out = net(input_x, input_y)
  245. time.sleep(0.1)
  246. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  247. patterns = {
  248. 'input_x before:\nTensor(shape=[], dtype=Int32, value=3)\n'
  249. 'input_y before:\nTensor(shape=[], dtype=Int32, value=4)\n'
  250. 'para before:\nTensor(shape=[], dtype=Int32, value=1)',
  251. 'input_x after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  252. 'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
  253. 'para after:\nTensor(shape=[], dtype=Int32, value=3)'}
  254. check_output(cap.output, patterns)
  255. @pytest.mark.level0
  256. @pytest.mark.platform_arm_ascend_training
  257. @pytest.mark.platform_x86_ascend_training
  258. @pytest.mark.env_onecard
  259. def test_assign():
  260. class Assign(Cell):
  261. def __init__(self):
  262. super().__init__()
  263. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  264. def construct(self, value):
  265. self.para = value
  266. return self.para
  267. input_x = Tensor(3, dtype=ms.int32)
  268. expect = Tensor(3, dtype=ms.int32)
  269. net = Assign()
  270. out = net(input_x)
  271. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  272. @pytest.mark.level0
  273. @pytest.mark.platform_arm_ascend_training
  274. @pytest.mark.platform_x86_ascend_training
  275. @pytest.mark.env_onecard
  276. def test_assign_implicit():
  277. class Assign_Implicit(Cell):
  278. def __init__(self):
  279. super(Assign_Implicit, self).__init__()
  280. self.b = Parameter(initializer(
  281. 1, [5], ms.float32), name="global_step")
  282. def construct(self, w):
  283. self.b = w
  284. return self.b
  285. input_data = Tensor(np.ones([5]).astype(np.int32))
  286. net = Assign_Implicit()
  287. out = net(input_data)
  288. assert out.dtype == ms.float32
  289. @pytest.mark.level0
  290. @pytest.mark.platform_arm_ascend_training
  291. @pytest.mark.platform_x86_ascend_training
  292. @pytest.mark.env_onecard
  293. def test_assign_write_after_read():
  294. class Assign_WAR(Cell):
  295. def __init__(self):
  296. super(Assign_WAR, self).__init__()
  297. self.assign = P.Assign()
  298. self.sub = P.Sub()
  299. self.add = P.Add()
  300. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  301. self.weight = Parameter(Tensor(5, dtype=ms.int32), name='weight')
  302. def construct(self, x, y):
  303. # without auto_monad, execute order is wrong: Add - Assign - Sub - Assign
  304. # expected execute order: Add - Assign - Assign - Sub
  305. self.para = self.add(y, x)
  306. self.assign(self.para, y)
  307. return self.sub(self.para, self.weight)
  308. input_x = Tensor(3, dtype=ms.int32)
  309. input_y = Tensor(4, dtype=ms.int32)
  310. expect = Tensor(-1, dtype=ms.int32)
  311. net = Assign_WAR()
  312. out = net(input_x, input_y)
  313. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  314. @pytest.mark.level0
  315. @pytest.mark.platform_arm_ascend_training
  316. @pytest.mark.platform_x86_ascend_training
  317. @pytest.mark.env_onecard
  318. def test_assign_read_after_write():
  319. class Assign_RAW(Cell):
  320. def __init__(self):
  321. super(Assign_RAW, self).__init__()
  322. self.assign_add = P.AssignAdd()
  323. self.greater = P.Greater()
  324. self.add = P.Add()
  325. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  326. def construct(self, x, y):
  327. # without auto_monad, execute order is wrong: Add - Assign - Greater - AssignAdd
  328. # expected execute order: AssignAdd - Add - Assign
  329. self.greater(x, y)
  330. self.assign_add(self.para, x)
  331. self.para = self.add(x, y)
  332. return self.para
  333. input_x = Tensor(3, dtype=ms.int32)
  334. input_y = Tensor(4, dtype=ms.int32)
  335. expect = Tensor(7, dtype=ms.int32)
  336. net = Assign_RAW()
  337. out = net(input_x, input_y)
  338. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  339. @pytest.mark.level0
  340. @pytest.mark.platform_arm_ascend_training
  341. @pytest.mark.platform_x86_ascend_training
  342. @pytest.mark.env_onecard
  343. def test_assign_if():
  344. class Assign_If(Cell):
  345. def __init__(self):
  346. super().__init__()
  347. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  348. def construct(self, x, y):
  349. if x < y:
  350. self.para = x
  351. else:
  352. self.para = y
  353. return self.para
  354. input_x = Tensor(3, dtype=ms.int32)
  355. input_y = Tensor(4, dtype=ms.int32)
  356. expect = Tensor(3, dtype=ms.int32)
  357. net = Assign_If()
  358. out = net(input_x, input_y)
  359. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  360. @pytest.mark.level0
  361. @pytest.mark.platform_arm_ascend_training
  362. @pytest.mark.platform_x86_ascend_training
  363. @pytest.mark.env_onecard
  364. def test_if():
  365. class If(Cell):
  366. def __init__(self):
  367. super().__init__()
  368. self.add = P.Add()
  369. self.sub = P.Sub()
  370. def construct(self, x, y):
  371. if x > y:
  372. x = self.sub(x, y)
  373. else:
  374. x = self.add(x, y)
  375. return x
  376. input_x = Tensor(3, dtype=ms.int32)
  377. input_y = Tensor(4, dtype=ms.int32)
  378. expect = Tensor(7, dtype=ms.int32)
  379. net = If()
  380. out = net(input_x, input_y)
  381. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  382. @pytest.mark.level0
  383. @pytest.mark.platform_arm_ascend_training
  384. @pytest.mark.platform_x86_ascend_training
  385. @pytest.mark.env_onecard
  386. def test_while():
  387. class While(Cell):
  388. def construct(self, x, y):
  389. y = y + 4
  390. while x < y:
  391. x = x + 1
  392. x = x + 3
  393. return x
  394. input_x = Tensor(2, dtype=ms.int32)
  395. input_y = Tensor(14, dtype=ms.int32)
  396. expect = Tensor(21, dtype=ms.int32)
  397. net = While()
  398. out = net(input_x, input_y)
  399. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  400. @pytest.mark.level0
  401. @pytest.mark.platform_arm_ascend_training
  402. @pytest.mark.platform_x86_ascend_training
  403. @pytest.mark.env_onecard
  404. def test_assign_while():
  405. class Assign_While(Cell):
  406. def __init__(self):
  407. super().__init__()
  408. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  409. def construct(self, x, y):
  410. y = y + 4
  411. while x < y:
  412. x = x + 1
  413. self.para = x
  414. self.para = x - 1
  415. return self.para
  416. input_x = Tensor(2, dtype=ms.int32)
  417. input_y = Tensor(14, dtype=ms.int32)
  418. expect = Tensor(17, dtype=ms.int32)
  419. net = Assign_While()
  420. out = net(input_x, input_y)
  421. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  422. @pytest.mark.level0
  423. @pytest.mark.platform_arm_ascend_training
  424. @pytest.mark.platform_x86_ascend_training
  425. @pytest.mark.env_onecard
  426. def test_for():
  427. class For(Cell):
  428. def construct(self, x, y):
  429. y = x + y
  430. for _ in range(20):
  431. y = y + 1
  432. return y
  433. input_x = Tensor(2, dtype=ms.int32)
  434. input_y = Tensor(4, dtype=ms.int32)
  435. expect = Tensor(26, dtype=ms.int32)
  436. net = For()
  437. out = net(input_x, input_y)
  438. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  439. def test_print_for():
  440. class Print_For(Cell):
  441. def __init__(self):
  442. super().__init__()
  443. self.print = P.Print()
  444. def construct(self, x, y):
  445. y = x + y
  446. self.print("input_x before:", x, "input_y before:", y)
  447. for _ in range(3):
  448. y = y + 1
  449. self.print("input_x after:", x, "input_y after:", y)
  450. return y
  451. cap = Capture()
  452. with capture(cap):
  453. input_x = Tensor(2, dtype=ms.int32)
  454. input_y = Tensor(4, dtype=ms.int32)
  455. expect = Tensor(9, dtype=ms.int32)
  456. net = Print_For()
  457. out = net(input_x, input_y)
  458. time.sleep(0.1)
  459. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  460. patterns = {
  461. 'input_x before:\nTensor(shape=[], dtype=Int32, value=2)\n'
  462. 'input_y before:\nTensor(shape=[], dtype=Int32, value=6)',
  463. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  464. 'input_y after:\nTensor(shape=[], dtype=Int32, value=7)',
  465. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  466. 'input_y after:\nTensor(shape=[], dtype=Int32, value=8)',
  467. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  468. 'input_y after:\nTensor(shape=[], dtype=Int32, value=9)'}
  469. check_output(cap.output, patterns)
  470. def test_print_assign_for():
  471. class Print_Assign_For(Cell):
  472. def __init__(self):
  473. super().__init__()
  474. self.print = P.Print()
  475. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  476. def construct(self, x, y):
  477. y = x + y
  478. self.print("input_x before:", x, "input_y before:",
  479. y, "para before:", self.para)
  480. for _ in range(3):
  481. y = y + 1
  482. self.para = x + y
  483. self.print("input_x after:", x, "input_y after:",
  484. y, "para after:", self.para)
  485. return y
  486. cap = Capture()
  487. with capture(cap):
  488. input_x = Tensor(2, dtype=ms.int32)
  489. input_y = Tensor(4, dtype=ms.int32)
  490. expect = Tensor(9, dtype=ms.int32)
  491. net = Print_Assign_For()
  492. out = net(input_x, input_y)
  493. time.sleep(0.1)
  494. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  495. patterns = {
  496. 'input_x before:\nTensor(shape=[], dtype=Int32, value=2)\n'
  497. 'input_y before:\nTensor(shape=[], dtype=Int32, value=6)\n'
  498. 'para before:\nTensor(shape=[], dtype=Int32, value=1)',
  499. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  500. 'input_y after:\nTensor(shape=[], dtype=Int32, value=7)\n'
  501. 'para after:\nTensor(shape=[], dtype=Int32, value=9)',
  502. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  503. 'input_y after:\nTensor(shape=[], dtype=Int32, value=8)\n'
  504. 'para after:\nTensor(shape=[], dtype=Int32, value=10)',
  505. 'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
  506. 'input_y after:\nTensor(shape=[], dtype=Int32, value=9)\n'
  507. 'para after:\nTensor(shape=[], dtype=Int32, value=11)'}
  508. check_output(cap.output, patterns)
  509. @pytest.mark.level0
  510. @pytest.mark.platform_arm_ascend_training
  511. @pytest.mark.platform_x86_ascend_training
  512. @pytest.mark.env_onecard
  513. def test_assign_for():
  514. class Assign_For(Cell):
  515. def __init__(self):
  516. super().__init__()
  517. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  518. def construct(self, x, y):
  519. y = y + 4
  520. for _ in range(5):
  521. x = x + y
  522. self.para = x
  523. return self.para
  524. input_x = Tensor(2, dtype=ms.int32)
  525. input_y = Tensor(3, dtype=ms.int32)
  526. expect = Tensor(37, dtype=ms.int32)
  527. net = Assign_For()
  528. out = net(input_x, input_y)
  529. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  530. @constexpr
  531. def _check_shape(shape):
  532. if len(shape) != 1:
  533. raise ValueError(f"Invalid shape {shape}")
  534. @pytest.mark.level0
  535. @pytest.mark.platform_arm_ascend_training
  536. @pytest.mark.platform_x86_ascend_training
  537. @pytest.mark.env_onecard
  538. def test_constexpr_check():
  539. class ConstexprCheck(Cell):
  540. def __init__(self):
  541. super(ConstexprCheck, self).__init__()
  542. self.shape = P.Shape()
  543. def construct(self, x, y):
  544. s = self.shape(x)
  545. _check_shape(s)
  546. x = x + y
  547. return x
  548. x = Tensor([2], dtype=ms.int32)
  549. y = Tensor([3], dtype=ms.int32)
  550. expect = Tensor(5, dtype=ms.int32)
  551. net = ConstexprCheck()
  552. # Input with valid shape.
  553. out = net(x, y)
  554. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  555. # Input with wrong shape, exception is expected.
  556. with pytest.raises(ValueError):
  557. wrong_x = Tensor(np.ones((2, 2)), dtype=ms.int32)
  558. out = net(wrong_x, y)
  559. print(out)
  560. @pytest.mark.level0
  561. @pytest.mark.platform_arm_ascend_training
  562. @pytest.mark.platform_x86_ascend_training
  563. @pytest.mark.env_onecard
  564. def test_if_lambda():
  565. class If_Lambda(Cell):
  566. def __init__(self):
  567. super().__init__()
  568. self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
  569. def construct(self, x, y):
  570. out = x
  571. if x < y:
  572. x2 = (lambda a: a + a)
  573. out = x2(self.para)
  574. out = out + y
  575. return out
  576. input_x = Tensor(2, dtype=ms.int32)
  577. input_y = Tensor(3, dtype=ms.int32)
  578. expect = Tensor(5, dtype=ms.int32)
  579. net = If_Lambda()
  580. out = net(input_x, input_y)
  581. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  582. @pytest.mark.level0
  583. @pytest.mark.platform_arm_ascend_training
  584. @pytest.mark.platform_x86_ascend_training
  585. @pytest.mark.env_onecard
  586. def test_multi_assign():
  587. class Multi_Assign(Cell):
  588. def __init__(self):
  589. super().__init__()
  590. self.assign = P.Assign()
  591. self.para1 = Parameter(Tensor(1, dtype=ms.int32), name='para1')
  592. self.para2 = Parameter(Tensor(2, dtype=ms.int32), name='para2')
  593. self.para3 = Parameter(Tensor(3, dtype=ms.int32), name='para3')
  594. def construct(self, x, y, z):
  595. a = self.assign(self.para1, x)
  596. a = self.assign(self.para2, y)
  597. a = self.assign(self.para3, z)
  598. return self.para1 + self.para2 + a
  599. x = Tensor(4, dtype=ms.int32)
  600. y = Tensor(5, dtype=ms.int32)
  601. z = Tensor(6, dtype=ms.int32)
  602. expect = Tensor(15, dtype=ms.int32)
  603. net = Multi_Assign()
  604. out = net(x, y, z)
  605. np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
  606. @pytest.mark.level0
  607. @pytest.mark.platform_arm_ascend_training
  608. @pytest.mark.platform_x86_ascend_training
  609. @pytest.mark.env_onecard
  610. def test_multi_assign_addn():
  611. class Multi_Assign_Addn(Cell):
  612. def __init__(self):
  613. super().__init__()
  614. self.addn = P.AddN()
  615. self.assign = P.Assign()
  616. self.para1 = Parameter(Tensor(1.0, dtype=ms.float32), name='para1')
  617. self.para2 = Parameter(Tensor(3.0, dtype=ms.float32), name='para2')
  618. def construct(self, inputs):
  619. self.assign(self.para1, inputs)
  620. out = self.addn((inputs, self.para1, self.para2))
  621. self.assign(self.para2, inputs)
  622. out = self.addn((out, self.para1, self.para2))
  623. return out
  624. x = Tensor(9.0, dtype=ms.float32)
  625. expect = Tensor(39.0, dtype=ms.float32)
  626. net = Multi_Assign_Addn()
  627. out = net(x)
  628. np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
  629. def test_multi_assign_print():
  630. class Multi_Assign_Print(Cell):
  631. def __init__(self):
  632. super().__init__()
  633. self.pow = P.Pow()
  634. self.print = P.Print()
  635. self.assign = P.Assign()
  636. self.exponent = Tensor([2.0], ms.float32)
  637. self.para1 = Parameter(Tensor(1.0, dtype=ms.float32), name='para1')
  638. self.para2 = Parameter(Tensor(3.0, dtype=ms.float32), name='para2')
  639. def construct(self, inputs):
  640. self.assign(self.para1, inputs)
  641. self.assign(self.para2, self.pow(inputs, self.exponent))
  642. self.print(inputs)
  643. self.print(self.para1)
  644. self.print(self.para2)
  645. return inputs
  646. x = Tensor(9.0, dtype=ms.float32)
  647. expect = Tensor(9.0, dtype=ms.float32)
  648. expect_para1 = Tensor(9.0, dtype=ms.float32)
  649. expect_para2 = Tensor(81.00001, dtype=ms.float32)
  650. net = Multi_Assign_Print()
  651. out = net(x)
  652. np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
  653. np.testing.assert_almost_equal(
  654. net.para1.data.asnumpy(), expect_para1.asnumpy())
  655. np.testing.assert_almost_equal(
  656. net.para2.data.asnumpy(), expect_para2.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_matmul_assign_biasadd():
    """Graph mode and pynative mode must agree when Assign writes are
    interleaved with MatMul/BiasAdd reads of the same parameter."""
    class Matmul_Assign_Biasadd(Cell):
        def __init__(self):
            super().__init__()
            inputs = np.array([[1, 1], [1, 1]])
            self.parameter1 = Parameter(
                Tensor(inputs, ms.float32), name="parameter1")
            biasadd = np.array([0, -1])
            self.parameter2 = Parameter(
                Tensor(biasadd, ms.float32), name="biasadd")
            self.assign = P.Assign()
            self.matmul = P.MatMul()
            self.biasadd = P.BiasAdd()

        def construct(self, x):
            # The matmul must read the value assigned just above, and the
            # second assign must land before biasadd uses parameter2.
            self.assign(self.parameter1, x)
            x = self.matmul(x, self.parameter1)
            self.assign(self.parameter1, x)
            x = self.biasadd(x, self.parameter2)
            return x

    net = Matmul_Assign_Biasadd()
    inputs = np.array([[1, 2], [3, 4]])
    out1 = net(Tensor(inputs, ms.float32))
    net = Matmul_Assign_Biasadd()
    try:
        # Run a fresh instance in pynative mode; outputs must match.
        context.set_context(mode=context.PYNATIVE_MODE)
        out2 = net(Tensor(inputs, ms.float32))
        np.testing.assert_almost_equal(out1.asnumpy(), out2.asnumpy())
    finally:
        # Always restore graph mode for the tests that follow.
        context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_assign_while_if():
    """Assign/AssignSub on a parameter that also drives if- and
    while-conditions; each condition must see the latest written value."""
    class Assign_While_If(Cell):
        def __init__(self):
            super().__init__()
            self.mul = P.Mul()
            self.addn = P.AddN()
            self.assign = P.Assign()
            self.assign_sub = P.AssignSub()
            self.para = Parameter(Tensor(1.0, dtype=ms.float32), name='para')

        def construct(self, x, y, z, w):
            self.assign(self.para, x)
            # Condition must observe the assign above (para == x here).
            if self.para > y:
                self.assign(self.para, y)
                x = self.mul(x, x)
            # Each iteration reads the value decremented by assign_sub.
            while self.para > z:
                x = self.addn((x, self.para))
                self.assign_sub(self.para, w)
            return x

    x = Tensor(99.0, dtype=ms.float32)
    y = Tensor(44.0, dtype=ms.float32)
    z = Tensor(11.0, dtype=ms.float32)
    w = Tensor(1.0, dtype=ms.float32)
    # 99*99 + (44 + 43 + ... + 12) = 9801 + 924 = 10725
    expect = Tensor(10725.0, dtype=ms.float32)
    net = Assign_While_If()
    out = net(x, y, z, w)
    np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_isolate_call():
    """Parameter writes performed inside helper methods whose return value
    is discarded (isolated calls) must still take effect before the read."""
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.para1 = Parameter(Tensor(1, dtype=ms.int32), name='para1')
            self.para2 = Parameter(Tensor(2, dtype=ms.int32), name='para2')

        def construct(self, x, y):
            # Return value intentionally ignored: only the side effect matters.
            self.setpara(x, y)
            return self.para1 + self.para2

        def setpara(self, x, y):
            self.para1 = x
            self.setpara2(y)
            return x

        def setpara2(self, y):
            self.para2 = y
            return y

    x = Tensor(4, dtype=ms.int32)
    y = Tensor(5, dtype=ms.int32)
    expect = Tensor(9, dtype=ms.int32)  # 4 + 5 after both writes land
    net = Net()
    out = net(x, y)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_assign_return_true():
    """A parameter write buried two helper calls deep, reached from an
    if-condition, must land before `self.para` is read afterwards."""
    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')

        def construct(self, x, y):
            # mycheck always returns True but also writes self.para = x + y.
            if self.mycheck(x, y):
                out = x + y
            else:
                out = x - y
            out = self.para + out
            return out

        def mycheck(self, x, y):
            self.setpara(x, y)
            return True

        def setpara(self, x, y):
            self.para = x + y
            return True

    x = Tensor(2, dtype=ms.int32)
    y = Tensor(3, dtype=ms.int32)
    expect = Tensor(10, dtype=ms.int32)  # para(2+3) + out(2+3)
    net = Net()
    out = net(x, y)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_unpack_call():
    """A parameter write reached through *args unpacking across a generic
    helper (`call_func`) must be visible to the read that follows."""
    class SetPara(Cell):
        def __init__(self, para):
            super(SetPara, self).__init__()
            self.para = para

        def construct(self, x, y):
            # Side effect only; the True return value is discarded.
            self.para = x + y
            return True

    class MyNet(Cell):
        def __init__(self):
            super(MyNet, self).__init__()
            self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
            self.set_para = SetPara(self.para)

        def construct(self, *inputs):
            # Varargs are forwarded twice before the inner cell writes para.
            self.call_func(self.set_para, *inputs)
            out = self.para + 1
            return out

        def call_func(self, func, *inputs):
            func(*inputs)
            return True

    x = Tensor(2, dtype=ms.int32)
    y = Tensor(3, dtype=ms.int32)
    expect = Tensor(6, dtype=ms.int32)  # (2 + 3) + 1
    net = MyNet()
    out = net(x, y)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_tuple_of_tuple():
    """Calling a sub-cell extracted from a nested tuple must still register
    its parameter write before the subsequent read."""
    class SetPara(Cell):
        def __init__(self, para):
            super(SetPara, self).__init__()
            self.para = para

        def construct(self, x, y):
            self.para = x + y
            return True

    class MyNet(Cell):
        def __init__(self):
            super(MyNet, self).__init__()
            self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
            self.set_para = SetPara(self.para)

        def construct(self, x, y):
            t1 = (self.set_para, x)
            t2 = (t1, y)
            # t2[0][0] is set_para; called as set_para(y, x) -> para = y + x.
            t2[0][0](t2[1], t1[1])
            out = self.para + 1
            return out

        # NOTE: not used by construct above; kept for parity with
        # test_unpack_call.
        def call_func(self, func, *inputs):
            func(*inputs)
            return True

    x = Tensor(2, dtype=ms.int32)
    y = Tensor(3, dtype=ms.int32)
    expect = Tensor(6, dtype=ms.int32)  # (3 + 2) + 1
    net = MyNet()
    out = net(x, y)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_write_read_write():
    """Two rounds of parameter writes with a read in between: the read must
    see the first pair of writes, the final sum the second pair."""
    class MyNet(Cell):
        def __init__(self):
            super(MyNet, self).__init__()
            self.para1 = Parameter(Tensor(1, dtype=ms.int32), name='para1')
            self.para2 = Parameter(Tensor(2, dtype=ms.int32), name='para2')

        def construct(self, x, y, x1, y1):
            self.para1 = x
            self.para2 = y
            # Must observe x and y here, not the x1/y1 writes below.
            a = self.para1 + self.para2
            self.para1 = x1
            self.para2 = y1
            return a + self.para1 + self.para2

    x = Tensor(3, dtype=ms.int32)
    y = Tensor(4, dtype=ms.int32)
    x1 = Tensor(5, dtype=ms.int32)
    y1 = Tensor(6, dtype=ms.int32)
    expect = Tensor(18, dtype=ms.int32)  # (3 + 4) + 5 + 6
    net = MyNet()
    out = net(x, y, x1, y1)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_variable_from_outer_graph():
    """A value computed in the outer graph (b) is consumed both inside the
    if/else branch graphs and after they merge."""
    class MyNet(Cell):
        def __init__(self):
            super(MyNet, self).__init__()
            # Python bool attribute: branch choice is fixed at compile time.
            self.cond = False
            self.add = P.Add()
            self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')

        def construct(self, x, y):
            b = self.para + x
            a = self.para + b
            if self.cond:
                a = self.add(a, x)
            else:
                a = self.add(a, y)
            return a + b

    x = Tensor(2, dtype=ms.int32)
    y = Tensor(3, dtype=ms.int32)
    expect = Tensor(10, dtype=ms.int32)  # b=3, a=1+3=4 then 4+3=7, 7+3=10
    net = MyNet()
    out = net(x, y)
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_while_by_while_and_if_in_first_while():
    """Two sequential while loops, an if nested inside the first, with all
    conditions driven by mutated parameters; only checks that the graph
    compiles and runs (no numeric assertion)."""
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()
            self.sigmoid = P.Sigmoid()
            self.tanh = P.Tanh()
            self.add = P.Add()
            a = np.full((1,), 5, dtype=np.float32)
            self.a = Parameter(Tensor(a), name="a")
            b = np.full((1,), 4, dtype=np.float32)
            self.b = Parameter(Tensor(b), name="b")
            c = np.full((1,), 7, dtype=np.float32)
            self.c = Parameter(Tensor(c), name="c")

        def construct(self, x):
            out = x
            while self.a < 7:
                if self.a < self.c:
                    out = self.relu(x)
                # In-place parameter update drives the loop condition.
                self.a += 1
            while self.c > 5:
                out = self.add(out, out)
                self.c -= 1
            return out

    context.set_context(mode=context.GRAPH_MODE)
    input_np_a = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me_a = Tensor(input_np_a)
    net = Net()
    # Success criterion: compilation and execution complete without error.
    net(input_me_a)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_by_while_and_while_in_first_if():
    """An if with a while nested inside its branch, followed by another
    while; conditions driven by mutated parameters. Compile/run check only."""
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()
            self.sigmoid = P.Sigmoid()
            self.tanh = P.Tanh()
            self.add = P.Add()
            a = np.full((1,), 5, dtype=np.float32)
            self.a = Parameter(Tensor(a), name="a")
            b = np.full((1,), 4, dtype=np.float32)
            self.b = Parameter(Tensor(b), name="b")
            c = np.full((1,), 7, dtype=np.float32)
            self.c = Parameter(Tensor(c), name="c")

        def construct(self, x):
            out = x
            if self.a < self.c:
                out = self.relu(x)
                # Inner while mutates the same parameter as its condition.
                while self.a < 7:
                    self.a += 1
            while self.c > 5:
                out = self.add(out, out)
                self.c -= 1
            return out

    context.set_context(mode=context.GRAPH_MODE)
    input_np_a = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me_a = Tensor(input_np_a)
    net = Net()
    # Success criterion: compilation and execution complete without error.
    net(input_me_a)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_while_by_while_and_while_in_first_while():
    """A while nested inside another while, followed by a third while, all
    conditioned on mutated parameters. Compile/run check only."""
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()
            self.sigmoid = P.Sigmoid()
            self.tanh = P.Tanh()
            self.add = P.Add()
            a = np.full((1,), 5, dtype=np.float32)
            self.a = Parameter(Tensor(a), name="a")
            b = np.full((1,), 4, dtype=np.float32)
            self.b = Parameter(Tensor(b), name="b")
            c = np.full((1,), 7, dtype=np.float32)
            self.c = Parameter(Tensor(c), name="c")

        def construct(self, x):
            out = x
            while self.a < self.c:
                out = self.relu(x)
                # Inner loop drains b; outer condition depends on a vs c.
                while self.b > 1:
                    self.b -= 1
                self.a += 1
            while self.c > 5:
                out = self.add(out, out)
                self.c -= 1
            return out

    context.set_context(mode=context.GRAPH_MODE)
    input_np_a = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me_a = Tensor(input_np_a)
    net = Net()
    # Success criterion: compilation and execution complete without error.
    net(input_me_a)
  986. def clear_json_info():
  987. os.system("rm -rf ./kernel_meta/*.json")
  988. os.system("rm -rf ./kernel_meta/*.info")
  989. def find_json_info(file):
  990. result = os.system("ls -al ./kernel_meta/%s" % (file))
  991. return result
  992. class MultiOutReluBywaySqrt(Cell):
  993. def __init__(self):
  994. super().__init__()
  995. self.relu = nn.ReLU()
  996. self.sqrt = P.Sqrt()
  997. def construct(self, x):
  998. x = self.relu(x)
  999. x = self.relu(x)
  1000. x1 = self.relu(x)
  1001. x = self.relu(x1)
  1002. y = self.sqrt(x1)
  1003. return x, y
  1004. class MultiOutReluSqrtBywaySqrt(Cell):
  1005. def __init__(self):
  1006. super().__init__()
  1007. self.relu = nn.ReLU()
  1008. self.sqrt = P.Sqrt()
  1009. self.sin = P.Sin()
  1010. def construct(self, x):
  1011. x = self.relu(x)
  1012. x = self.sqrt(x)
  1013. x1 = self.relu(x)
  1014. x = self.sin(x1)
  1015. y = self.sqrt(x1)
  1016. return x, y
  1017. def clean_all_ir_files(folder_path):
  1018. if os.path.exists(folder_path):
  1019. for file_name in os.listdir(folder_path):
  1020. if file_name.endswith('.ir') or file_name.endswith('.dot') or \
  1021. file_name.endswith('.dat') or file_name.endswith('.pb') or \
  1022. file_name.startswith('trace_code_graph'):
  1023. os.remove(os.path.join(folder_path, file_name))
  1024. def find_newest_validateir_file(folder_path):
  1025. ckpt_files = map(lambda f: os.path.join(folder_path, f),
  1026. filter(lambda f: re.match(r'\d+_validate_\d+.ir', f),
  1027. os.listdir(folder_path)))
  1028. return max(ckpt_files, key=os.path.getctime)
  1029. def read_file():
  1030. filename = find_newest_validateir_file('./')
  1031. with open((os.path.join(filename)), 'r') as f:
  1032. content = f.read()
  1033. return content
# Net containing PReLU, BatchNorm, Conv and Dense, all of which carry weights.
class NetRrelu(Cell):
    """Weighted-layer net used by the amp cast-count tests; the exact op
    sequence matters because the tests count inserted Cast nodes."""

    def __init__(self, in_channel, out_channel):
        super().__init__()
        self.relu = PReLU(channel=in_channel, w=0.25)
        self.bn = BatchNorm2d(num_features=in_channel)
        self.conv = Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=2, stride=1, has_bias=False,
                           weight_init='ones', pad_mode='same')
        self.mean = P.ReduceMean(keep_dims=False)
        self.fc = Dense(in_channels=out_channel, out_channels=out_channel,
                        weight_init='ones', bias_init='zeros', has_bias=True)

    def construct(self, x):
        # PReLU -> BN -> Conv -> mean over axes (2, 3) -> Dense
        x = self.relu(x)
        x = self.bn(x)
        x = self.conv(x)
        x = self.mean(x, (2, 3))
        x = self.fc(x)
        return x
  1052. def check_keep_batchnorm_fp32_false(kwargs, level):
  1053. if ms.context.get_context("device_target") == "GPU":
  1054. if level == "O2":
  1055. if "keep_batchnorm_fp32" in kwargs.keys() and (not kwargs["keep_batchnorm_fp32"]):
  1056. if "cast_model_type" not in kwargs.keys() or kwargs["cast_model_type"] == ms.float16:
  1057. return True
  1058. else:
  1059. if "cast_model_type" in kwargs.keys() and kwargs["cast_model_type"] == ms.float16:
  1060. if "keep_batchnorm_fp32" not in kwargs.keys() or (not kwargs["keep_batchnorm_fp32"]):
  1061. return True
  1062. return False
def use_build_train_network_check_cast_num(network, inputs_level, inputs, label, cast_num, loss_flag=True, **kwargs):
    """Build an amp train network, run one step, and (in graph mode only)
    assert the dumped validate IR contains the expected number of Casts.

    When the GPU kwargs disable fp32 batchnorm (see
    check_keep_batchnorm_fp32_false) the expected count is reduced by 8.
    Returns the training step output.
    """
    diff_cast = 0
    if check_keep_batchnorm_fp32_false(kwargs, level):
        diff_cast += 8
    opt = Momentum(learning_rate=0.0001, momentum=0.009,
                   params=network.trainable_params())
    loss = None
    if loss_flag:
        loss = SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')
    train_network = ms.amp.build_train_network(
        network, opt, loss, level=level, **kwargs)
    out_me = train_network(inputs, label)
    # mode 0 — presumably GRAPH_MODE, where validate IR files are dumped.
    if context.get_context("mode") == 0:
        content = read_file()
        castnum = re.findall('Cast', content)
        assert len(castnum) == max(cast_num - diff_cast, 0)
    return out_me
def test_auto_mixed_precision_train_prelunet(with_save_graphs):
    """O2 amp training on NetRrelu should insert exactly 16 Cast ops.

    `with_save_graphs` is a pytest fixture defined elsewhere — presumably
    it enables IR dumping so read_file() can inspect the graph; confirm
    against conftest.
    """
    net2 = NetRrelu(3, 12)
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 12]).astype(np.float32))
    use_build_train_network_check_cast_num(net2, "O2", input32, label32, 16)
class AssignNet(Cell):
    """Small net whose first op is an AssignSub side effect on a parameter;
    used by the amp cast-count tests."""

    def __init__(self):
        super().__init__()
        self.relu = ReLU()
        self.mean = P.ReduceMean(keep_dims=False)
        self.assign_sub = P.AssignSub()
        # Parameter initialized to all-ones with shape [1, 3, 2, 2].
        self.input_data = Parameter(initializer(
            1, [1, 3, 2, 2], ms.float32), name='value')

    def construct(self, x):
        # AssignSub mutates input_data; its return value feeds the rest
        # of the net.
        x = self.assign_sub(self.input_data, x)
        x = self.relu(x)
        x = self.mean(x, (2, 3))
        return x
def test_auto_mixed_precision_train_1(pynative_save_graphs):
    """O0 amp on AssignNet: no Cast ops expected in the validate IR.

    `pynative_save_graphs` is a pytest fixture defined elsewhere —
    presumably it configures mode/IR dumping; confirm against conftest.
    """
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
    use_build_train_network_check_cast_num(net, "O0", input32, label32, 0)
def test_auto_mixed_precision_train_2(pynative_save_graphs):
    """O2 amp on AssignNet: exactly 2 Cast ops expected in the validate IR.

    `pynative_save_graphs` is a pytest fixture defined elsewhere —
    presumably it configures mode/IR dumping; confirm against conftest.
    """
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
    use_build_train_network_check_cast_num(net, "O2", input32, label32, 2)
class MixControlNet(Cell):
    """Net mixing nested while/if control flow with ParameterUpdate and
    AssignAdd side effects; used to count Cast insertion under amp."""

    def __init__(self, in_channel, x):
        super().__init__()
        self.biasadd = P.BiasAdd()
        self.equal = P.Equal()
        self.addn = P.AddN()
        self.conv = Conv2d(in_channels=in_channel, out_channels=in_channel,
                           kernel_size=1, stride=1, has_bias=False,
                           weight_init='ones', pad_mode='same')
        self.bn = BatchNorm2d(num_features=in_channel)
        self.assignadd = P.AssignAdd()
        self.assign = P.Assign()
        self.relu = ReLU()
        self.mean = P.ReduceMean(keep_dims=False)
        # Random 0/1 bias; its value is mutated by assignadd in construct.
        self.bias = Parameter(
            Tensor(np.random.randint(2, size=(3,)).astype((np.float32))),
            name="bias")
        self.bias2 = Parameter(Tensor(np.ones([3]).astype(np.float32)),
                               name="bias2")
        self.parameterupdate = ParameterUpdate(self.bias)
        self.value = Tensor(np.random.randn(*(3,)), ms.float32)
        # Python int controlling the loop trip counts below.
        self.x = x

    def construct(self, input_x):
        x = self.x
        z = self.x
        out = self.biasadd(input_x, self.bias)
        while x < 20:
            # ParameterUpdate writes bias2's value into bias; the biasadd
            # below must observe the updated parameter.
            update = self.parameterupdate(self.bias2)
            out = self.biasadd(out, update)
            if x < 10:
                out = self.addn((input_x, out))
                while z < 20:
                    out = self.conv(out)
                    z = z + 1
            if x < 20:
                out = self.biasadd(out, self.bias)
                if x % 2 == 0:
                    out = self.biasadd(out, self.bias)
                    # Side effect on bias in one branch only.
                    self.assignadd(self.bias, self.value)
                    out = self.bn(out)
                else:
                    out = self.conv(out)
            x = x + 1
        out = self.addn((out, out))
        out = self.mean(out, (2, 3))
        return out
def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
                                                       label, cast_num,
                                                       sparse=False,
                                                       loss_flag=True,
                                                       **kwargs):
    """Build an amp train network at `level`, run one step, and (in graph
    mode only) assert the dumped validate IR contains exactly `cast_num`
    Cast ops. Returns the training step output."""
    opt = Momentum(learning_rate=0.0001, momentum=0.009,
                   params=network.trainable_params())
    loss = None
    if loss_flag:
        loss = SoftmaxCrossEntropyWithLogits(sparse=sparse, reduction='mean')
    train_network = ms.amp.build_train_network(network, opt, loss, level=level,
                                               **kwargs)
    out_me = train_network(input_x, label)
    # mode 0 — presumably GRAPH_MODE, where validate IR files are dumped.
    if context.get_context("mode") == 0:
        content = read_file()
        castnum = re.findall('Cast', content)
        assert len(castnum) == cast_num
    return out_me
def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
    """amp level 'auto' on the control-flow net; the expected Cast count is
    backend-specific (77 on Ascend, 73 on GPU)."""
    net = MixControlNet(3, 5)
    input_x = Tensor(
        np.random.randint(2, size=(1, 3, 2, 2)).astype((np.float32)))
    label = Tensor(np.zeros([1, 3]).astype(np.float32))
    # NOTE(review): cast_num is only bound for Ascend/GPU targets; on any
    # other device_target the call below raises NameError. Confirm the
    # test is gated to those backends.
    if ms.context.get_context("device_target") == "Ascend":
        cast_num = 77
    if ms.context.get_context("device_target") == "GPU":
        cast_num = 73
    use_build_train_network_controlflow_check_cast_num(net, "auto", input_x,
                                                       label, cast_num)
# op_cast should be located in order_list after abstract_specialize.
# Besides Ascend, it can work on CPU.
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_cast():
    """The Cast of z must execute before z is overwritten by beta2."""
    class Net(nn.Cell):
        def __init__(self, cond1):
            super().__init__()
            self.cond1 = cond1
            self.op_cast = P.Cast()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, beta1, beta2):
            # Read (cast) the old value of z, then clobber z; the branches
            # must use the pre-write value.
            z_local = self.op_cast(self.z, ms.float16)
            self.z = beta2
            if self.cond1:
                out = z_local + beta1
            else:
                out = z_local - beta1
            return out

    context.set_context(save_graphs=False)
    net = Net(True)
    beta1 = Tensor(np.array([2]).astype(np.float32))
    beta2 = Tensor(np.array([10]).astype(np.float32))
    r1 = net(beta1, beta2)
    # 1 (old z) + 2 = 3; it would be 12 if the write were ordered first.
    expect = Tensor(np.array([3]).astype(np.float32))
    np.testing.assert_array_equal(r1.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_while_forward():
    """In-place tensor slice assignment inside a while loop: each slice of
    x is overwritten with its own maximum."""
    class MyWhileNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.max = P.ReduceMax()

        def construct(self, idx, end, x):
            while idx < end:
                part = x[idx, :, :]
                max_num = self.max(part)
                # Slice write must be visible in the returned tensor.
                x[idx, :, 0:2] = max_num
                idx = idx + 1
            return x

    net = MyWhileNet()
    idx = Tensor(np.array(0), dtype=ms.int32)
    end = Tensor(np.array(2), dtype=ms.int32)
    x = Tensor(np.arange(8).reshape(2, 2, 2).astype(np.float32), dtype=ms.float32)
    output = net(idx, end, x)
    # expect dtype is int32 while the output is float32; np.allclose
    # compares across dtypes.
    expect = np.array([[[3, 3], [3, 3]], [[7, 7], [7, 7]]], dtype=np.int32)
    assert np.allclose(output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.skip(reason="not supported yet")
def test_multi_add_assign():
    """res1 must read the parameter value from before the Assign while res2
    reads the value after it."""
    class Net(Cell):
        def __init__(self, i1):
            super(Net, self).__init__()
            self.add = P.Add()
            self.sub = P.Sub()
            self.mul = P.Mul()
            self.assign = P.Assign()
            self.p = Parameter(i1, name='para')

        def construct(self, a, d, e):
            # Reads p before the assign below overwrites it.
            res1 = self.add(self.add(self.add(self.p, a), a), a)
            mul = self.mul(d, e)
            self.assign(self.p, mul)
            # Reads the freshly assigned value d * e.
            res2 = self.sub(self.p, e)
            return res2, res1

    def numpy_out(p, a, d, e):
        # Reference: res1 uses the original p; res2/res_as use the new one.
        res1 = p + a + a + a
        res_as = d * e
        res2 = d * e - e
        return res2, res1, res_as

    p = (np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i0 = (np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i1 = (np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i2 = (np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    net = Net(Tensor(p))
    r2, r1 = net(Tensor(i0), Tensor(i1), Tensor(i2))
    # Also check the parameter itself holds the assigned product.
    outputs = [r2.asnumpy(), r1.asnumpy(), net.p.data.asnumpy()]
    expects = numpy_out(p, i0, i1, i2)
    np.testing.assert_array_equal(outputs, expects)
@pytest.mark.skip(reason="not supported yet")
def test_multi_abs_add_assign():
    """Like test_multi_add_assign but with an Abs subexpression: res1 must
    still read the pre-assign parameter value, res2 the post-assign one."""
    class Net(Cell):
        def __init__(self, para):
            super(Net, self).__init__()
            self.add = P.Add()
            self.sub = P.Sub()
            self.mul = P.Mul()
            self.abs = P.Abs()
            self.assign = P.Assign()
            self.p = Parameter(para, name='para')

        def construct(self, a, d, e):
            tmp = self.abs(self.add(self.abs(a), self.abs(self.mul(a, a))))
            # Reads p before the assign below overwrites it.
            res1 = self.add(self.p, tmp)
            mul = self.mul(d, e)
            self.assign(self.p, mul)
            # Reads the freshly assigned value d * e.
            res2 = self.sub(self.p, e)
            return res2, res1, tmp

    def numpy_out(p, a, d, e):
        # Reference: res1 uses the original p; res2/res_as use the new one.
        tmp = np.abs(np.abs(a) + np.abs(a * a))
        res1 = p + tmp
        res_as = d * e
        res2 = d * e - e
        return res2, res1, res_as, tmp

    # Negative inputs exercise the Abs ops.
    p = -(np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i0 = -(np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i1 = -(np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    i2 = -(np.abs(np.random.normal(0, 1, [3])) + 1).astype(np.float32)
    net = Net(Tensor(p))
    r2, r1, tmp = net(Tensor(i0), Tensor(i1), Tensor(i2))
    outputs = [r2.asnumpy(), r1.asnumpy(), net.p.data.asnumpy(), tmp.asnumpy()]
    expects = numpy_out(p, i0, i1, i2)
    np.testing.assert_array_equal(outputs, expects)