You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_bprop_mindir.py 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
"""Generate the mindir for bprop"""
import os

import numpy as np

import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.ops._grad as g
import mindspore.ops.functional as F
from mindspore import context
from mindspore import Tensor, Parameter
from mindspore._c_expression import load_mindir
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops.bprop_mindir import serializable_bprop_ops
from mindspore.ops.operations import _inner_ops as inner
  29. class Net(nn.Cell):
  30. def __init__(self, op):
  31. super(Net, self).__init__()
  32. self.op = op
  33. def construct(self, *inputs):
  34. return self.op(*inputs)
  35. class TupleInputNet(nn.Cell):
  36. def __init__(self, op):
  37. super(TupleInputNet, self).__init__()
  38. self.op = op
  39. def construct(self, x):
  40. return self.op((x,))
  41. class GradNet(nn.Cell):
  42. def __init__(self, network):
  43. super(GradNet, self).__init__()
  44. self.grad = ops.GradOperation(get_all=True)
  45. self.network = network
  46. def construct(self, *inputs):
  47. gout = self.grad(self.network)(*inputs)
  48. return gout
  49. def test_load_mindir_dir():
  50. """
  51. Feature: Bprop pre-compilation.
  52. Description: Load all the mindir files of serializable bprop.
  53. Expectation: All are loaded successfully.
  54. """
  55. bprop_path = g.__file__
  56. bprop_installed_dir = bprop_path[: bprop_path.rindex('/')]
  57. bprop_mindir_export_dir = bprop_installed_dir + "/../bprop_mindir"
  58. for op in serializable_bprop_ops:
  59. if isinstance(op, str):
  60. op_name = op
  61. else:
  62. op_name = op.__name__
  63. file_name = bprop_mindir_export_dir + "/" + op_name + "_bprop.mindir"
  64. graph = load_mindir(file_name)
  65. assert not graph is None
  66. def test_relu():
  67. x = Tensor(np.array([[[[-1, 1, 10],
  68. [1, -1, 1],
  69. [10, 1, -1]]]]).astype(np.float32))
  70. relu = Net(P.ReLU())
  71. grad = GradNet(relu)
  72. grad.compile(x)
  73. def test_identity():
  74. x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
  75. identity = Net(P.Identity())
  76. grad = GradNet(identity)
  77. grad.compile(x)
  78. def test_range():
  79. x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
  80. range_net = Net(inner.Range(1.0, 8.0, 2.0))
  81. grad = GradNet(range_net)
  82. grad.compile(x)
  83. def test_ones_like():
  84. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  85. ones_like = Net(P.OnesLike())
  86. grad = GradNet(ones_like)
  87. grad.compile(x)
  88. def test_zeros_like():
  89. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  90. zeros_like = Net(P.ZerosLike())
  91. grad = GradNet(zeros_like)
  92. grad.compile(x)
  93. def test_argmax():
  94. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  95. argmax = Net(P.Argmax())
  96. grad = GradNet(argmax)
  97. grad.compile(x)
  98. def test_argmin():
  99. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  100. argmin = Net(P.Argmin())
  101. grad = GradNet(argmin)
  102. grad.compile(x)
  103. def test_broadcast():
  104. x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
  105. broadcast = TupleInputNet(P.Broadcast(1))
  106. grad = GradNet(broadcast)
  107. grad.compile(x)
  108. def test_is_finite():
  109. x = Tensor(np.ones([2, 4]).astype(np.int32))
  110. is_finite = Net(P.IsFinite())
  111. grad = GradNet(is_finite)
  112. grad.compile(x)
  113. def test_approximate_equal():
  114. x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  115. y = Tensor(np.array([2, 4, 6]).astype(np.float32))
  116. approximate_equal = Net(P.ApproximateEqual(2.))
  117. grad = GradNet(approximate_equal)
  118. grad.compile(x, y)
  119. def test_logical_not():
  120. x = Tensor(np.array([True, False, True]).astype(np.bool))
  121. logical_not = Net(P.LogicalNot())
  122. grad = GradNet(logical_not)
  123. grad.compile(x)
  124. def test_sign():
  125. x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
  126. sign = Net(P.Sign())
  127. grad = GradNet(sign)
  128. grad.compile(x)
  129. def test_round():
  130. x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
  131. round_net = Net(P.Round())
  132. grad = GradNet(round_net)
  133. grad.compile(x)
  134. def test_lin_space():
  135. start = Tensor(1, mstype.float32)
  136. stop = Tensor(10, mstype.float32)
  137. num = 5
  138. lin_space = Net(P.LinSpace())
  139. grad = GradNet(lin_space)
  140. grad.compile(start, stop, num)
  141. def test_dropout_gen_mask():
  142. x = (2, 4, 2, 2)
  143. keep_prob = Tensor(1.0, mstype.float32)
  144. dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
  145. grad = GradNet(dropout_gen_mask)
  146. grad.compile(x, keep_prob)
  147. def test_onehot():
  148. indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
  149. depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
  150. one_hot = Net(P.OneHot())
  151. grad = GradNet(one_hot)
  152. grad.compile(indices, depth, on_value, off_value)
  153. def test_assign():
  154. class AssignNet(nn.Cell):
  155. def __init__(self):
  156. super(AssignNet, self).__init__()
  157. self.assign = P.Assign()
  158. self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
  159. def construct(self, x):
  160. return self.assign(self.variable, x)
  161. value = Tensor([2.0], mstype.float32)
  162. assign = AssignNet()
  163. grad = GradNet(assign)
  164. grad.compile(value)
  165. def test_assign_add():
  166. class AssignAddNet(nn.Cell):
  167. def __init__(self):
  168. super(AssignAddNet, self).__init__()
  169. self.assign_add = P.AssignAdd()
  170. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  171. def construct(self, x):
  172. return self.assign_add(self.variable, x)
  173. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  174. assign_add = AssignAddNet()
  175. grad = GradNet(assign_add)
  176. grad.compile(value)
  177. def test_assign_sub():
  178. class AssignSubNet(nn.Cell):
  179. def __init__(self):
  180. super(AssignSubNet, self).__init__()
  181. self.assign = P.AssignSub()
  182. self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
  183. def construct(self, x):
  184. return self.assign(self.variable, x)
  185. value = Tensor(np.ones([1]).astype(np.int32) * 100)
  186. assign_sub = AssignSubNet()
  187. grad = GradNet(assign_sub)
  188. grad.compile(value)
  189. def test_iou():
  190. anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  191. gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  192. iou = Net(P.IOU())
  193. grad = GradNet(iou)
  194. grad.compile(anchor_boxes, gt_boxes)
  195. def test_bn_training_reduce():
  196. x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
  197. bn_training_reduce = Net(P.BNTrainingReduce())
  198. grad = GradNet(bn_training_reduce)
  199. grad.compile(x)
  200. def test_equal():
  201. x = Tensor([2.0], mstype.float32)
  202. y = Tensor([2.0], mstype.float32)
  203. equal = Net(P.Equal())
  204. grad = GradNet(equal)
  205. grad.compile(x, y)
  206. def test_not_equal():
  207. x = Tensor([2.0], mstype.float32)
  208. y = Tensor([2.0], mstype.float32)
  209. not_equal = Net(P.NotEqual())
  210. grad = GradNet(not_equal)
  211. grad.compile(x, y)
  212. def test_greater():
  213. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  214. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  215. greater = Net(P.Greater())
  216. grad = GradNet(greater)
  217. grad.compile(x, y)
  218. def test_greater_equal():
  219. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  220. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  221. greater_equal = Net(P.GreaterEqual())
  222. grad = GradNet(greater_equal)
  223. grad.compile(x, y)
  224. def test_less():
  225. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  226. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  227. less = Net(P.Less())
  228. grad = GradNet(less)
  229. grad.compile(x, y)
  230. def test_less_equal():
  231. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  232. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  233. less_equal = Net(P.LessEqual())
  234. grad = GradNet(less_equal)
  235. grad.compile(x, y)
  236. def test_logical_and():
  237. x = Tensor(np.array([True, False, True]), mstype.bool_)
  238. y = Tensor(np.array([True, True, False]), mstype.bool_)
  239. logical_and = Net(P.LogicalAnd())
  240. grad = GradNet(logical_and)
  241. grad.compile(x, y)
  242. def test_logical_or():
  243. x = Tensor(np.array([True, False, True]), mstype.bool_)
  244. y = Tensor(np.array([True, True, False]), mstype.bool_)
  245. logical_or = Net(P.LogicalOr())
  246. grad = GradNet(logical_or)
  247. grad.compile(x, y)
  248. def test_reduce_all():
  249. x = Tensor(np.array([[True, False], [True, True]]))
  250. reduce_all = Net(P.ReduceAll(keep_dims=True))
  251. grad = GradNet(reduce_all)
  252. grad.compile(x)
  253. def test_reduce_any():
  254. x = Tensor(np.array([[True, False], [True, True]]))
  255. reduce_all = Net(P.ReduceAny(keep_dims=True))
  256. grad = GradNet(reduce_all)
  257. grad.compile(x)
  258. def test_dropout_do_mask():
  259. input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
  260. keep_prob = Tensor(0.5, mstype.float32)
  261. mask = Tensor(np.ones([2]), mstype.uint8)
  262. dropout_do_mask = Net(P.DropoutDoMask())
  263. grad = GradNet(dropout_do_mask)
  264. grad.compile(input_x, mask, keep_prob)
  265. def test_select():
  266. """
  267. Feature: Bprop pre-compilation.
  268. Description: Compile the backward graph for the select op.
  269. Expectation: Load the bprop mindir successfully.
  270. """
  271. input_cond = Tensor([True, False])
  272. x = Tensor(np.array([1, 2]), mstype.int32)
  273. y = Tensor(np.array([1, 1]), mstype.int32)
  274. select = Net(P.Select())
  275. grad = GradNet(select)
  276. grad.compile(input_cond, x, y)
  277. def test_scatter_max():
  278. """
  279. Feature: Bprop pre-compilation.
  280. Description: Compile the backward graph for the scatter_max op.
  281. Expectation: Load the bprop mindir successfully.
  282. """
  283. class ScatterMaxNet(nn.Cell):
  284. def __init__(self):
  285. super(ScatterMaxNet, self).__init__()
  286. self.scatter_max = P.ScatterMax()
  287. self.input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mstype.float32),
  288. name="input_x")
  289. def construct(self, indices, updates):
  290. return self.scatter_max(self.input_x, indices, updates)
  291. indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
  292. updates = Tensor(np.ones([2, 2, 3]) * 88, mstype.float32)
  293. scatter_max = ScatterMaxNet()
  294. grad = GradNet(scatter_max)
  295. grad.compile(indices, updates)
  296. def test_relu_grad():
  297. """
  298. Feature: Bprop pre-compilation.
  299. Description: Compile the backward graph for the relu_grad op.
  300. Expectation: Load the bprop mindir successfully.
  301. """
  302. x = Tensor(np.array([[[[-1, 1, 10],
  303. [1, -1, 1],
  304. [10, 1, -1]]]]).astype(np.float32))
  305. relu = Net(P.ReLU())
  306. grad1 = GradNet(relu)
  307. grad2 = GradNet(grad1)
  308. grad2.compile(x)
  309. def test_tuple_getitem():
  310. """
  311. Feature: Bprop pre-compilation.
  312. Description: Compile the backward graph for the tuple_getitem op.
  313. Expectation: Load the bprop mindir successfully.
  314. """
  315. class TupleGetitemNet(nn.Cell):
  316. def __init__(self):
  317. super(TupleGetitemNet, self).__init__()
  318. self.maxpool_arg = P.MaxPoolWithArgmax(pad_mode="VALID", kernel_size=2, strides=1)
  319. def construct(self, x):
  320. output = self.maxpool_arg(x)
  321. return output[0]
  322. x = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mstype.float32)
  323. tuple_getitem = TupleGetitemNet()
  324. grad = GradNet(tuple_getitem)
  325. grad.compile(x)
  326. def test_depend():
  327. """
  328. Feature: Bprop pre-compilation.
  329. Description: Compile the backward graph for the depend op.
  330. Expectation: Load the bprop mindir successfully.
  331. """
  332. class DependNet(nn.Cell):
  333. def __init__(self):
  334. super(DependNet, self).__init__()
  335. self.softmax = P.Softmax()
  336. self.depend = ops.Depend()
  337. def construct(self, x, y):
  338. mul = x * y
  339. y = self.depend(y, mul)
  340. output = self.softmax(y)
  341. return output
  342. x = Tensor(np.ones([4, 5]), mstype.float32)
  343. y = Tensor(np.ones([4, 5]), mstype.float32)
  344. depend = DependNet()
  345. grad = GradNet(depend)
  346. grad.compile(x, y)
  347. def test_stop_gradient():
  348. """
  349. Feature: Bprop pre-compilation.
  350. Description: Compile the backward graph for the stop_gradient op.
  351. Expectation: Load the bprop mindir successfully.
  352. """
  353. class StopGradientNet(nn.Cell):
  354. def construct(self, x, y):
  355. c = x * y
  356. c_s = F.stop_gradient(c)
  357. return c_s
  358. x = Tensor(np.ones([4, 5]), mstype.float32)
  359. y = Tensor(np.ones([4, 5]), mstype.float32)
  360. stop_gradient = StopGradientNet()
  361. grad = GradNet(stop_gradient)
  362. grad.compile(x, y)
  363. def test_switch():
  364. """
  365. Feature: Bprop pre-compilation.
  366. Description: Compile the backward graph for the switch op.
  367. Expectation: Load the bprop mindir successfully.
  368. """
  369. context.set_context(mode=context.PYNATIVE_MODE)
  370. class SwitchNet(nn.Cell):
  371. def construct(self, x, y):
  372. if x > y:
  373. return x
  374. return y
  375. x = Tensor(np.array([3]), mstype.float32)
  376. y = Tensor(np.array([2]), mstype.float32)
  377. switch_net = SwitchNet()
  378. grad = GradNet(switch_net)
  379. grad.compile(x, y)
  380. def test_update_state():
  381. """
  382. Feature: Bprop pre-compilation.
  383. Description: Compile the backward graph for the update_state op.
  384. Expectation: Load the bprop mindir successfully.
  385. """
  386. class UpdateStateNet(nn.Cell):
  387. def __init__(self):
  388. super(UpdateStateNet, self).__init__()
  389. self.assign_add = P.AssignAdd()
  390. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  391. def construct(self, x):
  392. return self.assign_add(self.variable, x)
  393. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  394. update_state = UpdateStateNet()
  395. grad = GradNet(update_state)
  396. grad.compile(value)
  397. def test_load():
  398. """
  399. Feature: Bprop pre-compilation.
  400. Description: Compile the backward graph for the load op.
  401. Expectation: Load the bprop mindir successfully.
  402. """
  403. class LoadNet(nn.Cell):
  404. def __init__(self):
  405. super(LoadNet, self).__init__()
  406. self.add = P.Add()
  407. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  408. def construct(self, x):
  409. return self.add(self.variable, x)
  410. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  411. load = LoadNet()
  412. grad = GradNet(load)
  413. grad.compile(value)
  414. def test_floor_div():
  415. """
  416. Feature: Bprop pre-compilation.
  417. Description: Compile the backward graph for the floor_div op.
  418. Expectation: Load the bprop mindir successfully.
  419. """
  420. x = Tensor(np.array([2, 4, -1]), mstype.int32)
  421. y = Tensor(np.array([3, 3, 3]), mstype.int32)
  422. floor_div = Net(P.FloorDiv())
  423. grad = GradNet(floor_div)
  424. grad.compile(x, y)
  425. def test_truncate_div():
  426. """
  427. Feature: Bprop pre-compilation.
  428. Description: Compile the backward graph for the truncate_div op.
  429. Expectation: Load the bprop mindir successfully.
  430. """
  431. x = Tensor(np.array([2, 4, -1]), mstype.int32)
  432. y = Tensor(np.array([3, 3, 3]), mstype.int32)
  433. truncate_div = Net(P.TruncateDiv())
  434. grad = GradNet(truncate_div)
  435. grad.compile(x, y)
  436. def test_minimum():
  437. """
  438. Feature: Bprop pre-compilation.
  439. Description: Compile the backward graph for the minimum op.
  440. Expectation: Load the bprop mindir successfully.
  441. """
  442. x = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
  443. y = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
  444. minimum = Net(P.Minimum())
  445. grad = GradNet(minimum)
  446. grad.compile(x, y)
  447. def test_maximum():
  448. """
  449. Feature: Bprop pre-compilation.
  450. Description: Compile the backward graph for the maximum op.
  451. Expectation: Load the bprop mindir successfully.
  452. """
  453. x = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
  454. y = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
  455. maximum = Net(P.Maximum())
  456. grad = GradNet(maximum)
  457. grad.compile(x, y)
  458. def test_is_nan():
  459. """
  460. Feature: Bprop pre-compilation.
  461. Description: Compile the backward graph for the is_nan op.
  462. Expectation: Load the bprop mindir successfully.
  463. """
  464. x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
  465. is_nan = Net(P.IsNan())
  466. grad = GradNet(is_nan)
  467. grad.compile(x)
  468. def test_is_inf():
  469. """
  470. Feature: Bprop pre-compilation.
  471. Description: Compile the backward graph for the is_inf op.
  472. Expectation: Load the bprop mindir successfully.
  473. """
  474. x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
  475. is_inf = Net(P.IsInf())
  476. grad = GradNet(is_inf)
  477. grad.compile(x)
  478. def test_relu_v2():
  479. """
  480. Feature: Bprop pre-compilation.
  481. Description: Compile the backward graph for the relu_v2 op.
  482. Expectation: Load the bprop mindir successfully.
  483. """
  484. x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mstype.float32)
  485. relu_v2 = Net(P.ReLUV2())
  486. grad = GradNet(relu_v2)
  487. grad.compile(x)