You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_bprop_mindir.py 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Generate the mindir for bprop"""
  16. import numpy as np
  17. import mindspore.nn as nn
  18. from mindspore import Tensor, Parameter
  19. from mindspore.ops import operations as P
  20. import mindspore.ops.functional as F
  21. import mindspore.ops as ops
  22. from mindspore.ops.operations import _inner_ops as inner
  23. import mindspore.common.dtype as mstype
  24. from mindspore.common.initializer import initializer
  25. from mindspore.ops.bprop_mindir import serializable_bprop_ops
  26. from mindspore._c_expression import load_mindir
  27. import mindspore.ops._grad as g
  28. class Net(nn.Cell):
  29. def __init__(self, op):
  30. super(Net, self).__init__()
  31. self.op = op
  32. def construct(self, *inputs):
  33. return self.op(*inputs)
  34. class TupleInputNet(nn.Cell):
  35. def __init__(self, op):
  36. super(TupleInputNet, self).__init__()
  37. self.op = op
  38. def construct(self, x):
  39. return self.op((x,))
  40. class GradNet(nn.Cell):
  41. def __init__(self, network):
  42. super(GradNet, self).__init__()
  43. self.grad = ops.GradOperation(get_all=True)
  44. self.network = network
  45. def construct(self, *inputs):
  46. gout = self.grad(self.network)(*inputs)
  47. return gout
  48. def test_load_mindir_dir():
  49. """
  50. Feature: Bprop pre-compilation.
  51. Description: Load all the mindir files of serializable bprop.
  52. Expectation: All are loaded successfully.
  53. """
  54. bprop_path = g.__file__
  55. bprop_installed_dir = bprop_path[: bprop_path.rindex('/')]
  56. bprop_mindir_export_dir = bprop_installed_dir + "/../bprop_mindir"
  57. for op in serializable_bprop_ops:
  58. if isinstance(op, str):
  59. op_name = op
  60. else:
  61. op_name = op.__name__
  62. file_name = bprop_mindir_export_dir + "/" + op_name + "_bprop.mindir"
  63. graph = load_mindir(file_name)
  64. assert not graph is None
  65. def test_relu():
  66. x = Tensor(np.array([[[[-1, 1, 10],
  67. [1, -1, 1],
  68. [10, 1, -1]]]]).astype(np.float32))
  69. relu = Net(P.ReLU())
  70. grad = GradNet(relu)
  71. grad.compile(x)
  72. def test_identity():
  73. x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
  74. identity = Net(P.Identity())
  75. grad = GradNet(identity)
  76. grad.compile(x)
  77. def test_range():
  78. x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
  79. range_net = Net(inner.Range(1.0, 8.0, 2.0))
  80. grad = GradNet(range_net)
  81. grad.compile(x)
  82. def test_ones_like():
  83. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  84. ones_like = Net(P.OnesLike())
  85. grad = GradNet(ones_like)
  86. grad.compile(x)
  87. def test_zeros_like():
  88. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  89. zeros_like = Net(P.ZerosLike())
  90. grad = GradNet(zeros_like)
  91. grad.compile(x)
  92. def test_argmax():
  93. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  94. argmax = Net(P.Argmax())
  95. grad = GradNet(argmax)
  96. grad.compile(x)
  97. def test_argmin():
  98. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  99. argmin = Net(P.Argmin())
  100. grad = GradNet(argmin)
  101. grad.compile(x)
  102. def test_broadcast():
  103. x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
  104. broadcast = TupleInputNet(P.Broadcast(1))
  105. grad = GradNet(broadcast)
  106. grad.compile(x)
  107. def test_is_finite():
  108. x = Tensor(np.ones([2, 4]).astype(np.int32))
  109. is_finite = Net(P.IsFinite())
  110. grad = GradNet(is_finite)
  111. grad.compile(x)
  112. def test_approximate_equal():
  113. x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  114. y = Tensor(np.array([2, 4, 6]).astype(np.float32))
  115. approximate_equal = Net(P.ApproximateEqual(2.))
  116. grad = GradNet(approximate_equal)
  117. grad.compile(x, y)
  118. def test_logical_not():
  119. x = Tensor(np.array([True, False, True]).astype(np.bool))
  120. logical_not = Net(P.LogicalNot())
  121. grad = GradNet(logical_not)
  122. grad.compile(x)
  123. def test_sign():
  124. x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
  125. sign = Net(P.Sign())
  126. grad = GradNet(sign)
  127. grad.compile(x)
  128. def test_round():
  129. x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
  130. round_net = Net(P.Round())
  131. grad = GradNet(round_net)
  132. grad.compile(x)
  133. def test_lin_space():
  134. start = Tensor(1, mstype.float32)
  135. stop = Tensor(10, mstype.float32)
  136. num = 5
  137. lin_space = Net(P.LinSpace())
  138. grad = GradNet(lin_space)
  139. grad.compile(start, stop, num)
  140. def test_dropout_gen_mask():
  141. x = (2, 4, 2, 2)
  142. keep_prob = Tensor(1.0, mstype.float32)
  143. dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
  144. grad = GradNet(dropout_gen_mask)
  145. grad.compile(x, keep_prob)
  146. def test_onehot():
  147. indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
  148. depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
  149. one_hot = Net(P.OneHot())
  150. grad = GradNet(one_hot)
  151. grad.compile(indices, depth, on_value, off_value)
  152. def test_assign():
  153. class AssignNet(nn.Cell):
  154. def __init__(self):
  155. super(AssignNet, self).__init__()
  156. self.assign = P.Assign()
  157. self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
  158. def construct(self, x):
  159. return self.assign(self.variable, x)
  160. value = Tensor([2.0], mstype.float32)
  161. assign = AssignNet()
  162. grad = GradNet(assign)
  163. grad.compile(value)
  164. def test_assign_add():
  165. class AssignAddNet(nn.Cell):
  166. def __init__(self):
  167. super(AssignAddNet, self).__init__()
  168. self.assign_add = P.AssignAdd()
  169. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  170. def construct(self, x):
  171. return self.assign_add(self.variable, x)
  172. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  173. assign_add = AssignAddNet()
  174. grad = GradNet(assign_add)
  175. grad.compile(value)
  176. def test_assign_sub():
  177. class AssignSubNet(nn.Cell):
  178. def __init__(self):
  179. super(AssignSubNet, self).__init__()
  180. self.assign = P.AssignSub()
  181. self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
  182. def construct(self, x):
  183. return self.assign(self.variable, x)
  184. value = Tensor(np.ones([1]).astype(np.int32) * 100)
  185. assign_sub = AssignSubNet()
  186. grad = GradNet(assign_sub)
  187. grad.compile(value)
  188. def test_iou():
  189. anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  190. gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  191. iou = Net(P.IOU())
  192. grad = GradNet(iou)
  193. grad.compile(anchor_boxes, gt_boxes)
  194. def test_bn_training_reduce():
  195. x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
  196. bn_training_reduce = Net(P.BNTrainingReduce())
  197. grad = GradNet(bn_training_reduce)
  198. grad.compile(x)
  199. def test_equal():
  200. x = Tensor([2.0], mstype.float32)
  201. y = Tensor([2.0], mstype.float32)
  202. equal = Net(P.Equal())
  203. grad = GradNet(equal)
  204. grad.compile(x, y)
  205. def test_not_equal():
  206. x = Tensor([2.0], mstype.float32)
  207. y = Tensor([2.0], mstype.float32)
  208. not_equal = Net(P.NotEqual())
  209. grad = GradNet(not_equal)
  210. grad.compile(x, y)
  211. def test_greater():
  212. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  213. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  214. greater = Net(P.Greater())
  215. grad = GradNet(greater)
  216. grad.compile(x, y)
  217. def test_greater_equal():
  218. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  219. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  220. greater_equal = Net(P.GreaterEqual())
  221. grad = GradNet(greater_equal)
  222. grad.compile(x, y)
  223. def test_less():
  224. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  225. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  226. less = Net(P.Less())
  227. grad = GradNet(less)
  228. grad.compile(x, y)
  229. def test_less_equal():
  230. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  231. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  232. less_equal = Net(P.LessEqual())
  233. grad = GradNet(less_equal)
  234. grad.compile(x, y)
  235. def test_logical_and():
  236. x = Tensor(np.array([True, False, True]), mstype.bool_)
  237. y = Tensor(np.array([True, True, False]), mstype.bool_)
  238. logical_and = Net(P.LogicalAnd())
  239. grad = GradNet(logical_and)
  240. grad.compile(x, y)
  241. def test_logical_or():
  242. x = Tensor(np.array([True, False, True]), mstype.bool_)
  243. y = Tensor(np.array([True, True, False]), mstype.bool_)
  244. logical_or = Net(P.LogicalOr())
  245. grad = GradNet(logical_or)
  246. grad.compile(x, y)
  247. def test_reduce_all():
  248. x = Tensor(np.array([[True, False], [True, True]]))
  249. reduce_all = Net(P.ReduceAll(keep_dims=True))
  250. grad = GradNet(reduce_all)
  251. grad.compile(x)
  252. def test_reduce_any():
  253. x = Tensor(np.array([[True, False], [True, True]]))
  254. reduce_all = Net(P.ReduceAny(keep_dims=True))
  255. grad = GradNet(reduce_all)
  256. grad.compile(x)
  257. def test_dropout_do_mask():
  258. input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
  259. keep_prob = Tensor(0.5, mstype.float32)
  260. mask = Tensor(np.ones([2]), mstype.uint8)
  261. dropout_do_mask = Net(P.DropoutDoMask())
  262. grad = GradNet(dropout_do_mask)
  263. grad.compile(input_x, mask, keep_prob)
  264. def test_select():
  265. """
  266. Feature: Bprop pre-compilation.
  267. Description: Compile the backward graph for the select op.
  268. Expectation: Load the bprop mindir successfully.
  269. """
  270. input_cond = Tensor([True, False])
  271. x = Tensor(np.array([1, 2]), mstype.int32)
  272. y = Tensor(np.array([1, 1]), mstype.int32)
  273. select = Net(P.Select())
  274. grad = GradNet(select)
  275. grad.compile(input_cond, x, y)
  276. def test_scatter_max():
  277. """
  278. Feature: Bprop pre-compilation.
  279. Description: Compile the backward graph for the scatter_max op.
  280. Expectation: Load the bprop mindir successfully.
  281. """
  282. class ScatterMaxNet(nn.Cell):
  283. def __init__(self):
  284. super(ScatterMaxNet, self).__init__()
  285. self.scatter_max = P.ScatterMax()
  286. self.input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mstype.float32),
  287. name="input_x")
  288. def construct(self, indices, updates):
  289. return self.scatter_max(self.input_x, indices, updates)
  290. indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
  291. updates = Tensor(np.ones([2, 2, 3]) * 88, mstype.float32)
  292. scatter_max = ScatterMaxNet()
  293. grad = GradNet(scatter_max)
  294. grad.compile(indices, updates)
  295. def test_relu_grad():
  296. """
  297. Feature: Bprop pre-compilation.
  298. Description: Compile the backward graph for the relu_grad op.
  299. Expectation: Load the bprop mindir successfully.
  300. """
  301. x = Tensor(np.array([[[[-1, 1, 10],
  302. [1, -1, 1],
  303. [10, 1, -1]]]]).astype(np.float32))
  304. relu = Net(P.ReLU())
  305. grad1 = GradNet(relu)
  306. grad2 = GradNet(grad1)
  307. grad2.compile(x)
  308. def test_tuple_getitem():
  309. """
  310. Feature: Bprop pre-compilation.
  311. Description: Compile the backward graph for the tuple_getitem op.
  312. Expectation: Load the bprop mindir successfully.
  313. """
  314. class TupleGetitemNet(nn.Cell):
  315. def __init__(self):
  316. super(TupleGetitemNet, self).__init__()
  317. self.maxpool_arg = P.MaxPoolWithArgmax(pad_mode="VALID", kernel_size=2, strides=1)
  318. def construct(self, x):
  319. output = self.maxpool_arg(x)
  320. return output[0]
  321. x = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mstype.float32)
  322. tuple_getitem = TupleGetitemNet()
  323. grad = GradNet(tuple_getitem)
  324. grad.compile(x)
  325. def test_depend():
  326. """
  327. Feature: Bprop pre-compilation.
  328. Description: Compile the backward graph for the depend op.
  329. Expectation: Load the bprop mindir successfully.
  330. """
  331. class DependNet(nn.Cell):
  332. def __init__(self):
  333. super(DependNet, self).__init__()
  334. self.softmax = P.Softmax()
  335. self.depend = ops.Depend()
  336. def construct(self, x, y):
  337. mul = x * y
  338. y = self.depend(y, mul)
  339. output = self.softmax(y)
  340. return output
  341. x = Tensor(np.ones([4, 5]), mstype.float32)
  342. y = Tensor(np.ones([4, 5]), mstype.float32)
  343. depend = DependNet()
  344. grad = GradNet(depend)
  345. grad.compile(x, y)
  346. def test_stop_gradient():
  347. """
  348. Feature: Bprop pre-compilation.
  349. Description: Compile the backward graph for the stop_gradient op.
  350. Expectation: Load the bprop mindir successfully.
  351. """
  352. class StopGradientNet(nn.Cell):
  353. def __init__(self):
  354. super(StopGradientNet, self).__init__()
  355. def construct(self, x, y):
  356. c = x * y
  357. c_s = F.stop_gradient(c)
  358. return c_s
  359. x = Tensor(np.ones([4, 5]), mstype.float32)
  360. y = Tensor(np.ones([4, 5]), mstype.float32)
  361. stop_gradient = StopGradientNet()
  362. grad = GradNet(stop_gradient)
  363. grad.compile(x, y)
  364. def test_switch():
  365. """
  366. Feature: Bprop pre-compilation.
  367. Description: Compile the backward graph for the switch op.
  368. Expectation: Load the bprop mindir successfully.
  369. """
  370. class SwitchNet(nn.Cell):
  371. def __init__(self):
  372. super(SwitchNet, self).__init__()
  373. def construct(self, x, y):
  374. if x > y:
  375. return x
  376. return y
  377. x = Tensor(np.array([3]), mstype.float32)
  378. y = Tensor(np.array([2]), mstype.float32)
  379. switch_net = SwitchNet()
  380. grad = GradNet(switch_net)
  381. grad.compile(x, y)
  382. def test_update_state():
  383. """
  384. Feature: Bprop pre-compilation.
  385. Description: Compile the backward graph for the update_state op.
  386. Expectation: Load the bprop mindir successfully.
  387. """
  388. class UpdateStateNet(nn.Cell):
  389. def __init__(self):
  390. super(UpdateStateNet, self).__init__()
  391. self.assign_add = P.AssignAdd()
  392. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  393. def construct(self, x):
  394. return self.assign_add(self.variable, x)
  395. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  396. update_state = UpdateStateNet()
  397. grad = GradNet(update_state)
  398. grad.compile(value)
  399. def test_load():
  400. """
  401. Feature: Bprop pre-compilation.
  402. Description: Compile the backward graph for the load op.
  403. Expectation: Load the bprop mindir successfully.
  404. """
  405. class LoadNet(nn.Cell):
  406. def __init__(self):
  407. super(LoadNet, self).__init__()
  408. self.add = P.Add()
  409. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  410. def construct(self, x):
  411. return self.add(self.variable, x)
  412. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  413. load = LoadNet()
  414. grad = GradNet(load)
  415. grad.compile(value)
  416. def test_floor_div():
  417. """
  418. Feature: Bprop pre-compilation.
  419. Description: Compile the backward graph for the floor_div op.
  420. Expectation: Load the bprop mindir successfully.
  421. """
  422. x = Tensor(np.array([2, 4, -1]), mstype.int32)
  423. y = Tensor(np.array([3, 3, 3]), mstype.int32)
  424. floor_div = Net(P.FloorDiv())
  425. grad = GradNet(floor_div)
  426. grad.compile(x, y)
  427. def test_truncate_div():
  428. """
  429. Feature: Bprop pre-compilation.
  430. Description: Compile the backward graph for the truncate_div op.
  431. Expectation: Load the bprop mindir successfully.
  432. """
  433. x = Tensor(np.array([2, 4, -1]), mstype.int32)
  434. y = Tensor(np.array([3, 3, 3]), mstype.int32)
  435. truncate_div = Net(P.TruncateDiv())
  436. grad = GradNet(truncate_div)
  437. grad.compile(x, y)
  438. def test_minimum():
  439. """
  440. Feature: Bprop pre-compilation.
  441. Description: Compile the backward graph for the minimum op.
  442. Expectation: Load the bprop mindir successfully.
  443. """
  444. x = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
  445. y = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
  446. minimum = Net(P.Minimum())
  447. grad = GradNet(minimum)
  448. grad.compile(x, y)
  449. def test_maximum():
  450. """
  451. Feature: Bprop pre-compilation.
  452. Description: Compile the backward graph for the maximum op.
  453. Expectation: Load the bprop mindir successfully.
  454. """
  455. x = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
  456. y = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
  457. maximum = Net(P.Maximum())
  458. grad = GradNet(maximum)
  459. grad.compile(x, y)
  460. def test_is_nan():
  461. """
  462. Feature: Bprop pre-compilation.
  463. Description: Compile the backward graph for the is_nan op.
  464. Expectation: Load the bprop mindir successfully.
  465. """
  466. x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
  467. is_nan = Net(P.IsNan())
  468. grad = GradNet(is_nan)
  469. grad.compile(x)
  470. def test_is_inf():
  471. """
  472. Feature: Bprop pre-compilation.
  473. Description: Compile the backward graph for the is_inf op.
  474. Expectation: Load the bprop mindir successfully.
  475. """
  476. x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
  477. is_inf = Net(P.IsInf())
  478. grad = GradNet(is_inf)
  479. grad.compile(x)
  480. def test_relu_v2():
  481. """
  482. Feature: Bprop pre-compilation.
  483. Description: Compile the backward graph for the relu_v2 op.
  484. Expectation: Load the bprop mindir successfully.
  485. """
  486. x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mstype.float32)
  487. relu_v2 = Net(P.ReLUV2())
  488. grad = GradNet(relu_v2)
  489. grad.compile(x)