You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_bprop_mindir.py 9.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Generate the mindir for bprop"""
  16. import os
  17. import numpy as np
  18. import mindspore.nn as nn
  19. from mindspore import Tensor, Parameter
  20. from mindspore.ops import operations as P
  21. import mindspore.ops as ops
  22. from mindspore.ops.operations import _inner_ops as inner
  23. import mindspore.common.dtype as mstype
  24. from mindspore.common.initializer import initializer
  25. import mindspore.ops._grad as g
  26. class Net(nn.Cell):
  27. def __init__(self, op):
  28. super(Net, self).__init__()
  29. self.op = op
  30. def construct(self, *inputs, a=0, b=1):
  31. c = a + b
  32. return c, self.op(*inputs)
  33. class TupleInputNet(nn.Cell):
  34. def __init__(self, op):
  35. super(TupleInputNet, self).__init__()
  36. self.op = op
  37. def construct(self, x):
  38. return self.op((x,))
  39. class GradNet(nn.Cell):
  40. def __init__(self, network):
  41. super(GradNet, self).__init__()
  42. self.grad = ops.GradOperation(get_all=True)
  43. self.network = network
  44. def construct(self, *inputs):
  45. gout = self.grad(self.network)(*inputs)
  46. return gout
  47. def test_remove_mindir_dir():
  48. bprop_path = g.__file__
  49. bprop_installed_dir = bprop_path[: bprop_path.rindex('/')]
  50. bprop_mindir_export_dir = bprop_installed_dir + "/../bprop_mindir"
  51. os.rename(bprop_mindir_export_dir, bprop_mindir_export_dir + "_bak")
  52. x = Tensor(np.array([[[[-1, 1, 10],
  53. [1, -1, 1],
  54. [10, 1, -1]]]]).astype(np.float32))
  55. relu = Net(P.ReLU())
  56. grad = GradNet(relu)
  57. grad.compile(x)
  58. os.rename(bprop_mindir_export_dir + "_bak", bprop_mindir_export_dir)
  59. def test_relu():
  60. x = Tensor(np.array([[[[-1, 1, 10],
  61. [1, -1, 1],
  62. [10, 1, -1]]]]).astype(np.float32))
  63. relu = Net(P.ReLU())
  64. grad = GradNet(relu)
  65. grad.compile(x)
  66. def test_identity():
  67. x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
  68. identity = Net(P.Identity())
  69. grad = GradNet(identity)
  70. grad.compile(x)
  71. def test_range():
  72. x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
  73. range_net = Net(inner.Range(1.0, 8.0, 2.0))
  74. grad = GradNet(range_net)
  75. grad.compile(x)
  76. def test_ones_like():
  77. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  78. ones_like = Net(P.OnesLike())
  79. grad = GradNet(ones_like)
  80. grad.compile(x)
  81. def test_zeros_like():
  82. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  83. zeros_like = Net(P.ZerosLike())
  84. grad = GradNet(zeros_like)
  85. grad.compile(x)
  86. def test_argmax():
  87. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  88. argmax = Net(P.Argmax())
  89. grad = GradNet(argmax)
  90. grad.compile(x)
  91. def test_argmin():
  92. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  93. argmin = Net(P.Argmin())
  94. grad = GradNet(argmin)
  95. grad.compile(x)
  96. def test_broadcast():
  97. x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
  98. broadcast = TupleInputNet(P.Broadcast(1))
  99. grad = GradNet(broadcast)
  100. grad.compile(x)
  101. def test_is_finite():
  102. x = Tensor(np.ones([2, 4]).astype(np.int32))
  103. is_finite = Net(P.IsFinite())
  104. grad = GradNet(is_finite)
  105. grad.compile(x)
  106. def test_approximate_equal():
  107. x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  108. y = Tensor(np.array([2, 4, 6]).astype(np.float32))
  109. approximate_equal = Net(P.ApproximateEqual(2.))
  110. grad = GradNet(approximate_equal)
  111. grad.compile(x, y)
  112. def test_logical_not():
  113. x = Tensor(np.array([True, False, True]).astype(np.bool))
  114. logical_not = Net(P.LogicalNot())
  115. grad = GradNet(logical_not)
  116. grad.compile(x)
  117. def test_sign():
  118. x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
  119. sign = Net(P.Sign())
  120. grad = GradNet(sign)
  121. grad.compile(x)
  122. def test_round():
  123. x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
  124. round_net = Net(P.Round())
  125. grad = GradNet(round_net)
  126. grad.compile(x)
  127. def test_lin_space():
  128. start = Tensor(1, mstype.float32)
  129. stop = Tensor(10, mstype.float32)
  130. num = 5
  131. lin_space = Net(P.LinSpace())
  132. grad = GradNet(lin_space)
  133. grad.compile(start, stop, num)
  134. def test_dropout_gen_mask():
  135. x = (2, 4, 2, 2)
  136. keep_prob = Tensor(1.0, mstype.float32)
  137. dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
  138. grad = GradNet(dropout_gen_mask)
  139. grad.compile(x, keep_prob)
  140. def test_onehot():
  141. indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
  142. depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
  143. one_hot = Net(P.OneHot())
  144. grad = GradNet(one_hot)
  145. grad.compile(indices, depth, on_value, off_value)
  146. def test_assign():
  147. class AssignNet(nn.Cell):
  148. def __init__(self):
  149. super(AssignNet, self).__init__()
  150. self.assign = P.Assign()
  151. self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
  152. def construct(self, x):
  153. return self.assign(self.variable, x)
  154. value = Tensor([2.0], mstype.float32)
  155. assign = AssignNet()
  156. grad = GradNet(assign)
  157. grad.compile(value)
  158. def test_assign_add():
  159. class AssignAddNet(nn.Cell):
  160. def __init__(self):
  161. super(AssignAddNet, self).__init__()
  162. self.assign_add = P.AssignAdd()
  163. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  164. def construct(self, x):
  165. return self.assign_add(self.variable, x)
  166. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  167. assign_add = AssignAddNet()
  168. grad = GradNet(assign_add)
  169. grad.compile(value)
  170. def test_assign_sub():
  171. class AssignSubNet(nn.Cell):
  172. def __init__(self):
  173. super(AssignSubNet, self).__init__()
  174. self.assign = P.AssignSub()
  175. self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
  176. def construct(self, x):
  177. return self.assign(self.variable, x)
  178. value = Tensor(np.ones([1]).astype(np.int32) * 100)
  179. assign_sub = AssignSubNet()
  180. grad = GradNet(assign_sub)
  181. grad.compile(value)
  182. def test_iou():
  183. anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  184. gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  185. iou = Net(P.IOU())
  186. grad = GradNet(iou)
  187. grad.compile(anchor_boxes, gt_boxes)
  188. def test_bn_training_reduce():
  189. x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
  190. bn_training_reduce = Net(P.BNTrainingReduce())
  191. grad = GradNet(bn_training_reduce)
  192. grad.compile(x)
  193. def test_equal():
  194. x = Tensor([2.0], mstype.float32)
  195. y = Tensor([2.0], mstype.float32)
  196. equal = Net(P.Equal())
  197. grad = GradNet(equal)
  198. grad.compile(x, y)
  199. def test_not_equal():
  200. x = Tensor([2.0], mstype.float32)
  201. y = Tensor([2.0], mstype.float32)
  202. not_equal = Net(P.NotEqual())
  203. grad = GradNet(not_equal)
  204. grad.compile(x, y)
  205. def test_greater():
  206. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  207. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  208. greater = Net(P.Greater())
  209. grad = GradNet(greater)
  210. grad.compile(x, y)
  211. def test_greater_equal():
  212. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  213. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  214. greater_equal = Net(P.GreaterEqual())
  215. grad = GradNet(greater_equal)
  216. grad.compile(x, y)
  217. def test_less():
  218. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  219. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  220. less = Net(P.Less())
  221. grad = GradNet(less)
  222. grad.compile(x, y)
  223. def test_less_equal():
  224. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  225. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  226. less_equal = Net(P.LessEqual())
  227. grad = GradNet(less_equal)
  228. grad.compile(x, y)
  229. def test_logical_and():
  230. x = Tensor(np.array([True, False, True]), mstype.bool_)
  231. y = Tensor(np.array([True, True, False]), mstype.bool_)
  232. logical_and = Net(P.LogicalAnd())
  233. grad = GradNet(logical_and)
  234. grad.compile(x, y)
  235. def test_logical_or():
  236. x = Tensor(np.array([True, False, True]), mstype.bool_)
  237. y = Tensor(np.array([True, True, False]), mstype.bool_)
  238. logical_or = Net(P.LogicalOr())
  239. grad = GradNet(logical_or)
  240. grad.compile(x, y)
  241. def test_reduce_all():
  242. x = Tensor(np.array([[True, False], [True, True]]))
  243. reduce_all = Net(P.ReduceAll(keep_dims=True))
  244. grad = GradNet(reduce_all)
  245. grad.compile(x)
  246. def test_reduce_any():
  247. x = Tensor(np.array([[True, False], [True, True]]))
  248. reduce_all = Net(P.ReduceAny(keep_dims=True))
  249. grad = GradNet(reduce_all)
  250. grad.compile(x)
  251. def test_dropout_do_mask():
  252. input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
  253. keep_prob = Tensor(0.5, mstype.float32)
  254. mask = Tensor(np.ones([2]), mstype.uint8)
  255. dropout_do_mask = Net(P.DropoutDoMask())
  256. grad = GradNet(dropout_do_mask)
  257. grad.compile(input_x, mask, keep_prob)