You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_bprop_mindir.py 9.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Generate the mindir for bprop"""
  16. import numpy as np
  17. import mindspore.nn as nn
  18. from mindspore import Tensor, Parameter
  19. from mindspore.ops import operations as P
  20. import mindspore.ops as ops
  21. from mindspore.ops.operations import _inner_ops as inner
  22. import mindspore.common.dtype as mstype
  23. from mindspore.common.initializer import initializer
  24. class Net(nn.Cell):
  25. def __init__(self, op):
  26. super(Net, self).__init__()
  27. self.op = op
  28. def construct(self, *inputs, a=0, b=1):
  29. c = a + b
  30. return c, self.op(*inputs)
  31. class TupleInputNet(nn.Cell):
  32. def __init__(self, op):
  33. super(TupleInputNet, self).__init__()
  34. self.op = op
  35. def construct(self, x):
  36. return self.op((x,))
  37. class GradNet(nn.Cell):
  38. def __init__(self, network):
  39. super(GradNet, self).__init__()
  40. self.grad = ops.GradOperation(get_all=True)
  41. self.network = network
  42. def construct(self, *inputs):
  43. gout = self.grad(self.network)(*inputs)
  44. return gout
  45. def test_relu():
  46. x = Tensor(np.array([[[[-1, 1, 10],
  47. [1, -1, 1],
  48. [10, 1, -1]]]]).astype(np.float32))
  49. relu = Net(P.ReLU())
  50. grad = GradNet(relu)
  51. grad.compile(x)
  52. def test_identity():
  53. x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
  54. identity = Net(P.Identity())
  55. grad = GradNet(identity)
  56. grad.compile(x)
  57. def test_range():
  58. x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
  59. range_net = Net(inner.Range(1.0, 8.0, 2.0))
  60. grad = GradNet(range_net)
  61. grad.compile(x)
  62. def test_ones_like():
  63. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  64. ones_like = Net(P.OnesLike())
  65. grad = GradNet(ones_like)
  66. grad.compile(x)
  67. def test_zeros_like():
  68. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  69. zeros_like = Net(P.ZerosLike())
  70. grad = GradNet(zeros_like)
  71. grad.compile(x)
  72. def test_argmax():
  73. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  74. argmax = Net(P.Argmax())
  75. grad = GradNet(argmax)
  76. grad.compile(x)
  77. def test_argmin():
  78. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  79. argmin = Net(P.Argmin())
  80. grad = GradNet(argmin)
  81. grad.compile(x)
  82. def test_broadcast():
  83. x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
  84. broadcast = TupleInputNet(P.Broadcast(1))
  85. grad = GradNet(broadcast)
  86. grad.compile(x)
  87. def test_is_finite():
  88. x = Tensor(np.ones([2, 4]).astype(np.int32))
  89. is_finite = Net(P.IsFinite())
  90. grad = GradNet(is_finite)
  91. grad.compile(x)
  92. def test_approximate_equal():
  93. x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  94. y = Tensor(np.array([2, 4, 6]).astype(np.float32))
  95. approximate_equal = Net(P.ApproximateEqual(2.))
  96. grad = GradNet(approximate_equal)
  97. grad.compile(x, y)
  98. def test_logical_not():
  99. x = Tensor(np.array([True, False, True]).astype(np.bool))
  100. logical_not = Net(P.LogicalNot())
  101. grad = GradNet(logical_not)
  102. grad.compile(x)
  103. def test_sign():
  104. x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
  105. sign = Net(P.Sign())
  106. grad = GradNet(sign)
  107. grad.compile(x)
  108. def test_round():
  109. x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
  110. round_net = Net(P.Round())
  111. grad = GradNet(round_net)
  112. grad.compile(x)
  113. def test_lin_space():
  114. start = Tensor(1, mstype.float32)
  115. stop = Tensor(10, mstype.float32)
  116. num = 5
  117. lin_space = Net(P.LinSpace())
  118. grad = GradNet(lin_space)
  119. grad.compile(start, stop, num)
  120. def test_dropout_gen_mask():
  121. x = (2, 4, 2, 2)
  122. keep_prob = Tensor(1.0, mstype.float32)
  123. dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
  124. grad = GradNet(dropout_gen_mask)
  125. grad.compile(x, keep_prob)
  126. def test_onehot():
  127. indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
  128. depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
  129. one_hot = Net(P.OneHot())
  130. grad = GradNet(one_hot)
  131. grad.compile(indices, depth, on_value, off_value)
  132. def test_assign():
  133. class AssignNet(nn.Cell):
  134. def __init__(self):
  135. super(AssignNet, self).__init__()
  136. self.assign = P.Assign()
  137. self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
  138. def construct(self, x):
  139. return self.assign(self.variable, x)
  140. value = Tensor([2.0], mstype.float32)
  141. assign = AssignNet()
  142. grad = GradNet(assign)
  143. grad.compile(value)
  144. def test_assign_add():
  145. class AssignAddNet(nn.Cell):
  146. def __init__(self):
  147. super(AssignAddNet, self).__init__()
  148. self.assign_add = P.AssignAdd()
  149. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  150. def construct(self, x):
  151. return self.assign_add(self.variable, x)
  152. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  153. assign_add = AssignAddNet()
  154. grad = GradNet(assign_add)
  155. grad.compile(value)
  156. def test_assign_sub():
  157. class AssignSubNet(nn.Cell):
  158. def __init__(self):
  159. super(AssignSubNet, self).__init__()
  160. self.assign = P.AssignSub()
  161. self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
  162. def construct(self, x):
  163. return self.assign(self.variable, x)
  164. value = Tensor(np.ones([1]).astype(np.int32) * 100)
  165. assign_sub = AssignSubNet()
  166. grad = GradNet(assign_sub)
  167. grad.compile(value)
  168. def test_iou():
  169. anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  170. gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  171. iou = Net(P.IOU())
  172. grad = GradNet(iou)
  173. grad.compile(anchor_boxes, gt_boxes)
  174. def test_bn_training_reduce():
  175. x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
  176. bn_training_reduce = Net(P.BNTrainingReduce())
  177. grad = GradNet(bn_training_reduce)
  178. grad.compile(x)
  179. def test_equal():
  180. x = Tensor([2.0], mstype.float32)
  181. y = Tensor([2.0], mstype.float32)
  182. equal = Net(P.Equal())
  183. grad = GradNet(equal)
  184. grad.compile(x, y)
  185. def test_not_equal():
  186. x = Tensor([2.0], mstype.float32)
  187. y = Tensor([2.0], mstype.float32)
  188. not_equal = Net(P.NotEqual())
  189. grad = GradNet(not_equal)
  190. grad.compile(x, y)
  191. def test_greater():
  192. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  193. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  194. greater = Net(P.Greater())
  195. grad = GradNet(greater)
  196. grad.compile(x, y)
  197. def test_greater_equal():
  198. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  199. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  200. greater_equal = Net(P.GreaterEqual())
  201. grad = GradNet(greater_equal)
  202. grad.compile(x, y)
  203. def test_less():
  204. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  205. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  206. less = Net(P.Less())
  207. grad = GradNet(less)
  208. grad.compile(x, y)
  209. def test_less_equal():
  210. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  211. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  212. less_equal = Net(P.LessEqual())
  213. grad = GradNet(less_equal)
  214. grad.compile(x, y)
  215. def test_logical_and():
  216. x = Tensor(np.array([True, False, True]), mstype.bool_)
  217. y = Tensor(np.array([True, True, False]), mstype.bool_)
  218. logical_and = Net(P.LogicalAnd())
  219. grad = GradNet(logical_and)
  220. grad.compile(x, y)
  221. def test_logical_or():
  222. x = Tensor(np.array([True, False, True]), mstype.bool_)
  223. y = Tensor(np.array([True, True, False]), mstype.bool_)
  224. logical_or = Net(P.LogicalOr())
  225. grad = GradNet(logical_or)
  226. grad.compile(x, y)
  227. def test_reduce_all():
  228. x = Tensor(np.array([[True, False], [True, True]]))
  229. reduce_all = Net(P.ReduceAll(keep_dims=True))
  230. grad = GradNet(reduce_all)
  231. grad.compile(x)
  232. def test_reduce_any():
  233. x = Tensor(np.array([[True, False], [True, True]]))
  234. reduce_all = Net(P.ReduceAny(keep_dims=True))
  235. grad = GradNet(reduce_all)
  236. grad.compile(x)
  237. def test_dropout_do_mask():
  238. input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
  239. keep_prob = Tensor(0.5, mstype.float32)
  240. mask = Tensor(np.ones([2]), mstype.uint8)
  241. dropout_do_mask = Net(P.DropoutDoMask())
  242. grad = GradNet(dropout_do_mask)
  243. grad.compile(input_x, mask, keep_prob)