You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_bprop_mindir.py 9.7 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Generate the mindir for bprop"""
import os

import numpy as np

import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.ops._grad as g
from mindspore import Tensor, Parameter
from mindspore._c_expression import load_mindir
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops.bprop_mindir import serializable_bprop_ops
from mindspore.ops.operations import _inner_ops as inner
  27. class Net(nn.Cell):
  28. def __init__(self, op):
  29. super(Net, self).__init__()
  30. self.op = op
  31. def construct(self, *inputs):
  32. return self.op(*inputs)
  33. class TupleInputNet(nn.Cell):
  34. def __init__(self, op):
  35. super(TupleInputNet, self).__init__()
  36. self.op = op
  37. def construct(self, x):
  38. return self.op((x,))
  39. class GradNet(nn.Cell):
  40. def __init__(self, network):
  41. super(GradNet, self).__init__()
  42. self.grad = ops.GradOperation(get_all=True)
  43. self.network = network
  44. def construct(self, *inputs):
  45. gout = self.grad(self.network)(*inputs)
  46. return gout
  47. def test_load_mindir_dir():
  48. """
  49. Feature: Bprop pre-compilation.
  50. Description: Load all the mindir files of serializable bprop.
  51. Expectation: All are loaded successfully.
  52. """
  53. bprop_path = g.__file__
  54. bprop_installed_dir = bprop_path[: bprop_path.rindex('/')]
  55. bprop_mindir_export_dir = bprop_installed_dir + "/../bprop_mindir"
  56. for op in serializable_bprop_ops:
  57. file_name = bprop_mindir_export_dir + "/" + op.name + "_bprop.mindir"
  58. graph = load_mindir(file_name)
  59. assert not graph is None
  60. def test_relu():
  61. x = Tensor(np.array([[[[-1, 1, 10],
  62. [1, -1, 1],
  63. [10, 1, -1]]]]).astype(np.float32))
  64. relu = Net(P.ReLU())
  65. grad = GradNet(relu)
  66. grad.compile(x)
  67. def test_identity():
  68. x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
  69. identity = Net(P.Identity())
  70. grad = GradNet(identity)
  71. grad.compile(x)
  72. def test_range():
  73. x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
  74. range_net = Net(inner.Range(1.0, 8.0, 2.0))
  75. grad = GradNet(range_net)
  76. grad.compile(x)
  77. def test_ones_like():
  78. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  79. ones_like = Net(P.OnesLike())
  80. grad = GradNet(ones_like)
  81. grad.compile(x)
  82. def test_zeros_like():
  83. x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  84. zeros_like = Net(P.ZerosLike())
  85. grad = GradNet(zeros_like)
  86. grad.compile(x)
  87. def test_argmax():
  88. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  89. argmax = Net(P.Argmax())
  90. grad = GradNet(argmax)
  91. grad.compile(x)
  92. def test_argmin():
  93. x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  94. argmin = Net(P.Argmin())
  95. grad = GradNet(argmin)
  96. grad.compile(x)
  97. def test_broadcast():
  98. x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
  99. broadcast = TupleInputNet(P.Broadcast(1))
  100. grad = GradNet(broadcast)
  101. grad.compile(x)
  102. def test_is_finite():
  103. x = Tensor(np.ones([2, 4]).astype(np.int32))
  104. is_finite = Net(P.IsFinite())
  105. grad = GradNet(is_finite)
  106. grad.compile(x)
  107. def test_approximate_equal():
  108. x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  109. y = Tensor(np.array([2, 4, 6]).astype(np.float32))
  110. approximate_equal = Net(P.ApproximateEqual(2.))
  111. grad = GradNet(approximate_equal)
  112. grad.compile(x, y)
  113. def test_logical_not():
  114. x = Tensor(np.array([True, False, True]).astype(np.bool))
  115. logical_not = Net(P.LogicalNot())
  116. grad = GradNet(logical_not)
  117. grad.compile(x)
  118. def test_sign():
  119. x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
  120. sign = Net(P.Sign())
  121. grad = GradNet(sign)
  122. grad.compile(x)
  123. def test_round():
  124. x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
  125. round_net = Net(P.Round())
  126. grad = GradNet(round_net)
  127. grad.compile(x)
  128. def test_lin_space():
  129. start = Tensor(1, mstype.float32)
  130. stop = Tensor(10, mstype.float32)
  131. num = 5
  132. lin_space = Net(P.LinSpace())
  133. grad = GradNet(lin_space)
  134. grad.compile(start, stop, num)
  135. def test_dropout_gen_mask():
  136. x = (2, 4, 2, 2)
  137. keep_prob = Tensor(1.0, mstype.float32)
  138. dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
  139. grad = GradNet(dropout_gen_mask)
  140. grad.compile(x, keep_prob)
  141. def test_onehot():
  142. indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
  143. depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
  144. one_hot = Net(P.OneHot())
  145. grad = GradNet(one_hot)
  146. grad.compile(indices, depth, on_value, off_value)
  147. def test_assign():
  148. class AssignNet(nn.Cell):
  149. def __init__(self):
  150. super(AssignNet, self).__init__()
  151. self.assign = P.Assign()
  152. self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
  153. def construct(self, x):
  154. return self.assign(self.variable, x)
  155. value = Tensor([2.0], mstype.float32)
  156. assign = AssignNet()
  157. grad = GradNet(assign)
  158. grad.compile(value)
  159. def test_assign_add():
  160. class AssignAddNet(nn.Cell):
  161. def __init__(self):
  162. super(AssignAddNet, self).__init__()
  163. self.assign_add = P.AssignAdd()
  164. self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
  165. def construct(self, x):
  166. return self.assign_add(self.variable, x)
  167. value = Tensor(np.ones([1]).astype(np.int64) * 100)
  168. assign_add = AssignAddNet()
  169. grad = GradNet(assign_add)
  170. grad.compile(value)
  171. def test_assign_sub():
  172. class AssignSubNet(nn.Cell):
  173. def __init__(self):
  174. super(AssignSubNet, self).__init__()
  175. self.assign = P.AssignSub()
  176. self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
  177. def construct(self, x):
  178. return self.assign(self.variable, x)
  179. value = Tensor(np.ones([1]).astype(np.int32) * 100)
  180. assign_sub = AssignSubNet()
  181. grad = GradNet(assign_sub)
  182. grad.compile(value)
  183. def test_iou():
  184. anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  185. gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
  186. iou = Net(P.IOU())
  187. grad = GradNet(iou)
  188. grad.compile(anchor_boxes, gt_boxes)
  189. def test_bn_training_reduce():
  190. x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
  191. bn_training_reduce = Net(P.BNTrainingReduce())
  192. grad = GradNet(bn_training_reduce)
  193. grad.compile(x)
  194. def test_equal():
  195. x = Tensor([2.0], mstype.float32)
  196. y = Tensor([2.0], mstype.float32)
  197. equal = Net(P.Equal())
  198. grad = GradNet(equal)
  199. grad.compile(x, y)
  200. def test_not_equal():
  201. x = Tensor([2.0], mstype.float32)
  202. y = Tensor([2.0], mstype.float32)
  203. not_equal = Net(P.NotEqual())
  204. grad = GradNet(not_equal)
  205. grad.compile(x, y)
  206. def test_greater():
  207. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  208. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  209. greater = Net(P.Greater())
  210. grad = GradNet(greater)
  211. grad.compile(x, y)
  212. def test_greater_equal():
  213. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  214. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  215. greater_equal = Net(P.GreaterEqual())
  216. grad = GradNet(greater_equal)
  217. grad.compile(x, y)
  218. def test_less():
  219. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  220. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  221. less = Net(P.Less())
  222. grad = GradNet(less)
  223. grad.compile(x, y)
  224. def test_less_equal():
  225. x = Tensor(np.array([1, 2, 3]), mstype.int32)
  226. y = Tensor(np.array([1, 1, 4]), mstype.int32)
  227. less_equal = Net(P.LessEqual())
  228. grad = GradNet(less_equal)
  229. grad.compile(x, y)
  230. def test_logical_and():
  231. x = Tensor(np.array([True, False, True]), mstype.bool_)
  232. y = Tensor(np.array([True, True, False]), mstype.bool_)
  233. logical_and = Net(P.LogicalAnd())
  234. grad = GradNet(logical_and)
  235. grad.compile(x, y)
  236. def test_logical_or():
  237. x = Tensor(np.array([True, False, True]), mstype.bool_)
  238. y = Tensor(np.array([True, True, False]), mstype.bool_)
  239. logical_or = Net(P.LogicalOr())
  240. grad = GradNet(logical_or)
  241. grad.compile(x, y)
  242. def test_reduce_all():
  243. x = Tensor(np.array([[True, False], [True, True]]))
  244. reduce_all = Net(P.ReduceAll(keep_dims=True))
  245. grad = GradNet(reduce_all)
  246. grad.compile(x)
  247. def test_reduce_any():
  248. x = Tensor(np.array([[True, False], [True, True]]))
  249. reduce_all = Net(P.ReduceAny(keep_dims=True))
  250. grad = GradNet(reduce_all)
  251. grad.compile(x)
  252. def test_dropout_do_mask():
  253. input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
  254. keep_prob = Tensor(0.5, mstype.float32)
  255. mask = Tensor(np.ones([2]), mstype.uint8)
  256. dropout_do_mask = Net(P.DropoutDoMask())
  257. grad = GradNet(dropout_do_mask)
  258. grad.compile(input_x, mask, keep_prob)