You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_auto_grad.py 21 kB

4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import mindspore.nn as nn
  17. import mindspore.ops as ops
  18. from mindspore import context
  19. from mindspore import Tensor
  20. from mindspore.ops import operations as P
  21. from mindspore.ops import composite as C
  22. from mindspore.common.parameter import Parameter, ParameterTuple
# Module-level gradient composites shared by the tests below:
#   grad_all     — gradients w.r.t. every network input
#   grad_by_list — gradients w.r.t. a ParameterTuple of weights
grad_all = C.GradOperation(get_all=True)
grad_by_list = C.GradOperation(get_by_list=True)
class CropAndResizeNet(nn.Cell):
    """Wraps P.CropAndResize with a fixed crop size and a user-defined bprop."""

    def __init__(self, crop_size):
        super(CropAndResizeNet, self).__init__()
        self.crop_and_resize = P.CropAndResize()
        self.crop_size = crop_size

    def construct(self, x, boxes, box_indices):
        return self.crop_and_resize(x, boxes, box_indices, self.crop_size)

    def bprop(self, x, boxes, box_indices, out, dout):
        # User-defined bprop: returns the forward inputs themselves as the
        # "gradients" — the tests only care that this custom bprop is wired in.
        return x, boxes, box_indices
class TestUserDefinedBpropNet(nn.Cell):
    """relu -> conv -> CropAndResizeNet pipeline; the last stage has a custom bprop."""

    def __init__(self, in_channel, out_channel):
        super(TestUserDefinedBpropNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=2, stride=1, has_bias=False,
                              weight_init='ones', pad_mode='same')
        self.crop = CropAndResizeNet((10, 10))
        # Fixed boxes/indices for a batch of 128 crops.
        self.boxes = Tensor(np.ones((128, 4)).astype(np.float32))
        self.box_indices = Tensor(np.ones((128,)).astype(np.int32))

    def construct(self, x):
        x = self.relu(x)
        x = self.conv(x)
        x = self.crop(x, self.boxes, self.box_indices)
        return x
class TestUserDefinedBpropGradNet(nn.Cell):
    """Computes gradients of the wrapped net w.r.t. all of its inputs."""

    def __init__(self, net):
        super(TestUserDefinedBpropGradNet, self).__init__()
        self.net = net

    def construct(self, x):
        return grad_all(self.net)(x)
  54. def test_user_defined_bprop():
  55. context.set_context(mode=context.GRAPH_MODE)
  56. net = TestUserDefinedBpropNet(3, 10)
  57. grad_net = TestUserDefinedBpropGradNet(net)
  58. x = Tensor(np.ones((128, 3, 12, 12)).astype(np.float32))
  59. grad_net(x)
class TwoInputBPropOperator(nn.Cell):
    """Mul cell with a custom bprop that deliberately returns add-based values."""

    def __init__(self):
        super().__init__()
        self.op = P.Mul()
        self.add = P.Add()

    def construct(self, x, y):
        return self.op(x, y)

    def bprop(self, x, y, out, dout):
        # Not the mathematical gradient of Mul — the tests only check that this
        # user-defined bprop is picked up and executes.
        return self.add(5, x), self.add(y, 9)
class BPropOperatatorNet(nn.Cell):
    """custom-bprop mul -> floordiv -> batchnorm; batchnorm introduces monad (U) state."""

    def __init__(self, mul_size):
        super().__init__()
        mul_np = np.full(mul_size, 0.1, dtype=np.float32)
        floordiv_np = np.full(mul_size, 0.1, dtype=np.float32)
        self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
        self.floordiv_weight = Parameter(Tensor(floordiv_np), name="floordiv_weight")
        self.mul = TwoInputBPropOperator()
        self.floor_div = P.FloorDiv()
        # BatchNorm carries side-effect state, exercising U-monad handling in AD.
        self.bn = nn.BatchNorm1d(num_features=96)

    def construct(self, inputs):
        x = self.mul(inputs, self.mul_weight)
        x = self.floor_div(x, self.floordiv_weight)
        x = self.bn(x)
        return x
  84. def test_user_defined_bprop_with_u():
  85. net = BPropOperatatorNet(mul_size=(128, 96))
  86. grad_net = TestUserDefinedBpropGradNet(net)
  87. x = Tensor(np.random.randn(128, 96).astype(np.float32))
  88. grad_net(x)
class SinNet(nn.Cell):
    """Elementwise sin(x)."""

    def __init__(self):
        super(SinNet, self).__init__()
        self.sin = ops.Sin()

    def construct(self, x):
        out = self.sin(x)
        return out
class SinGrad(nn.Cell):
    """First-order gradient of the wrapped network w.r.t. its input."""

    def __init__(self, network):
        super(SinGrad, self).__init__()
        self.grad = ops.GradOperation()
        self.network = network

    def construct(self, x):
        gout = self.grad(self.network)(x)
        return gout
class SinGradSec(nn.Cell):
    """Second differentiation layer: takes a grad net and differentiates it again."""

    def __init__(self, network):
        super(SinGradSec, self).__init__()
        self.grad = ops.GradOperation()
        self.network = network

    def construct(self, x):
        gout = self.grad(self.network)(x)
        return gout
  112. def test_second_grad_with_j_primitive():
  113. context.set_context(mode=context.GRAPH_MODE)
  114. net = SinNet()
  115. first_grad = SinGrad(net)
  116. second_grad = SinGradSec(first_grad)
  117. x = Tensor(np.array([1.0], dtype=np.float32))
  118. second_grad(x)
  119. # A CNode being used as FV is MapMorphism after MapMorphism of call-site CNode;
def test_ad_fv_cnode_order():
    """AD over a CNode used both as a free variable of an inner call and in the outer sum."""
    context.set_context(mode=context.GRAPH_MODE)
    class Net(nn.Cell):
        # cnode xay is not being MapMorphism when cnode second_level() is being MapMorphism and
        # BackPropagateFv as MapMorphism is started from output node and from left to right order.
        def construct(self, x, y):
            def first_level():
                xay = x + y
                def second_level():
                    return xay
                return second_level() + xay
            return first_level()
    input_x = Tensor(np.array([1.0], dtype=np.float32))
    input_y = Tensor(np.array([2.0], dtype=np.float32))
    net = Net()
    # defer_inline keeps the nested func graphs distinct so the FV ordering is exercised.
    net.add_flags_recursive(defer_inline=True)
    grad_net = grad_all(net)
    grad_net(input_x, input_y)
  138. # True and False branch of switch have different number of parameters.
def test_if_branch_with_different_params():
    """Grad through a switch whose true/false branches touch different parameter sets."""
    context.set_context(mode=context.GRAPH_MODE)
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.weight1 = Parameter(Tensor(np.array([1.0], dtype=np.float32)), name="weight1")
            self.weight2 = Parameter(Tensor(np.array([2.0], dtype=np.float32)), name="weight2")

        def construct(self, idx, end, x):
            out = x
            # True branch uses weight1 * weight2; false branch uses only weight1,
            # so the two switch branches have different numbers of parameters.
            if idx < end:
                out = out + self.weight1 * self.weight2
            else:
                out = out + self.weight1
            return out

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.weights = ParameterTuple(net.trainable_params())

        def construct(self, idx, end, x):
            return grad_by_list(self.net, self.weights)(idx, end, x)

    idx = Tensor(np.array((0), dtype=np.int32))
    end = Tensor(np.array((3), dtype=np.int32))
    x = Tensor(np.array([2.0], dtype=np.float32))
    net = Net()
    grad_net = GradNet(net)
    grad_net(idx, end, x)
  166. # Only lift fv in scope of lift_top_func_graph other than all func_graphs inside manager.
  167. # Otherwise, "Illegal AnfNode for evaluating" may be reported
  168. # because weight1 in Net may use old_parameter other than replicated one.
def test_limit_lift_fv_scope():
    """Free-variable lifting must stay within lift_top_func_graph's scope (see comment above)."""
    context.set_context(mode=context.GRAPH_MODE)
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.weight1 = Parameter(Tensor(np.array([1.0], dtype=np.float32)), name="weight1")

        def construct(self, x, y):
            def inner_add(a, b):
                return a + b
            out = inner_add(x, y) + self.weight1
            return out

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.weights = ParameterTuple(net.trainable_params())

        def construct(self, x, y):
            def inner_grad_add(a, b):
                return a + b
            d_weight = grad_by_list(self.net, self.weights)(x, y)[0]
            d_out = inner_grad_add(d_weight, y)
            return d_out

    x = Tensor(np.array([2.0], dtype=np.float32))
    y = Tensor(np.array([2.0], dtype=np.float32))
    net = Net()
    # defer_inline on both nets preserves the separate graphs that trigger the issue.
    net.add_flags_recursive(defer_inline=True)
    grad_net = GradNet(net)
    grad_net.add_flags_recursive(defer_inline=True)
    grad_net(x, y)
  198. def test_same_primal_used_by_multi_j():
  199. class Net(nn.Cell):
  200. def construct(self, x):
  201. return x
  202. class GradNet(nn.Cell):
  203. def __init__(self, net):
  204. super(GradNet, self).__init__()
  205. self.net = net
  206. self.grad = ops.GradOperation()
  207. def construct(self, x):
  208. out = self.net(x)
  209. gout = self.grad(self.net)(x)
  210. gout1 = self.grad(self.net)(x)
  211. return out, gout, gout1
  212. x = Tensor(np.array([1.0], dtype=np.float32))
  213. net = Net()
  214. grad = GradNet(net)
  215. grad(x)
def test_same_primal_used_by_multi_j_with_monad1():
    """Same side-effecting primal (Adam updates Parameters) differentiated twice via two grad calls."""
    context.set_context(mode=context.GRAPH_MODE)
    class AdamNet(nn.Cell):
        def __init__(self, var, m, v):
            super(AdamNet, self).__init__()
            self.apply_adam = P.Adam()
            self.var = Parameter(var, name="var")
            self.m = Parameter(m, name="m")
            self.v = Parameter(v, name="v")

        def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
            # In-place update of var/m/v — introduces monad side effects into the graph.
            self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
            return self.var

    class AdamGradNet(nn.Cell):
        def __init__(self, network):
            super(AdamGradNet, self).__init__()
            self.grad_fn = ops.GradOperation(sens_param=True)
            self.sens = [Tensor(np.ones([3, 3, 3]).astype(np.float32)), Tensor(np.ones([3, 3, 3]).astype(np.float32))]
            self.network = network

        def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
            out = self.network(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
            # Two distinct self.grad_fn(self.network) applications on the same primal.
            gout1 = self.grad_fn(self.network)(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, self.sens[0])
            gout2 = self.grad_fn(self.network)(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, self.sens[1])
            return out, gout1, gout2

    var = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    m = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    v = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    beta1_power = Tensor(np.array([0.9], dtype=np.float32))
    beta2_power = Tensor(np.array([0.999], dtype=np.float32))
    lr = Tensor(np.array([0.001], dtype=np.float32))
    beta1 = Tensor(np.array([0.9], dtype=np.float32))
    beta2 = Tensor(np.array([0.999], dtype=np.float32))
    epsilon = Tensor(np.array([1e-8], dtype=np.float32))
    grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
    net = AdamNet(var, m, v)
    grad_net = AdamGradNet(net)
    grad_net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
def test_same_primal_used_by_multi_j_with_monad2():
    """Variant of monad1: a single grad_fn object is built once and then applied twice."""
    context.set_context(mode=context.GRAPH_MODE)
    class AdamNet(nn.Cell):
        def __init__(self, var, m, v):
            super(AdamNet, self).__init__()
            self.apply_adam = P.Adam()
            self.var = Parameter(var, name="var")
            self.m = Parameter(m, name="m")
            self.v = Parameter(v, name="v")

        def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
            # In-place update of var/m/v — introduces monad side effects into the graph.
            self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
            return self.var

    class AdamGradNet(nn.Cell):
        def __init__(self, network):
            super(AdamGradNet, self).__init__()
            self.grad = ops.GradOperation(sens_param=True)
            self.sens = [Tensor(np.ones([3, 3, 3]).astype(np.float32)), Tensor(np.ones([3, 3, 3]).astype(np.float32))]
            self.network = network

        def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
            out = self.network(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
            # Unlike monad1, the grad function is constructed once and reused.
            grad_fn = self.grad(self.network)
            gout1 = grad_fn(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, self.sens[0])
            gout2 = grad_fn(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, self.sens[1])
            return out, gout1, gout2

    var = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    m = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    v = Tensor(np.ones([3, 3, 3]).astype(np.float32))
    beta1_power = Tensor(np.array([0.9], dtype=np.float32))
    beta2_power = Tensor(np.array([0.999], dtype=np.float32))
    lr = Tensor(np.array([0.001], dtype=np.float32))
    beta1 = Tensor(np.array([0.9], dtype=np.float32))
    beta2 = Tensor(np.array([0.999], dtype=np.float32))
    epsilon = Tensor(np.array([1e-8], dtype=np.float32))
    grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
    net = AdamNet(var, m, v)
    grad_net = AdamGradNet(net)
    grad_net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad)
  289. def test_grad_args_type_error1():
  290. class Net(nn.Cell):
  291. def __init__(self):
  292. super(Net, self).__init__()
  293. self.matmul = P.MatMul()
  294. def construct(self, x, y):
  295. out = self.matmul(x, y)
  296. return out
  297. class GradNetWrtX(nn.Cell):
  298. def __init__(self, net):
  299. super(GradNetWrtX, self).__init__()
  300. self.net = net
  301. self.grad_op = ops.GradOperation(get_all=2)
  302. def construct(self, x, y):
  303. gradient_function = self.grad_op(self.net)
  304. return gradient_function(x, y)
  305. x = Tensor(np.array([2.0], dtype=np.float32))
  306. y = Tensor(np.array([2.0], dtype=np.float32))
  307. try:
  308. GradNetWrtX(Net())(x, y)
  309. except TypeError as e:
  310. assert "For 'GradOperation', the 'get_all' should be bool, but got" in str(e)
  311. def test_grad_args_type_error2():
  312. class Net(nn.Cell):
  313. def __init__(self):
  314. super(Net, self).__init__()
  315. self.matmul = P.MatMul()
  316. def construct(self, x, y):
  317. out = self.matmul(x, y)
  318. return out
  319. class GradNetWrtX(nn.Cell):
  320. def __init__(self, net):
  321. super(GradNetWrtX, self).__init__()
  322. self.net = net
  323. self.grad_op = ops.GradOperation(get_by_list=2)
  324. def construct(self, x, y):
  325. gradient_function = self.grad_op(self.net)
  326. return gradient_function(x, y)
  327. x = Tensor(np.array([2.0], dtype=np.float32))
  328. y = Tensor(np.array([2.0], dtype=np.float32))
  329. try:
  330. GradNetWrtX(Net())(x, y)
  331. except TypeError as e:
  332. assert "For 'GradOperation', the 'get_by_list' should be bool, but got" in str(e)
  333. def test_grad_args_type_error3():
  334. class Net(nn.Cell):
  335. def __init__(self):
  336. super(Net, self).__init__()
  337. self.matmul = P.MatMul()
  338. def construct(self, x, y):
  339. out = self.matmul(x, y)
  340. return out
  341. class GradNetWrtX(nn.Cell):
  342. def __init__(self, net):
  343. super(GradNetWrtX, self).__init__()
  344. self.net = net
  345. self.grad_op = ops.GradOperation(sens_param=2)
  346. def construct(self, x, y):
  347. gradient_function = self.grad_op(self.net)
  348. return gradient_function(x, y)
  349. x = Tensor(np.array([2.0], dtype=np.float32))
  350. y = Tensor(np.array([2.0], dtype=np.float32))
  351. try:
  352. GradNetWrtX(Net())(x, y)
  353. except TypeError as e:
  354. assert "For 'GradOperation', the 'sens_param' should be bool, but got" in str(e)
  355. def test_grad_net_is_none():
  356. class Net(nn.Cell):
  357. def __init__(self):
  358. super(Net, self).__init__()
  359. self.add = P.Add()
  360. def construct(self, x, y):
  361. out = self.add(x, y)
  362. return out
  363. class GradNetWrtX(nn.Cell):
  364. def __init__(self, net):
  365. super(GradNetWrtX, self).__init__()
  366. self.net = P.Add()
  367. self.grad_op = ops.GradOperation()
  368. def construct(self, x, y):
  369. gradient_function = self.grad_op(None)
  370. return gradient_function(x, y)
  371. x = Tensor(np.array([2.0], dtype=np.float32))
  372. y = Tensor(np.array([2.0], dtype=np.float32))
  373. try:
  374. GradNetWrtX(Net())(x, y)
  375. except Exception as e:
  376. assert "For 'GradOperation', the first argument must be a 'Function' or 'Cell', but got" in str(e)
  377. def test_grad_missing_net():
  378. class Net(nn.Cell):
  379. def __init__(self):
  380. super(Net, self).__init__()
  381. self.add = P.Add()
  382. def construct(self, x, y):
  383. out = self.add(x, y)
  384. return out
  385. class GradNetWrtX(nn.Cell):
  386. def __init__(self, net):
  387. super(GradNetWrtX, self).__init__()
  388. self.net = net
  389. self.grad_op = ops.GradOperation()
  390. def construct(self, x, y):
  391. gradient_function = self.grad_op()
  392. return gradient_function(x, y)
  393. x = Tensor(np.array([2.0], dtype=np.float32))
  394. y = Tensor(np.array([2.0], dtype=np.float32))
  395. try:
  396. GradNetWrtX(Net())(x, y)
  397. except Exception as e:
  398. assert "'GradOperation' requires a forward network or function as an input, while the input is empty." in str(e)
  399. def test_user_defined_bprop_inputs_size_error():
  400. class BpropUserDefinedNet(nn.Cell):
  401. def __init__(self):
  402. super(BpropUserDefinedNet, self).__init__()
  403. self.zeros_like = P.ZerosLike()
  404. def construct(self, x, y):
  405. return x + y
  406. def bprop(self, out):
  407. return self.zeros_like(out), self.zeros_like(out)
  408. class BpropUserDefinedGradNet(nn.Cell):
  409. def __init__(self, net):
  410. super(BpropUserDefinedGradNet, self).__init__()
  411. self.net = net
  412. def construct(self, x, y):
  413. return grad_all(self.net)(x, y)
  414. net = BpropUserDefinedNet()
  415. grad_net = BpropUserDefinedGradNet(net)
  416. x = Tensor(np.array([2.0], dtype=np.float32))
  417. y = Tensor(np.array([2.0], dtype=np.float32))
  418. try:
  419. grad_net(x, y)
  420. except Exception as e:
  421. assert "The function 'bprop' of Primitive or Cell requires at least 2 params 'out' and 'dout', but got only"\
  422. in str(e)
  423. def test_user_defined_bprop_net_has_parameter():
  424. class BpropUserDefinedNet(nn.Cell):
  425. def __init__(self):
  426. super(BpropUserDefinedNet, self).__init__()
  427. self.zeros_like = P.ZerosLike()
  428. self.x = Parameter(Tensor(np.array([2.0], dtype=np.float32)), name="x")
  429. def construct(self, y):
  430. return self.x + y
  431. def bprop(self, y, out, dout):
  432. return (self.zeros_like(out),)
  433. class BpropUserDefinedGradNet(nn.Cell):
  434. def __init__(self, net):
  435. super(BpropUserDefinedGradNet, self).__init__()
  436. self.net = net
  437. def construct(self, y):
  438. return grad_all(self.net)(y)
  439. net = BpropUserDefinedNet()
  440. grad_net = BpropUserDefinedGradNet(net)
  441. y = Tensor(np.array([2.0], dtype=np.float32))
  442. try:
  443. grad_net(y)
  444. except Exception as e:
  445. assert "The Cell with user defined 'bprop' function in scope" in str(e)
  446. assert "does not support Parameter data type." in str(e)
  447. def test_user_defined_bprop_inputs_size_error1():
  448. class BpropUserDefinedNet(nn.Cell):
  449. def __init__(self):
  450. super(BpropUserDefinedNet, self).__init__()
  451. self.zeros_like = P.ZerosLike()
  452. def construct(self, x, y):
  453. return x + y
  454. def bprop(self, x, y, out):
  455. return self.zeros_like(out), self.zeros_like(out)
  456. class BpropUserDefinedGradNet(nn.Cell):
  457. def __init__(self, net):
  458. super(BpropUserDefinedGradNet, self).__init__()
  459. self.net = net
  460. def construct(self, x, y):
  461. return grad_all(self.net)(x, y)
  462. net = BpropUserDefinedNet()
  463. grad_net = BpropUserDefinedGradNet(net)
  464. x = Tensor(np.array([2.0], dtype=np.float32))
  465. y = Tensor(np.array([2.0], dtype=np.float32))
  466. try:
  467. grad_net(x, y)
  468. except TypeError as e:
  469. assert "The params of function 'bprop' of Primitive or Cell requires the forward inputs as well as the 'out' " \
  470. "and 'dout'." in str(e)
  471. def test_grad_hook():
  472. def var_hook_function(grad_out):
  473. assert grad_out[0].asnumpy().shape == (32, 120)
  474. class Net(nn.Cell):
  475. def __init__(self):
  476. super(Net, self).__init__()
  477. self.add = P.Add()
  478. self.hook = P.HookBackward(var_hook_function)
  479. def construct(self, x, y):
  480. x = self.hook(x)
  481. out = self.add(x, y)
  482. return out
  483. class GradNetWrtX(nn.Cell):
  484. def __init__(self, net):
  485. super(GradNetWrtX, self).__init__()
  486. self.net = net
  487. self.grad_op = ops.GradOperation()
  488. def construct(self, x, y):
  489. gradient_function = self.grad_op(self.net)
  490. return gradient_function(x, y)
  491. x = Tensor(np.array([2.0], dtype=np.float32))
  492. y = Tensor(np.array([2.0], dtype=np.float32))
  493. try:
  494. GradNetWrtX(Net())(x, y)
  495. except Exception as e:
  496. assert "The Primitive 'HookBackward' is not supported in graph mode, which is only supported in pynative " \
  497. "mode." in str(e)