
nn_ops_vm_impl.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generate vm_impl function for nn ops"""
import numpy as np

from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.common.tensor import Tensor
from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
from .vm_interface import vm

# pylint: disable=unused-argument


@vm_impl_getters.register(P.ScalarSummary)
def vm_impl_scalar_summary(self):
    """Generate vm_impl function for ScalarSummary"""

    def vm_impl(string_in, scalar):
        """Implement by vm mode."""
        return scalar

    return vm_impl


@vm_impl_getters.register(P.ReLU)
def vm_impl_relu(self):
    """Generate vm_impl function for ReLU"""

    def vm_impl(x):
        x = x.asnumpy()
        output = Tensor(vm.relu(x))
        return output

    return vm_impl


@vm_impl_getters.register(P.Flatten)
def vm_impl_flatten(self):
    """Generate vm_impl function for Flatten"""

    def vm_impl(x):
        x = x.asnumpy()
        return Tensor(vm.flatten_batch(x))

    return vm_impl


@vm_impl_getters.register(P.Softmax)
def vm_impl_softmax(self):
    """Generate vm_impl function for Softmax"""

    def vm_impl(x):
        x = x.asnumpy()
        return Tensor(vm.softmax(x))

    return vm_impl


@vm_impl_getters.register(P.LogSoftmax)
def vm_impl_log_softmax(self):
    """Generate vm_impl function for LogSoftmax"""

    def vm_impl(x):
        x = x.asnumpy()
        return Tensor(vm.logsoftmax(x))

    return vm_impl


@vm_impl_getters.register(P.Tanh)
def vm_impl_tanh(self):
    """Generate vm_impl function for Tanh"""

    def vm_impl(x):
        x = x.asnumpy()
        return Tensor(vm.tanh(x))

    return vm_impl


@vm_impl_getters.register(P.FusedBatchNorm)
def vm_impl_fused_batch_norm(self):
    """Generate vm_impl function for FusedBatchNorm"""

    def vm_impl(x, scale, b, mean, variance):
        # pylint: disable=unused-argument
        x = x.asnumpy()
        scale = scale.asnumpy()
        b = b.asnumpy()
        mean = mean.asnumpy()
        variance = variance.asnumpy()
        out, x_mean, x_var, running_mean, running_var = vm.batch_norm(
            x, scale, b, mean, variance, eps=self.epsilon, momentum=self.momentum)
        return Tensor(out), Tensor(x_mean), Tensor(x_var), \
               Tensor(running_mean), Tensor(running_var)

    return vm_impl


@vm_impl_getters.register(P.BatchNorm)
def vm_impl_batch_norm(self):
    """Generate vm_impl function for BatchNorm"""

    def vm_impl(x, scale, b, mean, variance):
        # pylint: disable=unused-argument
        x = x.asnumpy()
        scale = scale.asnumpy()
        b = b.asnumpy()
        mean = mean.asnumpy()
        variance = variance.asnumpy()
        out, x_mean, x_var, running_mean, running_var = vm.batch_norm(
            x, scale, b, mean, variance, eps=self.epsilon)
        return Tensor(out), Tensor(x_mean), Tensor(x_var), \
               Tensor(running_mean), Tensor(running_var)

    return vm_impl

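# Reference note (a sketch of the conventional batch-norm formula; the authoritative
# semantics are whatever vm.batch_norm in vm_interface implements):
#   x_hat = (x - mean) / sqrt(variance + eps)
#   out   = scale * x_hat + b
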
@vm_impl_getters.register(P.Conv2D)
def vm_impl_conv2d(self):
    """Generate vm_impl function for Conv2D"""

    def vm_impl(x, w):
        x = x.asnumpy()
        weight = w.asnumpy()
        bias = None
        out = vm.conv2d(x, weight, bias, self.stride, self.pad, self.dilation)
        return Tensor(out)

    return vm_impl


@vm_impl_getters.register(G.MaxPoolGradWithArgmax)
def vm_impl_max_pool_grad_with_argmax(self):
    """Generate vm_impl function for MaxPoolGradWithArgmax"""

    def vm_impl(x, argmax, dout):
        x = x.asnumpy()
        dout = dout.asnumpy()
        arg_max = argmax.asnumpy()
        dx = vm.max_pool_grad_with_argmax(x, arg_max, dout, self.pool_h, self.pool_w, self.stride, self.pad)
        return Tensor(dx)

    return vm_impl


@vm_impl_getters.register(P.MaxPoolWithArgmax)
def vm_impl_max_pool_with_argmax(self):
    """Generate vm_impl function for MaxPoolWithArgmax"""

    def vm_impl(x):
        x = x.asnumpy()
        out, out_argmax = vm.max_pool_with_argmax(x, self.pool_h, self.pool_w, self.stride, self.pad)
        return Tensor(out), Tensor(out_argmax)

    return vm_impl


@vm_impl_getters.register(P.MaxPool)
def vm_impl_max_pool(self):
    """Generate vm_impl function for MaxPool"""

    def vm_impl(x):
        x = x.asnumpy()
        out = vm.max_pooling(x, self.pool_h, self.pool_w, self.stride_h, self.pad)
        return Tensor(out)

    return vm_impl


@vm_impl_getters.register(G.MaxPoolGrad)
def vm_impl_max_pool_grad(self):
    """Generate vm_impl function for MaxPoolGrad"""

    def vm_impl(x, out, dout):
        x = x.asnumpy()
        dout = dout.asnumpy()
        out = vm.max_pool_grad(x, dout, self.pool_h, self.pool_w, self.stride_h, self.pad)
        return Tensor(out)

    return vm_impl

@vm_impl_getters.register(P.AvgPool)
def vm_impl_avg_pool(self):
    """Generate vm_impl function for AvgPool"""

    def vm_impl(x):
        x = x.asnumpy()
        out = vm.avg_pooling(x, self.pool_h, self.pool_w, self.stride_h, self.pad)
        return Tensor(out)

    return vm_impl

@vm_impl_getters.register(G.AvgPoolGrad)
def vm_impl_avg_pool_grad(self):
    """Generate vm_impl function for AvgPoolGrad"""

    def vm_impl(dout, origin_shape):
        dout = dout.asnumpy()
        out = vm.avg_pool_grad(dout, origin_shape, self.pool_h, self.pool_w, self.stride_h, self.pad)
        return Tensor(out)

    return vm_impl


@vm_impl_getters.register(G.FusedBatchNormGrad)
def vm_impl_fused_batch_norm_grad(self):
    """Generate vm_impl function for FusedBatchNormGrad"""

    def vm_impl(dy, x, scale, save_mean, save_inv_variance):
        dy = dy.asnumpy()
        x = x.asnumpy()
        scale = scale.asnumpy()
        save_mean = save_mean.asnumpy()
        save_inv_variance = save_inv_variance.asnumpy()
        dx, dscale, dshift = vm.batch_norm_grad(dy, x, scale, save_mean, save_inv_variance)
        return (Tensor(dx), Tensor(dscale), Tensor(dshift))

    return vm_impl

@vm_impl_getters.register(G.BatchNormGrad)
def vm_impl_batch_norm_grad(self):
    """Generate vm_impl function for BatchNormGrad"""

    def vm_impl(dy, x, scale, save_mean, save_inv_variance):
        dy = dy.asnumpy()
        x = x.asnumpy()
        scale = scale.asnumpy()
        save_mean = save_mean.asnumpy()
        save_inv_variance = save_inv_variance.asnumpy()
        dx, dscale, dshift = vm.batch_norm_grad(dy, x, scale, save_mean, save_inv_variance)
        return (Tensor(dx), Tensor(dscale), Tensor(dshift))

    return vm_impl

@vm_impl_getters.register(G.ReluGrad)
def vm_impl_relu_grad(self):
    """Generate vm_impl function for ReluGrad"""

    def vm_impl(y_backprop, x):
        x = x.asnumpy()
        y_backprop = y_backprop.asnumpy()
        y_backprop = vm.relu_grad(x.copy()) * y_backprop
        return Tensor(y_backprop)

    return vm_impl


@vm_impl_getters.register(P.Conv2DBackpropInput)
def vm_impl_conv2d_backprop_input(self):
    """Generate vm_impl function for Conv2DBackpropInput"""

    def vm_impl(dout, w, x_size):
        dout = dout.asnumpy()
        w = w.asnumpy()
        dx = vm.conv2d_backprop_input(dout, x_size, w, self.stride, self.pad)
        return Tensor(dx)

    return vm_impl


@vm_impl_getters.register(G.Conv2DBackpropFilter)
def vm_impl_conv2d_backprop_filter(self):
    """Generate vm_impl function for Conv2DBackpropFilter"""

    def vm_impl(dout, x, w_size):
        x = x.asnumpy()
        dout = dout.asnumpy()
        dw = vm.conv2d_backprop_filter(dout, x, w_size, self.stride, self.pad)
        return Tensor(dw)

    return vm_impl


@vm_impl_getters.register(G.FlattenGrad)
def vm_impl_flatten_grad(self):
    """Generate vm_impl function for FlattenGrad"""

    def vm_impl(dout, x):
        dout = dout.asnumpy()
        dout = vm.flatten_grad(dout, x)
        return Tensor(dout)

    return vm_impl


@vm_impl_getters.register(P.BiasAdd)
def vm_impl_bias_add(self):
    """Generate vm_impl function for BiasAdd"""

    def vm_impl(wx, bias):
        wx = wx.asnumpy()
        bias = bias.asnumpy()
        out = wx + bias
        return Tensor(out)

    return vm_impl


@vm_impl_getters.register(G.BiasAddGrad)
def vm_impl_bias_add_grad(self):
    """Generate vm_impl function for BiasAddGrad"""

    def vm_impl(dout):
        dout = dout.asnumpy()
        shape = np.shape(dout)
        return Tensor(np.add.reduce(dout, axis=tuple(range(len(shape) - 1))))

    return vm_impl

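# Worked example for BiasAddGrad above: dout is reduced over every axis except the last
# (the bias/channel axis). For dout = np.ones((2, 3)) the reduction axes are (0,), so
#   np.add.reduce(np.ones((2, 3)), axis=(0,))  ->  array([2., 2., 2.])
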
@vm_impl_getters.register(P.SoftmaxCrossEntropyWithLogits)
def vm_impl_softmax_cross_entropy_with_logits(self):
    """Generate vm_impl function for SoftmaxCrossEntropyWithLogits"""

    def vm_impl(logits, labels):
        logits = logits.asnumpy()
        labels = labels.asnumpy()
        loss, dx = vm.softmax_cross_entropy_with_logits(logits, labels)
        return (Tensor(np.array(loss)), Tensor(dx))

    return vm_impl

@vm_impl_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def vm_impl_sparse_softmax_cross_entropy_with_logits(self):
    """Generate vm_impl function for SparseSoftmaxCrossEntropyWithLogits"""

    def vm_impl(logits, labels):
        logits = logits.asnumpy()
        labels = labels.asnumpy()
        n_class = labels.max() + 1
        n_sample = labels.shape[0]
        one_hot_label = np.zeros((n_sample, n_class))  # e.g. 3 samples, 4 classes
        one_hot_label[np.arange(n_sample), labels] = 1  # set each sample's label column to 1
        loss, dx = vm.softmax_cross_entropy_with_logits(logits, one_hot_label)
        if self.is_grad:
            return (Tensor(dx),)
        return (Tensor(np.array(loss)),)

    return vm_impl

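# Worked example for the one-hot construction above: with labels = np.array([0, 2, 1]),
# n_sample = 3 and n_class = 3, so one_hot_label becomes
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]
# i.e. row i carries a single 1 in column labels[i].
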
@vm_impl_getters.register(P.ApplyMomentum)
def vm_impl_momentum(self):
    """Generate vm_impl function for Momentum"""

    def vm_impl(variable,
                accumulation,
                learning_rate,
                gradient,
                momentum,
                use_nesterov=False):
        gradient = gradient.asnumpy()
        accumulation = accumulation.asnumpy()
        variable = variable.asnumpy()
        shape = accumulation.shape
        learning_rate = np.full(shape, learning_rate)
        momentum = np.full(shape, momentum)
        accumulation = accumulation * momentum + gradient
        if use_nesterov is True:
            variable -= gradient * learning_rate + accumulation * momentum * learning_rate
        else:
            variable -= accumulation * learning_rate
        return Tensor(variable)

    return vm_impl

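# Note on the update above: it follows standard SGD with momentum,
#   accumulation  = momentum * accumulation + gradient
#   variable     -= learning_rate * accumulation                          (default)
#   variable     -= learning_rate * (gradient + momentum * accumulation)  (Nesterov)
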
@vm_impl_getters.register(P.ResizeBilinear)
def vm_impl_resize_bilinear(self):
    """Generate vm_impl function for ResizeBilinear"""

    def vm_impl(x):
        out = vm.ResizeBilinear(x)
        return Tensor(out)

    return vm_impl


@vm_impl_getters.register(G.ResizeBilinearGrad)
def vm_impl_resize_bilinear_grad(self):
    """Generate vm_impl function for ResizeBilinearGrad"""

    def vm_impl(dout, original_image):
        out = vm.ResizeBilinearGrad(dout, original_image)
        return Tensor(out)

    return vm_impl
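

# ----------------------------------------------------------------------------
# Minimal self-check sketch (illustrative only; assumes vm.relu computes an
# elementwise max(0, x)). Each getter above takes the primitive instance and
# returns a numpy-backed callable.
if __name__ == "__main__":
    _relu_vm = vm_impl_relu(P.ReLU())
    _out = _relu_vm(Tensor(np.array([[-1.0, 2.0, -3.0]], np.float32)))
    print(_out)  # expected [[0. 2. 0.]] under the assumption above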