You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

grad_quant_ops.py 3.1 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Generate bprop for aware quantization ops"""
  16. from .. import operations as P
  17. from .grad_base import bprop_getters
  18. from ..composite.multitype_ops.zeros_like_impl import zeros_like
  19. @bprop_getters.register(P.FakeQuantWithMinMax)
  20. def get_bprop_fakequant_with_minmax(self):
  21. """Generate bprop for FakeQuantWithMinMax"""
  22. op = P.FakeQuantWithMinMaxGrad(num_bits=self.num_bits, quant_delay=self.quant_delay)
  23. def bprop(x, x_min, x_max, out, dout):
  24. dx = op(dout, x, x_min, x_max)
  25. return dx, zeros_like(x_min), zeros_like(x_max)
  26. return bprop
  27. @bprop_getters.register(P.FakeQuantWithMinMaxPerChannel)
  28. def get_bprop_fakequant_with_minmax_perchannel(self):
  29. """Generate bprop for FakeQuantWithMinMaxPerChannel"""
  30. op = P.FakeQuantWithMinMaxPerChannelGrad(num_bits=self.num_bits, quant_delay=self.quant_delay)
  31. def bprop(x, x_min, x_max, out, dout):
  32. dx = op(dout, x, x_min, x_max)
  33. return dx, zeros_like(x_min), zeros_like(x_max)
  34. return bprop
  35. @bprop_getters.register(P.BatchNormFold)
  36. def get_bprop_batchnorm_fold(self):
  37. """Generate bprop for BatchNormFold"""
  38. op = P.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn)
  39. def bprop(x, mean, variance, global_step, out, dout):
  40. dx = op(dout[0], dout[1], x, out[0], out[1], global_step)
  41. return dx, zeros_like(mean), zeros_like(variance), zeros_like(global_step)
  42. return bprop
  43. @bprop_getters.register(P.CorrectionMul)
  44. def get_bprop_correction_mul(self):
  45. """Generate bprop for CorrectionMul"""
  46. grad = P.CorrectionMulGrad()
  47. def bprop(x, batch_std, running_std, out, dout):
  48. dx, d_batch_std = grad(dout, x, batch_std, running_std)
  49. return dx, d_batch_std, zeros_like(running_std)
  50. return bprop
  51. @bprop_getters.register(P.BatchNormFold2)
  52. def get_bprop_batchnorm_fold2(self):
  53. """Generate bprop for CorrectionAdd"""
  54. op_f = P.BatchNormFold2Grad(freeze_bn=self.freeze_bn)
  55. def bprop(x, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, out, dout):
  56. d_batch_std, d_batch_mean, d_beta, d_gamma, d_x = op_f(dout, x, gamma, batch_std, batch_mean, running_std,
  57. running_mean, global_step)
  58. return d_x, d_beta, d_gamma, d_batch_std, d_batch_mean, zeros_like(running_std), zeros_like(running_mean), \
  59. zeros_like(global_step)
  60. return bprop