From 19c18eafba5a2adeaeda7ac73e0eed09a0c3f14c Mon Sep 17 00:00:00 2001
From: Erpim
Date: Fri, 30 Apr 2021 18:59:32 +0800
Subject: [PATCH] fix the problem that failed to obtain quant op info

---
 mindspore/ops/_op_impl/_custom_op/__init__.py | 13 +++++++++++++
 mindspore/ops/operations/_quant_ops.py        |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/mindspore/ops/_op_impl/_custom_op/__init__.py b/mindspore/ops/_op_impl/_custom_op/__init__.py
index 63e9419010..3b149fe263 100644
--- a/mindspore/ops/_op_impl/_custom_op/__init__.py
+++ b/mindspore/ops/_op_impl/_custom_op/__init__.py
@@ -14,9 +14,22 @@
 # ============================================================================
 """custom ops"""
+from .batchnorm_fold import _batchnorm_fold_tbe
+from .batchnorm_fold2 import _batchnorm_fold2_tbe
+from .batchnorm_fold2_grad import _batchnorm_fold2_grad_tbe
+from .batchnorm_fold2_grad_reduce import _batchnorm_fold2_grad_reduce_tbe
+from .batchnorm_fold_grad import _batchnorm_fold_grad_tbe
+from .correction_mul import _correction_mul_tbe
+from .correction_mul_grad import _correction_mul_grad_tbe
 from .fake_learned_scale_quant_perlayer import _fake_learned_scale_quant_perlayer_tbe
 from .fake_learned_scale_quant_perlayer_grad import _fake_learned_scale_quant_perlayer_grad_d_tbe
 from .fake_learned_scale_quant_perlayer_grad_reduce import _fake_learned_scale_quant_perlayer_grad_d_reduce_tbe
 from .fake_learned_scale_quant_perchannel import _fake_learned_scale_quant_perchannel_tbe
 from .fake_learned_scale_quant_perchannel_grad import _fake_learned_scale_quant_perchannel_grad_d_tbe
 from .fake_learned_scale_quant_perchannel_grad_reduce import _fake_learned_scale_quant_perchannel_grad_d_reduce_tbe
+from .fake_quant_perchannel import _fake_quant_perchannel_tbe
+from .fake_quant_perchannel_grad import _fake_quant_perchannel_grad_tbe
+from .fake_quant_perlayer import _fake_quant_per_layer_tbe
+from .fake_quant_perlayer_grad import _fake_quant_per_layer_grad_tbe
+from .minmax_update_perchannel import _minmax_update_perchannel_tbe
+from .minmax_update_perlayer import _minmax_update_perlayer_tbe
 
diff --git a/mindspore/ops/operations/_quant_ops.py b/mindspore/ops/operations/_quant_ops.py
index e097c8df3c..2da242f0d1 100644
--- a/mindspore/ops/operations/_quant_ops.py
+++ b/mindspore/ops/operations/_quant_ops.py
@@ -22,6 +22,9 @@
 from ..._checkparam import Rel
 from ..primitive import PrimitiveWithInfer, prim_attr_register
 from ...common import dtype as mstype
+if context.get_context('device_target') == "Ascend":
+    import mindspore.ops._op_impl._custom_op
+
 __all__ = ["MinMaxUpdatePerLayer",
            "MinMaxUpdatePerChannel",
           "FakeLearnedScaleQuantPerLayer",