From 11283ad8123fca4761b53a355125950db7956e1c Mon Sep 17 00:00:00 2001 From: zhangyi Date: Mon, 22 Mar 2021 16:03:17 +0800 Subject: [PATCH] fix API comments' grammar errors and formatting errors. --- mindspore/compression/quant/qat.py | 28 ++++++++++++++-------------- mindspore/nn/cell.py | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/mindspore/compression/quant/qat.py b/mindspore/compression/quant/qat.py index 10d9758341..a4baa4344c 100644 --- a/mindspore/compression/quant/qat.py +++ b/mindspore/compression/quant/qat.py @@ -46,20 +46,20 @@ def create_quant_config(quant_observer=(nn.FakeQuantWithMinMaxObserver, nn.FakeQ Config the observer type of weights and data flow with quant params. Args: - quant_observer (Union[Observer, list, tuple]): The observer type to do quantization. The first element represent - weights and second element represent data flow. + quant_observer (Union[Observer, list, tuple]): The observer type to do quantization. The first element + represents weights and second element represents data flow. Default: (nn.FakeQuantWithMinMaxObserver, nn.FakeQuantWithMinMaxObserver) quant_delay (Union[int, list, tuple]): Number of steps after which weights and activations are quantized during - eval. The first element represent weights and second element represent data flow. Default: (0, 0) + eval. The first element represents weights and second element represents data flow. Default: (0, 0) quant_dtype (Union[QuantDtype, list, tuple]): Datatype to use for quantize weights and activations. The first - element represent weights and second element represent data flow. + element represents weights and second element represents data flow. Default: (QuantDtype.INT8, QuantDtype.INT8) per_channel (Union[bool, list, tuple]): Quantization granularity based on layer or on channel. If `True` - then base on per channel otherwise base on per layer. The first element represent weights - and second element represent data flow. 
Default: (False, False) + then base on per channel otherwise base on per layer. The first element represents weights + and second element represents data flow. Default: (False, False) symmetric (Union[bool, list, tuple]): Whether the quantization algorithm is symmetric or not. If `True` then - base on symmetric otherwise base on asymmetric. The first element represent weights and second - element represent data flow. Default: (False, False) + base on symmetric otherwise base on asymmetric. The first element represents weights and second + element represents data flow. Default: (False, False) narrow_range (Union[bool, list, tuple]): Whether the quantization algorithm uses narrow range or not. The first element represents weights and the second element represents data flow. Default: (False, False) @@ -124,16 +124,16 @@ class QuantizationAwareTraining(Quantizer): bn_fold (bool): Flag to used bn fold ops for simulation inference operation. Default: True. freeze_bn (int): Number of steps after which BatchNorm OP parameters used total mean and variance. Default: 1e7. quant_delay (Union[int, list, tuple]): Number of steps after which weights and activations are quantized during - eval. The first element represent weights and second element represent data flow. Default: (0, 0) + eval. The first element represents weights and second element represents data flow. Default: (0, 0) quant_dtype (Union[QuantDtype, list, tuple]): Datatype to use for quantize weights and activations. The first - element represent weights and second element represent data flow. + element represents weights and second element represents data flow. Default: (QuantDtype.INT8, QuantDtype.INT8) per_channel (Union[bool, list, tuple]): Quantization granularity based on layer or on channel. If `True` - then base on per channel otherwise base on per layer. The first element represent weights - and second element represent data flow. 
Default: (False, False) + then base on per channel otherwise base on per layer. The first element represents weights + and second element represents data flow. Default: (False, False) symmetric (Union[bool, list, tuple]): Whether the quantization algorithm is symmetric or not. If `True` then - base on symmetric otherwise base on asymmetric. The first element represent weights and second - element represent data flow. Default: (False, False) + base on symmetric otherwise base on asymmetric. The first element represents weights and second + element represents data flow. Default: (False, False) narrow_range (Union[bool, list, tuple]): Whether the quantization algorithm uses narrow range or not. The first element represents weights and the second element represents data flow. Default: (False, False) optimize_option (Union[OptimizeOption, list, tuple]): Specifies the quant algorithm and options, currently only diff --git a/mindspore/nn/cell.py b/mindspore/nn/cell.py index 621ee06d08..55559f63a5 100755 --- a/mindspore/nn/cell.py +++ b/mindspore/nn/cell.py @@ -733,7 +733,7 @@ class Cell(Cell_): """ Initialize all parameters and replace the original saved parameters in cell. - Notes: + Note: trainable_params() and other similar interfaces may return different parameter instance after `init_parameters_data`, do not save these result.