Browse Source

fix QuantizationAwareTraining note typo

tags/v1.1.0
yuchaojie 5 years ago
parent
commit
1c095221e6
1 changed file with 2 additions and 2 deletions
  1. +2
    -2
      mindspore/compression/quant/qat.py

+ 2
- 2
mindspore/compression/quant/qat.py View File

@@ -140,7 +140,7 @@ class QuantizationAwareTraining(Quantizer):
support QAT. Default: OptimizeOption.QAT

Examples:
>>> class Net(nn.Cell):
>>> class LeNet5(nn.Cell):
>>> def __init__(self, num_class=10, channel=1):
>>> super(LeNet5, self).__init__()
>>> self.type = "fusion"
@@ -168,7 +168,7 @@ class QuantizationAwareTraining(Quantizer):
>>> x = self.fc3(x)
>>> return x
>>>
>>> net = Net()
>>> net = LeNet5()
>>> quantizer = QuantizationAwareTraining(bn_fold=False, per_channel=[True, False], symmetric=[True, False])
>>> net_qat = quantizer.quantize(net)
"""


Loading…
Cancel
Save