You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_lenet_quant.py 8.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. train and infer lenet quantization network
  17. """
  18. import os
  19. import pytest
  20. from mindspore import context
  21. from mindspore import Tensor
  22. from mindspore.common import dtype as mstype
  23. import mindspore.nn as nn
  24. from mindspore.nn.metrics import Accuracy
  25. from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
  26. from mindspore import load_checkpoint, load_param_into_net, export
  27. from mindspore.train import Model
  28. from mindspore.compression.quant import QuantizationAwareTraining
  29. from mindspore.compression.quant.quantizer import OptimizeOption
  30. from mindspore.compression.quant.quant_utils import load_nonquant_param_into_quant_net
  31. from dataset import create_dataset
  32. from config import quant_cfg
  33. from lenet_fusion import LeNet5 as LeNet5Fusion
  34. import numpy as np
# Root of the MNIST dataset; expects "train" and "test" subdirectories.
data_path = "/home/workspace/mindspore_dataset/mnist"
# Pre-trained, non-quantized LeNet checkpoint used to initialize the fusion network.
lenet_ckpt_path = "/home/workspace/mindspore_dataset/checkpoint/lenet/ckpt_lenet_noquant-10_1875.ckpt"
  37. def train_lenet_quant(optim_option="QAT"):
  38. cfg = quant_cfg
  39. ckpt_path = lenet_ckpt_path
  40. ds_train = create_dataset(os.path.join(data_path, "train"), cfg.batch_size, 1)
  41. step_size = ds_train.get_dataset_size()
  42. # define fusion network
  43. network = LeNet5Fusion(cfg.num_classes)
  44. # load quantization aware network checkpoint
  45. param_dict = load_checkpoint(ckpt_path)
  46. load_nonquant_param_into_quant_net(network, param_dict)
  47. # convert fusion network to quantization aware network
  48. if optim_option == "LEARNED_SCALE":
  49. quant_optim_otions = OptimizeOption.LEARNED_SCALE
  50. quantizer = QuantizationAwareTraining(bn_fold=False,
  51. per_channel=[True, False],
  52. symmetric=[True, True],
  53. narrow_range=[True, True],
  54. freeze_bn=0,
  55. quant_delay=0,
  56. one_conv_fold=True,
  57. optimize_option=quant_optim_otions)
  58. else:
  59. quantizer = QuantizationAwareTraining(quant_delay=900,
  60. bn_fold=False,
  61. per_channel=[True, False],
  62. symmetric=[True, False])
  63. network = quantizer.quantize(network)
  64. # define network loss
  65. net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  66. # define network optimization
  67. net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
  68. # call back and monitor
  69. config_ckpt = CheckpointConfig(save_checkpoint_steps=cfg.epoch_size * step_size,
  70. keep_checkpoint_max=cfg.keep_checkpoint_max)
  71. ckpt_callback = ModelCheckpoint(prefix="ckpt_lenet_quant"+optim_option, config=config_ckpt)
  72. # define model
  73. model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
  74. print("============== Starting Training ==============")
  75. model.train(cfg['epoch_size'], ds_train, callbacks=[ckpt_callback, LossMonitor()],
  76. dataset_sink_mode=True)
  77. print("============== End Training ==============")
  78. def eval_quant(optim_option="QAT"):
  79. cfg = quant_cfg
  80. ds_eval = create_dataset(os.path.join(data_path, "test"), cfg.batch_size, 1)
  81. ckpt_path = './ckpt_lenet_quant'+optim_option+'-10_937.ckpt'
  82. # define fusion network
  83. network = LeNet5Fusion(cfg.num_classes)
  84. # convert fusion network to quantization aware network
  85. if optim_option == "LEARNED_SCALE":
  86. quant_optim_otions = OptimizeOption.LEARNED_SCALE
  87. quantizer = QuantizationAwareTraining(bn_fold=False,
  88. per_channel=[True, False],
  89. symmetric=[True, True],
  90. narrow_range=[True, True],
  91. freeze_bn=0,
  92. quant_delay=0,
  93. one_conv_fold=True,
  94. optimize_option=quant_optim_otions)
  95. else:
  96. quantizer = QuantizationAwareTraining(quant_delay=0,
  97. bn_fold=False,
  98. freeze_bn=10000,
  99. per_channel=[True, False],
  100. symmetric=[True, False])
  101. network = quantizer.quantize(network)
  102. # define loss
  103. net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  104. # define network optimization
  105. net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
  106. # call back and monitor
  107. model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
  108. # load quantization aware network checkpoint
  109. param_dict = load_checkpoint(ckpt_path)
  110. not_load_param = load_param_into_net(network, param_dict)
  111. if not_load_param:
  112. raise ValueError("Load param into net fail!")
  113. print("============== Starting Testing ==============")
  114. acc = model.eval(ds_eval, dataset_sink_mode=True)
  115. print("============== {} ==============".format(acc))
  116. assert acc['Accuracy'] > 0.98
  117. def export_lenet(optim_option="QAT", file_format="MINDIR"):
  118. cfg = quant_cfg
  119. # define fusion network
  120. network = LeNet5Fusion(cfg.num_classes)
  121. # convert fusion network to quantization aware network
  122. if optim_option == "LEARNED_SCALE":
  123. quant_optim_otions = OptimizeOption.LEARNED_SCALE
  124. quantizer = QuantizationAwareTraining(bn_fold=False,
  125. per_channel=[True, False],
  126. symmetric=[True, True],
  127. narrow_range=[True, True],
  128. freeze_bn=0,
  129. quant_delay=0,
  130. one_conv_fold=True,
  131. optimize_option=quant_optim_otions)
  132. else:
  133. quantizer = QuantizationAwareTraining(quant_delay=0,
  134. bn_fold=False,
  135. freeze_bn=10000,
  136. per_channel=[True, False],
  137. symmetric=[True, False])
  138. network = quantizer.quantize(network)
  139. # export network
  140. inputs = Tensor(np.ones([1, 1, cfg.image_height, cfg.image_width]), mstype.float32)
  141. export(network, inputs, file_name="lenet_quant", file_format=file_format, quant_mode='AUTO')
  142. @pytest.mark.level0
  143. @pytest.mark.platform_x86_gpu_training
  144. @pytest.mark.env_onecard
  145. def test_lenet_quant():
  146. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  147. train_lenet_quant()
  148. eval_quant()
  149. export_lenet()
  150. train_lenet_quant(optim_option="LEARNED_SCALE")
  151. eval_quant(optim_option="LEARNED_SCALE")
  152. export_lenet(optim_option="LEARNED_SCALE")
  153. @pytest.mark.level0
  154. @pytest.mark.platform_arm_ascend_training
  155. @pytest.mark.platform_x86_ascend_training
  156. @pytest.mark.env_onecard
  157. def test_lenet_quant_ascend():
  158. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
  159. train_lenet_quant(optim_option="LEARNED_SCALE")
  160. eval_quant(optim_option="LEARNED_SCALE")
  161. export_lenet(optim_option="LEARNED_SCALE", file_format="AIR")