You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

train_quant.py 3.7 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. ######################## train lenet example ########################
  17. train lenet and get network model files(.ckpt) :
  18. python train.py --data_path /YourDataPath
  19. """
  20. import os
  21. import argparse
  22. import mindspore.nn as nn
  23. from mindspore import context
  24. from mindspore.train.serialization import load_checkpoint
  25. from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
  26. from mindspore.train import Model
  27. from mindspore.nn.metrics import Accuracy
  28. from mindspore.compression.quant import QuantizationAwareTraining
  29. from mindspore.compression.quant.quant_utils import load_nonquant_param_into_quant_net
  30. from mindspore.common import set_seed
  31. from src.dataset import create_dataset
  32. from src.config import mnist_cfg as cfg
  33. from src.lenet_fusion import LeNet5 as LeNet5Fusion
  34. from src.loss_monitor import LossMonitor
  35. set_seed(1)
  36. parser = argparse.ArgumentParser(description='MindSpore MNIST Example')
  37. parser.add_argument('--device_target', type=str, default="Ascend",
  38. choices=['Ascend', 'GPU'],
  39. help='device where the code will be implemented (default: Ascend)')
  40. parser.add_argument('--data_path', type=str, default="./MNIST_Data",
  41. help='path where the dataset is saved')
  42. parser.add_argument('--ckpt_path', type=str, default="",
  43. help='if mode is test, must provide path where the trained ckpt file')
  44. args = parser.parse_args()
  45. if __name__ == "__main__":
  46. context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
  47. ds_train = create_dataset(os.path.join(args.data_path, "train"), cfg.batch_size, 1)
  48. step_size = ds_train.get_dataset_size()
  49. # define fusion network
  50. network = LeNet5Fusion(cfg.num_classes)
  51. # load quantization aware network checkpoint
  52. param_dict = load_checkpoint(args.ckpt_path)
  53. load_nonquant_param_into_quant_net(network, param_dict)
  54. # convert fusion network to quantization aware network
  55. quantizer = QuantizationAwareTraining(quant_delay=900,
  56. bn_fold=False,
  57. per_channel=[True, False],
  58. symmetric=[True, False])
  59. network = quantizer.quantize(network)
  60. # define network loss
  61. net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  62. # define network optimization
  63. net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
  64. # call back and monitor
  65. config_ckpt = CheckpointConfig(save_checkpoint_steps=cfg.epoch_size * step_size,
  66. keep_checkpoint_max=cfg.keep_checkpoint_max)
  67. ckpt_callback = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ckpt)
  68. # define model
  69. model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
  70. print("============== Starting Training ==============")
  71. model.train(cfg['epoch_size'], ds_train, callbacks=[ckpt_callback, LossMonitor()],
  72. dataset_sink_mode=True)
  73. print("============== End Training ==============")