You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

train.py 7.0 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. ######################## train alexnet example ########################
  17. train alexnet and get network model files(.ckpt) :
  18. python train.py --data_path /YourDataPath
  19. """
  20. import ast
  21. import argparse
  22. import os
  23. from src.config import alexnet_cifar10_cfg, alexnet_imagenet_cfg
  24. from src.dataset import create_dataset_cifar10, create_dataset_imagenet
  25. from src.generator_lr import get_lr_cifar10, get_lr_imagenet
  26. from src.alexnet import AlexNet
  27. from src.get_param_groups import get_param_groups
  28. import mindspore.nn as nn
  29. from mindspore.communication.management import init, get_rank
  30. from mindspore import dataset as de
  31. from mindspore import context
  32. from mindspore import Tensor
  33. from mindspore.train import Model
  34. from mindspore.context import ParallelMode
  35. from mindspore.nn.metrics import Accuracy
  36. from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
  37. from mindspore.common import set_seed
  38. set_seed(1)
  39. de.config.set_seed(1)
  40. if __name__ == "__main__":
  41. parser = argparse.ArgumentParser(description='MindSpore AlexNet Example')
  42. parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['imagenet', 'cifar10'],
  43. help='dataset name.')
  44. parser.add_argument('--sink_size', type=int, default=-1, help='control the amount of data in each sink')
  45. parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'],
  46. help='device where the code will be implemented (default: Ascend)')
  47. parser.add_argument('--data_path', type=str, default="./", help='path where the dataset is saved')
  48. parser.add_argument('--ckpt_path', type=str, default="./ckpt", help='if is test, must provide\
  49. path where the trained ckpt file')
  50. parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
  51. default=True, help='dataset_sink_mode is False or True')
  52. parser.add_argument('--device_id', type=int, default=0, help='device id of GPU or Ascend. (Default: 0)')
  53. args = parser.parse_args()
  54. device_num = int(os.environ.get("DEVICE_NUM", 1))
  55. if args.dataset_name == "cifar10":
  56. cfg = alexnet_cifar10_cfg
  57. if device_num > 1:
  58. cfg.learning_rate = cfg.learning_rate * device_num
  59. cfg.epoch_size = cfg.epoch_size * 2
  60. elif args.dataset_name == "imagenet":
  61. cfg = alexnet_imagenet_cfg
  62. else:
  63. raise ValueError("Unsupported dataset.")
  64. device_target = args.device_target
  65. context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
  66. context.set_context(save_graphs=False)
  67. if device_target == "Ascend":
  68. context.set_context(device_id=args.device_id)
  69. if device_num > 1:
  70. context.reset_auto_parallel_context()
  71. context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
  72. gradients_mean=True)
  73. init()
  74. elif device_target == "GPU":
  75. if device_num > 1:
  76. init()
  77. context.reset_auto_parallel_context()
  78. context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
  79. gradients_mean=True)
  80. else:
  81. raise ValueError("Unsupported platform.")
  82. if args.dataset_name == "cifar10":
  83. ds_train = create_dataset_cifar10(args.data_path, cfg.batch_size, target=args.device_target)
  84. elif args.dataset_name == "imagenet":
  85. ds_train = create_dataset_imagenet(args.data_path, cfg.batch_size)
  86. else:
  87. raise ValueError("Unsupported dataset.")
  88. if ds_train.get_dataset_size() == 0:
  89. raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
  90. network = AlexNet(cfg.num_classes, phase='train')
  91. loss_scale_manager = None
  92. metrics = None
  93. step_per_epoch = ds_train.get_dataset_size() if args.sink_size == -1 else args.sink_size
  94. if args.dataset_name == 'cifar10':
  95. loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  96. lr = Tensor(get_lr_cifar10(0, cfg.learning_rate, cfg.epoch_size, step_per_epoch))
  97. opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)
  98. metrics = {"Accuracy": Accuracy()}
  99. elif args.dataset_name == 'imagenet':
  100. loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  101. lr = Tensor(get_lr_imagenet(cfg.learning_rate, cfg.epoch_size, step_per_epoch))
  102. opt = nn.Momentum(params=get_param_groups(network),
  103. learning_rate=lr,
  104. momentum=cfg.momentum,
  105. weight_decay=cfg.weight_decay,
  106. loss_scale=cfg.loss_scale)
  107. from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager
  108. if cfg.is_dynamic_loss_scale == 1:
  109. loss_scale_manager = DynamicLossScaleManager(init_loss_scale=65536, scale_factor=2, scale_window=2000)
  110. else:
  111. loss_scale_manager = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
  112. else:
  113. raise ValueError("Unsupported dataset.")
  114. if device_target == "Ascend":
  115. model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, amp_level="O2", keep_batchnorm_fp32=False,
  116. loss_scale_manager=loss_scale_manager)
  117. elif device_target == "GPU":
  118. model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, loss_scale_manager=loss_scale_manager)
  119. else:
  120. raise ValueError("Unsupported platform.")
  121. if device_num > 1:
  122. ckpt_save_dir = os.path.join(args.ckpt_path + "_" + str(get_rank()))
  123. else:
  124. ckpt_save_dir = args.ckpt_path
  125. time_cb = TimeMonitor(data_size=step_per_epoch)
  126. config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
  127. keep_checkpoint_max=cfg.keep_checkpoint_max)
  128. ckpoint_cb = ModelCheckpoint(prefix="checkpoint_alexnet", directory=ckpt_save_dir, config=config_ck)
  129. print("============== Starting Training ==============")
  130. model.train(cfg.epoch_size, ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()],
  131. dataset_sink_mode=args.dataset_sink_mode, sink_size=args.sink_size)