
train.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train imagenet."""
import argparse
import os

from mindspore import Tensor
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.nn.optim.rmsprop import RMSProp
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed

from src.config import nasnet_a_mobile_config_gpu as cfg
from src.dataset import create_dataset
from src.nasnet_a_mobile import NASNetAMobile, CrossEntropy
from src.lr_generator import get_lr

set_seed(cfg.random_seed)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='image classification training')
    parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
    parser.add_argument('--resume', type=str, default='', help='resume training with existed checkpoint')
    parser.add_argument('--is_distributed', action='store_true', default=False,
                        help='distributed training')
    parser.add_argument('--platform', type=str, default='GPU', choices=('Ascend', 'GPU'), help='run platform')
    args_opt = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.platform, save_graphs=False)
    if os.getenv('DEVICE_ID', "not_set").isdigit():
        context.set_context(device_id=int(os.getenv('DEVICE_ID')))

    # init distributed
    if args_opt.is_distributed:
        if args_opt.platform == "Ascend":
            init()
        else:
            init("nccl")
        cfg.rank = get_rank()
        cfg.group_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
        context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=cfg.group_size,
                                          gradients_mean=True)
    else:
        cfg.rank = 0
        cfg.group_size = 1

    # dataloader
    dataset = create_dataset(args_opt.dataset_path, cfg, True)
    batches_per_epoch = dataset.get_dataset_size()

    # network
    net = NASNetAMobile(cfg.num_classes)
    if args_opt.resume:
        ckpt = load_checkpoint(args_opt.resume)
        load_param_into_net(net, ckpt)

    # loss
    loss = CrossEntropy(smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes, factor=cfg.aux_factor)

    # learning rate schedule
    lr = get_lr(lr_init=cfg.lr_init, lr_decay_rate=cfg.lr_decay_rate,
                num_epoch_per_decay=cfg.num_epoch_per_decay, total_epochs=cfg.epoch_size,
                steps_per_epoch=batches_per_epoch, is_stair=True)
    lr = Tensor(lr)

    # optimizer
    decayed_params = []
    no_decayed_params = []
    for param in net.trainable_params():
        if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
            decayed_params.append(param)
        else:
            no_decayed_params.append(param)
    group_params = [{'params': decayed_params, 'weight_decay': cfg.weight_decay},
                    {'params': no_decayed_params},
                    {'order_params': net.trainable_params()}]
    optimizer = RMSProp(group_params, lr, decay=cfg.rmsprop_decay, weight_decay=cfg.weight_decay,
                        momentum=cfg.momentum, epsilon=cfg.opt_eps, loss_scale=cfg.loss_scale)

    model = Model(net, loss_fn=loss, optimizer=optimizer)

    print("============== Starting Training ==============")
    loss_cb = LossMonitor(per_print_times=batches_per_epoch)
    time_cb = TimeMonitor(data_size=batches_per_epoch)
    callbacks = [loss_cb, time_cb]
    config_ck = CheckpointConfig(save_checkpoint_steps=batches_per_epoch, keep_checkpoint_max=cfg.keep_checkpoint_max)
    save_ckpt_path = os.path.join(cfg.ckpt_path, 'ckpt_' + str(cfg.rank) + '/')
    ckpoint_cb = ModelCheckpoint(prefix=f"nasnet-a-mobile-rank{cfg.rank}", directory=save_ckpt_path, config=config_ck)
    if args_opt.is_distributed & cfg.is_save_on_master:
        if cfg.rank == 0:
            callbacks.append(ckpoint_cb)
        model.train(cfg.epoch_size, dataset, callbacks=callbacks, dataset_sink_mode=True)
    else:
        callbacks.append(ckpoint_cb)
        model.train(cfg.epoch_size, dataset, callbacks=callbacks, dataset_sink_mode=True)
    print("train success")
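
For reference, a minimal way to launch this script, assuming the accompanying src package and config are in place; the dataset path below is a placeholder, and the distributed example assumes an OpenMPI mpirun launcher, which is the usual setup for MindSpore NCCL data-parallel GPU training (adjust to your environment):

    # single-device GPU training (placeholder dataset path)
    python train.py --dataset_path /path/to/imagenet/train --platform GPU

    # 8-GPU data-parallel training (assumes mpirun from OpenMPI is available)
    mpirun -n 8 python train.py --dataset_path /path/to/imagenet/train --platform GPU --is_distributed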