
train.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""srcnn training"""
import os
import argparse
import ast

import mindspore as ms
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.common import set_seed
from mindspore.train.model import Model, ParallelMode
from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init, get_rank, get_group_size

from src.config import srcnn_cfg as config
from src.dataset import create_train_dataset
from src.srcnn import SRCNN

set_seed(1)


def filter_checkpoint_parameter_by_list(origin_dict, param_filter):
    """Remove parameters whose names match an entry in param_filter from the checkpoint dict."""
    for key in list(origin_dict.keys()):
        for name in param_filter:
            if name in key:
                print("Delete parameter from checkpoint: ", key)
                del origin_dict[key]
                break
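# Example (hypothetical parameter names): given a checkpoint dict such as
#   {'conv1.weight': ..., 'conv1.bias': ..., 'end_point.weight': ...}
# and param_filter=['end_point'], the loop above deletes 'end_point.weight'
# and keeps the conv1 parameters, so the head can be retrained from scratch.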
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="srcnn training")
    parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
    parser.add_argument('--device_num', type=int, default=1, help='Device num.')
    # choices must be a sequence of strings; a bare string here would make
    # argparse accept any substring of "GPU" (e.g. "G") as a valid value.
    parser.add_argument('--device_target', type=str, default='GPU', choices=("GPU",),
                        help="Device target, support GPU.")
    parser.add_argument('--pre_trained', type=str, default='', help='model_path, local pretrained model to load')
    parser.add_argument("--run_distribute", type=ast.literal_eval, default=False,
                        help="Run distribute, default: false.")
    parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                        help="Filter head weight parameters, default is False.")
    args, _ = parser.parse_known_args()

    if args.device_target == "GPU":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target=args.device_target,
                            save_graphs=False)
    else:
        raise ValueError("Unsupported device target.")

    rank = 0
    device_num = 1
    if args.run_distribute:
        init()
        rank = get_rank()
        device_num = get_group_size()
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL)

    train_dataset = create_train_dataset(args.dataset_path, batch_size=config.batch_size,
                                         shard_id=rank, num_shard=device_num)
    step_size = train_dataset.get_dataset_size()

    # define net
    net = SRCNN()

    # init weight from a pretrained checkpoint, optionally dropping the head
    # parameters so they are re-initialized for fine-tuning
    if args.pre_trained:
        param_dict = load_checkpoint(args.pre_trained)
        if args.filter_weight:
            filter_list = [x.name for x in net.end_point.get_parameters()]
            filter_checkpoint_parameter_by_list(param_dict, filter_list)
        load_param_into_net(net, param_dict)

    lr = Tensor(config.lr, ms.float32)
    opt = nn.Adam(params=net.trainable_params(), learning_rate=lr, eps=1e-07)
    loss = nn.MSELoss(reduction='mean')
    model = Model(net, loss_fn=loss, optimizer=opt)

    # define callbacks; only rank 0 writes checkpoints in distributed runs
    callbacks = [LossMonitor(), TimeMonitor(data_size=step_size)]
    if config.save_checkpoint and rank == 0:
        config_ck = CheckpointConfig(save_checkpoint_steps=step_size,
                                     keep_checkpoint_max=config.keep_checkpoint_max)
        save_ckpt_path = os.path.join(config.save_checkpoint_path, 'ckpt_' + str(rank) + '/')
        ckpt_cb = ModelCheckpoint(prefix="srcnn", directory=save_ckpt_path, config=config_ck)
        callbacks.append(ckpt_cb)

    model.train(config.epoch_size, train_dataset, callbacks=callbacks)
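
The script reads several fields from src.config.srcnn_cfg. A minimal sketch of that contract, with the field names taken from the calls above; the values below are placeholders, not the repository's actual settings:

from types import SimpleNamespace

# Hypothetical stand-in for src/config.py's srcnn_cfg; values are placeholders.
srcnn_cfg = SimpleNamespace(
    lr=1e-4,                    # learning rate passed to nn.Adam
    batch_size=16,              # batch size passed to create_train_dataset
    epoch_size=20,              # number of epochs passed to model.train
    save_checkpoint=True,       # whether to register a ModelCheckpoint callback
    keep_checkpoint_max=10,     # cap passed to CheckpointConfig
    save_checkpoint_path='./',  # root dir for the ckpt_<rank>/ output folder
)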