train.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """Cycle GAN train."""
  16. import mindspore.nn as nn
  17. from mindspore.common import set_seed
  18. from src.models import get_generator, get_discriminator, Generator, TrainOneStepG, TrainOneStepD, \
  19. DiscriminatorLoss, GeneratorLoss
  20. from src.utils import get_lr, get_args, Reporter, ImagePool, load_ckpt
  21. from src.dataset import create_dataset
  22. set_seed(1)
  23. def train():
  24. """Train function."""
  25. args = get_args("train")
  26. if args.need_profiler:
  27. from mindspore.profiler.profiling import Profiler
  28. profiler = Profiler(output_path=args.outputs_dir, is_detail=True, is_show_op_path=True)
  29. ds = create_dataset(args)
  30. G_A = get_generator(args)
  31. G_B = get_generator(args)
  32. D_A = get_discriminator(args)
  33. D_B = get_discriminator(args)
  34. load_ckpt(args, G_A, G_B, D_A, D_B)
  35. imgae_pool_A = ImagePool(args.pool_size)
  36. imgae_pool_B = ImagePool(args.pool_size)
  37. generator = Generator(G_A, G_B, args.lambda_idt > 0)
  38. loss_D = DiscriminatorLoss(args, D_A, D_B)
  39. loss_G = GeneratorLoss(args, generator, D_A, D_B)
  40. optimizer_G = nn.Adam(generator.trainable_params(), get_lr(args), beta1=args.beta1)
  41. optimizer_D = nn.Adam(loss_D.trainable_params(), get_lr(args), beta1=args.beta1)
  42. net_G = TrainOneStepG(loss_G, generator, optimizer_G)
  43. net_D = TrainOneStepD(loss_D, optimizer_D)
  44. data_loader = ds.create_dict_iterator()
  45. reporter = Reporter(args)
  46. reporter.info('==========start training===============')
  47. for _ in range(args.max_epoch):
  48. reporter.epoch_start()
  49. for data in data_loader:
  50. img_A = data["image_A"]
  51. img_B = data["image_B"]
  52. res_G = net_G(img_A, img_B)
  53. fake_A = res_G[0]
  54. fake_B = res_G[1]
  55. res_D = net_D(img_A, img_B, imgae_pool_A.query(fake_A), imgae_pool_B.query(fake_B))
  56. reporter.step_end(res_G, res_D)
  57. reporter.visualizer(img_A, img_B, fake_A, fake_B)
  58. reporter.epoch_end(net_G)
  59. if args.need_profiler:
  60. profiler.analyse()
  61. break
  62. reporter.info('==========end training===============')
  63. if __name__ == "__main__":
  64. train()
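
The pooled fakes passed to net_D come from src.utils.ImagePool, which is not shown in this file. For readers unfamiliar with the trick, the sketch below illustrates what a CycleGAN-style image-history buffer typically does: it stores up to pool_size previously generated images and, with probability 0.5, answers a query with an older fake instead of the newest one, so the discriminator also trains on stale generator outputs. The class name SimpleImagePool and every detail of this sketch are assumptions for illustration; this is not the repository's actual implementation.

# simple_image_pool.py -- illustrative sketch only, not src.utils.ImagePool.
import numpy as np


class SimpleImagePool:
    """Buffer of previously generated images (CycleGAN-style history trick)."""

    def __init__(self, pool_size):
        self.pool_size = pool_size  # pool_size <= 0 disables the buffer
        self.images = []

    def query(self, images):
        """Return a batch mixing fresh fakes with randomly recalled old ones."""
        if self.pool_size <= 0:
            return images
        out = []
        for image in images:                      # iterate over the batch axis
            if len(self.images) < self.pool_size:
                self.images.append(image)         # fill the pool first
                out.append(image)
            elif np.random.uniform() > 0.5:
                idx = np.random.randint(0, self.pool_size)
                out.append(self.images[idx].copy())  # recall an old fake ...
                self.images[idx] = image             # ... and store the new one
            else:
                out.append(image)                 # otherwise pass the fresh fake through
        return np.stack(out)

A typical call mirrors the one in train(): create pool_B = SimpleImagePool(50) once, then feed each batch of fakes through mixed = pool_B.query(fake_B_batch), where fake_B_batch is an (N, C, H, W) NumPy array.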