
_lr_scheduler_callback.py 2.7 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LearningRateScheduler Callback class."""

import math

import numpy as np

import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.train.callback._callback import Callback
from mindspore.ops import functional as F


class LearningRateScheduler(Callback):
    """
    Change the learning_rate during training.

    Note:
        This class is not supported on CPU.

    Args:
        learning_rate_function (Function): The function about how to change the learning rate during training.

    Examples:
        >>> from _lr_scheduler_callback import LearningRateScheduler
        >>> import mindspore.nn as nn
        >>> from mindspore.train import Model
        ...
        >>> def learning_rate_function(lr, cur_step_num):
        ...     if cur_step_num%1000 == 0:
        ...         lr = lr*0.1
        ...     return lr
        ...
        >>> lr = 0.1
        >>> momentum = 0.9
        >>> net = Net()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
        ...
        >>> dataset = create_custom_dataset("custom_dataset_path")
        >>> model.train(1, dataset, callbacks=[LearningRateScheduler(learning_rate_function)],
        ...             dataset_sink_mode=False)
    """

    def __init__(self, learning_rate_function):
        super(LearningRateScheduler, self).__init__()
        self.learning_rate_function = learning_rate_function

    def step_end(self, run_context):
        cb_params = run_context.original_args()
        # Read the optimizer's current learning rate out as a Python float.
        arr_lr = cb_params.optimizer.learning_rate.asnumpy()
        lr = float(np.array2string(arr_lr))
        new_lr = self.learning_rate_function(lr, cb_params.cur_step_num)
        # Write back (and log) only when the schedule actually changed the value.
        if not math.isclose(lr, new_lr, rel_tol=1e-10):
            F.assign(cb_params.optimizer.learning_rate, Tensor(new_lr, mstype.float32))
            print(f'At step {cb_params.cur_step_num}, learning_rate change to {new_lr}')
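
The callback drives any function with the (lr, cur_step_num) signature shown in the docstring. As a minimal sketch of an alternative schedule, the step-decay helper below adds a lower bound so the rate cannot decay toward zero; step_decay and its parameters are illustrative, not part of this file.

# A minimal sketch, assuming only the (lr, cur_step_num) calling convention
# documented above; step_decay is a hypothetical helper, not a MindSpore API.
def step_decay(lr, cur_step_num, decay_rate=0.5, decay_every=2000, min_lr=1e-5):
    """Halve the learning rate every `decay_every` steps, flooring it at `min_lr`."""
    if cur_step_num > 0 and cur_step_num % decay_every == 0:
        lr = max(lr * decay_rate, min_lr)
    return lr

# step_end calls the function with exactly two positional arguments, so the
# extra keyword parameters keep their defaults when attached like this:
#     model.train(1, dataset, callbacks=[LearningRateScheduler(step_decay)],
#                 dataset_sink_mode=False)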