# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""lamb"""
import numpy as np
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer
from .. import layer
from .. import graph_kernels as G

num_one = Tensor(np.ones([1]), mstype.float32)

_lamb_opt = C.MultitypeFuncGraph("lamb_opt")


@_lamb_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor",
                    "Tensor", "Bool", "Bool")
def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter):
  35. """
  36. Update parameters.
  37. Args:
  38. beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
  39. beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).
  40. eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
  41. lr (Tensor): Learning rate.
  42. weight_decay (Number): Weight decay. Should be equal to or greater than 0.
  43. global_step (Tensor): Global step.
  44. param (Tensor): Parameters.
  45. m (Tensor): m value of parameters.
  46. v (Tensor): v value of parameters.
  47. gradient (Tensor): Gradient of parameters.
  48. decay_flag (bool): Specifies whether param update with weight decay.
  49. optim_filter(bool): Applies parameter update or not.
  50. Returns:
  51. Tensor, the new value of v after updating.
  52. """
    if optim_filter:
        op_mul = P.Mul()
        op_sqrt = P.Sqrt()
        op_rsqrt = P.Rsqrt()
        op_square = P.Square()
        op_cast = P.Cast()
        op_reshape = P.Reshape()
        op_shape = P.Shape()
        op_pow = P.Pow()
        op_norm = layer.Norm()
        op_select = P.Select()
        op_greater = P.Greater()
        op_fill = P.Fill()
        op_dtype = P.DType()

        param_fp32 = op_cast(param, mstype.float32)
        m_fp32 = op_cast(m, mstype.float32)
        v_fp32 = op_cast(v, mstype.float32)
        gradient_fp32 = op_cast(gradient, mstype.float32)

        # Update the running 1st and 2nd moment estimates.
        next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta1, gradient_fp32)
        next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta2, op_square(gradient_fp32))

        # Bias-correct the moments, as in Adam.
        next_mm = next_m / (op_cast(num_one, mstype.float32)
                            - op_pow(beta1, op_cast(global_step + num_one, mstype.float32)))
        next_vv = next_v / (op_cast(num_one, mstype.float32) -
                            op_pow(beta2, op_cast(global_step + num_one, mstype.float32)))

        # Layer-wise trust ratio: ||w|| / ||adam-style update||, clipped to [0, 10].
        w_norm = op_norm(param_fp32)
        g_norm = op_norm(gradient_fp32)
        g_norm_hat = op_norm(op_mul(next_mm, op_rsqrt(next_vv + eps)) + weight_decay * param_fp32)
        zeros = F.zeros_like(w_norm)
        ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
        trust_ratio = op_select(
            op_greater(w_norm, zeros),
            op_select(op_greater(g_norm, zeros), w_norm / g_norm_hat, ones),
            ones)
        tens = op_fill(op_dtype(trust_ratio), op_shape(trust_ratio), 10.0)
        trust_ratio = C.clip_by_value(trust_ratio, zeros, tens)

        update = next_mm / (op_sqrt(next_vv) + eps)
        if decay_flag:
            update = update + op_mul(weight_decay, param_fp32)

        update_with_lr = op_mul(op_mul(trust_ratio, lr), update)
        next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))

        # Write the results back to the original-precision buffers.
        next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param))))
        next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m))))
        next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v))))
        return op_cast(next_param, F.dtype(param))
    return gradient
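
# The graph ops above are hard to read at a glance, so here is a minimal NumPy
# reference sketch of the same single-parameter LAMB step (for the decay_flag=True
# case). It is purely illustrative: `_lamb_step_reference` is a hypothetical name,
# not part of this module's API, and is never called.
def _lamb_step_reference(param, m, v, grad, beta1, beta2, eps, lr, weight_decay, step):
    """Plain-NumPy mirror of `_update_run_op` (float32 ndarrays, scalar step)."""
    next_m = beta1 * m + (1 - beta1) * grad
    next_v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = next_m / (1 - beta1 ** (step + 1))  # bias-corrected 1st moment
    v_hat = next_v / (1 - beta2 ** (step + 1))  # bias-corrected 2nd moment
    w_norm = np.linalg.norm(param)
    g_norm = np.linalg.norm(grad)
    g_norm_hat = np.linalg.norm(m_hat / np.sqrt(v_hat + eps) + weight_decay * param)
    # Layer-wise trust ratio, clipped to [0, 10] as in the graph version.
    trust_ratio = w_norm / g_norm_hat if (w_norm > 0 and g_norm > 0) else 1.0
    trust_ratio = min(trust_ratio, 10.0)
    update = m_hat / (np.sqrt(v_hat) + eps) + weight_decay * param  # decay_flag=True
    return param - lr * trust_ratio * update, next_m, next_v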


lamb_opt_graph_kernel = C.MultitypeFuncGraph("lamb_opt_graph_kernel")


@lamb_opt_graph_kernel.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number",
                                "Tensor", "Tensor", "Tensor", "Tensor", "Bool")
def _update_run_op_graph_kernel(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag):
  102. """
  103. Update parameters.
  104. Args:
  105. beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
  106. beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).
  107. eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
  108. lr (Tensor): Learning rate.
  109. weight_decay (Number): Weight decay. Should be equal to or greater than 0.
  110. global_step (Tensor): Global step.
  111. param (Tensor): Parameters.
  112. m (Tensor): m value of parameters.
  113. v (Tensor): v value of parameters.
  114. gradient (Tensor): Gradient of parameters.
  115. decay_flag (bool): Specifies whether param update with weight decay.
  116. Returns:
  117. Tensor, the new value of v after updating.
  118. """
    op_mul = P.Mul()
    op_square = P.Square()
    op_cast = P.Cast()
    op_shape = P.Shape()
    op_pow = P.Pow()
    op_norm = layer.Norm()
    op_fill = P.Fill()
    op_dtype = P.DType()

    param_fp32 = op_cast(param, mstype.float32)
    gradient_fp32 = op_cast(gradient, mstype.float32)

    # Scalar inputs for the fused kernel: bias-correction terms and (1 - beta) factors.
    i6_ex = op_cast(global_step + num_one, mstype.float32)
    i9 = op_cast(num_one, mstype.float32) - beta1
    x1 = op_cast(num_one, mstype.float32) - beta2
    i6 = op_cast(num_one, mstype.float32) - op_pow(beta1, i6_ex)
    i3 = op_cast(num_one, mstype.float32) - op_pow(beta2, i6_ex)
    i1 = op_square(gradient_fp32)

    # Fused moment update and bias correction.
    add3, update = G.LambNextMV()(i1, v, i3, gradient, m, i6, param, beta1, i9, beta2, x1, weight_decay, eps)
    if decay_flag:
        update = update + op_mul(weight_decay, param_fp32)

    w_norm = op_norm(param_fp32)
    g_norm = op_norm(gradient_fp32)
    g_norm_hat = op_norm(add3)
    zeros = F.zeros_like(w_norm)
    ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
    tens = op_fill(op_dtype(w_norm), op_shape(w_norm), 10.0)

    # Fused trust-ratio computation and parameter write-back.
    next_param = G.LambUpdateWithLR()(g_norm, w_norm, g_norm_hat, lr, update, param, zeros, ones, tens)
    next_v = F.control_depend(add3, next_param)
    return next_v
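
# For reference: by analogy with `_update_run_op` above, `G.LambNextMV` appears to
# fuse the moment update and bias correction, returning `update` (the Adam-style step
# m_hat / (sqrt(v_hat) + eps)) together with `add3` (m_hat * rsqrt(v_hat + eps)
# + weight_decay * param, whose norm feeds the trust ratio), while
# `G.LambUpdateWithLR` fuses the trust-ratio select/clip with the parameter update.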


def _check_param_value(beta1, beta2, eps, prim_name):
    validator.check_value_type("beta1", beta1, [float], prim_name)
    validator.check_value_type("beta2", beta2, [float], prim_name)
    validator.check_value_type("eps", eps, [float], prim_name)
    validator.check_float_range(beta1, 0.0, 1.0, Rel.INC_NEITHER, "beta1", prim_name)
    validator.check_float_range(beta2, 0.0, 1.0, Rel.INC_NEITHER, "beta2", prim_name)
    validator.check_positive_float(eps, "eps", prim_name)
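
# For example, Lamb(params, 0.1, beta1=1.0) raises here: Rel.INC_NEITHER means
# beta1 and beta2 must lie strictly inside (0.0, 1.0), and eps must be positive.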


class Lamb(Optimizer):
    """
    Lamb optimizer with dynamic learning rate support.

    LAMB is an optimization algorithm employing a layerwise adaptive large batch
    optimization technique. Refer to the paper `LARGE BATCH OPTIMIZATION FOR DEEP LEARNING: TRAINING BERT IN 76
    MINUTES <https://arxiv.org/abs/1904.00962>`_.

    Note:
        When separating parameter groups, the weight decay in each group will be applied to the parameters if the
        weight decay is positive. When not separating parameter groups, the `weight_decay` in the API will be applied
        to the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.

        To improve performance when using parameter groups, a customized order of parameters is supported.

    Args:
        params (Union[list[Parameter], list[dict]]): When `params` is a list of `Parameter` to be updated,
            every element in it must be of class `Parameter`. When `params` is a list of `dict`, the keys "params",
            "lr", "weight_decay" and "order_params" can be parsed.

            - params: Required. The value must be a list of `Parameter`.

            - lr: Optional. If "lr" is in the keys, the value of the corresponding learning rate will be used.
              If not, the `learning_rate` in the API will be used.

            - weight_decay: Optional. If "weight_decay" is in the keys, the value of the corresponding weight decay
              will be used. If not, the `weight_decay` in the API will be used.

            - order_params: Optional. If "order_params" is in the keys, the value must be the order of parameters,
              and that order will be followed in the optimizer. There should be no other keys in this `dict`, and the
              parameters listed in 'order_params' must be in one of the group parameters.

        learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate.
            When the learning_rate is an Iterable or a one-dimensional Tensor, the learning rate is dynamic and
            the i-th step takes the i-th value as the learning rate. When the learning_rate is a
            LearningRateSchedule, the learning rate is dynamic and the i-th learning rate is computed during
            training according to the formula of the LearningRateSchedule. When the learning_rate is a float or a
            zero-dimensional Tensor, the learning rate is fixed. Other cases are not supported. A float learning
            rate must be equal to or greater than 0. If the type of `learning_rate` is int, it will be converted
            to float.
        beta1 (float): The exponential decay rate for the 1st moment estimates. Default: 0.9.
            Should be in range (0.0, 1.0).
        beta2 (float): The exponential decay rate for the 2nd moment estimates. Default: 0.999.
            Should be in range (0.0, 1.0).
        eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
            Should be greater than 0.
        weight_decay (float): Weight decay (L2 penalty). Default: 0.0. Should be equal to or greater than 0.

    Inputs:
        - **gradients** (tuple[Tensor]) - The gradients of `params`, with the same shape as `params`.

    Outputs:
        tuple[bool], all elements are True.

    Examples:
        >>> net = Net()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.Lamb(params=net.trainable_params(), learning_rate=0.1)
        >>>
        >>> #2) Use parameter groups and set different values
        >>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR(learning_rate=0.1, end_learning_rate=0.01,
        ...                                                          decay_steps=4, power=0.5)
        >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
        >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
        >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
        ...                 {'params': no_conv_params, 'lr': poly_decay_lr},
        ...                 {'order_params': net.trainable_params()}]
        >>> optim = nn.Lamb(group_params, learning_rate=0.1, weight_decay=0.0)
        >>> # The conv_params group will use the default learning rate of 0.1 and a weight decay of 0.01.
        >>> # The no_conv_params group will use the dynamic polynomial-decay learning rate and the default
        >>> # weight decay of 0.0.
        >>> # The final order of parameters followed by the optimizer is the value of 'order_params'.
        >>>
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
    """

    def __init__(self, params, learning_rate, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0):
        super(Lamb, self).__init__(learning_rate, params, weight_decay)
        _check_param_value(beta1, beta2, eps, self.cls_name)

        # Keep the hyper-parameters as 1-element tensors; they can become scalars
        # once scalar/tensor mixed operations are supported.
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.params = self.parameters
        self.moments1 = self.params.clone(prefix="lamb_m", init='zeros')
        self.moments2 = self.params.clone(prefix="lamb_v", init='zeros')
        if not self.dynamic_lr:
            self.global_step = Parameter(initializer(0, [1]), name='global_step')
            self.assignadd = P.AssignAdd()
        self.hyper_map = C.HyperMap()
        self.enable_graph_kernel = context.get_context("enable_graph_kernel") and \
                                   context.get_context("device_target") == "Ascend"

    def construct(self, gradients):
        lr = self.get_lr()
        if self.enable_graph_kernel:
            if self.is_group:
                if self.is_group_lr:
                    # Both the learning rate and the weight decay differ per group.
                    optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                            self.global_step),
                                                  lr, self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags)
                else:
                    # Only the weight decay differs per group.
                    optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                            self.global_step, lr),
                                                  self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags)
            else:
                # A single learning rate and weight decay for all parameters.
                optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                        self.global_step, lr, self.weight_decay),
                                              self.params, self.moments1, self.moments2, gradients, self.decay_flags)
        else:
            if self.is_group:
                if self.is_group_lr:
                    optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                            self.global_step),
                                                  lr, self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags, self.optim_filter)
                else:
                    optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                            self.global_step, lr),
                                                  self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags, self.optim_filter)
            else:
                optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                        self.global_step, lr, self.weight_decay),
                                              self.params, self.moments1, self.moments2, gradients,
                                              self.decay_flags, self.optim_filter)
        if self.use_parallel:
            self.broadcast_params(optim_result)
        if not self.dynamic_lr:
            # Increment the global step only after this step's learning rate has been fetched.
            F.control_depend(lr, self.assignadd(self.global_step, 1))
        return optim_result
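
# Usage sketch: how `construct` is typically driven in a training loop. The names
# `net_with_loss`, `data` and `label` are assumptions for illustration, not part of
# this file; `nn.TrainOneStepCell` invokes the optimizer as optim(grads) each step.
#
#     optim = Lamb(net.trainable_params(), learning_rate=1e-3, weight_decay=0.01)
#     train_step = nn.TrainOneStepCell(net_with_loss, optim)
#     loss = train_step(data, label)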