ftrl.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale

ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")


@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
    """Apply ftrl optimizer to the weight parameter."""
    success = True
    success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
    return success


def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
                 prim_name=None):
    """Check param."""
    validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
    validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
    validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
    validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
    validator.check_value_type("lr_power", lr_power, [float], prim_name)
    validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
    validator.check_value_type("l1", l1, [float], prim_name)
    validator.check_number("l1", l1, 0.0, Rel.GE, prim_name)
    validator.check_value_type("l2", l2, [float], prim_name)
    validator.check_number("l2", l2, 0.0, Rel.GE, prim_name)
    validator.check_value_type("use_locking", use_locking, [bool], prim_name)
    validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
    validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
    validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
    validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)


class FTRL(Optimizer):
    """
    Implements the FTRL algorithm with the ApplyFtrl operator.

    FTRL is an online convex optimization algorithm that adaptively chooses its regularization function
    based on the loss functions. Refer to the paper `Adaptive Bound Optimization for Online Convex Optimization
    <https://arxiv.org/abs/1002.4908>`_. Refer to the paper `Ad Click Prediction: a View from the Trenches
    <https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf>`_ for engineering details.

    Args:
        params (list[Parameter]): A list of parameters to be updated. Each element in `params`
            should be a Parameter.
        initial_accum (float): The starting value for the accumulators, must be zero or a positive value.
            Default: 0.1.
        learning_rate (float): The learning rate value, should be positive. Default: 0.001.
        lr_power (float): Learning rate power controls how the learning rate decreases during training, must be
            less than or equal to zero. A fixed learning rate is used if `lr_power` is zero. Default: -0.5.
        l1 (float): L1 regularization strength, must be greater than or equal to zero. Default: 0.0.
        l2 (float): L2 regularization strength, must be greater than or equal to zero. Default: 0.0.
        use_locking (bool): If True, use locks for the update operation. Default: False.
        loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
        weight_decay (float): Weight decay value to multiply the weight by, must be zero or a positive value.
            Default: 0.0.

    Inputs:
        - **grads** (tuple[Tensor]) - The gradients of `params` in the optimizer, with the same shape as the
          `params` in the optimizer.

    Outputs:
        tuple[Parameter], the updated parameters, with the same shape as `params`.

    Examples:
        >>> net = Net()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> opt = nn.FTRL(net.trainable_params())
        >>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None)
    """
    def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
                 use_locking=False, loss_scale=1.0, weight_decay=0.0):
        super(FTRL, self).__init__(learning_rate, params)
        _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
                     self.cls_name)
        self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
        self.linear = self.parameters.clone(prefix="linear", init='zeros')
        self.l1 = l1
        self.l2 = l2
        self.lr_power = lr_power
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay
        self.decay_tf = tuple((lambda: True)() for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyFtrl(use_locking=use_locking)
        self.one = Tensor(1, mstype.int32)

    def construct(self, grads):
        params = self.parameters
        moments = self.moments
        linear = self.linear
        if self.weight_decay > 0.0:
            grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
        if self.reciprocal_scale != 1.0:
            grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
        lr = self.learning_rate
        success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
                                 linear, grads, params, moments)
        return success
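

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal NumPy
# version of the element-wise FTRL-Proximal step that the ApplyFtrl primitive
# is expected to perform on each (weight, accumulator, linear) triple,
# following the "Ad Click Prediction: a View from the Trenches" paper cited
# in the class docstring. The function and variable names below are
# hypothetical and chosen for clarity; they are not part of MindSpore's API,
# and the exact kernel semantics may differ in detail.
import numpy as np


def ftrl_reference_update(var, accum, linear, grad, lr, l1, l2, lr_power):
    """One element-wise FTRL-Proximal update (reference sketch only)."""
    accum_new = accum + grad * grad
    # sigma rescales the previous weight by the change in the per-coordinate step size
    sigma = (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr
    linear = linear + grad - sigma * var
    # closed-form proximal step with L1/L2 regularization; coordinates with
    # |linear| <= l1 are clipped to exactly zero (sparsity-inducing)
    quadratic = accum_new ** (-lr_power) / lr + 2.0 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic,
                   0.0)
    return var, accum_new, linear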