From 293beb6a7cbad276297cb2453145ceb5cc4646d9 Mon Sep 17 00:00:00 2001
From: lilei
Date: Fri, 24 Jul 2020 13:33:47 +0800
Subject: [PATCH] modify annotation weight_decay

---
 mindspore/nn/optim/ftrl.py              | 2 +-
 mindspore/nn/optim/proximal_ada_grad.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/mindspore/nn/optim/ftrl.py b/mindspore/nn/optim/ftrl.py
index 15051c22c0..098c382da2 100644
--- a/mindspore/nn/optim/ftrl.py
+++ b/mindspore/nn/optim/ftrl.py
@@ -144,7 +144,7 @@ class FTRL(Optimizer):
         l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
         use_locking (bool): If True use locks for update operation. Default: False.
         loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
-        wegith_decay (float): Weight decay value to multiply weight, should be in range [0.0, 1.0]. Default: 0.0.
+        weight_decay (float): Weight decay value to multiply weight, should be in range [0.0, 1.0]. Default: 0.0.

     Inputs:
         - **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
diff --git a/mindspore/nn/optim/proximal_ada_grad.py b/mindspore/nn/optim/proximal_ada_grad.py
index 616f070d32..4fcca24764 100644
--- a/mindspore/nn/optim/proximal_ada_grad.py
+++ b/mindspore/nn/optim/proximal_ada_grad.py
@@ -99,7 +99,7 @@ class ProximalAdagrad(Optimizer):
         l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
         use_locking (bool): If True use locks for update operation. Default: False.
         loss_scale (float): Value for the loss scale. It should be not less than 1.0. Default: 1.0.
-        wegith_decay (float): Weight decay value to multiply weight, should be in range [0.0, 1.0]. Default: 0.0.
+        weight_decay (float): Weight decay value to multiply weight, should be in range [0.0, 1.0]. Default: 0.0.

     Inputs:
         - **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
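
Note (not part of the patch itself): the corrected keyword is the one users pass to the two optimizers whose docstrings are fixed above. The following is a minimal usage sketch, assuming the public nn.FTRL and nn.ProximalAdagrad constructors described in those docstrings; the nn.Dense network is a placeholder used only for illustration.

# Minimal sketch of how the documented `weight_decay` argument is supplied.
# Assumptions: standard MindSpore API; `net` is a hypothetical toy network,
# not something introduced by this patch.
import mindspore.nn as nn

net = nn.Dense(10, 1)  # placeholder network for illustration only

# `weight_decay` multiplies the weights during the update and, per the
# docstrings above, should lie in the range [0.0, 1.0].
ftrl_opt = nn.FTRL(params=net.trainable_params(), weight_decay=0.01)
prox_opt = nn.ProximalAdagrad(params=net.trainable_params(), weight_decay=0.01)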