
Support importing dynamic_lr from nn

Tag: v1.1.0
Author: wangnan39@huawei.com
Parent commit: 0fe9e2e4cb
2 changed files with 6 additions and 6 deletions
  1. mindspore/nn/__init__.py (+2, -0)
  2. tests/ut/python/nn/optim/test_adam.py (+4, -6)

mindspore/nn/__init__.py (+2, -0)

@@ -19,6 +19,7 @@ Pre-defined building blocks or computing units to construct Neural Networks.
 """
 from . import layer, loss, optim, metrics, wrap, probability, sparse
 from .learning_rate_schedule import *
+from .dynamic_lr import *
 from .cell import Cell, GraphKernel
 from .layer import *
 from .loss import *
@@ -36,5 +37,6 @@ __all__.extend(metrics.__all__)
 __all__.extend(wrap.__all__)
 __all__.extend(sparse.__all__)
 __all__.extend(learning_rate_schedule.__all__)
+__all__.extend(dynamic_lr.__all__)
 
 __all__.sort()
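
With dynamic_lr re-exported here, the functional schedules become reachable directly from the nn namespace. A minimal sketch of what that enables; the polynomial_decay_lr signature is taken from the test change below:

import mindspore.nn as nn

# Build a per-step learning-rate list with the functional API, now exposed on nn.
lr = nn.polynomial_decay_lr(0.01, 0.0001, total_step=10, step_per_epoch=1, decay_epoch=3, power=1.0)
print(lr)  # a Python list with one learning-rate value per step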

tests/ut/python/nn/optim/test_adam.py (+4, -6)

@@ -22,8 +22,6 @@ from mindspore.common.api import _executor
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import Adam, AdamWeightDecay
 from mindspore.ops import operations as P
-import mindspore.nn.learning_rate_schedule as lr_schedules
-from mindspore.nn.dynamic_lr import polynomial_decay_lr
 
 context.set_context(enable_sparse=True)

@@ -137,7 +135,7 @@ def test_adam_group1():
     net_with_loss = WithLossCell(net, loss)
     all_params = net.trainable_params()
 
-    poly_decay_lr = polynomial_decay_lr(0.01, 0.0001, total_step=10, step_per_epoch=1, decay_epoch=3, power=1.0)
+    poly_decay_lr = nn.polynomial_decay_lr(0.01, 0.0001, total_step=10, step_per_epoch=1, decay_epoch=3, power=1.0)
 
     group_params = [{'params': [all_params[0]], 'lr': poly_decay_lr, 'weight_decay': 0.9},
                     {'params': [all_params[1]]}]
@@ -157,7 +155,7 @@ def test_adam_group2():
     net_with_loss = WithLossCell(net, loss)
     all_params = net.trainable_params()
 
-    schedule_lr = lr_schedules.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
+    schedule_lr = nn.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
     group_params = [{'params': [all_params[0]], 'lr': 0.02, 'weight_decay': 0.9},
                     {'params': [all_params[1]]}]
     optimizer = nn.Adam(group_params, learning_rate=schedule_lr)
@@ -175,7 +173,7 @@ def test_adamweightdecay_group():
     net_with_loss = WithLossCell(net, loss)
     all_params = net.trainable_params()
 
-    schedule_lr = lr_schedules.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
+    schedule_lr = nn.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
     group_params = [{'params': [all_params[0]], 'lr': 0.02, 'weight_decay': 0.9},
                     {'params': [all_params[1]]}]
     optimizer = nn.AdamWeightDecay(group_params, learning_rate=schedule_lr)
@@ -193,7 +191,7 @@ def test_adamoffload_group():
     net_with_loss = WithLossCell(net, loss)
     all_params = net.trainable_params()
 
-    schedule_lr = lr_schedules.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
+    schedule_lr = nn.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)
     group_params = [{'params': [all_params[0]], 'lr': 0.02, 'weight_decay': 0.9},
                     {'params': [all_params[1]]}]
     optimizer = nn.AdamOffload(group_params, learning_rate=schedule_lr)
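
After this change the grouped-parameter tests reach both the schedule Cell and the optimizers through the nn namespace alone. A minimal sketch of the same pattern outside the test harness; the tiny Dense network is an assumption purely so there are parameters to group:

import mindspore.nn as nn

# Hypothetical two-parameter network (weight and bias), used only for illustration.
net = nn.Dense(4, 2)
all_params = net.trainable_params()

# Learning-rate schedule Cell, accessed directly via nn after this change.
schedule_lr = nn.PolynomialDecayLR(0.01, 0.0001, 3, power=1.0)

group_params = [{'params': [all_params[0]], 'lr': 0.02, 'weight_decay': 0.9},
                {'params': [all_params[1]]}]
optimizer = nn.Adam(group_params, learning_rate=schedule_lr)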

