浏览代码

!9656 modify code document

From: @changzherui
Reviewed-by: @zh_qh,@kingxian
Signed-off-by: @kingxian
tags/v1.1.0
mindspore-ci-bot Gitee 5 年前
父节点
当前提交
7a02fae10c
共有 3 个文件被更改,包括 14 次插入和 11 次删除
  1. +1
    -1
      mindspore/_checkparam.py
  2. +7
    -8
      mindspore/nn/layer/container.py
  3. +6
    -2
      mindspore/train/loss_scale_manager.py

+ 1
- 1
mindspore/_checkparam.py 查看文件

@@ -430,7 +430,7 @@ class Validator:
     @staticmethod
     def check_file_name_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
         if reg is None:
-            reg = r"^[0-9a-zA-Z\_\.\/\\]*$"
+            reg = r"^[0-9a-zA-Z\_\-\.\/\\]*$"
         if re.match(reg, target, flag) is None:
             prim_name = f'in `{prim_name}`' if prim_name else ""
             raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(


+ 7
- 8
mindspore/nn/layer/container.py 查看文件

@@ -84,17 +84,16 @@ class SequentialCell(Cell):
         ``Ascend`` ``GPU``

     Examples:
-        >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
-        >>> bn = nn.BatchNorm2d(2)
+        >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid', weight_init="ones")
         >>> relu = nn.ReLU()
-        >>> seq = nn.SequentialCell([conv, bn, relu])
-        >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
+        >>> seq = nn.SequentialCell([conv, relu])
+        >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
         >>> output = seq(x)
         >>> print(output)
-        [[[[0.02531557 0.        ]
-          [0.04933941 0.04880078]]
-         [[0.         0.        ]
-          [0.         0.        ]]]]
+        [[[[27. 27.]
+           [27. 27.]]
+          [[27. 27.]
+           [27. 27.]]]]
     """
     def __init__(self, *args):
         super(SequentialCell, self).__init__()


+ 6
- 2
mindspore/train/loss_scale_manager.py 查看文件

@@ -42,8 +42,10 @@ class FixedLossScaleManager(LossScaleManager):
         drop_overflow_update (bool): whether to execute optimizer if there is an overflow. Default: True.

     Examples:
+        >>> net = Net()
         >>> loss_scale_manager = FixedLossScaleManager()
-        >>> model = Model(net, loss_scale_manager=loss_scale_manager)
+        >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
+        >>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
     """
     def __init__(self, loss_scale=128.0, drop_overflow_update=True):
         if loss_scale < 1:
@@ -85,8 +87,10 @@ class DynamicLossScaleManager(LossScaleManager):
         scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.

     Examples:
+        >>> net = Net()
         >>> loss_scale_manager = DynamicLossScaleManager()
-        >>> model = Model(net, loss_scale_manager=loss_scale_manager)
+        >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
+        >>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
     """
     def __init__(self,
                  init_loss_scale=2 ** 24,


正在加载...
取消
保存