
fix bugs: correct docstring examples in rmsprop.py, array_ops.py and nn_ops.py; validate Tile multiples as positive ints

Tag: v1.1.0
Author: lihongkang, 5 years ago
Commit: b7d1c970e7
3 changed files with 10 additions and 9 deletions:

1. mindspore/nn/optim/rmsprop.py (+1, -1)
2. mindspore/ops/operations/array_ops.py (+2, -2)
3. mindspore/ops/operations/nn_ops.py (+7, -6)

mindspore/nn/optim/rmsprop.py (+1, -1)

@@ -133,7 +133,7 @@ class RMSProp(Optimizer):
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
-        >>> optim = nn.RMSProp(params=net.trainable_params(), learning_rate=lr)
+        >>> optim = nn.RMSProp(params=net.trainable_params(), learning_rate=0.1)
         >>>
         >>> #2) Use parameter groups and set different values
         >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
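Context for this hunk: the old example passed `lr`, a name never defined anywhere in the docstring, so the snippet could not run as shown; the fix substitutes a concrete literal. A minimal self-contained sketch of the corrected usage (the `Net` below is a hypothetical stand-in, not part of the diff):

import mindspore.nn as nn

class Net(nn.Cell):
    """Hypothetical single-layer cell, only here to make the example runnable."""
    def __init__(self):
        super(Net, self).__init__()
        self.dense = nn.Dense(4, 2)

    def construct(self, x):
        return self.dense(x)

net = Net()
# The fix: a concrete learning rate replaces the undefined name `lr`.
optim = nn.RMSProp(params=net.trainable_params(), learning_rate=0.1)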


mindspore/ops/operations/array_ops.py (+2, -2)

@@ -379,7 +379,7 @@ class IsInstance(PrimitiveWithInfer):

     Examples:
         >>> a = 1
-        >>> result = P.IsInstance()(a, mindspore.int32)
+        >>> result = P.IsInstance()(a, mindspore.int64)
         >>> print(result)
         True
     """
@@ -1640,7 +1640,7 @@ class Tile(PrimitiveWithInfer):
         x_shp = x['shape']
         validator.check_value_type("multiples", multiples_v, [tuple], self.name)
         for i, multiple in enumerate(multiples_v):
-            validator.check_value_type("multiples[%d]" % i, multiple, [int], self.name)
+            validator.check_positive_int(multiple, "multiples[%d]" % i, self.name)
         validator.check_value_type("x[\'dtype\']", x["dtype"], mstype.tensor_type, self.name)
         len_sub = len(multiples_v) - len(x_shp)
         multiples_w = None
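The swapped call tightens validation: `check_value_type(..., [int], ...)` only guaranteed each multiple was an int and silently accepted 0 or negative values, which are invalid tile counts. An illustrative stand-in for the stricter check (this helper is hypothetical, not MindSpore's internal `validator`):

def check_positive_int(value, arg_name, prim_name):
    """Hypothetical stand-in: reject non-ints, bools, zero and negatives."""
    # bool is a subclass of int in Python, so it must be excluded explicitly.
    if not isinstance(value, int) or isinstance(value, bool) or value <= 0:
        raise ValueError(
            "For '%s', '%s' must be a positive int, but got %r."
            % (prim_name, arg_name, value))
    return value

check_positive_int(2, "multiples[0]", "Tile")    # passes
# check_positive_int(0, "multiples[1]", "Tile")  # would raise ValueError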


mindspore/ops/operations/nn_ops.py (+7, -6)

@@ -3037,11 +3037,12 @@ class Pad(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
         >>> pad_op = P.Pad(((1, 2), (2, 1)))
         >>> output_tensor = pad_op(input_tensor)
-        >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
-        >>>                                          [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
-        >>>                                          [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
-        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ],
-        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
+        >>> print(output_tensor)
+        [[ 0.   0.   0.   0.   0.   0. ]
+         [ 0.   0.  -0.1  0.3  3.6  0. ]
+         [ 0.   0.   0.4  0.5 -3.2  0. ]
+         [ 0.   0.   0.   0.   0.   0. ]
+         [ 0.   0.   0.   0.   0.   0. ]]
     """

     @prim_attr_register
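Why this hunk drops the assert: comparing tensors with `==` is elementwise, so `assert output_tensor == Tensor(...)` was never a meaningful check, and its `>>>`-prefixed continuation lines were not valid doctest input either; printing the result is both runnable and verifiable against expected output. A self-contained sketch of the corrected example:

import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]),
                      mindspore.float32)
# ((1, 2), (2, 1)): 1 zero-row above, 2 below; 2 zero-columns left, 1 right.
pad_op = P.Pad(((1, 2), (2, 1)))
print(pad_op(input_tensor))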
@@ -4857,7 +4858,7 @@ class ApplyPowerSign(PrimitiveWithInfer):
         >>>        self.beta = 0.9
         >>>    def construct(self, grad):
         >>>        out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
-        >>>                                    self.sign_decay, self.beta, grad)
+        ...                                    self.sign_decay, self.beta, grad)
         >>>        return out
         >>> net = Net()
         >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
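The one-character change from `>>>` to `...` matters because doctest treats every `>>>` line as the start of a new statement; the continuation of a multi-line call must use `...` or the fragment fails to parse. A quick standalone check of the convention, using only the standard library:

import doctest

sample = '''
>>> total = sum([1, 2,
...              3, 4])
>>> total
10
'''
# The fixed hunk follows the same rule: with ">>>" on the continuation line
# instead of "...", the dangling "self.sign_decay, ..." fragment would be
# parsed as a new statement and fail with a SyntaxError.
test = doctest.DocTestParser().get_doctest(sample, {}, 'continuation', None, 0)
print(doctest.DocTestRunner().run(test))  # TestResults(failed=0, attempted=2)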

