@@ -133,7 +133,7 @@ class RMSProp(Optimizer):
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
-        >>> optim = nn.RMSProp(params=net.trainable_params(), learning_rate=lr)
+        >>> optim = nn.RMSProp(params=net.trainable_params(), learning_rate=0.1)
         >>>
         >>> #2) Use parameter groups and set different values
         >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
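Review note: the old example referenced an undefined name `lr`, so it could not run as written; the fix substitutes a literal learning rate. A minimal sketch of both usages shown in the docstring, with a hypothetical `Net` stand-in added here only so the snippet is self-contained:

    import mindspore.nn as nn

    class Net(nn.Cell):
        """Hypothetical stand-in; any Cell with trainable parameters works."""
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, 3)   # parameter names contain 'conv'
            self.dense = nn.Dense(8, 2)      # parameter names do not
        def construct(self, x):
            return self.dense(self.conv(x))

    net = Net()
    # 1) One learning rate and weight decay for every parameter
    optim = nn.RMSProp(params=net.trainable_params(), learning_rate=0.1)
    # 2) Parameter groups: each dict needs a 'params' key; 'lr' overrides
    #    the default learning rate for that group only
    conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
    no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
    group_params = [{'params': conv_params, 'lr': 0.05},
                    {'params': no_conv_params, 'lr': 0.01}]
    optim = nn.RMSProp(group_params, learning_rate=0.1)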
@@ -379,7 +379,7 @@ class IsInstance(PrimitiveWithInfer):
     Examples:
         >>> a = 1
-        >>> result = P.IsInstance()(a, mindspore.int32)
+        >>> result = P.IsInstance()(a, mindspore.int64)
         >>> print(result)
         True
     """
@@ -1640,7 +1640,7 @@ class Tile(PrimitiveWithInfer):
         x_shp = x['shape']
         validator.check_value_type("multiples", multiples_v, [tuple], self.name)
         for i, multiple in enumerate(multiples_v):
-            validator.check_value_type("multiples[%d]" % i, multiple, [int], self.name)
+            validator.check_positive_int(multiple, "multiples[%d]" % i, self.name)
         validator.check_value_type("x[\'dtype\']", x["dtype"], mstype.tensor_type, self.name)
         len_sub = len(multiples_v) - len(x_shp)
         multiples_w = None
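Review note: `check_value_type` only guaranteed each entry of `multiples` was an int; `check_positive_int` additionally rejects zero and negative repeat counts, which `Tile` cannot honor. A hedged sketch of the observable effect:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x = Tensor(np.ones((2, 3), np.float32))
    tile = P.Tile()
    y = tile(x, (2, 2))   # accepted: every multiple is a positive int
    # tile(x, (0, 2))     # 0 is an int, so it passed the old type check;
    #                     # the new check rejects it at infer time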
@@ -3037,11 +3037,12 @@ class Pad(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
         >>> pad_op = P.Pad(((1, 2), (2, 1)))
         >>> output_tensor = pad_op(input_tensor)
-        >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
-        >>>                                          [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
-        >>>                                          [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
-        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ],
-        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
+        >>> print(output_tensor)
+        [[ 0.   0.   0.   0.   0.   0. ]
+         [ 0.   0.  -0.1  0.3  3.6  0. ]
+         [ 0.   0.   0.4  0.5 -3.2  0. ]
+         [ 0.   0.   0.   0.   0.   0. ]
+         [ 0.   0.   0.   0.   0.   0. ]]
     """
 
     @prim_attr_register
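Review note: the removed `assert` never worked as a doctest: comparing two Tensors with `==` does not reduce to a single pass/fail bool, and the `>>>` prompts on the continuation lines were malformed anyway, so printing the result is the verifiable form. A hedged sketch confirming the padded shape implied by the pad widths:

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
    pad_op = P.Pad(((1, 2), (2, 1)))   # (before, after) zero-pad widths per axis
    output_tensor = pad_op(input_tensor)
    print(output_tensor.asnumpy().shape)  # (5, 6): rows 1+2+2, cols 2+3+1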
@@ -4857,7 +4858,7 @@ class ApplyPowerSign(PrimitiveWithInfer):
         >>>         self.beta = 0.9
         >>>     def construct(self, grad):
         >>>         out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
-        >>>                                     self.sign_decay, self.beta, grad)
+        ...                                     self.sign_decay, self.beta, grad)
         >>>         return out
         >>> net = Net()
         >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
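Review note: this last hunk swaps `>>>` for `...` on a wrapped call, the doctest continuation (PS2) prompt; with `>>>`, doctest parses the second line as a separate, syntactically invalid statement. A minimal standalone illustration using only the standard library:

    import doctest

    good = (">>> total = sum([1, 2,\n"
            "...              3, 4])\n"
            ">>> total\n"
            "10\n")
    parser = doctest.DocTestParser()
    print(len(parser.get_examples(good)))  # 2: the wrapped call plus the check
    test = parser.get_doctest(good, {}, "wrapped_call", None, 0)
    failed, attempted = doctest.DocTestRunner().run(test)
    print(failed, attempted)  # 0 2: both examples run and pass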