@@ -615,12 +615,16 @@ void ProcessVmArgInner(const py::tuple &args, const ResourcePtr &res, VectorRef
     py::object arg = args[i];
     auto ms_context = MsContext::GetInstance();
     if (ms_context->backend_policy() == kMsConvert && py::isinstance<py::array>(arg)) {
-      MS_LOG(EXCEPTION) << "Args[" << i << "] is numpy array, not tensor";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg is numpy array, not tensor.";
     }
     ValuePtr converted = nullptr;
     bool succ = parse::ConvertData(arg, &converted);
     if (!succ) {
-      MS_LOG(EXCEPTION) << "Args convert error";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
     }
+    if (MsContext::GetInstance()->execution_mode() == 0 && !converted->isa<tensor::Tensor>()) {
+      MS_EXCEPTION(TypeError) << "For 'graph mode', the " << i << "th arg: " << converted->ToString()
+                              << " is not tensor.";
+    }
     arg_list->push_back(converted);
   }
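
The four added lines give ProcessVmArgInner a new guard: when execution_mode() is 0 (graph mode), every converted argument must be a tensor. A minimal sketch of the user-facing effect (the AddNet below is hypothetical; only the error text comes from the patch):

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE)

class AddNet(Cell):
    def __init__(self):
        super(AddNet, self).__init__()
        self.add = P.TensorAdd()

    def construct(self, x, y):
        return self.add(x, y)

net = AddNet()
x = Tensor(np.ones([2, 2]).astype(np.float32))
net(x, x)  # fine: both args are tensors
net(x, 2)  # expected to fail: "For 'graph mode', the 1th arg: 2 is not tensor."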
@@ -460,12 +460,12 @@ void ProcessGeArg(const std::map<std::string, ExecutorInfoPtr> &info, const py::
     ValuePtr converted = nullptr;
     bool succ = parse::ConvertData(args[i], &converted);
     if (!succ) {
-      MS_LOG(EXCEPTION) << "Args convert error";
+      MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
     }
     if (converted->isa<tensor::Tensor>()) {
       inputs->push_back(converted->cast<tensor::TensorPtr>());
     } else {
-      MS_EXCEPTION(TypeError) << "Args " << converted->ToString() << " is not tensor";
+      MS_EXCEPTION(TypeError) << "The " << i << "th arg: " << converted->ToString() << " is not tensor.";
     }
   }
 }
@@ -777,7 +777,7 @@ class Sub(_MathBinaryOp):
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.

-        Inputs:
+    Inputs:
         - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
           a bool or a tensor whose data type is number or bool.
         - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
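
A short usage sketch matching the Sub docstring above (tensor plus constant scalar; values are illustrative):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

sub = P.Sub()
x = Tensor(np.array([4, 5, 6]).astype(np.float32))
print(sub(x, 1.0))  # [3. 4. 5.]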
@@ -1845,7 +1845,7 @@ class LogicalAnd(_LogicBinaryOp):
     Inputs:
         - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
         - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
-        a tensor whose data type is bool.
+          a tensor whose data type is bool.

     Outputs:
         Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
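
For reference, a minimal LogicalAnd call consistent with the docstring (illustrative values):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

logical_and = P.LogicalAnd()
x = Tensor(np.array([True, False, True]))
y = Tensor(np.array([True, True, False]))
print(logical_and(x, y))  # [ True False False]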
@@ -1875,7 +1875,7 @@ class LogicalOr(_LogicBinaryOp):
     Inputs:
         - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
         - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
-        a tensor whose data type is bool.
+          a tensor whose data type is bool.

     Outputs:
         Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
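
And the same sketch for LogicalOr (illustrative values):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

logical_or = P.LogicalOr()
x = Tensor(np.array([True, False, True]))
y = Tensor(np.array([True, True, False]))
print(logical_or(x, y))  # [ True  True  True]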
@@ -24,13 +24,15 @@ from mindspore.ops import operations as P


 class Net(Cell):
-    def __init__(self):
+    def __init__(self, type0, type1):
         super(Net, self).__init__()
         self.Cast = P.Cast()
+        self.type0 = type0
+        self.type1 = type1

-    def construct(self, x0, type0, x1, type1):
-        output = (self.Cast(x0, type0),
-                  self.Cast(x1, type1))
+    def construct(self, x0, x1):
+        output = (self.Cast(x0, self.type0),
+                  self.Cast(x1, self.type1))
         return output
@@ -44,8 +46,8 @@ def test_cast():
     t1 = mstype.float32
     context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
-    net = Net()
-    output = net(x0, t0, x1, t1)
+    net = Net(t0, t1)
+    output = net(x0, x1)
     type0 = output[0].asnumpy().dtype
     assert type0 == 'float16'
     type1 = output[1].asnumpy().dtype
@@ -62,8 +64,8 @@ def test_cast1():
     t1 = mstype.float32
     context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
-    net = Net()
-    output = net(x0, t0, x1, t1)
+    net = Net(t0, t1)
+    output = net(x0, x1)
     type0 = output[0].asnumpy().dtype
     assert type0 == 'float32'
     type1 = output[1].asnumpy().dtype
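
The test refactor follows from the new graph-mode check above: dtype constants can no longer be passed through construct(), so they move into __init__ and construct() receives tensors only. The pattern, reduced to a single cast (CastNet is a hypothetical name):

import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as P

class CastNet(Cell):
    def __init__(self, dst_type):
        super(CastNet, self).__init__()
        self.cast = P.Cast()
        self.dst_type = dst_type  # constant, bound at graph-build time

    def construct(self, x):  # tensor inputs only
        return self.cast(x, self.dst_type)

context.set_context(mode=context.GRAPH_MODE)
net = CastNet(mstype.float16)
out = net(Tensor(np.ones([2, 2]).astype(np.float32)))
assert out.asnumpy().dtype == 'float16'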
@@ -25,24 +25,29 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


 class NetCenteredRMSProp(nn.Cell):
-    def __init__(self):
+    def __init__(self, lr, decay, momentum, epsilon):
         super(NetCenteredRMSProp, self).__init__()
         self.rms_opt = P.ApplyCenteredRMSProp()
+        self.lr = lr
+        self.decay = decay
+        self.momentum = momentum
+        self.epsilon = epsilon

-    def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon):
-        return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon)
+    def construct(self, var, g, mg, rms, mom):
+        return self.rms_opt(var, mg, rms, mom, g, self.lr, self.decay, self.momentum, self.epsilon)


 class NetRMSProp(nn.Cell):
-    def __init__(self, decay, momentum, epsilon):
+    def __init__(self, lr, decay, momentum, epsilon):
         super(NetRMSProp, self).__init__()
+        self.lr = lr
         self.decay = decay
         self.momentum = momentum
         self.epsilon = epsilon
         self.rms_opt = P.ApplyRMSProp()

-    def construct(self, var, g, mg, rms, mom, lr):
-        return self.rms_opt(var, rms, mom, lr, g, self.decay, self.momentum, self.epsilon)
+    def construct(self, var, g, mg, rms, mom):
+        return self.rms_opt(var, rms, mom, self.lr, g, self.decay, self.momentum, self.epsilon)


 def rmsprop_numpy(variable, gradients, mean_square, moment,
@@ -82,16 +87,14 @@ def test_rmsprop():
     if centered:
         rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                             learning_rate, decay, momentum, epsilon)
-        net = NetCenteredRMSProp()
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
-                moment_ms, learning_rate, decay, momentum, epsilon)
+        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
     else:
         rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                       learning_rate, decay, momentum, epsilon)
-        net = NetRMSProp(decay, momentum, epsilon)
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
-                moment_ms, learning_rate)
+        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

     error = np.ones(shape=variable_np.shape) * 10e-6
     diff = variable_ms.asnumpy() - variable_np
@@ -135,15 +138,13 @@ def test_rmspropcenter():
     if centered:
         rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
                             learning_rate, decay, momentum, epsilon)
-        net = NetCenteredRMSProp()
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
-                learning_rate, decay, momentum, epsilon)
+        net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
     else:
         rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
                       learning_rate, decay, momentum, epsilon)
-        net = NetRMSProp(decay, momentum, epsilon)
-        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
-                learning_rate)
+        net = NetRMSProp(learning_rate, decay, momentum, epsilon)
+        _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)

     error = np.ones(shape=variable_np.shape) * 10e-6
     diff = variable_ms.asnumpy() - variable_np
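
The RMSProp tests above compare against numpy references; for context, a sketch of the single-step updates those helpers implement (written here from the standard ApplyRMSProp/ApplyCenteredRMSProp formulas, not copied from the test file):

import numpy as np

def rmsprop_step(var, g, ms, mom, lr, decay, momentum, eps):
    # mean_square <- decay * mean_square + (1 - decay) * g^2
    ms[:] = decay * ms + (1.0 - decay) * g * g
    mom[:] = momentum * mom + lr * g / np.sqrt(ms + eps)
    var[:] -= mom

def centered_rmsprop_step(var, g, mg, ms, mom, lr, decay, momentum, eps):
    # the centered variant also tracks mean_gradient and subtracts its
    # square, giving a variance estimate in the denominator
    mg[:] = decay * mg + (1.0 - decay) * g
    ms[:] = decay * ms + (1.0 - decay) * g * g
    mom[:] = momentum * mom + lr * g / np.sqrt(ms - mg * mg + eps)
    var[:] -= mom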