Browse Source

Check that each argument is a tensor when using the VM backend

tags/v0.5.0-beta
buxue 5 years ago
parent
commit
0cd57ddc5d
5 changed files with 40 additions and 33 deletions
  1. +6
    -2
      mindspore/ccsrc/pipeline/pipeline.cc
  2. +2
    -2
      mindspore/ccsrc/pipeline/pipeline_ge.cc
  3. +3
    -3
      mindspore/ops/operations/math_ops.py
  4. +10
    -8
      tests/st/ops/gpu/test_cast_op.py
  5. +19
    -18
      tests/st/ops/gpu/test_rmsprop.py

+ 6
- 2
mindspore/ccsrc/pipeline/pipeline.cc View File

@@ -615,12 +615,16 @@ void ProcessVmArgInner(const py::tuple &args, const ResourcePtr &res, VectorRef
py::object arg = args[i]; py::object arg = args[i];
auto ms_context = MsContext::GetInstance(); auto ms_context = MsContext::GetInstance();
if (ms_context->backend_policy() == kMsConvert && py::isinstance<py::array>(arg)) { if (ms_context->backend_policy() == kMsConvert && py::isinstance<py::array>(arg)) {
MS_LOG(EXCEPTION) << "Args[" << i << "] is numpy array, not tensor";
MS_LOG(EXCEPTION) << "The " << i << "th arg is numpy array, not tensor.";
} }
ValuePtr converted = nullptr; ValuePtr converted = nullptr;
bool succ = parse::ConvertData(arg, &converted); bool succ = parse::ConvertData(arg, &converted);
if (!succ) { if (!succ) {
MS_LOG(EXCEPTION) << "Args convert error";
MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
}
if (MsContext::GetInstance()->execution_mode() == 0 && !converted->isa<tensor::Tensor>()) {
MS_EXCEPTION(TypeError) << "For 'graph mode', the " << i << "th arg: " << converted->ToString()
<< " is not tensor.";
} }
arg_list->push_back(converted); arg_list->push_back(converted);
} }


+ 2
- 2
mindspore/ccsrc/pipeline/pipeline_ge.cc View File

@@ -460,12 +460,12 @@ void ProcessGeArg(const std::map<std::string, ExecutorInfoPtr> &info, const py::
ValuePtr converted = nullptr; ValuePtr converted = nullptr;
bool succ = parse::ConvertData(args[i], &converted); bool succ = parse::ConvertData(args[i], &converted);
if (!succ) { if (!succ) {
MS_LOG(EXCEPTION) << "Args convert error";
MS_LOG(EXCEPTION) << "The " << i << "th arg convert failed.";
} }
if (converted->isa<tensor::Tensor>()) { if (converted->isa<tensor::Tensor>()) {
inputs->push_back(converted->cast<tensor::TensorPtr>()); inputs->push_back(converted->cast<tensor::TensorPtr>());
} else { } else {
MS_EXCEPTION(TypeError) << "Args " << converted->ToString() << " is not tensor";
MS_EXCEPTION(TypeError) << "The " << i << "th arg: " << converted->ToString() << " is not tensor.";
} }
} }
} }


+ 3
- 3
mindspore/ops/operations/math_ops.py View File

@@ -777,7 +777,7 @@ class Sub(_MathBinaryOp):
When the inputs are one tensor and one scalar, When the inputs are one tensor and one scalar,
the scalar only could be a constant. the scalar only could be a constant.


Inputs:
Inputs:
- **input_x** (Union[Tensor, Number, bool]) - The first input is a number or - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
a bool or a tensor whose data type is number or bool. a bool or a tensor whose data type is number or bool.
- **input_y** (Union[Tensor, Number, bool]) - The second input is a number or - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
@@ -1845,7 +1845,7 @@ class LogicalAnd(_LogicBinaryOp):
Inputs: Inputs:
- **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
- **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type is bool.
a tensor whose data type is bool.


Outputs: Outputs:
Tensor, the shape is same as the shape after broadcasting, and the data type is bool. Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
@@ -1875,7 +1875,7 @@ class LogicalOr(_LogicBinaryOp):
Inputs: Inputs:
- **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
- **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type is bool.
a tensor whose data type is bool.


Outputs: Outputs:
Tensor, the shape is same as the shape after broadcasting,and the data type is bool. Tensor, the shape is same as the shape after broadcasting,and the data type is bool.


+ 10
- 8
tests/st/ops/gpu/test_cast_op.py View File

@@ -24,13 +24,15 @@ from mindspore.ops import operations as P




class Net(Cell): class Net(Cell):
def __init__(self):
def __init__(self, type0, type1):
super(Net, self).__init__() super(Net, self).__init__()
self.Cast = P.Cast() self.Cast = P.Cast()
self.type0 = type0
self.type1 = type1


def construct(self, x0, type0, x1, type1):
output = (self.Cast(x0, type0),
self.Cast(x1, type1))
def construct(self, x0, x1):
output = (self.Cast(x0, self.type0),
self.Cast(x1, self.type1))
return output return output




@@ -44,8 +46,8 @@ def test_cast():
t1 = mstype.float32 t1 = mstype.float32


context.set_context(mode=context.GRAPH_MODE, device_target='GPU') context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
net = Net()
output = net(x0, t0, x1, t1)
net = Net(t0, t1)
output = net(x0, x1)
type0 = output[0].asnumpy().dtype type0 = output[0].asnumpy().dtype
assert type0 == 'float16' assert type0 == 'float16'
type1 = output[1].asnumpy().dtype type1 = output[1].asnumpy().dtype
@@ -62,8 +64,8 @@ def test_cast1():
t1 = mstype.float32 t1 = mstype.float32


context.set_context(mode=context.GRAPH_MODE, device_target='GPU') context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
net = Net()
output = net(x0, t0, x1, t1)
net = Net(t0, t1)
output = net(x0, x1)
type0 = output[0].asnumpy().dtype type0 = output[0].asnumpy().dtype
assert type0 == 'float32' assert type0 == 'float32'
type1 = output[1].asnumpy().dtype type1 = output[1].asnumpy().dtype


+ 19
- 18
tests/st/ops/gpu/test_rmsprop.py View File

@@ -25,24 +25,29 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")




class NetCenteredRMSProp(nn.Cell): class NetCenteredRMSProp(nn.Cell):
def __init__(self):
def __init__(self, lr, decay, momentum, epsilon):
super(NetCenteredRMSProp, self).__init__() super(NetCenteredRMSProp, self).__init__()
self.rms_opt = P.ApplyCenteredRMSProp() self.rms_opt = P.ApplyCenteredRMSProp()
self.lr = lr
self.decay = decay
self.momentum = momentum
self.epsilon = epsilon


def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon):
return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon)
def construct(self, var, g, mg, rms, mom):
return self.rms_opt(var, mg, rms, mom, g, self.lr, self.decay, self.momentum, self.epsilon)




class NetRMSProp(nn.Cell): class NetRMSProp(nn.Cell):
def __init__(self, decay, momentum, epsilon):
def __init__(self, lr, decay, momentum, epsilon):
super(NetRMSProp, self).__init__() super(NetRMSProp, self).__init__()
self.lr = lr
self.decay = decay self.decay = decay
self.momentum = momentum self.momentum = momentum
self.epsilon = epsilon self.epsilon = epsilon
self.rms_opt = P.ApplyRMSProp() self.rms_opt = P.ApplyRMSProp()


def construct(self, var, g, mg, rms, mom, lr):
return self.rms_opt(var, rms, mom, lr, g, self.decay, self.momentum, self.epsilon)
def construct(self, var, g, mg, rms, mom):
return self.rms_opt(var, rms, mom, self.lr, g, self.decay, self.momentum, self.epsilon)




def rmsprop_numpy(variable, gradients, mean_square, moment, def rmsprop_numpy(variable, gradients, mean_square, moment,
@@ -82,16 +87,14 @@ def test_rmsprop():
if centered: if centered:
rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np, rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
learning_rate, decay, momentum, epsilon) learning_rate, decay, momentum, epsilon)
net = NetCenteredRMSProp()
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
moment_ms, learning_rate, decay, momentum, epsilon)
net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)


else: else:
rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np, rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
learning_rate, decay, momentum, epsilon) learning_rate, decay, momentum, epsilon)
net = NetRMSProp(decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms,
moment_ms, learning_rate)
net = NetRMSProp(learning_rate, decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)


error = np.ones(shape=variable_np.shape) * 10e-6 error = np.ones(shape=variable_np.shape) * 10e-6
diff = variable_ms.asnumpy() - variable_np diff = variable_ms.asnumpy() - variable_np
@@ -135,15 +138,13 @@ def test_rmspropcenter():
if centered: if centered:
rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np, rmspropcented_numpy(variable_np, gradients_np, mean_gradients_np, mean_square_np, moment_np,
learning_rate, decay, momentum, epsilon) learning_rate, decay, momentum, epsilon)
net = NetCenteredRMSProp()
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
learning_rate, decay, momentum, epsilon)
net = NetCenteredRMSProp(learning_rate, decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)
else: else:
rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np, rmsprop_numpy(variable_np, gradients_np, mean_square_np, moment_np,
learning_rate, decay, momentum, epsilon) learning_rate, decay, momentum, epsilon)
net = NetRMSProp(decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms,
learning_rate)
net = NetRMSProp(learning_rate, decay, momentum, epsilon)
_ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms)


error = np.ones(shape=variable_np.shape) * 10e-6 error = np.ones(shape=variable_np.shape) * 10e-6
diff = variable_ms.asnumpy() - variable_np diff = variable_ms.asnumpy() - variable_np


Loading…
Cancel
Save