Browse Source

[ME] Optimize the error message of the operator module

tags/v1.6.0
Margaret_wangrui 4 years ago
parent
commit
1a37dcfa94
9 changed files with 167 additions and 105 deletions
  1. +1
    -2
      mindspore/ccsrc/frontend/operator/cc_implementations.cc
  2. +10
    -10
      mindspore/ccsrc/frontend/operator/composite/composite.cc
  3. +2
    -4
      mindspore/ccsrc/frontend/operator/composite/do_signature.cc
  4. +20
    -19
      mindspore/ccsrc/frontend/operator/composite/map.cc
  5. +45
    -40
      mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc
  6. +3
    -3
      tests/syntax/simple_expression/test_hypermap.py
  7. +2
    -2
      tests/syntax/simple_expression/test_map.py
  8. +83
    -24
      tests/syntax/simple_expression/test_operator.py
  9. +1
    -1
      tests/ut/cpp/operator/cc_implementations_test.cc

+ 1
- 2
mindspore/ccsrc/frontend/operator/cc_implementations.cc View File

@@ -110,8 +110,7 @@ T InnerScalarFloordiv(T x, T y) {
template <typename T>
T InnerScalarMod(T x, T y) {
if (y == 0) {
MS_EXCEPTION(ValueError) << "The second input of ScalarMod operator could not be zero. "
<< "But the second input is zero now.";
MS_EXCEPTION(ValueError) << "Cannot perform modulo operation on zero.";
}
if constexpr (!std::is_integral<T>::value) {
return x - y * std::floor(x / y);


+ 10
- 10
mindspore/ccsrc/frontend/operator/composite/composite.cc View File

@@ -121,12 +121,12 @@ AnfNodePtr HyperMap::FullMake(const std::shared_ptr<List> &type, const FuncGraph
num++;
auto lhs = std::static_pointer_cast<List>(item.second);
if (lhs == nullptr) {
MS_LOG(EXCEPTION) << "The elements[" << (num - 1) << "] has wrong type, expected a List, but got "
<< item.second->ToString();
MS_LOG(EXCEPTION) << "The " << (num - 1) << "th element in HyperMap has wrong type, expected a List, but got "
<< item.second->ToString() << ".";
}
if (lhs->elements().size() != size) {
oss << "The length of elements[" << (num - 1) << "] is " << size << ", but got " << lhs->elements().size()
<< "\n";
oss << "The length of " << (num - 1) << "th List in HyperMap is " << size << ", but the length of " << num
<< "th List in HyperMap is " << lhs->elements().size() << ".\n";
return true;
}
return false;
@@ -180,12 +180,12 @@ AnfNodePtr HyperMap::FullMake(const std::shared_ptr<Tuple> &type, const FuncGrap
num++;
auto lhs = std::static_pointer_cast<Tuple>(item.second);
if (lhs == nullptr) {
MS_LOG(EXCEPTION) << "The elements[" << (num - 1) << "] has wrong type, expected a Tuple, but got "
<< item.second->ToString();
MS_LOG(EXCEPTION) << "The " << (num - 1) << "th element in HyperMap has wrong type, expected a Tuple, but got "
<< item.second->ToString() << ".";
}
if (lhs->elements().size() != size) {
oss << "The length of elements[" << (num - 1) << "] is " << size << ", but got " << lhs->elements().size()
<< "\n";
oss << "The length of " << (num - 1) << "th Tuple in HyperMap is " << size << ", but the length of " << num
<< "th Tuple in HyperMap is " << lhs->elements().size() << ".\n";
return true;
}
return false;
@@ -294,7 +294,7 @@ AnfNodePtr HyperMap::Make(const FuncGraphPtr &func_graph, const AnfNodePtr &fn_a
<< trace::GetDebugInfo(func_graph->debug_info()) << "\n";
int64_t idx = 0;
for (auto &item : arg_map) {
oss << "The type of " << ++idx << " argument is " << item.second->ToString() << "\n";
oss << "The type of " << (++idx + 1) << "th argument in HyperMap is " << item.second->ToString() << ".\n";
}
MS_LOG(EXCEPTION) << "The types of arguments in HyperMap must be consistent, "
<< "but the types of arguments are inconsistent.\n"
@@ -1026,7 +1026,7 @@ void GenerateTupleSliceParameter(const AbstractTuplePtr &tuple, const AbstractSl

*step_value = CheckSliceMember(slice->step(), step_default, step_name);
if (*step_value == 0) {
MS_EXCEPTION(ValueError) << "TupleSlice require the step value could not be 0, but got 0.";
MS_EXCEPTION(ValueError) << "Slice step cannot be zero.";
}

if (*step_value < 0) {


+ 2
- 4
mindspore/ccsrc/frontend/operator/composite/do_signature.cc View File

@@ -55,10 +55,8 @@ void ProcessDefault(const std::string &func_name, size_t actual_param_number, co
for (size_t i = actual_param_number; i < sig_size; ++i) {
auto default_value = signature[i].default_value;
if (default_value == nullptr) {
MS_LOG(EXCEPTION) << "The size of input in the operator should be equal to the size of the operator's "
<< "signature. But the size of input in the operator is " << actual_param_number
<< ", the length of the operator's signature is " << sig_size
<< ". Please check the size of inputs of the operator.";
MS_LOG(EXCEPTION) << "The size of input in the operator should be " << sig_size << ", but got "
<< actual_param_number << ". Please check inputs of the operator.";
} else {
(*op_inputs).push_back(NewValueNode(default_value));
}


+ 20
- 19
mindspore/ccsrc/frontend/operator/composite/map.cc View File

@@ -78,12 +78,12 @@ AnfNodePtr Map::FullMakeList(const std::shared_ptr<List> &type, const FuncGraphP
num++;
auto lhs = std::dynamic_pointer_cast<List>(item.second);
if (lhs == nullptr) {
MS_LOG(EXCEPTION) << "The elements[" << (num - 1) << "] has wrong type, expected a List, but got "
<< item.second->ToString();
MS_LOG(EXCEPTION) << "The " << (num - 1) << "th element in Map has wrong type, expected a List, but got "
<< item.second->ToString() << ".";
}
if (lhs->elements().size() != size) {
oss << "The length of elements[" << (num - 1) << "] is " << size << ", but got " << lhs->elements().size()
<< "\n";
oss << "The length of " << (num - 1) << "th List in Map is " << size << ", but the length of " << num
<< "th List in Map is " << lhs->elements().size() << ".\n";
return true;
}
return false;
@@ -98,7 +98,7 @@ AnfNodePtr Map::FullMakeList(const std::shared_ptr<List> &type, const FuncGraphP
inputs.push_back(NewValueNode(prim::kPrimMakeList));

for (size_t i = 0; i < size; i++) {
MS_LOG(DEBUG) << "FullMakeList for the " << i << "th arg of the target, reverse_: " << reverse_;
MS_LOG(DEBUG) << "FullMakeList for the " << i << "th arg of the target, reverse_: " << reverse_ << ".";
auto ptrGraph = GenerateLeafFunc(arg_pairs.size());
auto fn = NewValueNode(ptrGraph);

@@ -138,18 +138,18 @@ AnfNodePtr Map::FullMakeTuple(const std::shared_ptr<Tuple> &type, const FuncGrap
num++;
auto lhs = std::dynamic_pointer_cast<Tuple>(item.second);
if (lhs == nullptr) {
MS_LOG(EXCEPTION) << "The elements[" << (num - 1) << "] has wrong type, expected a Tuple, but got "
<< item.second->ToString();
MS_LOG(EXCEPTION) << "The " << (num - 1) << "th element in Map has wrong type, expected a Tuple, but got "
<< item.second->ToString() << ".";
}
if (lhs->elements().size() != size) {
oss << "The length of elements[" << (num - 1) << "] is " << size << ", but got " << lhs->elements().size()
<< "\n";
oss << "The length of " << (num - 1) << "th Tuple in Map is " << size << ", but the length of " << num
<< "th Tuple in Map is " << lhs->elements().size() << ".\n";
return true;
}
return false;
});
if (is_not_same) {
MS_LOG(EXCEPTION) << "The length of tuples in Map must the same. " << oss.str();
MS_LOG(EXCEPTION) << "The length of tuples in Map must be the same. " << oss.str();
}

constexpr size_t kPrimHoldLen = 1;
@@ -158,7 +158,7 @@ AnfNodePtr Map::FullMakeTuple(const std::shared_ptr<Tuple> &type, const FuncGrap
inputs.push_back(NewValueNode(prim::kPrimMakeTuple));

for (size_t i = 0; i < size; i++) {
MS_LOG(DEBUG) << "FullMakeTuple for the " << i << "th arg of the tuple inputs, reverse_: " << reverse_;
MS_LOG(DEBUG) << "FullMakeTuple for the " << i << "th arg of the tuple inputs, reverse_: " << reverse_ << ".";
auto ptrGraph = GenerateLeafFunc(arg_pairs.size());
auto fn = NewValueNode(ptrGraph);

@@ -198,7 +198,7 @@ AnfNodePtr Map::FullMakeClass(const std::shared_ptr<Class> &type, const FuncGrap
inputs.push_back(NewValueNode(type));

for (size_t i = 0; i < attrSize; i++) {
MS_LOG(DEBUG) << "FullMakeClass for the " << i << "th element of the inputs, reverse_: " << reverse_;
MS_LOG(DEBUG) << "FullMakeClass for the " << i << "th element of the inputs, reverse_: " << reverse_ << ".";
auto ptrGraph = GenerateLeafFunc(arg_pairs.size());
auto fn = NewValueNode(ptrGraph);

@@ -229,8 +229,8 @@ AnfNodePtr Map::FullMakeClass(const std::shared_ptr<Class> &type, const FuncGrap

AnfNodePtr Map::Make(const FuncGraphPtr &func_graph, const AnfNodePtr &fn_arg, const ArgsPairList &arg_pairs) {
if (arg_pairs.empty()) {
MS_EXCEPTION(TypeError) << "The Map operator must have at least one argument. But the size of arguments is "
<< arg_pairs.size() << ".";
MS_EXCEPTION(TypeError) << "The Map operator must have at least two arguments. But the size of arguments is "
<< (arg_pairs.size() + 1) << ".";
}
bool found = false;
TypeId id = kObjectTypeEnd;
@@ -256,11 +256,11 @@ AnfNodePtr Map::Make(const FuncGraphPtr &func_graph, const AnfNodePtr &fn_arg, c
});
if (is_not_same) {
std::ostringstream oss;
oss << "There are " << arg_pairs.size() << " inputs of `" << name_ << "`, corresponding type info:\n"
<< trace::GetDebugInfo(func_graph->debug_info()) << "\n";
oss << "There are " << (arg_pairs.size() + 1) << " inputs of `" << name_ << "`, corresponding type info:\n"
<< trace::GetDebugInfo(func_graph->debug_info()) << ".\n";
int64_t idx = 0;
for (auto &item : arg_pairs) {
oss << "The type of " << ++idx << " argument is: " << item.second->ToString() << "\n";
oss << "The type of " << (++idx + 1) << "th argument in Map is: " << item.second->ToString() << ".\n";
}
MS_LOG(EXCEPTION) << "The types of arguments in Map must be consistent, "
<< "but the types of arguments are inconsistent.\n"
@@ -282,7 +282,8 @@ AnfNodePtr Map::Make(const FuncGraphPtr &func_graph, const AnfNodePtr &fn_arg, c
return FullMakeClass(type, func_graph, fn_arg, arg_pairs);
}
default:
MS_LOG(EXCEPTION) << "Map can only be applied to list, tuple and class, but got " << pair.second->ToString();
MS_LOG(EXCEPTION) << "Map can only be applied to list, tuple and class, but got " << pair.second->ToString()
<< ".";
}
}

@@ -301,7 +302,7 @@ FuncGraphPtr Map::GenerateFromTypes(const TypePtrList &args_spec_list) {
ArgsPairList arg_pairs;
std::size_t size = args_spec_list.size();
for (; i < size; ++i) {
MS_LOG(DEBUG) << "GenerateFromTypes for elements from " << args_spec_list[i]->ToString();
MS_LOG(DEBUG) << "GenerateFromTypes for elements from " << args_spec_list[i]->ToString() << ".";
arg_pairs.push_back(std::make_pair(ptrGraph->add_parameter(), args_spec_list[i]));
}



+ 45
- 40
mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc View File

@@ -64,8 +64,8 @@ void CalcSlidePara(const AbstractBasePtrList &args_spec_list, SlideInfo *slide)
MS_EXCEPTION_IF_NULL(args_spec_list[0]);
auto arg_value = args_spec_list[0]->BuildValue();
if (!arg_value->isa<Int64Imm>()) {
MS_LOG(EXCEPTION) << "The type of inputs in MakeRange operator only support int64 number. "
<< "But get " << arg_value->ToString();
MS_LOG(EXCEPTION) << "The type of inputs in range operator only support int64 number. "
<< "But get a " << arg_value->type() << " number.";
}
arg1 = GetValue<int64_t>(arg_value);
}
@@ -74,8 +74,8 @@ void CalcSlidePara(const AbstractBasePtrList &args_spec_list, SlideInfo *slide)
MS_EXCEPTION_IF_NULL(args_spec_list[1]);
auto arg_value = args_spec_list[1]->BuildValue();
if (!arg_value->isa<Int64Imm>()) {
MS_LOG(EXCEPTION) << "The type of inputs in MakeRange operator only support int64 number. "
<< "But get " << arg_value->ToString();
MS_LOG(EXCEPTION) << "The type of inputs in range operator only support int64 number. "
<< "But get a " << arg_value->type() << " number.";
}
arg2 = GetValue<int64_t>(arg_value);
}
@@ -84,8 +84,8 @@ void CalcSlidePara(const AbstractBasePtrList &args_spec_list, SlideInfo *slide)
MS_EXCEPTION_IF_NULL(args_spec_list[2]);
auto arg_value = args_spec_list[2]->BuildValue();
if (!arg_value->isa<Int64Imm>()) {
MS_LOG(EXCEPTION) << "The type of inputs in MakeRange operator only support int64 number. "
<< "But get " << arg_value->ToString();
MS_LOG(EXCEPTION) << "The type of inputs in range operator only support int64 number. "
<< "But get a " << arg_value->type() << " number.";
}
slide->step = GetValue<int64_t>(arg_value);
slide->start = arg1;
@@ -124,7 +124,7 @@ void ComputeReduceIndex(const std::vector<int64_t> &reverse_x, const std::vector
grad_y_reduce_idy->push_back(reduce_idx);
curr = State::Y_ONE;
} else {
MS_LOG(EXCEPTION) << "not compatible shape input for BroadcastGradientArgs";
MS_LOG(EXCEPTION) << "not compatible shape input for BroadcastGradientArgs.";
}
if (curr == State::SAME && x_i == 1) {
grad_x_reduce_idx->push_back(reduce_idx);
@@ -236,7 +236,7 @@ AbstractBasePtr DoInferReduceShape(const AbstractTuplePtr &x_shape, const ValueP
MS_EXCEPTION_IF_NULL(x_shp_value->cast<ValueTuplePtr>());
auto x_shp_data = x_shp_value->cast<ValueTuplePtr>()->value();
if (x_shp_data.size() < x_rank) {
MS_LOG(EXCEPTION) << "x_shape_data.size() " << x_shp_data.size() << " less than x_shape.size() " << x_rank;
MS_LOG(EXCEPTION) << "x_shape_data.size() " << x_shp_data.size() << " less than x_shape.size() " << x_rank << ".";
}
AbstractBasePtrList values;
for (size_t i = 0; i < x_rank; i++) {
@@ -372,13 +372,14 @@ AbstractBasePtr InferImplReduceShape(const AnalysisEnginePtr &, const PrimitiveP

auto x_shp_value = shape_x->BuildValue();
if (x_shp_value->isa<AnyValue>()) {
MS_LOG(EXCEPTION) << "The ReduceShape operator's data field can't be anything: " << args_spec_list[1]->ToString();
MS_LOG(EXCEPTION) << "The ReduceShape operator's data field can't be anything: " << args_spec_list[1]->ToString()
<< ".";
}

// Axis can be scalar, tuple or list
AbstractSequencePtr axis = nullptr;
if (args_spec_list[1]->isa<AbstractScalar>()) {
MS_LOG(DEBUG) << op_name << " evaluator second parameter is scalar";
MS_LOG(DEBUG) << op_name << " evaluator second parameter is scalar.";
AbstractBasePtrList axis_list = {dyn_cast<AbstractScalar>(args_spec_list[1])};
axis = std::make_shared<AbstractTuple>(axis_list);
} else if (args_spec_list[1]->isa<AbstractSequence>()) {
@@ -386,12 +387,13 @@ AbstractBasePtr InferImplReduceShape(const AnalysisEnginePtr &, const PrimitiveP
axis = args_spec_list[1]->cast<AbstractSequencePtr>();
} else {
MS_LOG(EXCEPTION) << "The second argument of ReduceShape operator should be a scalar or tuple or list, "
<< "but got " << args_spec_list[1]->ToString();
<< "but got " << args_spec_list[1]->ToString() << ".";
}

auto axis_value = axis->BuildValue();
if (axis_value->isa<AnyValue>()) {
MS_LOG(EXCEPTION) << "The ReduceShape operator's data field can't be anything: " << args_spec_list[1]->ToString();
MS_LOG(EXCEPTION) << "The ReduceShape operator's data field can't be anything: " << args_spec_list[1]->ToString()
<< ".";
}
auto axis_value_ptr = axis_value->cast<ValueSequencePtr>();
MS_EXCEPTION_IF_NULL(axis_value_ptr);
@@ -414,19 +416,19 @@ AbstractBasePtr InferImplTupleDiv(const AnalysisEnginePtr &, const PrimitivePtr

auto div_shp_value = div_shp->BuildValue();
if (div_shp_value->isa<AnyValue>()) {
MS_LOG(EXCEPTION) << "The TupleDiv operator shape's data field can't be anything, but got "
<< args_spec_list[0]->ToString();
MS_LOG(EXCEPTION) << "The 'tuple_div' operator shape's data field can't be anything, but got "
<< args_spec_list[0]->ToString() << ".";
}

auto shape_x_value = shape_x->BuildValue();
if (shape_x_value->isa<AnyValue>()) {
MS_LOG(EXCEPTION) << "The TupleDiv operator shape's data field can't be anything, but got "
<< args_spec_list[1]->ToString();
MS_LOG(EXCEPTION) << "The 'tuple_div' operator shape's data field can't be anything, but got "
<< args_spec_list[1]->ToString() << ".";
}

if (div_shp->size() != shape_x->size()) {
MS_LOG(EXCEPTION) << "The size of inputs of TupleDiv operator must be the same, but the size of divisor tuple is "
<< div_shp->size() << ", the size of dividend tuple is " << shape_x->size() << ".";
MS_LOG(EXCEPTION) << "The size of inputs of 'tuple_div' operator must be the same, but the size of divisor tuple is"
<< " " << div_shp->size() << ", the size of dividend tuple is " << shape_x->size() << ".";
}

auto shape_x_data = shape_x_value->cast<ValueTuplePtr>()->value();
@@ -435,8 +437,8 @@ AbstractBasePtr InferImplTupleDiv(const AnalysisEnginePtr &, const PrimitivePtr

for (size_t i = 0; i < div_shape_data.size(); i++) {
if (div_shape_data[i]->cast<Int64ImmPtr>() == nullptr) {
MS_LOG(EXCEPTION) << "The data type of inputs of TupleDiv operator should be an int64 number, but got "
<< args_spec_list[1]->ToString();
MS_LOG(EXCEPTION) << "The data type of inputs of 'tuple_div' operator should be an int64 number, but got a "
<< div_shape_data[i]->type() << " number " << div_shape_data[i]->ToString() << ".";
}
int64_t shapex_value = GetValue<int64_t>(shape_x_data[i]);
int64_t div_value = GetValue<int64_t>(div_shape_data[i]);
@@ -445,8 +447,8 @@ AbstractBasePtr InferImplTupleDiv(const AnalysisEnginePtr &, const PrimitivePtr
MS_LOG(EXCEPTION) << "The divisor value should not be 0!";
}
if ((shapex_value % div_value) != 0) {
MS_LOG(EXCEPTION) << "The inputs of TupleDiv is not divisible, the dividend is " << shapex_value
<< ", the divisor is " << div_value << ".";
MS_LOG(EXCEPTION) << "The inputs of 'tuple_div' operator should be divisible, but they are not divisible now, "
<< "the dividend is " << shapex_value << ", the divisor is " << div_value << ".";
}

int64_t result = shapex_value / div_value;
@@ -469,7 +471,7 @@ AbstractBasePtr InferImplTuple2Array(const AnalysisEnginePtr &, const PrimitiveP
auto tensor = tensor::TensorPy::MakeTensor(data);
auto ret = tensor->ToAbstract();
ret->set_value(tensor);
MS_LOG(DEBUG) << "The infer result of Tuple2Array operator is tensor, the infer result is " << ret->ToString();
MS_LOG(DEBUG) << "The infer result of Tuple2Array operator is tensor, the infer result is " << ret->ToString() << ".";
return ret;
}

@@ -483,7 +485,8 @@ AbstractBasePtr InferImplShapeMul(const AnalysisEnginePtr &, const PrimitivePtr

auto shpx_value = shape_x->BuildValue();
if (shpx_value->isa<AnyValue>()) {
MS_LOG(EXCEPTION) << "The ShapeMul operator shape's data field can't be anything, but got " << shape_x->ToString();
MS_LOG(EXCEPTION) << "The ShapeMul operator shape's data field can't be anything, but got " << shape_x->ToString()
<< ".";
}

auto shpx_data = shpx_value->cast<ValueTuplePtr>()->value();
@@ -511,12 +514,12 @@ AbstractBasePtr InferImplSliceGetItem(const AnalysisEnginePtr &, const Primitive
MS_EXCEPTION_IF_NULL(slice_attr);
if (!slice_attr->isa<StringImm>()) {
MS_LOG(EXCEPTION) << "The second argument of SliceGetItem operator should be a string, but got "
<< slice_attr->ToString();
<< slice_attr->ToString() << ".";
}
auto slice_str = GetValue<std::string>(slice_attr);
auto iter = result_map.find(slice_str);
if (iter == result_map.end()) {
MS_EXCEPTION(AttributeError) << "The 'slice' object has no attribute:" << iter->second;
MS_EXCEPTION(AttributeError) << "The 'slice' object has no attribute:" << iter->second << ".";
}
return iter->second;
}
@@ -542,8 +545,8 @@ AbstractBasePtr InferImplMakeSlice(const AnalysisEnginePtr &, const PrimitivePtr
slice_args.push_back(scalar_index->ToAbstract());
} else {
auto type = scalar_value->type();
MS_EXCEPTION(TypeError) << "The " << index << "th input of scalar should be int or bool, but got "
<< type->ToString() << ":" << scalar_value->ToString();
MS_EXCEPTION(TypeError) << "Slice indices must be integers or bool. But got a " << type->ToString()
<< " number.";
}
} else if (args_spec_list[index]->isa<AbstractTensor>()) {
auto arg = args_spec_list[index]->cast<AbstractTensorPtr>();
@@ -575,7 +578,7 @@ AbstractBasePtr InferImplMakeSlice(const AnalysisEnginePtr &, const PrimitivePtr
}
} else {
MS_EXCEPTION(TypeError) << "The " << index << "th input of MakeSlice operator should be scalar, none or tensor, "
<< "but got " << args_spec_list[index]->ToString();
<< "but got " << args_spec_list[index]->ToString() << ".";
}
}
// Slice: start, end, step
@@ -585,12 +588,12 @@ AbstractBasePtr InferImplMakeSlice(const AnalysisEnginePtr &, const PrimitivePtr
AbstractBasePtr InferImplMakeRange(const AnalysisEnginePtr &, const PrimitivePtr &,
const AbstractBasePtrList &args_spec_list) {
if (args_spec_list.empty()) {
MS_LOG(EXCEPTION) << "The inputs of MakeRange operator could not be empty.";
MS_LOG(EXCEPTION) << "For 'range', the arguments could not be empty.";
}

constexpr size_t max_args_size = 3;
if (args_spec_list.size() > max_args_size) {
MS_LOG(EXCEPTION) << "The size of inputs of MakeRange operator could not exceed 3. But the size of inputs is "
MS_LOG(EXCEPTION) << "For 'range', the size of arguments could not exceed 3. But the size of inputs is "
<< args_spec_list.size() << ".";
}

@@ -598,34 +601,36 @@ AbstractBasePtr InferImplMakeRange(const AnalysisEnginePtr &, const PrimitivePtr
CalcSlidePara(args_spec_list, &slide);

if (slide.step == 0) {
MS_LOG(EXCEPTION) << "The step value of MakeRange operator could not be 0.";
MS_LOG(EXCEPTION) << "For 'range', the argument 'step' could not be 0.";
}

AbstractBasePtrList args;
if (slide.start <= slide.stop) {
if (slide.step <= 0) {
MS_LOG(EXCEPTION) << "Error slice[" << slide.start << ", " << slide.stop << ", " << slide.step
<< "], the slide.step should greater than zero, but got " << slide.step << ".";
MS_LOG(EXCEPTION) << "For 'range', while the argument 'start' " << slide.start
<< " less than or equal to the argument 'stop' " << slide.stop << ", "
<< "the argument 'step' must be more than 0, but the argument 'step' is " << slide.step << ".";
}

for (int64_t i = slide.start; i < slide.stop; i += slide.step) {
args.push_back(abstract::FromValue(i));
if (i > 0 && INT_MAX - i < slide.step) {
MS_EXCEPTION(ValueError) << "For MakeRange operator, the required cycles number is greater than max cycles "
<< "number, will cause integer overflow.";
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
} else {
if (slide.step >= 0) {
MS_LOG(EXCEPTION) << "Error slice[" << slide.start << ", " << slide.stop << ", " << slide.step
<< "], the slide.step should smaller than zero, but got " << slide.step << ".";
MS_LOG(EXCEPTION) << "For 'range', while the argument 'start' " << slide.start << " more than the argument 'stop'"
<< " " << slide.stop << ", the argument 'step' must be less than 0, but the argument 'step' is "
<< slide.step << ".";
}

for (int64_t i = slide.start; i > slide.stop; i += slide.step) {
args.push_back(abstract::FromValue(i));
if (i < 0 && INT_MIN - i > slide.step) {
MS_EXCEPTION(ValueError) << "For MakeRange operator, the required cycles number is greater than max cycles "
<< "number, will cause integer overflow.";
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
}


+ 3
- 3
tests/syntax/simple_expression/test_hypermap.py View File

@@ -106,7 +106,7 @@ def test_tuple_slice_stop_index():
Tensor(np.ones([2, 3, 4], np.int32)))

net = TupleSliceNet()
with pytest.raises(Exception, match="The 1th input of scalar should be int or bool"):
with pytest.raises(Exception, match="Slice indices must be integers or bool."):
output = net(data)
print("output:", output)

@@ -145,7 +145,7 @@ def test_tuple_slice_start_index():
Tensor(np.ones([2, 3, 4], np.int32)))

net = TupleSliceNet()
with pytest.raises(Exception, match="The 0th input of scalar should be int or bool"):
with pytest.raises(Exception, match="Slice indices must be integers or bool."):
output = net(data)
print("output:", output)

@@ -184,6 +184,6 @@ def test_tuple_slice_step():
Tensor(np.ones([2, 3, 4], np.int32)))

net = TupleSliceNet()
with pytest.raises(Exception, match="TupleSlice require the step value could not be 0, but got 0."):
with pytest.raises(Exception, match="Slice step cannot be zero."):
output = net(data)
print("output:", output)

+ 2
- 2
tests/syntax/simple_expression/test_map.py View File

@@ -43,7 +43,7 @@ def test_map_args_size():
input_me_x = Tensor(input_np_x)

net = MapNet()
with pytest.raises(Exception, match="The Map operator must have at least one argument."):
with pytest.raises(Exception, match="The Map operator must have at least two arguments."):
ret = net(input_me_x)
print("ret:", ret)

@@ -142,7 +142,7 @@ def test_map_args_full_make_tuple_same_length():
input_me_y = Tensor(np.random.randn(2, 3, 4, 5).astype(np.float32))

net = MapNet()
with pytest.raises(Exception, match="The length of tuples in Map must the same"):
with pytest.raises(Exception, match="The length of tuples in Map must be the same."):
ret = net((input_me_x, input_me_x), (input_me_y, input_me_y, input_me_y))
print("ret:", ret)



+ 83
- 24
tests/syntax/simple_expression/test_operator.py View File

@@ -62,7 +62,7 @@ def test_inner_scalar_mod():

x = Tensor(2, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The second input of ScalarMod operator could not be zero."):
with pytest.raises(Exception, match="Cannot perform modulo operation on zero."):
ret = net(x)
print("ret:", ret)

@@ -84,8 +84,7 @@ def test_inner_scalar_mod_args_length():

x = Tensor(2, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The size of input in the operator should be equal to the size of the "
"operator's signature."):
with pytest.raises(Exception, match="The size of input in the operator should be 2"):
ret = net(x)
print("ret:", ret)

@@ -98,14 +97,14 @@ def test_make_range_input_is_empty():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range():
for _ in range():
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The inputs of MakeRange operator could not be empty."):
with pytest.raises(Exception, match="For 'range', the arguments could not be empty."):
ret = net(x, y)
print("ret:", ret)

@@ -118,14 +117,14 @@ def test_make_range_step_zero():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range(1, 2, 0):
for _ in range(1, 2, 0):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The step value of MakeRange operator could not be 0."):
with pytest.raises(Exception, match="For 'range', the argument 'step' could not be 0."):
ret = net(x, y)
print("ret:", ret)

@@ -138,14 +137,14 @@ def test_make_range_error_input_1():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range(1, -1, 3):
for _ in range(1, -1, 3):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="Error slice"):
with pytest.raises(Exception, match="For 'range', while the argument 'start'"):
ret = net(x, y)
print("ret:", ret)

@@ -158,14 +157,14 @@ def test_make_range_error_input_2():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range(-1, 1, -3):
for _ in range(-1, 1, -3):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="Error slice"):
with pytest.raises(Exception, match="For 'range', while the argument 'start'"):
ret = net(x, y)
print("ret:", ret)

@@ -178,14 +177,54 @@ def test_make_range_input_type():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range(0, 0.02):
for _ in range(0, 0.02):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The type of inputs in MakeRange operator only support int64 number."):
with pytest.raises(Exception, match="The type of inputs in range operator only support int64 number."):
ret = net(x, y)
print("ret:", ret)


def test_make_range_input_type_2():
"""
Feature: Check the type of inputs of make_range operator.
Description: The type of inputs of make_range operator must be int64.
Expectation: The type of inputs of make_range operator must be int64.
"""
class Net(Cell):
def construct(self, x, y):
for _ in range(0, 1, 3.00):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The type of inputs in range operator only support int64 number."):
ret = net(x, y)
print("ret:", ret)


def test_make_range_input_type_3():
"""
Feature: Check the type of inputs of make_range operator.
Description: The type of inputs of make_range operator must be int64.
Expectation: The type of inputs of make_range operator must be int64.
"""
class Net(Cell):
def construct(self, x, y):
for _ in range(3.00):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The type of inputs in range operator only support int64 number."):
ret = net(x, y)
print("ret:", ret)

@@ -198,36 +237,56 @@ def test_make_range_input_size():
"""
class Net(Cell):
def construct(self, x, y):
for _ in F.make_range(1, 2, 3, 4):
for _ in range(1, 2, 3, 4):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The size of inputs of MakeRange operator could not exceed 3."):
with pytest.raises(Exception, match="For 'range', the size of arguments could not exceed 3."):
ret = net(x, y)
print("ret:", ret)


def test_make_range_overflow():
"""
Feature: Check the size of inputs of make_range operator.
Feature: Check the size of inputs of range operator.
Description: Traversing a range whose upper bound is near sys.maxsize overflows the int64 loop index.
Expectation: An integer overflow error is raised while traversing the range.
"""
class Net(Cell):
def construct(self, x, y):
max_index = sys.maxsize
for _ in F.make_range(max_index - 1, max_index, 3):
for _ in range(max_index - 1, max_index, 3):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="Integer overflow error occurred when traversing the range."):
ret = net(x, y)
print("ret:", ret)


def test_make_range_overflow_2():
"""
Feature: Check integer overflow handling of the range operator.
Description: Traversing a range whose lower bound is near -sys.maxsize overflows the int64 loop index.
Expectation: An integer overflow error is raised while traversing the range.
"""
class Net(Cell):
def construct(self, x, y):
min_index = -sys.maxsize
for _ in range(min_index, min_index - 1, -3):
x += y
return x

x = Tensor(2, dtype=ms.int32)
y = Tensor(4, dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="For MakeRange operator, the required cycles number is greater than max cycles"
"number"):
with pytest.raises(Exception, match="Integer overflow error occurred when traversing the range."):
ret = net(x, y)
print("ret:", ret)

@@ -263,7 +322,7 @@ def test_tuple_div():
x = (8, 14, 20)
y = (2, 2)
net = Net()
with pytest.raises(Exception, match="The size of inputs of TupleDiv operator must be the same"):
with pytest.raises(Exception, match="The size of inputs of 'tuple_div' operator must be the same"):
ret = net(x, y)
print("ret:", ret)

@@ -281,7 +340,7 @@ def test_tuple_div_type():
x = (8, 14, 20)
y = (2, 2, 2.0)
net = Net()
with pytest.raises(Exception, match="The data type of inputs of TupleDiv operator should be an int64 number"):
with pytest.raises(Exception, match="The data type of inputs of 'tuple_div' operator should be an int64 number,"):
ret = net(x, y)
print("ret:", ret)

@@ -317,7 +376,7 @@ def test_tuple_div_input_is_not_divisible():
x = (8, 14)
y = (2, 3)
net = Net()
with pytest.raises(Exception, match="The inputs of TupleDiv is not divisible"):
with pytest.raises(Exception, match="The inputs of 'tuple_div' operator should be divisible,"):
ret = net(x, y)
print("ret:", ret)

@@ -330,10 +389,10 @@ def test_make_slice_scalar():
"""
class Net(Cell):
def construct(self, data):
return data[F.make_slice(1.01, None, None)]
return data[1.01:None:None]

x = Tensor((8, 10, 12), dtype=ms.int32)
net = Net()
with pytest.raises(Exception, match="The 0th input of scalar should be int or bool"):
with pytest.raises(Exception, match="Slice indices must be integers or bool."):
ret = net(x)
print("ret:", ret)

+ 1
- 1
tests/ut/cpp/operator/cc_implementations_test.cc View File

@@ -234,7 +234,7 @@ TEST_F(TestImplementations, ScalarModTest) {
ScalarMod(list);
FAIL();
} catch (std::runtime_error const &err) {
ASSERT_TRUE(std::string(err.what()).find("The second input of ScalarMod operator could not be zero.")
ASSERT_TRUE(std::string(err.what()).find("Cannot perform modulo operation on zero.")
!= std::string::npos);
}
list.clear();


Loading…
Cancel
Save