Merge pull request !23153 from dinglinhe/dlh_code_ms_I43QY0_nn_ops (tag: v1.5.0-rc1)
@@ -493,11 +493,12 @@ class Validator:
     if addition_error_info is None:
         addition_error_info = ''
     type_str = (type(type_).__name__ if isinstance(type_, (tuple, list)) else "") + str(type_)
-    raise TypeError(f'For \'{prim_name}\', the type of `{arg_name}` should be subclass'
-                    f' of {", ".join((str(x) for x in template_types))}, but got {type_str}'
-                    f' {addition_error_info}. This message is only for reference. The supported data types'
-                    f' depend on the hardware that executes the operator'
-                    f' and it is a subset of the data types above.')
+    raise TypeError(f"For '{prim_name}', the type of '{arg_name}'"
+                    f" should be {'one of ' if len(template_types) > 1 else ''}"
+                    f"{', '.join((str(x) for x in template_types))}, but got {type_str}"
+                    f" {addition_error_info}. The supported data types depend on the hardware that"
+                    f" executes the operator; please refer to the official API documentation for"
+                    f" more information about the data type.")
 @staticmethod
 def check_valid_input(arg_name, arg_value, prim_name):
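A standalone sketch of how the reworded template renders (hypothetical values; not part of the patch):

    template_types = ["Tensor[Float16]", "Tensor[Float32]"]
    prim_name, arg_name, type_str = "Add", "x", "Tensor[Int64]"
    addition_error_info = ""
    msg = (f"For '{prim_name}', the type of '{arg_name}'"
           f" should be {'one of ' if len(template_types) > 1 else ''}"
           f"{', '.join(str(x) for x in template_types)}, but got {type_str}"
           f" {addition_error_info}.")
    print(msg)
    # For 'Add', the type of 'x' should be one of Tensor[Float16], Tensor[Float32],
    # but got Tensor[Int64] .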
@@ -607,10 +607,11 @@ class PReLU(Cell):
     f"float32 when the 'w' is a tensor, but got {w.dtype}.")
     if len(w.shape) != 1 or w.shape[0] != channel:
         raise ValueError(f"For '{self.cls_name}', the dimension of 'w' should be 1, and the elements number "
-                         f"should be equal to the 'channel' when the 'w' is a tensor, but got 'w' shape {w}, "
-                         f"the 'channel' {channel}.")
+                         f"should be equal to the 'channel' when the 'w' is a tensor, "
+                         f"but got 'w' shape {w.shape}, the 'channel' {channel}.")
 else:
-    raise TypeError(f"For '{self.cls_name}', the 'w' only supported float, list and tensor, but got {type(w)}.")
+    raise TypeError(f"For '{self.cls_name}', the 'w' only supports float, list and tensor, "
+                    f"but got {type(w).__name__}.")
 self.w = Parameter(w, name='a')
 self.prelu = P.PReLU()
 self.relu = P.ReLU()
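For reference, a minimal numpy sketch (not MindSpore code) of the 'w' check above, showing why reporting w.shape reads better than printing the whole tensor:

    import numpy as np

    def check_prelu_w(w, channel, cls_name="PReLU"):
        # Mirrors the patched check: 'w' must be 1-D with exactly 'channel' elements.
        if len(w.shape) != 1 or w.shape[0] != channel:
            raise ValueError(f"For '{cls_name}', the dimension of 'w' should be 1, "
                             f"but got 'w' shape {w.shape}, the 'channel' {channel}.")

    check_prelu_w(np.zeros(3), 3)        # passes
    # check_prelu_w(np.zeros((3, 1)), 3) # raises: 'w' shape (3, 1), the 'channel' 3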
@@ -313,7 +313,7 @@ class Dense(Cell):
 self.activation = get_activation(activation) if isinstance(activation, str) else activation
 if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
     raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got "
-                    f"{type(activation)}.")
+                    f"{type(activation).__name__}.")
 self.activation_flag = self.activation is not None
 def construct(self, x):
@@ -800,7 +800,8 @@ class Pad(Cell):
 self.paddings = paddings
 Validator.check_string(self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
 if not isinstance(paddings, tuple):
-    raise TypeError(f"For '{self.cls_name}', the type of 'paddings' must be tuple, but got {type(paddings)}.")
+    raise TypeError(f"For '{self.cls_name}', the type of 'paddings' must be tuple, "
+                    f"but got {type(paddings).__name__}.")
 for item in paddings:
     if len(item) != 2:
         raise ValueError(f"For '{self.cls_name}', the dimension of 'paddings' must be (n, 2), "
@@ -827,14 +828,15 @@ def bilinear(shape, size, scale, align_corners, prim_name=None):
 """Check input and calculate shape"""
 msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
 if not isinstance(align_corners, bool):
-    raise TypeError(f"{msg_prefix} type of 'align_corners' should be boolean, but got {type(align_corners)}.")
+    raise TypeError(f"{msg_prefix} type of 'align_corners' should be boolean, "
+                    f"but got {type(align_corners).__name__}.")
 if size is None and scale is None:
     raise ValueError(f"{msg_prefix} 'size' and 'scale' both none.")
 if size is not None and scale is not None:
     raise ValueError(f"{msg_prefix} 'size' and 'scale' both not none.")
 if size is not None:
     if not isinstance(size, (tuple, list)):
-        raise ValueError(f"{msg_prefix} 'size' must be tuple or list, but got {type(size)}.")
+        raise ValueError(f"{msg_prefix} 'size' must be tuple or list, but got {type(size).__name__}.")
     Validator.check_int(len(size), 2, Rel.EQ, "size", "bilinear")
     Validator.check_int(size[0], 1, Rel.GE, "size[0]", "bilinear")
     Validator.check_int(size[1], 1, Rel.GE, "size[1]", "bilinear")
@@ -299,8 +299,9 @@ class EmbeddingLookup(Cell):
     self.embeddinglookup.shard(((1, 1), indices_strategy))
 else:
     if is_auto_parallel:
-        Support_mode = ["field_slice", "table_row_slice", "table_column_slice", "batch_slice"]
-        validator.check_string(slice_mode, Support_mode, "slice_mode", self.cls_name)
+        support_mode = ["field_slice", "table_row_slice", "table_column_slice", "batch_slice"]
+        raise ValueError("For '{}', the 'slice_mode' must be in {}, "
+                         "but got {}.".format(self.cls_name, support_mode, slice_mode))
 if self.cache_enable and not enable_ps:
     if parallel_mode != ParallelMode.STAND_ALONE:
         raise ValueError(f"For '{self.cls_name}', parallel mode haven't supported cache enable yet.")
@@ -46,7 +46,7 @@ def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
 @constexpr
 def _check_input_3d(input_shape, param_name, func_name):
     if len(input_shape) != 3:
-        raise ValueError(f"For '{func_name}', the {param_name} should be 3d, but got the length of input_shape:"
+        raise ValueError(f"For '{func_name}', the '{param_name}' should be 3d, but got the length of input_shape:"
                          f" {len(input_shape)}.")
@@ -180,7 +180,7 @@ class _BatchNorm(Cell):
 validator.check_int_range(rid, 0, rank_size, Rel.INC_LEFT, "rank id in process_groups")
 if rid in seen:
     raise ValueError(f"For '{self.cls_name}', rank id in 'process_groups' should not be duplicated, "
-                     f"but got {rid}.")
+                     f"but got {process_groups}.")
 seen.add(rid)
 def _create_global_groups(self):
@@ -241,7 +241,7 @@ class _BatchNorm(Cell):
 def _channel_check(channel, num_channel, prim_name=None):
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if channel != num_channel:
-        raise ValueError(f"{msg_prefix} channel should be equal with num_channel, but got channel: "
+        raise ValueError(f"{msg_prefix} channel should be equal to num_channel, but got channel: "
                          f"{channel}, num_channel: {num_channel}.")
@@ -888,7 +888,7 @@ class LayerNorm(Cell):
 super(LayerNorm, self).__init__()
 if not isinstance(normalized_shape, (tuple, list)):
     raise TypeError(f"For '{self.cls_name}', the type of 'normalized_shape' should be tuple[int] or list[int], "
-                    f"but got '{normalized_shape}' and the type is {type(normalized_shape)}.")
+                    f"but got {normalized_shape} and the type is {type(normalized_shape)}.")
 self.normalized_shape = normalized_shape
 self.begin_norm_axis = begin_norm_axis
 self.begin_params_axis = begin_params_axis
@@ -1037,7 +1037,7 @@ class InstanceNorm2d(Cell):
 val = args_dict[key]
 if not isinstance(val, (Tensor, numbers.Number, str, Initializer)):
     raise TypeError(f"For '{self.cls_name}', the type of args_dict['{key}'] should be in "
-                    f"[Tensor, numbers.Number, str, Initializer], but got type {type(val)}.")
+                    f"[Tensor, numbers.Number, str, Initializer], but got type {type(val).__name__}.")
 if isinstance(val, Tensor) and val.dtype != mstype.float32:
     raise TypeError(f"For '{self.cls_name}', the type of args_dict['{key}'] should be float32, "
                     f"but got {val.dtype}.")
@@ -409,7 +409,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
 max_array = self._get_init_array(self.max_init)
 if not np.greater(max_array, min_array).all():
     raise ValueError(f"For '{self.cls_name}', the 'max_init' should be greater than 'min_init', "
-                     f"but got 'max_array': {max_array}, 'min_init': {min_init}.")
+                     f"but got 'max_init': {max_init}, 'min_init': {min_init}.")
 if self.mode == "DEFAULT":
     self._default_init(min_array, max_array)
 elif self.mode == "LEARNED_SCALE":
@@ -708,7 +708,7 @@ class Conv2dBnFoldQuantOneConv(Cell):
     self.padding = padding
 else:
     raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int/tuple(int), but got "
-                    f"{type(padding)}!")
+                    f"{type(padding).__name__}!")
 self.group = Validator.check_positive_int(group)
 self.eps = eps
 self.momentum = 1 - momentum
@@ -950,7 +950,7 @@ class Conv2dBnFoldQuant(Cell):
     self.padding = padding
 else:
     raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int/tuple(int), "
-                    f"but got {type(padding)}!")
+                    f"but got {type(padding).__name__}!")
 self.group = Validator.check_positive_int(group)
 self.eps = eps
 self.momentum = momentum
@@ -1162,7 +1162,7 @@ class Conv2dBnWithoutFoldQuant(Cell):
     self.padding = padding
 else:
     raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int/tuple(int), "
-                    f"but got {type(padding)}!")
+                    f"but got {type(padding).__name__}!")
 self.group = Validator.check_positive_int(group)
 self.bias_add = P.BiasAdd()
 if Validator.check_bool(has_bias):
@@ -1306,7 +1306,7 @@ class Conv2dQuant(Cell):
     self.padding = padding
 else:
     raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int/tuple(int), "
-                    f"but got {type(padding)}!")
+                    f"but got {type(padding).__name__}!")
 self.group = Validator.check_positive_int(group)
 weight_shape = [out_channels, in_channels // group, *self.kernel_size]
@@ -217,7 +217,7 @@ class _ConvThor(Cell):
     self.padding = padding
 else:
     raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int/tuple(int), but got "
-                    f"{type(padding)}.")
+                    f"{type(padding).__name__}.")
 self.dilation = dilation
 self.group = Validator.check_positive_int(group)
@@ -766,7 +766,7 @@ class EmbeddingLookupThor(Cell):
 f"when 'slice_mode' is 'field_slice'.")
 if not isinstance(manual_shapes, tuple):
     raise TypeError(f"For '{self.cls_name}', the type of 'manual_shapes' must be tuple(int), but got "
-                    f"type {type(manual_shapes)}.")
+                    f"type {type(manual_shapes).__name__}.")
 for dim in manual_shapes:
     Validator.check_positive_int(dim, 'manual shape dim', self.cls_name)
 self.gatherv2.add_prim_attr("manual_split", manual_shapes)
@@ -104,8 +104,8 @@ class TimeDistributed(Cell):
 def __init__(self, layer, time_axis, reshape_with_axis=None):
     """Initialize TimeDistributed."""
     if not isinstance(layer, (Cell, Primitive)):
-        raise TypeError(f"For '{self.cls_name}', the type of 'layer' should be mindspore.nn.Cell or "
-                        "mindspore.ops.Primitive instance, but got type: {type(layer)}.")
+        raise TypeError(f"For '{self.cls_name}', the 'layer' should be Cell or Primitive instance, "
+                        f"but got type: {type(layer).__name__}.")
     super(TimeDistributed, self).__init__()
     Validator.check_is_int(time_axis)
     if reshape_with_axis is not None:
@@ -665,8 +665,8 @@ def _check_weights(weight_shape, label_shape, prim_name=None):
 """Internal function, used to check whether the reduced shape meets the requirements."""
 msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The"
 if weight_shape != label_shape:
-    raise ValueError(f"{msg_prefix} weight shape[0] should be equal to label.shape[1], "
-                     f"but got weight_shape: {weight_shape} and label_shape: {label_shape}.")
+    raise ValueError(f"{msg_prefix} weight_shape[0] should be equal to label_shape[1], "
+                     f"but got weight_shape: {weight_shape} and label_shape: {label_shape}.")
 class MultiClassDiceLoss(LossBase):
@@ -64,7 +64,9 @@ def get_broadcast_shape(x_shape, y_shape, prim_name, shape_type=""):
 elif shape_type == "max_shape":
     broadcast_shape_back.append(min(x_shape[i], y_shape[i]))
 else:
-    raise ValueError(f"For '{prim_name}', the x_shape {x_shape} and y_shape {y_shape} can not broadcast.")
+    raise ValueError(f"For '{prim_name}', x_shape and y_shape are broadcastable only when "
+                     f"x_shape[i] = 1 or -1, or y_shape[i] = 1 or -1, or x_shape[i] = y_shape[i], "
+                     f"but got i: {i}, x_shape: {x_shape}, y_shape: {y_shape}.")
 broadcast_shape_front = y_shape[0: y_len - length] if length == x_len else x_shape[0: x_len - length]
 broadcast_shape = list(broadcast_shape_front) + broadcast_shape_back
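The rule the new message spells out, as a runnable sketch (illustrative only; not the MindSpore implementation): trailing dimensions are compatible when either side is 1 (or -1, i.e. dynamic) or both sides are equal.

    def can_broadcast(x_shape, y_shape):
        for xs, ys in zip(reversed(x_shape), reversed(y_shape)):
            if xs not in (1, -1) and ys not in (1, -1) and xs != ys:
                return False
        return True

    assert can_broadcast((8, 1, 6), (7, 6))      # broadcasts to (8, 7, 6)
    assert not can_broadcast((8, 4, 6), (7, 6))  # 4 vs 7 is incompatible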
@@ -319,14 +319,13 @@ class GradOperation(GradOperation_):
 def __init__(self, get_all=False, get_by_list=False, sens_param=False):
     """Initialize GradOperation."""
     if not isinstance(get_all, bool):
-        raise TypeError(f"For 'GradOperation', the arg 'get_all' should be bool, but got {get_all} with type "
-                        f"{type(get_all)}")
+        raise TypeError(f"For 'GradOperation', the 'get_all' should be bool, but got {type(get_all).__name__}")
     if not isinstance(get_by_list, bool):
-        raise TypeError(f"For 'GradOperation', the arg 'get_by_list' should be bool, but got {get_by_list} with "
-                        f"type {type(get_by_list)}")
+        raise TypeError(f"For 'GradOperation', the 'get_by_list' should be bool, "
+                        f"but got {type(get_by_list).__name__}")
     if not isinstance(sens_param, bool):
-        raise TypeError(f"For 'GradOperation', the arg 'sens_param' should be bool, but got {sens_param} with type "
-                        f"{type(sens_param)}")
+        raise TypeError(f"For 'GradOperation', the 'sens_param' should be bool, "
+                        f"but got {type(sens_param).__name__}")
     self.get_all = get_all
     self.get_by_list = get_by_list
     self.sens_param = sens_param
@@ -488,7 +488,7 @@ def _check_axes_for_batch_dot(x1_shape, x2_shape, axes, prim_name=None):
     axes = [axes, axes]
 else:
     raise ValueError(f"{msg_prefix} type of axes must be one of those: int, tuple(int), list(int), "
-                     f"but got {type(axes)}.")
+                     f"but got {type(axes).__name__}.")
 return axes
@@ -17,12 +17,11 @@
 # ============================================================================
 """Operators for array."""
+from collections import Counter
 import copy
 import functools
 import itertools
 import numbers
 import numpy as np
 from mindspore import log as logger
@@ -87,7 +86,8 @@ class _ScatterOpDynamic(PrimitiveWithCheck):
 def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
     # x_shape cannot be dynamic
     if np.any(np.array(x_shape) == -1):
-        raise ValueError(f"x does not support dynamic shape")
+        raise ValueError(f"For '{prim_name}', the 'input_x' does not support dynamic shape, "
+                         f"but got the shape of 'input_x' is {x_shape}.")
     # support indices and updates dynamic
     if np.any(np.array(indices_shape) == -1) or np.any(np.array(updates_shape) == -1):
         pass
@@ -492,7 +492,8 @@ class Reshape(PrimitiveWithInfer):
 validator.check_value_type("shape[%d]" % i, shp_i, [int], self.name)
 if shp_i == -1:
     if neg_index != -1:
-        raise ValueError(f'The shape can only has one -1 at most, but {shape_v}.')
+        raise ValueError(f"For '{self.name}', the 'input_shape' can contain one -1 at most, "
+                         f"but got {shape_v}.")
     neg_index = i
 else:
     dim_prod *= shp_i
@@ -514,25 +515,27 @@ class Reshape(PrimitiveWithInfer):
     max_shape[neg_index] = int(max_arr_prod / dim_prod)
     min_shape[neg_index] = int(min_arr_prod / dim_prod)
 else:
-    raise ValueError(f'For dynamic shape, Reshape must have neg index')
+    raise ValueError(f"For '{self.name}', the 'input_shape' must have -1 in the case of dynamic shape, "
+                     f"but got {shape_v}.")
 out = {'shape': shape['value'],
        'dtype': x['dtype'],
        'value': None,
        'max_shape': tuple(max_shape),
        'min_shape': tuple(min_shape)}
 else:
-    if dim_prod <= 0 or arr_prod % dim_prod != 0:
-        raise ValueError(f'For \'{self.name}\' input_x\'s shape is {x_shp}, input_shape\'s value is {shape_v}.'
-                         f'The product of input_x\'s shape should > 0, '
-                         f'and can be divided by product of input_shape, but '
-                         f'product of input_x\'s shape is {arr_prod}, product of input_shape is {dim_prod}.')
+    if dim_prod <= 0:
+        raise ValueError(f"For '{self.name}', the shape of 'input_x' is {x_shp}, "
+                         f"the value of 'input_shape' is {shape_v}. "
+                         f"The product of 'input_shape' should be greater than 0, but got {dim_prod}.")
     if neg_index != -1:
         shape_v[neg_index] = int(arr_prod / dim_prod)
         dim_prod *= shape_v[neg_index]
     if dim_prod != arr_prod:
-        raise ValueError(f'For \'{self.name}\' input_x\'s shape is {x_shp}, input_shape\'s value is {shape_v}.'
-                         f'The product of input_x\'s shape should be equal to product of input_shape, but '
-                         f'product of input_x\'s shape is {arr_prod}, product of input_shape is {dim_prod}.')
+        raise ValueError(f"For '{self.name}', the shape of 'input_x' is {x_shp}, "
+                         f"the value of 'input_shape' is {shape_v}. "
+                         f"The product of the shape of 'input_x' should be equal to the product of "
+                         f"'input_shape', but got the product of the shape of 'input_x': {arr_prod} "
+                         f"and the product of 'input_shape': {dim_prod}.")
     value = None
     if x['value'] is not None:
         value = Tensor(x['value'].asnumpy().reshape(shape_v))
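A worked standalone sketch (illustrative only; not the MindSpore implementation) of the -1 inference the messages above describe: the single -1 absorbs whatever is left after dividing by the product of the known dimensions.

    from functools import reduce

    def infer_reshape(x_shape, target):
        arr_prod = reduce(lambda a, b: a * b, x_shape, 1)
        if target.count(-1) > 1:
            raise ValueError(f"'input_shape' can contain one -1 at most, but got {target}")
        dim_prod = reduce(lambda a, b: a * b, [d for d in target if d != -1], 1)
        if -1 in target:
            target = [arr_prod // dim_prod if d == -1 else d for d in target]
            dim_prod = arr_prod
        if dim_prod != arr_prod:
            raise ValueError(f"product mismatch: {arr_prod} vs {dim_prod}")
        return target

    print(infer_reshape([2, 3, 4], [4, -1]))  # [4, 6]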
@@ -675,7 +678,8 @@ class Squeeze(PrimitiveWithInfer):
 for a in axis:
     validator.check_int_range(a, -ndim, ndim - 1, Rel.INC_BOTH, 'axis or its elements', self.name)
     if x_shape[a] != 1:
-        raise ValueError('Cannot select an axis to squeeze out which has size not equal to one.')
+        raise ValueError(f"For '{self.name}', the shape of 'input_x' at dimension {a} should be 1, "
+                         f"but got {x_shape[a]}.")
 ret = [x_shape[i] for i in range(ndim) if not (i in axis or (i - ndim) in axis)]
 return ret
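The shape rule behind the new Squeeze message, as a standalone sketch (illustrative only): every selected axis must have size 1, and those axes are then dropped.

    def squeeze_shape(x_shape, axis):
        ndim = len(x_shape)
        axis = [a + ndim if a < 0 else a for a in axis]
        for a in axis:
            if x_shape[a] != 1:
                raise ValueError(f"the shape of 'input_x' at dimension {a} should be 1, "
                                 f"but got {x_shape[a]}")
        return [d for i, d in enumerate(x_shape) if i not in axis]

    print(squeeze_shape([3, 1, 4, 1], (1, -1)))  # [3, 4]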
@@ -1077,8 +1081,9 @@ class Split(PrimitiveWithCheck):
 # only validate when shape fully known
 output_valid_check = x_shape[self.axis] % self.output_num
 if output_valid_check != 0:
-    raise ValueError(f"x_shape[{self.axis}] {x_shape[self.axis]} must be divide exactly by"
-                     f" output_num {self.output_num}")
+    raise ValueError(f"For '{self.name}', the shape of 'input_x' is {x_shape}, 'axis' is {self.axis}, "
+                     f"the shape of 'input_x' in 'axis' {self.axis} is {x_shape[self.axis]}, "
+                     f"which must be divisible by 'output_num': {self.output_num}.")
 size_splits = [x_shape[self.axis] // self.output_num] * self.output_num
 self.add_prim_attr('size_splits', size_splits)
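The divisibility rule and the derived size_splits, as a standalone sketch (illustrative only):

    def split_sizes(x_shape, axis, output_num):
        if x_shape[axis] % output_num != 0:
            raise ValueError(f"x_shape[{axis}] is {x_shape[axis]}, which must be "
                             f"divisible by 'output_num': {output_num}")
        return [x_shape[axis] // output_num] * output_num

    print(split_sizes([4, 6], axis=1, output_num=3))  # [2, 2, 2]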
@@ -1449,7 +1454,7 @@ class TupleToArray(PrimitiveWithInfer):
 for i, item in enumerate(x):
     validator.check_value_type(f"x[{i}]", item, [numbers.Number], self.name)
 if not all(isinstance(item, dtype) for item in x):
-    raise TypeError("For \'{self.name}\' all elements of input x must be have same type.")
+    raise TypeError(f"For '{self.name}', all elements of 'input_x' must have the same type.")
 if isinstance(x[0], int):
     ret = np.array(x, np.int32)
 else:
@@ -1591,14 +1596,14 @@ class InvertPermutation(PrimitiveWithInfer):
 x_shp = x['shape']
 x_value = x['value']
 if x_value is None:
-    raise ValueError(f'For \'{self.name}\' the input value must be const.')
+    raise ValueError(f"For '{self.name}', the value of 'input_x' cannot be None, but got {x_value}.")
 validator.check_value_type("shape", x_shp, [tuple, list], self.name)
 if mstype.issubclass_(x['dtype'], mstype.tensor):
-    raise ValueError(f'For \'{self.name}\' the input value must be non-Tensor.')
+    raise ValueError(f"For '{self.name}', the value of 'input_x' must be non-Tensor, but got {x['dtype']}.")
 for shp in x_shp:
     if shp:
         x_rank = len(np.array(x_value, np.int64).shape)
-        raise ValueError(f'For \'{self.name}\' the rank of input must be 1, but got {x_rank}.')
+        raise ValueError(f"For '{self.name}', the rank of 'input_x' must be 1, but got {x_rank}.")
 for i, value in enumerate(x_value):
     validator.check_value_type("input[%d]" % i, value, [int], self.name)
 z = [x_value[i] for i in range(len(x_value))]
@@ -1606,7 +1611,8 @@ class InvertPermutation(PrimitiveWithInfer):
 for i in range(1, len(z)):
     if z[i - 1] == z[i]:
-        raise ValueError(f"For {self.name}, {z[i]} is duplicated in the input.")
+        raise ValueError(f"For '{self.name}', the 'input_x' cannot contain duplicate values, "
+                         f"but got duplicated {z[i]} in the 'input_x'.")
 validator.check(f'value min', min(x_value), '', 0, Rel.EQ, self.name)
 validator.check(f'value max', max(x_value), '', len(x_value) - 1, Rel.EQ, self.name)
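What InvertPermutation validates and computes, as a standalone sketch (illustrative only): the input must be a duplicate-free permutation of 0..n-1, and output[input[i]] = i.

    def invert_permutation(x):
        if len(set(x)) != len(x):
            raise ValueError(f"the 'input_x' cannot contain duplicate values, but got {x}")
        out = [0] * len(x)
        for i, v in enumerate(x):
            out[v] = i
        return tuple(out)

    print(invert_permutation((3, 4, 0, 2, 1)))  # (2, 4, 3, 0, 1)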
@@ -1949,7 +1955,8 @@ class Tile(PrimitiveWithInfer):
 def check_elim(self, base_tensor, multiplier):
     if (not isinstance(base_tensor, Tensor)) or (not isinstance(multiplier, tuple)):
-        raise TypeError("Expecting (Tensor, tuple), got: ({}, {})".format(base_tensor, multiplier))
+        raise TypeError(f"For '{self.name}', the type of ('input_x', 'multiples') should be (Tensor, tuple), "
+                        f"but got ({type(base_tensor).__name__}, {type(multiplier).__name__}).")
     if all(v == 1 for v in multiplier):
         return (True, base_tensor)
     return (False, None)
@@ -1970,8 +1977,8 @@ class Tile(PrimitiveWithInfer):
     x_shp.insert(0, 1)
     multiples_w = multiples_v
 elif len_sub < 0:
-    raise ValueError(f'For \'{self.name}\' the length of multiples can not be smaller than '
-                     f'the length of dimension in input_x.')
+    raise ValueError(f"For '{self.name}', the length of 'multiples' cannot be smaller than "
+                     f"the length of dimension in 'input_x'.")
 for i, a in enumerate(multiples_w):
     x_shp[i] *= a
 value = None
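The Tile shape rule referenced above, as a standalone sketch (illustrative only): 'multiples' may be longer than the input rank (leading 1s are prepended to the input shape) but never shorter; the output shape is the elementwise product.

    def tile_shape(x_shp, multiples):
        len_sub = len(multiples) - len(x_shp)
        if len_sub < 0:
            raise ValueError("the length of 'multiples' cannot be smaller than "
                             "the length of dimension in 'input_x'")
        x_shp = [1] * len_sub + list(x_shp)
        return [d * m for d, m in zip(x_shp, multiples)]

    print(tile_shape([2, 3], (4, 1, 2)))  # [4, 2, 6]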
@@ -2073,7 +2080,9 @@ class UnsortedSegmentSum(PrimitiveWithInfer):
     output_min_shape = list(num_segments['min_value'])
 else:
     if isinstance(num_segments_type, type(mstype.tensor)):
-        raise ValueError("Num_segments only support int type when it is not a dynamic value")
+        raise ValueError(f"For '{self.name}', the dtype of 'num_segments' only supports int type "
+                         f"when it is not a dynamic value, but got type of 'num_segments': "
+                         f"{num_segments_type}.")
     output_max_shape = [num_segments_v]
     output_min_shape = [num_segments_v]
 if 'max_shape' in x and 'min_shape' in x:
@@ -2727,8 +2736,8 @@ class Slice(PrimitiveWithInfer):
 validator.check_non_negative_int(begin_v[i], f'input begin[{i}]')
 if x_shape[i] < begin_v[i] + size_v[i]:
     y = begin_v[i] + size_v[i]
-    raise ValueError("For '%s' slice shape can not bigger than origin shape %d, %d." %
-                     (self.name, x_shape[i], y))
+    raise ValueError(f"For '{self.name}', the sliced shape cannot be greater than the origin shape, "
+                     f"but got sliced shape {y} and origin shape {x_shape}.")
 return {'shape': size_v,
         'dtype': x['dtype'],
         'value': None}
@@ -2794,7 +2803,9 @@ class ReverseV2(PrimitiveWithInfer):
     normalized_axis.append(v)
 if len(normalized_axis) != len(set(normalized_axis)):
-    raise ValueError('axis cannot contain duplicate dimensions.')
+    duplicated = [item for item, count in Counter(normalized_axis).items() if count > 1]
+    raise ValueError(f"For '{self.name}', the 'axis' cannot contain duplicate dimensions,"
+                     f" but got duplicated elements {duplicated}.")
 return x_shape
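The Counter idiom the patch introduces, in isolation (illustrative only):

    from collections import Counter

    normalized_axis = [0, 2, 2, 3, 0]
    duplicated = [item for item, count in Counter(normalized_axis).items() if count > 1]
    print(duplicated)  # [0, 2]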
@@ -2913,8 +2924,9 @@ class Select(Primitive):
 def _compute_slicing_length(begin, end, stride, x_shape, i):
     """Computes the length of the slicing."""
     if i >= len(x_shape):
-        raise ValueError(f"For 'StridedSlice', When their is no new axis, the index length must be less or "
-                         f"equal than the dim of x.")
+        raise ValueError(f"For 'StridedSlice', the index length must be less than or equal to "
+                         f"the dimension of 'input_x' when there is no new axis, but got "
+                         f"the dimension of 'input_x': {len(x_shape)} and the index length: {i}.")
     x_dim = x_shape[i]
     if stride > 0:
         # When slicing forward, convert begin and end to positive numbers.
@@ -3143,15 +3155,16 @@ class StridedSlice(PrimitiveWithInfer):
 validator.check_value_type("strides", strides_v, [tuple], self.name)
 if tuple(filter(lambda x: not isinstance(x, int), begin_v + end_v + strides_v)):
-    raise TypeError(f"For {self.name}, both the begins, ends, and strides must be a tuple of int, "
-                    f"but got begins: {begin_v}, ends: {end_v}, strides: {strides_v}.")
+    raise TypeError(f"For {self.name}, the 'begin', 'end', and 'strides' must all be tuples of int, "
+                    f"but got 'begin': {begin_v}, 'end': {end_v}, 'strides': {strides_v}.")
 if tuple(filter(lambda x: x == 0, strides_v)):
-    raise ValueError(f"For '{self.name}', the strides cannot contain 0, but got strides: {strides_v}.")
+    raise ValueError(f"For '{self.name}', the 'strides' cannot contain 0, but got 'strides': {strides_v}.")
 if len(end_v) != len(begin_v) or len(strides_v) != len(begin_v):
-    raise ValueError(f"For '{self.name}' the length of begin index: {begin_v}, end index: {end_v} and "
-                     f"strides: {strides_v} must be equal.")
+    raise ValueError(f"For '{self.name}', the lengths of the 'begin' index, 'end' index and 'strides' "
+                     f"must be equal, but got 'begin': {begin_v}, 'end': {end_v} "
+                     f"and 'strides': {strides_v}.")
 ret_shape = self._compute_slicing_shape(x['shape'], begin_v, end_v, strides_v)
@@ -3219,9 +3232,9 @@ class StridedSlice(PrimitiveWithInfer):
     continue
 if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
     if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-        raise IndexError(f"For {self.name}, when shrink axis, the stride cannot be negative number, "
-                         f"and begin should be in [-{x_shape[i]}, {x_shape[i]}), "
-                         f"but got stride: {stride}, begin: {begin}.")
+        raise IndexError(f"For '{self.name}', the 'strides' cannot be a negative number and "
+                         f"'begin' should be in [-{x_shape[i]}, {x_shape[i]}) when shrink axis, "
+                         f"but got 'strides': {stride}, 'begin': {begin}.")
     j += 1
     i += 1
     continue
@@ -3253,9 +3266,9 @@ class StridedSlice(PrimitiveWithInfer):
     continue
 if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
     if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-        raise ValueError(f"For {self.name}, when shrink axis, the stride cannot be negative number, "
-                         f"and begin should be in [-{x_shape[i]}, {x_shape[i]}), "
-                         f"but got stride: {stride}, begin: {begin}.")
+        raise IndexError(f"For '{self.name}', the 'strides' cannot be a negative number and "
+                         f"'begin' should be in [-{x_shape[i]}, {x_shape[i]}) when shrink axis, "
+                         f"but got 'strides': {stride}, 'begin': {begin}.")
     j += 1
     i += 1
     continue
@@ -3369,8 +3382,8 @@ class DiagPart(PrimitiveWithInfer):
 def infer_shape(self, x_shape):
     if len(x_shape) % 2 != 0 or \
             not x_shape:
-        raise ValueError(f"For \'{self.name}\' input rank must be non-zero and even, but got rank {len(x_shape)}, "
-                         f"with shapes {x_shape}")
+        raise ValueError(f"For '{self.name}', the rank of 'input_x' must be non-zero and even, "
+                         f"but got rank {len(x_shape)}, with shape {x_shape}.")
     length = len(x_shape) // 2
     for i in range(length):
         validator.check('input_shape[i + len(input_shape)/2]', x_shape[i + length],
@@ -3533,7 +3546,9 @@ class ScatterNd(PrimitiveWithInfer):
 indices_shape, update_shape = indices["shape"], update["shape"]
 if indices_shape[0] != update_shape[0]:
-    raise ValueError(f'For \'{self.name}\' The indices_shape[0] and update_shape[0] must be equal.')
+    raise ValueError(f"For '{self.name}', the first dimension of 'indices' must be the same as the first "
+                     f"dimension of 'updates', but got the first dimension of 'indices': {indices_shape[0]}, "
+                     f"the first dimension of 'updates': {update_shape[0]}.")
 return {'shape': shp,
         'dtype': update['dtype'],
@@ -3704,15 +3719,22 @@ class TensorScatterUpdate(PrimitiveWithInfer):
 def infer_shape(self, input_x_shape, indices_shape, updates_shape):
     if len(indices_shape) < 2:
-        raise ValueError("For 'TensorScatterUpdate', rank of indices cannot be less than 2.")
+        raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
+                         f" but got {len(indices_shape)}.")
     if indices_shape[-1] > len(input_x_shape):
-        raise ValueError("For 'TensorScatterUpdate', indices.shape[-1] cannot be greater than rank of input_x.")
+        raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
+                         f"the dimension of 'input_x', but got the "
+                         f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
+                         f"{len(input_x_shape)}.")
     updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
     if updates_shape_check != updates_shape:
-        raise ValueError("For 'TensorScatterUpdate', input_x.shape, "\
-                         "indices.shape and updates.shape are incompatible.")
+        raise ValueError(f"For '{self.name}', the shape of 'updates' must be equal to updates_shape_check, "
+                         f"but got the shape of 'updates': {updates_shape} "
+                         f"and updates_shape_check: {updates_shape_check}. Please check the shapes of "
+                         f"'indices' and 'input_x'; they should meet the following formula:\n"
+                         f" updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:].")
     return input_x_shape
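A worked example of the formula quoted in the new message (illustrative only):

    input_x_shape = [4, 5, 6]
    indices_shape = [3, 2]  # 3 scatter points, each indexing the first 2 dims
    updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
    print(updates_shape_check)  # [3, 6] -- each of the 3 updates is a row of length 6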
@@ -3782,14 +3804,22 @@ class TensorScatterAdd(PrimitiveWithInfer):
 def infer_shape(self, input_x_shape, indices_shape, updates_shape):
     if len(indices_shape) < 2:
-        raise ValueError("For 'TensorScatterAdd', rank of indices cannot be less than 2.")
+        raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
+                         f" but got {len(indices_shape)}.")
     if indices_shape[-1] > len(input_x_shape):
-        raise ValueError("For 'TensorScatterAdd', indices.shape[-1] cannot be greater than rank of input_x.")
+        raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
+                         f"the dimension of 'input_x', but got the "
+                         f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
+                         f"{len(input_x_shape)}.")
     updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
     if updates_shape_check != updates_shape:
-        raise ValueError("For 'TensorScatterAdd', input_x.shape, indices.shape and updates.shape are incompatible.")
+        raise ValueError(f"For '{self.name}', the shape of 'updates' must be equal to updates_shape_check, "
+                         f"but got the shape of 'updates': {updates_shape} "
+                         f"and updates_shape_check: {updates_shape_check}. Please check the shapes of "
+                         f"'indices' and 'input_x'; they should meet the following formula:\n"
+                         f" updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:].")
     return input_x_shape
@@ -4719,8 +4749,11 @@ class SpaceToDepth(PrimitiveWithInfer):
 out_shape = copy.deepcopy(x_shape)
 for i in range(2):
     if out_shape[i + 2] % self.block_size != 0:
-        raise ValueError(f'For \'{self.name}\' input shape[{i + 2}] {out_shape[i + 2]} should be '
-                         f'fully divided by block_size {self.block_size}')
+        msg_ndim = "2nd" if i + 2 == 2 else "3rd"
+        raise ValueError(f"For '{self.name}', the shape of the output with index {i + 2} must be "
+                         f"divisible by 'block_size', but got the {msg_ndim} dimension "
+                         f"of the output: {out_shape[i + 2]} and "
+                         f"'block_size': {self.block_size}.")
     out_shape[i + 2] //= self.block_size
 out_shape[1] *= self.block_size * self.block_size
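The SpaceToDepth shape arithmetic, as a standalone sketch (illustrative only): both spatial dimensions must be divisible by block_size, and the channel dimension grows by block_size**2.

    def space_to_depth_shape(x_shape, block_size):
        out = list(x_shape)  # NCHW layout
        for i in range(2):
            if out[i + 2] % block_size != 0:
                raise ValueError(f"dimension {i + 2} ({out[i + 2]}) must be "
                                 f"divisible by 'block_size' ({block_size})")
            out[i + 2] //= block_size
        out[1] *= block_size * block_size
        return out

    print(space_to_depth_shape([1, 3, 4, 6], 2))  # [1, 12, 2, 3]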
@@ -4876,8 +4909,11 @@ class SpaceToBatch(PrimitiveWithInfer):
 for i in range(2):
     padded = out_shape[i + 2] + self.paddings[i][0] + self.paddings[i][1]
     if padded % self.block_size != 0:
-        raise ValueError(f'For \'{self.name}\' padded[{i}] {padded} should be divisible by '
-                         f'block_size {self.block_size}')
+        msg_ndim = "2nd" if i + 2 == 2 else "3rd"
+        raise ValueError(f"For '{self.name}', the padded shape of the output tensor should be "
+                         f"divisible by 'block_size', but got the {msg_ndim} dimension of the output: {padded} "
+                         f"and 'block_size': {self.block_size}. Please check the official homepage "
+                         f"for more information about the output tensor.")
     out_shape[i + 2] = padded // self.block_size
 out_shape[0] *= self.block_size * self.block_size
 return out_shape
@@ -4964,8 +5000,9 @@ class BatchToSpace(PrimitiveWithInfer):
 out_shape[i + 2] = x_block_prod - crops_sum
 block_size_prod = self.block_size * self.block_size
 if out_shape[0] % block_size_prod != 0:
-    raise ValueError(f'For \'{self.name}\' input_x dimension 0 {out_shape[0]} should be divisible by '
-                     f'block_size_prod {block_size_prod}')
+    raise ValueError(f"For '{self.name}', the shape of the output with index 0 must be divisible "
+                     f"by block_size_prod, but got the shape of the output: {out_shape} and "
+                     f"block_size_prod: {block_size_prod}.")
 out_shape[0] = out_shape[0] // block_size_prod
 return out_shape
@@ -5066,8 +5103,11 @@ class SpaceToBatchND(PrimitiveWithInfer):
 padded = out_shape[i + offset] + self.paddings[i][0] + \
          self.paddings[i][1]
 if padded % self.block_shape[i] != 0:
-    raise ValueError(f'For \'{self.name}\' padded[{i}] {padded} should be divisible by '
-                     f'block_shape[{i}] {self.block_shape[i]}')
+    msg_ndim = "2nd" if i + 2 == 2 else "3rd"
+    raise ValueError(f"For '{self.name}', the 2nd and 3rd dimension of the output tensor should be "
+                     f"divisible by 'block_shape', but got the {msg_ndim} dimension of the output: {padded} "
+                     f"and block_shape[{i}]: {self.block_shape[i]}. Please check the "
+                     f"official homepage for more information about the output tensor.")
 out_shape[i + offset] = padded // self.block_shape[i]
 block_shape_prod = block_shape_prod * self.block_shape[i]
 out_shape[0] *= block_shape_prod
@@ -5170,8 +5210,9 @@ class BatchToSpaceND(PrimitiveWithInfer):
 out_shape[i + offset] = x_block_prod - crops_sum
 if out_shape[0] % block_shape_prod != 0:
-    raise ValueError(f'For \'{self.name}\' input_x dimension 0 {out_shape[0]} should be divisible by '
-                     f'block_shape_prod {block_shape_prod}')
+    raise ValueError(f"For '{self.name}', the 0th dimension of the output tensor should be "
+                     f"divisible by block_shape_prod, but got the 0th dimension of the output tensor: "
+                     f"{out_shape[0]} and block_shape_prod: {block_shape_prod}.")
 out_shape[0] = out_shape[0] // block_shape_prod
 return out_shape
@@ -5317,8 +5358,7 @@ class Meshgrid(PrimitiveWithInfer):
 def __init__(self, indexing="xy"):
     """Initialize Meshgrid."""
     validator.check_value_type("indexing", indexing, (str), self.name)
-    if indexing not in ("xy", "ij"):
-        raise ValueError("indexing parameter must be either 'xy' or 'ij'")
+    validator.check_string(indexing.lower(), ["xy", "ij"], "indexing", self.name)
     self.indexing = indexing
 def infer_shape(self, x_shape):
@@ -5401,7 +5441,8 @@ class InplaceUpdate(PrimitiveWithInfer):
                 Rel.EQ, self.name)
 for i in self.indices:
     if i < 0 or i >= x_shape[0]:
-        raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
+        raise ValueError(f"For '{self.name}', the value of 'indices' must be in [0, {x_shape[0]}), "
+                         f"but got {i}.")
 x_rank = len(x_shape)
 for idx in range(x_rank)[1:]:
     validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
@@ -5731,10 +5772,11 @@ class EmbeddingLookup(PrimitiveWithCheck):
 validator.check_subclass("offset", offset['dtype'], mstype.int_, self.name)
 indices_shp = indices['shape']
 if not indices_shp:
-    raise ValueError("'indices' should NOT be a scalar.")
+    raise ValueError(f"For '{self.name}', the 'input_indices' should not be a scalar, but got {indices_shp}.")
 params_shp = params['shape']
 if len(params_shp) > 2:
-    raise ValueError("The dimension of 'params' in EmbeddingLookup must <= 2, but got %d." % len(params_shp))
+    raise ValueError(f"For '{self.name}', the dimension of 'input_params' must be <= 2, "
+                     f"but got {len(params_shp)}.")
 class GatherD(Primitive):
@@ -6014,9 +6056,11 @@ class SearchSorted(PrimitiveWithInfer):
 def infer_shape(self, sequence_shape, values_shape):
     if len(sequence_shape) != 1 and sequence_shape[:-1] != values_shape[:-1]:
-        raise ValueError(f"Sequence should be 1 dimensional or has all but the last dimension matching "
-                         f" the dimensions of values, but got sequence's dimensions: {sequence_shape} "
-                         f"and values' dimensions: {values_shape}.")
+        raise ValueError(f"For '{self.name}', the 'sequence' should be 1 dimensional, or "
+                         f"all dimensions except the last dimension of 'sequence' "
+                         f"must be the same as all dimensions except the last dimension of 'values', "
+                         f"but got the shape of 'sequence': {sequence_shape} "
+                         f"and the shape of 'values': {values_shape}.")
     return values_shape
 def infer_dtype(self, sequence_dtype, values_dtype):
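The SearchSorted shape rule, as a standalone sketch (illustrative only): either 'sequence' is 1-D, or it matches 'values' in every dimension except the last.

    def check_searchsorted(sequence_shape, values_shape):
        if len(sequence_shape) != 1 and sequence_shape[:-1] != values_shape[:-1]:
            raise ValueError(f"got the shape of 'sequence': {sequence_shape} "
                             f"and the shape of 'values': {values_shape}")
        return values_shape

    print(check_searchsorted([2, 5], [2, 3]))  # output shape: [2, 3]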
@@ -6081,14 +6125,22 @@ class TensorScatterMax(PrimitiveWithInfer):
 def infer_shape(self, input_x_shape, indices_shape, updates_shape):
     if len(indices_shape) < 2:
-        raise ValueError("For 'TensorScatterMax', rank of indices cannot be less than 2.")
+        raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
+                         f" but got {len(indices_shape)}.")
     if indices_shape[-1] > len(input_x_shape):
-        raise ValueError("For 'TensorScatterMax', indices.shape[-1] cannot be greater than rank of input_x.")
+        raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
+                         f"the dimension of 'input_x', but got the "
+                         f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
+                         f"{len(input_x_shape)}.")
     updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
     if updates_shape_check != updates_shape:
-        raise ValueError("For 'TensorScatterMax', input_x.shape, indices.shape and updates.shape are incompatible.")
+        raise ValueError(f"For '{self.name}', the shape of 'updates' must be equal to updates_shape_check, "
+                         f"but got the shape of 'updates': {updates_shape} "
+                         f"and updates_shape_check: {updates_shape_check}. Please check the shapes of "
+                         f"'indices' and 'input_x'; they should meet the following formula:\n"
+                         f" updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:].")
     return input_x_shape
@@ -6157,14 +6209,22 @@ class TensorScatterMin(PrimitiveWithInfer):
 def infer_shape(self, input_x_shape, indices_shape, updates_shape):
     if len(indices_shape) < 2:
-        raise ValueError("For 'TensorScatterMin', rank of indices cannot be less than 2.")
+        raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
+                         f" but got {len(indices_shape)}.")
     if indices_shape[-1] > len(input_x_shape):
-        raise ValueError("For 'TensorScatterMin', indices.shape[-1] cannot be greater than rank of input_x.")
+        raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
+                         f"the dimension of 'input_x', but got the "
+                         f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
+                         f"{len(input_x_shape)}.")
     updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
     if updates_shape_check != updates_shape:
-        raise ValueError("For 'TensorScatterMin', input_x.shape, indices.shape and updates.shape are incompatible.")
+        raise ValueError(f"For '{self.name}', the shape of 'updates' must be equal to updates_shape_check, "
+                         f"but got the shape of 'updates': {updates_shape} "
+                         f"and updates_shape_check: {updates_shape_check}. Please check the shapes of "
+                         f"'indices' and 'input_x'; they should meet the following formula:\n"
+                         f" updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:].")
     return input_x_shape
@@ -6234,14 +6294,22 @@ class TensorScatterSub(PrimitiveWithInfer):
 def infer_shape(self, input_x_shape, indices_shape, updates_shape):
     if len(indices_shape) < 2:
-        raise ValueError("For 'TensorScatterSub', rank of indices cannot be less than 2.")
+        raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
+                         f" but got {len(indices_shape)}.")
     if indices_shape[-1] > len(input_x_shape):
-        raise ValueError("For 'TensorScatterSub', indices.shape[-1] cannot be greater than rank of input_x.")
+        raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
+                         f"the dimension of 'input_x', but got the "
+                         f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
+                         f"{len(input_x_shape)}.")
     updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
     if updates_shape_check != updates_shape:
-        raise ValueError("For 'TensorScatterSub', input_x.shape, indices.shape and updates.shape are incompatible.")
+        raise ValueError(f"For '{self.name}', the shape of 'updates' must be equal to updates_shape_check, "
+                         f"but got the shape of 'updates': {updates_shape} "
+                         f"and updates_shape_check: {updates_shape_check}. Please check the shapes of "
+                         f"'indices' and 'input_x'; they should meet the following formula:\n"
+                         f" updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:].")
     return input_x_shape
@@ -147,7 +147,7 @@ class AllReduce(PrimitiveWithInfer):
 def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
     """Initialize AllReduce."""
     if not isinstance(op, type(ReduceOp.SUM)):
-        raise TypeError(f"For '{self.name}', the 'op' of AllReduce should be str, but got {type(op)}.")
+        raise TypeError(f"For '{self.name}', the 'op' of AllReduce should be str, but got {type(op).__name__}.")
     if not isinstance(_get_group(group), str):
         raise TypeError(f"For '{self.name}', the 'group' of AllReduce should be str, "
                         f"but got {type(_get_group(group))}.")
@@ -563,7 +563,7 @@ class Broadcast(PrimitiveWithInfer):
 def infer_dtype(self, x_dtype):
     if not isinstance(x_dtype, tuple):
-        raise TypeError(f"For '{self.name}', the 'input_x' should be a tuple, but got {type(x_dtype)}!")
+        raise TypeError(f"For '{self.name}', the 'input_x' should be a tuple, but got {type(x_dtype).__name__}!")
     for _ele in x_dtype:
         validator.check_tensor_dtype_valid('x', _ele, target_dtypes, self.name)
     return x_dtype
@@ -227,7 +227,7 @@ class HistogramSummary(PrimitiveWithInfer):
 # In the summary, the histogram value should be a tensor whose shape is not [].
 if not v_shape:
     raise ValueError(f"For '{self.name}', the type of 'value' should be tensor, "
-                     f"and whose shape should not be [], but got {v_shape}.")
+                     f"and its shape should not be [], but got {v_shape}.")
 return SUMMARY_RETURN_VALUE
@@ -343,7 +343,7 @@ class HookBackward(PrimitiveWithInfer):
 self.add_prim_attr("cell_id", cell_id)
 self.init_attrs["cell_id"] = cell_id
 if not isinstance(hook_fn, (FunctionType, MethodType)):
-    raise TypeError(f"For '{self.name}', the tye of 'hook_fn' should be python function, "
+    raise TypeError(f"For '{self.name}', the type of 'hook_fn' should be python function, "
                     f"but got {type(hook_fn)}.")
 self.register_hook(hook_fn)
 self.cell_id = cell_id
@@ -2353,7 +2353,7 @@ class Minimum(_MathBinaryOp):
 >>> # case 1 : same data type
 >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
 >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
->>> minimum = ops.Minimum
+>>> minimum = ops.Minimum()
 >>> output = minimum(x, y)
 >>> print(output)
 [1. 2. 3.]
@@ -2527,7 +2527,7 @@ class Div(_MathBinaryOp):
 >>> print(output)
 [-2. 2.5 3.]
 >>> print(output.dtype)
-Flaot32
+Float32
 """
 def infer_value(self, x, y):
@@ -1264,7 +1264,9 @@ class BatchNorm(PrimitiveWithInfer):
 validator.check_float_range(momentum, 0, 1, Rel.INC_BOTH, 'momentum', self.name)
 self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-    raise ValueError("NHWC format only support in GPU target.")
+    raise ValueError(f"For '{self.name}', the \"NHWC\" format is only supported on the GPU target, "
+                     f"but got the format: {self.format} and "
+                     f"the platform: {context.get_context('device_target')}.")
 self.add_prim_attr('data_format', self.format)
 self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
                         outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])
@@ -1331,7 +1333,7 @@ class Conv2D(Primitive):
 mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,
     2 deconvolution, 3 depthwise convolution. Default: 1.
 pad_mode (str): Specifies padding mode. The optional values are
-    "same", "valid", "pad". Default: "valid".
+    "same", "valid", "pad", not case sensitive. Default: "valid".
     - same: Adopts the way of completion. The height and width of the output will be the same as
       the input `x`. The total number of padding will be calculated in horizontal and vertical
@@ -1413,10 +1415,11 @@ class Conv2D(Primitive):
 validator.check_equal_int(len(pad), 4, 'pad size', self.name)
 self.add_prim_attr("pad", pad)
 self.padding = pad
-self.pad_mode = validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.name)
+self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name)
 if pad_mode != 'pad' and pad != (0, 0, 0, 0):
-    raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
+    raise ValueError(f"For '{self.name}', the 'pad' must be zero when 'pad_mode' is not \"pad\", "
+                     f"but got 'pad': {pad} and 'pad_mode': {pad_mode}.")
 if self.pad_mode == 'pad':
     for item in pad:
         validator.check_non_negative_int(item, 'pad item', self.name)
@@ -1424,7 +1427,9 @@ class Conv2D(Primitive):
 self.mode = validator.check_equal_int(mode, 1, 'mode', self.name)
 self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-    raise ValueError("NHWC format only support in GPU target.")
+    raise ValueError(f"For '{self.name}', the \"NHWC\" format is only supported on the GPU target, "
+                     f"but got the format: {self.format} "
+                     f"and the platform: {context.get_context('device_target')}.")
 self.add_prim_attr('data_format', self.format)
 self.out_channel = validator.check_positive_int(out_channel, 'out_channel', self.name)
 self.group = validator.check_positive_int(group, 'group', self.name)
@@ -1454,7 +1459,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
 mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution ,
     2 deconvolution, 3 depthwise convolution. Default: 3.
 pad_mode (str): Specifies padding mode. The optional values are
-    "same", "valid", "pad". Default: "valid".
+    "same", "valid", "pad", not case sensitive. Default: "valid".
     - same: Adopts the way of completion. The height and width of the output will be the same as
       the input `x`. The total number of padding will be calculated in horizontal and vertical
@@ -1541,9 +1546,10 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
 validator.check_equal_int(len(pad), 4, 'pad size', self.name)
 self.add_prim_attr("pad", pad)
 self.padding = pad
-self.pad_mode = validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.name)
+self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name)
 if pad_mode != 'pad' and pad != (0, 0, 0, 0):
-    raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
+    raise ValueError(f"For '{self.name}', the 'pad' must be zero when 'pad_mode' is not \"pad\", "
+                     f"but got 'pad': {pad} and 'pad_mode': {pad_mode}.")
 if self.pad_mode == 'pad':
     for item in pad:
         validator.check_non_negative_int(item, 'pad item', self.name)
@@ -1563,7 +1569,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
 _, _, stride_h, stride_w = self.stride
 _, _, dilation_h, dilation_w = self.dilation
 if kernel_size_n != 1:
-    raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
+    raise ValueError(f"For '{self.name}', the batch of input weight should be 1, but got {kernel_size_n}.")
 if self.pad_mode == "valid":
     h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
     w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
@@ -1630,7 +1636,9 @@ class _Pool(PrimitiveWithInfer):
 self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
 self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-    raise ValueError("NHWC format only support in GPU target.")
+    raise ValueError(f"For '{self.name}', the \"NHWC\" format is only supported on the GPU target, "
+                     f"but got the format: {self.format} and "
+                     f"the platform: {context.get_context('device_target')}.")
 if not self.is_maxpoolwithargmax:
     self.add_prim_attr('data_format', self.format)
@@ -1666,8 +1674,8 @@ class _Pool(PrimitiveWithInfer):
 for shape_value in out_shape:
     if shape_value <= 0:
-        raise ValueError(f"For '{self.name}', the kernel size is not valid, "
-                         f"the input shape: {x_shape}, strides shape: {self.strides}.")
+        raise ValueError(f"For '{self.name}', each element of the output shape must be larger than 0, "
+                         f"but got output shape element {shape_value}.")
 return out_shape
 def infer_dtype(self, x_dtype):
@@ -1925,10 +1933,11 @@ class MaxPool3D(PrimitiveWithInfer):
 if len(self.pad_list) == 3:
     self.pad_list = (pad_list[0], pad_list[0], pad_list[1], pad_list[1], pad_list[2], pad_list[2])
 if len(self.pad_list) != 3 and len(self.pad_list) != 6:
-    raise ValueError(f"For `maxpool3d` attr 'pad_list' should be an positive int number or a tuple of "
-                     f"three or six positive int numbers, but got `{len(self.pad_list)}` numbers.")
+    raise ValueError(f"For '{self.name}', attr 'pad_list' should be a positive int number or a tuple of "
+                     f"three or six positive int numbers, but got {len(self.pad_list)} numbers.")
 if self.pad_mode != 'CALCULATED' and self.pad_list != (0, 0, 0, 0, 0, 0):
-    raise ValueError(f"For '{self.name}', when pad_list is not 0, pad_mode should be set as 'pad'.")
+    raise ValueError(f"For '{self.name}', the 'pad_list' must be zero when 'pad_mode' is not \"CALCULATED\", "
+                     f"but got 'pad_list': {self.pad_list} and 'pad_mode': {pad_mode}.")
 if self.pad_mode == 'CALCULATED':
     for item in self.pad_list:
         validator.check_non_negative_int(item, 'pad_list item', self.name)
@@ -2069,7 +2078,8 @@ class Conv2DBackpropInput(Primitive):
     and width of the 2D convolution window. Single int means the value is for both the height and the width of
     the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
     width of the kernel.
-pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
+pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad", not case sensitive.
+    Default: "valid".
 pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
     top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
     padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
@@ -2146,7 +2156,9 @@ class Conv2DBackpropInput(Primitive):
 self.add_prim_attr('kernel_size', self.kernel_size)
 self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-    raise ValueError("NHWC format only support in GPU target.")
+    raise ValueError(f"For '{self.name}', the \"NHWC\" format is only supported on the GPU target, "
+                     f"but got the format: {self.format} and "
+                     f"the platform: {context.get_context('device_target')}.")
 self.add_prim_attr('data_format', self.format)
 self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
 self.stride = _update_attr_by_format(self.stride, self.format)
| @@ -2161,9 +2173,10 @@ class Conv2DBackpropInput(Primitive): | |||
| validator.check_equal_int(len(pad), 4, 'pad size', self.name) | |||
| self.add_prim_attr("pad", pad) | |||
| self.padding = pad | |||
| self.pad_mode = validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.name) | |||
| self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name) | |||
| if pad_mode != 'pad' and pad != (0, 0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.") | |||
| raise ValueError(f"For '{self.name}', the 'pad' must be zero when 'pad_mode' is not \"pad\", " | |||
| f"but got 'pad' is {pad} and 'pad_mode' is {pad_mode}.") | |||
| if self.pad_mode == 'pad': | |||
| for item in pad: | |||
| validator.check_non_negative_int(item, 'pad item', self.name) | |||
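The `pad_mode.lower()` change above makes the mode check case-insensitive while still rejecting explicit `pad` values outside `pad_mode='pad'`. A hedged sketch of that combined validation, using a hypothetical helper rather than the real validator:

```python
# Hypothetical standalone version of the case-insensitive pad_mode validation.
def check_pad_mode(pad_mode, pad):
    mode = pad_mode.lower()                    # accept "SAME", "Same", "same", ...
    if mode not in ('valid', 'same', 'pad'):
        raise ValueError(f"'pad_mode' should be one of 'valid', 'same', 'pad', "
                         f"but got {pad_mode}.")
    if mode != 'pad' and pad != (0, 0, 0, 0):  # explicit padding requires mode 'pad'
        raise ValueError(f"'pad' must be zero when 'pad_mode' is not \"pad\", "
                         f"but got 'pad' is {pad} and 'pad_mode' is {pad_mode}.")
    return mode

assert check_pad_mode("SAME", (0, 0, 0, 0)) == 'same'
```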
| @@ -2284,7 +2297,9 @@ class BiasAdd(Primitive): | |||
| self.init_prim_io_names(inputs=['x', 'b'], outputs=['output']) | |||
| self.format = validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.name) | |||
| if context.get_context("device_target") != "GPU" and self.format == "NHWC": | |||
| raise ValueError("NHWC format only support in GPU target.") | |||
| raise ValueError(f"For '{self.name}', the \"NHWC\" format only support in GPU target, " | |||
| f"but got the format is {self.format} and " | |||
| f"the platform is {context.get_context('device_target')}.") | |||
| self.add_prim_attr('data_format', self.format) | |||
| @@ -2388,7 +2403,8 @@ class NLLLoss(PrimitiveWithInfer): | |||
| \end{array}\right. | |||
| Args: | |||
| reduction (str): Apply specific reduction method to the output: 'none', 'mean', 'sum'. Default: "mean". | |||
| reduction (str): Apply specific reduction method to the output: 'none', 'mean', 'sum', not case sensitive. | |||
| Default: "mean". | |||
| Inputs: | |||
| - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type only support float32 or float16. | |||
| @@ -2432,7 +2448,7 @@ class NLLLoss(PrimitiveWithInfer): | |||
| def __init__(self, reduction="mean"): | |||
| """Initialize NLLLoss""" | |||
| self.init_prim_io_names(inputs=['x', 'target', "weight"], outputs=['loss']) | |||
| self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| self.reduction = validator.check_string(reduction.lower(), ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| self.add_prim_attr('reduction', self.reduction) | |||
| def infer_shape(self, x_shape, t_shape, w_shape): | |||
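Lower-casing `reduction` before `check_string` means callers can now pass any casing, while only the canonical lower-case value is stored on the primitive. A small illustration of the accepted inputs (plain Python, not the MindSpore validator itself):

```python
# Hypothetical stand-in for validator.check_string(reduction.lower(), ...).
def check_reduction(reduction):
    value = reduction.lower()
    if value not in ('none', 'sum', 'mean'):
        raise ValueError(f"'reduction' should be one of 'none', 'sum', 'mean', "
                         f"but got {reduction}.")
    return value

assert check_reduction("Mean") == 'mean'   # accepted after this change
assert check_reduction("SUM") == 'sum'
```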
| @@ -2640,8 +2656,8 @@ class ApplyMomentum(PrimitiveWithInfer): | |||
| @prim_attr_register | |||
| def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0): | |||
| """Initialize ApplyMomentum.""" | |||
| self.use_nesterov = validator.check_bool(use_nesterov) | |||
| self.use_locking = validator.check_bool(use_locking) | |||
| self.use_nesterov = validator.check_bool(use_nesterov, "use_nesterov", self.name) | |||
| self.use_locking = validator.check_bool(use_locking, "use_locking", self.name) | |||
| validator.check_value_type('gradient_scale', gradient_scale, [float], self.name) | |||
| self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'], | |||
| outputs=['output']) | |||
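Passing the argument name and primitive name into `check_bool` is what lets the raised message say which attribute of which primitive was wrong. A sketch of the behaviour this relies on, with an illustrative helper:

```python
# Illustrative helper; the call sites above pass (value, arg_name, prim_name).
def check_bool(arg_value, arg_name, prim_name):
    if not isinstance(arg_value, bool):
        raise TypeError(f"For '{prim_name}', the '{arg_name}' should be bool, "
                        f"but got {type(arg_value).__name__}.")
    return arg_value

check_bool(True, "use_nesterov", "ApplyMomentum")   # ok
# check_bool(1, "use_locking", "ApplyMomentum")     # would raise TypeError
```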
| @@ -2746,7 +2762,8 @@ class SoftMarginLoss(Primitive): | |||
| \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()} | |||
| Args: | |||
| reduction (str): Apply specific reduction method to the output: 'none', 'mean', 'sum'. Default: "mean". | |||
| reduction (str): Apply specific reduction method to the output: 'none', 'mean', 'sum', not case sensitive. | |||
| Default: "mean". | |||
| Inputs: | |||
| - **logits** (Tensor) - Predict data. Data type must be float16 or float32. | |||
| @@ -2778,7 +2795,7 @@ class SoftMarginLoss(Primitive): | |||
| def __init__(self, reduction="mean"): | |||
| """Initialize SoftMarginLoss""" | |||
| self.init_prim_io_names(inputs=['predict', 'label'], outputs=['loss']) | |||
| self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| self.reduction = validator.check_string(reduction.lower(), ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| class L2Loss(PrimitiveWithInfer): | |||
| @@ -3000,7 +3017,8 @@ class SGD(PrimitiveWithCheck): | |||
| """Initialize SGD.""" | |||
| validator.check_value_type("nesterov", nesterov, [bool], self.name) | |||
| if nesterov and dampening != 0: | |||
| raise ValueError(f"Nesterov need zero dampening!") | |||
| raise ValueError(f"For '{self.name}', the 'dampening' must be 0 when 'nesterov' is True, " | |||
| f"but got 'dampening' is {dampening} and 'nesterov' is {nesterov}.") | |||
| self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'], | |||
| outputs=['output']) | |||
| self.add_prim_attr('side_effect_mem', True) | |||
| @@ -3128,7 +3146,8 @@ class ApplyRMSProp(PrimitiveWithInfer): | |||
| def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon): | |||
| if decay is None or momentum is None or epsilon is None: | |||
| raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.") | |||
| raise ValueError(f"For '{self.name}', 'decay', 'momentum' and 'epsilon' can not be None, " | |||
| f"but got 'decay': {decay}, 'momentum': {momentum} and 'epsilon':{epsilon}.") | |||
| class ApplyCenteredRMSProp(PrimitiveWithInfer): | |||
| @@ -3362,7 +3381,8 @@ class L2Normalize(PrimitiveWithInfer): | |||
| self.add_prim_attr('axis', axis) | |||
| self.init_attrs['axis'] = axis | |||
| if len(axis) != 1: | |||
| raise TypeError("The length of axis must be 1, later will support multiple axis!") | |||
| raise TypeError(f"For '{self.name}', the dimension of 'axis' must be 1, but got {len(axis)}, " | |||
| f"later will support multiple axis!") | |||
| self.axis = axis | |||
| def infer_shape(self, input_x): | |||
| @@ -3842,17 +3862,18 @@ class PReLU(PrimitiveWithInfer): | |||
| input_x_dim = len(input_x_shape) | |||
| if input_x_dim in (0, 1): | |||
| if context.get_context("device_target") == "Ascend": | |||
| raise ValueError(f"For '{self.name}', the 0-D or 1-D 'input_x' is not supported on Ascend.") | |||
| raise ValueError(f"For '{self.name}', the dimension of 'x' can not be 0-D or 1-D when the platform is " | |||
| f"\"Ascend\", but got dimension of 'x' is {input_x_dim}.") | |||
| channel_num = 1 | |||
| else: | |||
| channel_num = input_x_shape[1] | |||
| weight_dim = len(weight_shape) | |||
| if weight_dim != 1: | |||
| raise ValueError(f"For '{self.name}', the weight dimension should be 1, while got {weight_dim}.") | |||
| raise ValueError(f"For '{self.name}', the dimension of 'x' should be 1, while got {weight_dim}.") | |||
| if weight_shape[0] != 1 and weight_shape[0] != channel_num: | |||
| raise ValueError(f"For '{self.name}', the weight shape should be (1,) or " | |||
| f"matched with input channel ({channel_num},), but got {weight_shape}") | |||
| raise ValueError(f"For '{self.name}', the first dimension of 'weight' should be (1,) or " | |||
| f"it should be equal to number of channels: {channel_num}, but got {weight_shape}") | |||
| return input_x_shape | |||
| def infer_dtype(self, input_x_dtype, weight_dtype): | |||
| @@ -4055,8 +4076,8 @@ class BCEWithLogitsLoss(PrimitiveWithInfer): | |||
| and the third method is to calculate the sum of all losses. | |||
| Args: | |||
| reduction (str): Type of reduction to be applied to loss. The optional values are 'mean', 'sum', and 'none'. | |||
| If 'none', do not perform reduction. Default:'mean'. | |||
| reduction (str): Type of reduction to be applied to loss. The optional values are 'mean', 'sum', and 'none', | |||
| not case sensitive. If 'none', do not perform reduction. Default: 'mean'. | |||
| Inputs: | |||
| - **logits** (Tensor) - Input logits. Data type must be float16 or float32. | |||
| @@ -4095,36 +4116,36 @@ class BCEWithLogitsLoss(PrimitiveWithInfer): | |||
| @prim_attr_register | |||
| def __init__(self, reduction='mean'): | |||
| """Initialize BCEWithLogitsLoss""" | |||
| self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| self.reduction = validator.check_string(reduction.lower(), ['none', 'sum', 'mean'], 'reduction', self.name) | |||
| def infer_shape(self, predict, target, weight, pos_weight): | |||
| validator.check('predict_shape', predict, 'target_shape', target, Rel.EQ, self.name) | |||
| def infer_shape(self, logits, label, weight, pos_weight): | |||
| validator.check('logits_shape', logits, 'label_shape', label, Rel.EQ, self.name) | |||
| reversed_weight_shape = tuple(reversed(weight)) | |||
| reversed_target = tuple(reversed(predict)) | |||
| reversed_label = tuple(reversed(logits)) | |||
| for i, v in enumerate(reversed_weight_shape): | |||
| if v not in (reversed_target[i], 1): | |||
| raise ValueError(f"For {self.name}, shapes can not broadcast. " | |||
| f"predict: {tuple(predict)}, weight shape {tuple(weight)}.") | |||
| if v not in (reversed_label[i], 1): | |||
| raise ValueError(f"For {self.name}, the shapes of 'logits' and 'weight' can not broadcast. " | |||
| f"'logits': {tuple(logits)}, 'weight' shape {tuple(weight)}.") | |||
| reversed_pos_shape = tuple(reversed(pos_weight)) | |||
| reversed_target = tuple(reversed(predict)) | |||
| reversed_label = tuple(reversed(logits)) | |||
| for i, v in enumerate(reversed_pos_shape): | |||
| if v not in (reversed_target[i], 1): | |||
| raise ValueError(f"For {self.name}, shapes can not broadcast. " | |||
| f"predict: {tuple(predict)}, pos_weight shape {tuple(pos_weight)}.") | |||
| if v not in (reversed_label[i], 1): | |||
| raise ValueError(f"For {self.name}, the shapes of 'logits' and 'pos_weight' can not broadcast. " | |||
| f"'logits': {tuple(logits)}, 'pos_weight' shape {tuple(pos_weight)}.") | |||
| if self.reduction in ('mean', 'sum'): | |||
| shape = [] | |||
| else: | |||
| shape = predict | |||
| shape = logits | |||
| return shape | |||
| def infer_dtype(self, predict, target, weight, pos_weight): | |||
| validator.check_tensor_dtype_valid('predict dtype', predict, [mstype.float16, mstype.float32], self.name) | |||
| validator.check_tensor_dtype_valid('target dtype', target, [mstype.float16, mstype.float32], self.name) | |||
| def infer_dtype(self, logits, label, weight, pos_weight): | |||
| validator.check_tensor_dtype_valid('logits dtype', logits, [mstype.float16, mstype.float32], self.name) | |||
| validator.check_tensor_dtype_valid('label dtype', label, [mstype.float16, mstype.float32], self.name) | |||
| validator.check_tensor_dtype_valid('weight dtype', weight, [mstype.float16, mstype.float32], self.name) | |||
| validator.check_tensor_dtype_valid('pos_weight dtype', pos_weight, [mstype.float16, mstype.float32], self.name) | |||
| return predict | |||
| return logits | |||
| class Pad(PrimitiveWithInfer): | |||
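The renamed broadcast check above walks `weight` (or `pos_weight`) and `logits` from the trailing axis and accepts a dimension only if it is 1 or equals the corresponding `logits` dimension. A compact restatement of that rule:

```python
# Minimal sketch of the trailing-axis broadcast rule used by infer_shape above.
def can_broadcast_to(weight_shape, logits_shape):
    for w, l in zip(reversed(weight_shape), reversed(logits_shape)):
        if w not in (l, 1):
            return False
    return True

assert can_broadcast_to((1, 4), (3, 4))       # 1 broadcasts over 3
assert not can_broadcast_to((2, 4), (3, 4))   # 2 vs 3 cannot broadcast
```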
| @@ -4170,17 +4191,19 @@ class Pad(PrimitiveWithInfer): | |||
| """Initialize Pad""" | |||
| self.init_prim_io_names(inputs=['x'], outputs=['y']) | |||
| if not isinstance(paddings, tuple): | |||
| raise TypeError('Paddings must be tuple type.') | |||
| raise TypeError(f"For '{self.name}', the type of 'paddings' must be tuple, " | |||
| f"but got {type(paddings)}.") | |||
| for item in paddings: | |||
| if len(item) != 2: | |||
| raise ValueError('The shape of paddings must be (n, 2).') | |||
| raise ValueError(f"For '{self.name}', the shape of paddings must be (n, 2), " | |||
| f"but got {item}.") | |||
| self.paddings = paddings | |||
| def infer_shape(self, x_shape): | |||
| validator.check_int(len(self.paddings), len(x_shape), Rel.EQ, 'paddings.shape', self.name) | |||
| paddings = np.array(self.paddings) | |||
| if not np.all(paddings >= 0): | |||
| raise ValueError('All elements of paddings must be >= 0.') | |||
| raise ValueError(f"For '{self.name}', all elements of paddings must be >= 0.") | |||
| y_shape = () | |||
| for i in range(int(paddings.size / 2)): | |||
| y_shape += ((x_shape[i] + paddings[i, 0] + paddings[i, 1]),) | |||
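The inferred output shape is simply each input dimension plus its leading and trailing pad amounts. A NumPy-based restatement under the same `(n, 2)` paddings layout:

```python
import numpy as np

# Sketch of the y_shape computation above: dim + pad_before + pad_after.
def padded_shape(x_shape, paddings):
    p = np.array(paddings)
    return tuple(int(x_shape[i] + p[i, 0] + p[i, 1]) for i in range(len(x_shape)))

assert padded_shape((2, 3), ((1, 1), (0, 2))) == (4, 5)
```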
| @@ -4272,13 +4295,14 @@ class MirrorPad(PrimitiveWithInfer): | |||
| paddings_size = paddings_value.size | |||
| validator.check_int(paddings_size, len(x_shape) * 2, Rel.EQ, 'paddings.shape', self.name) | |||
| if not np.all(paddings_value >= 0): | |||
| raise ValueError('All elements of paddings must be >= 0.') | |||
| raise ValueError(f"For '{self.name}', all elements of 'paddings' must be >= 0.") | |||
| adjust = 0 | |||
| if self.mode == 'SYMMETRIC': | |||
| adjust = 1 | |||
| for i in range(0, int(paddings_size / 2)): | |||
| if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust): | |||
| raise ValueError('At least one dim has too high a padding value for this input and mode') | |||
| raise ValueError(f"For '{self.name}', both paddings[D, 0] and paddings[D, 1] must be no greater than " | |||
| f"the dimension corresponding to 'x'.") | |||
| y_shape = () | |||
| for i in range(0, int(paddings_size / 2)): | |||
| y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),) | |||
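The `adjust` term encodes the difference between the two mirror modes: REFLECT mirrors around the edge element, so a pad amount must be strictly smaller than the dimension, while SYMMETRIC repeats the edge and allows one more. A standalone sketch of that bound:

```python
# Hypothetical restatement of the MirrorPad bound checked above.
def check_mirror_pad(x_dim, pad_before, pad_after, mode):
    adjust = 1 if mode == 'SYMMETRIC' else 0    # REFLECT gets no slack
    if pad_before >= x_dim + adjust or pad_after >= x_dim + adjust:
        raise ValueError(f"pad amounts ({pad_before}, {pad_after}) are too large "
                         f"for dimension {x_dim} in {mode} mode.")

check_mirror_pad(3, 2, 2, 'REFLECT')     # ok: 2 < 3
check_mirror_pad(3, 3, 3, 'SYMMETRIC')   # ok: 3 < 3 + 1
```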
| @@ -5307,7 +5331,7 @@ class KLDivLoss(PrimitiveWithInfer): | |||
| Args: | |||
| reduction (str): Specifies the reduction to be applied to the output. | |||
| Its value must be one of 'none', 'mean', 'sum'. Default: 'mean'. | |||
| Its value must be one of 'none', 'mean', 'sum', not case sensitive. Default: 'mean'. | |||
| Inputs: | |||
| - **logits** (Tensor) - The input Tensor. The data type must be float32. | |||
| @@ -5345,7 +5369,7 @@ class KLDivLoss(PrimitiveWithInfer): | |||
| @prim_attr_register | |||
| def __init__(self, reduction='mean'): | |||
| """Initialize KLDivLoss.""" | |||
| self.reduction = validator.check_string(reduction, ['none', 'mean', 'sum'], 'reduction', self.name) | |||
| self.reduction = validator.check_string(reduction.lower(), ['none', 'mean', 'sum'], 'reduction', self.name) | |||
| def infer_shape(self, x_shape, y_shape): | |||
| validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name) | |||
| @@ -5389,7 +5413,7 @@ class BinaryCrossEntropy(PrimitiveWithInfer): | |||
| Args: | |||
| reduction (str): Specifies the reduction to be applied to the output. | |||
| Its value must be one of 'none', 'mean', 'sum'. Default: 'mean'. | |||
| Its value must be one of 'none', 'mean', 'sum', not case sensitive. Default: 'mean'. | |||
| Inputs: | |||
| - **logits** (Tensor) - The input Tensor. The data type must be float16 or float32, | |||
| @@ -5432,7 +5456,7 @@ class BinaryCrossEntropy(PrimitiveWithInfer): | |||
| @prim_attr_register | |||
| def __init__(self, reduction='mean'): | |||
| """Initialize BinaryCrossEntropy.""" | |||
| self.reduction = validator.check_string(reduction, ['none', 'mean', 'sum'], 'reduction', self.name) | |||
| self.reduction = validator.check_string(reduction.lower(), ['none', 'mean', 'sum'], 'reduction', self.name) | |||
| def infer_shape(self, x_shape, y_shape, weight_shape): | |||
| validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name) | |||
| @@ -6002,7 +6026,6 @@ class SparseApplyAdagrad(PrimitiveWithInfer): | |||
| @prim_attr_register | |||
| def __init__(self, lr, update_slots=True, use_locking=False): | |||
| """Initialize SparseApplyAdagrad.""" | |||
| validator.check_value_type("lr", lr, [float], self.name) | |||
| validator.check_is_float(lr, "lr", self.name) | |||
| validator.check_value_type("update_slots", update_slots, [bool], self.name) | |||
| validator.check_value_type("use_locking", use_locking, [bool], self.name) | |||
| @@ -7674,14 +7697,15 @@ class DynamicRNN(PrimitiveWithInfer): | |||
| validator.check_int(len(h_shape), 3, Rel.EQ, "h_shape", self.name) | |||
| validator.check_int(len(c_shape), 3, Rel.EQ, "c_shape", self.name) | |||
| if seq_shape is not None: | |||
| raise ValueError(f"For {self.name}, seq_shape should be None.") | |||
| raise ValueError(f"For '{self.name}', the dimension of 'seq_length' should be None, but got {seq_shape}.") | |||
| num_step, batch_size, input_size = x_shape | |||
| hidden_size = w_shape[-1] // 4 | |||
| validator.check("b_shape[-1]", b_shape[-1], "w_shape[-1]", w_shape[-1], Rel.EQ, self.name) | |||
| if w_shape[-1] % 4 != 0: | |||
| raise ValueError(f"For {self.name}, w_shape[-1] should multiple of 4.") | |||
| raise ValueError(f"For '{self.name}', the last dimension of 'w' should be a multiple of 4, " | |||
| f"but got {w_shape[-1]}.") | |||
| validator.check("w_shape[0]", w_shape[0], "input_size + hidden_size", | |||
| input_size + hidden_size, Rel.EQ, self.name) | |||
| validator.check("b_shape[0]", b_shape[0], "w_shape[1]", w_shape[1], Rel.EQ, self.name) | |||
| @@ -7841,7 +7865,8 @@ class DynamicGRUV2(PrimitiveWithInfer): | |||
| num_step, batch_size, input_size = x_shape | |||
| hidden_size = winput_shape[-1] // 3 | |||
| if winput_shape[-1] % 3 != 0: | |||
| raise ValueError(f"For {self.name}, weight_input_shape[-1] should multiple of 3.") | |||
| raise ValueError(f"For '{self.name}', the last dimension of 'w' should be a multiple of 3, " | |||
| f"but got {winput_shape[-1]}.") | |||
| self.placeholder_index = [3, 4, 5] | |||
| if binput_shape is not None: | |||
| @@ -7854,7 +7879,8 @@ class DynamicGRUV2(PrimitiveWithInfer): | |||
| "3 * hidden_shape", [3 * hidden_size], Rel.EQ, self.name) | |||
| self.placeholder_index.remove(4) | |||
| if seq_shape is not None: | |||
| raise ValueError(f"For {self.name}, seq_shape should be None.") | |||
| raise ValueError(f"For '{self.name}', the dimension of 'seq_length' should be None, " | |||
| f"but got {seq_shape}.") | |||
| validator.check_int(len(h_shape), 2, Rel.EQ, "init_h shape rank", self.name) | |||
| validator.check("init_h_shape[0]", h_shape[0], "batch_size", batch_size, Rel.EQ, self.name) | |||
| @@ -8036,8 +8062,8 @@ class AvgPool3D(Primitive): | |||
| strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents | |||
| the depth, height and width of movement are both strides, or a tuple of three int numbers that | |||
| represent depth, height and width of movement respectively. Default: 1. | |||
| pad_mode (str): The optional value for pad mode, is "same", "valid", "pad", not case sensitive. | |||
| Default: "valid". | |||
| pad_mode (str): The optional value for pad mode, is "SAME", "VALID", "PAD", not case sensitive. | |||
| Default: "VALID". | |||
| - same: Adopts the way of completion. The depth, height and width of the output will be the same as | |||
| the input. The total number of padding will be calculated in depth, horizontal and vertical | |||
| @@ -8106,7 +8132,7 @@ class AvgPool3D(Primitive): | |||
| if isinstance(pad, int): | |||
| pad = (pad,) * 6 | |||
| if len(pad) != 6: | |||
| raise ValueError(f"For `AvgPool3D` attr 'pad' should be an positive int number or a tuple of " | |||
| raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " | |||
| f"six positive int numbers, but got `{len(pad)}`.") | |||
| self.pad_list = pad | |||
| self.add_prim_attr('pad_list', self.pad_list) | |||
| @@ -8115,7 +8141,8 @@ class AvgPool3D(Primitive): | |||
| self.add_prim_attr('pad_mode', self.pad_mode) | |||
| if self.pad_mode != 'PAD' and pad != (0, 0, 0, 0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode should be set as 'pad'.") | |||
| raise ValueError(f"For '{self.name}', the 'pad' must be (0, 0, 0, 0, 0, 0) when 'pad_mode' is not \"pad\", " | |||
| f"but got 'pad' is {pad} and 'pad_mode' is {self.pad_mode}.") | |||
| if self.pad_mode == 'PAD': | |||
| for item in pad: | |||
| validator.check_non_negative_int(item, 'pad or item of pad', self.name) | |||
| @@ -8250,7 +8277,7 @@ class Conv3D(PrimitiveWithInfer): | |||
| if isinstance(pad, int): | |||
| pad = (pad,) * 6 | |||
| if len(pad) != 6: | |||
| raise ValueError(f"For `conv3d` attr 'pad' should be an positive int number or a tuple of " | |||
| raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " | |||
| f"six positive int numbers, but got `{len(pad)}`.") | |||
| self.add_prim_attr("pad", pad) | |||
| self.padding = pad | |||
| @@ -8259,7 +8286,8 @@ class Conv3D(PrimitiveWithInfer): | |||
| self.add_prim_attr('pad_mode', self.pad_mode) | |||
| if self.pad_mode != 'pad' and pad != (0, 0, 0, 0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode should be set as 'pad'.") | |||
| raise ValueError(f"For '{self.name}', the 'pad' must be (0, 0, 0, 0, 0, 0) when 'pad_mode' is not \"pad\", " | |||
| f"but got 'pad' is {pad} and 'pad_mode' is {self.pad_mode}.") | |||
| if self.pad_mode == 'pad': | |||
| for item in pad: | |||
| validator.check_non_negative_int(item, 'pad item', self.name) | |||
| @@ -8277,7 +8305,7 @@ class Conv3D(PrimitiveWithInfer): | |||
| validator.check_equal_int(len(w_shape), 5, "weight rank", self.name) | |||
| validator.check_equal_int(len(x_shape), 5, "x rank", self.name) | |||
| if b_shape is not None: | |||
| raise ValueError("Bias currently only support None.") | |||
| raise ValueError(f"For '{self.name}', the 'bias' currently only support None, but got {b_shape}.") | |||
| validator.check(f"x_shape[1] // group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name) | |||
| validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name) | |||
| validator.check('kernel_size', self.kernel_size, 'w_shape[1:4]', tuple(w_shape[2:]), Rel.EQ, self.name) | |||
| @@ -8443,7 +8471,9 @@ class Conv3DBackpropInput(PrimitiveWithInfer): | |||
| self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name) | |||
| if self.pad_mode != 'pad' and self.pad_list != (0, 0, 0, 0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode should be set as 'pad'.") | |||
| raise ValueError(f"For '{self.name}', the 'pad' must be (0, 0, 0, 0, 0, 0) " | |||
| f"when 'pad_mode' is not \"pad\", " | |||
| f"but got 'pad' is {self.pad_list} and 'pad_mode' is {self.pad_mode}.") | |||
| if self.pad_mode == 'pad': | |||
| for item in pad: | |||
| validator.check_non_negative_int(item, 'pad item', self.name) | |||
| @@ -8526,8 +8556,8 @@ class CTCLossV2(Primitive): | |||
| Args: | |||
| blank (int): The blank label. Default: 0. | |||
| reduction (string): Apply specific reduction method to the output. Currently only support 'none'. | |||
| Default: "none". | |||
| reduction (string): Apply specific reduction method to the output. Currently only supports 'none', | |||
| not case sensitive. Default: "none". | |||
| zero_infinity (bool): Whether to set infinite loss and correlation gradient to zero. Default: False. | |||
| Inputs: | |||
| @@ -8557,7 +8587,7 @@ class CTCLossV2(Primitive): | |||
| self.add_prim_attr("blank", blank) | |||
| validator.check_value_type("reduction", reduction, [str], self.name) | |||
| self.reduction = self.reduction.lower() | |||
| validator.check_string(self.reduction, ['none'], 'reduction', self.name) | |||
| validator.check_string(self.reduction.lower(), ['none'], 'reduction', self.name) | |||
| self.add_prim_attr("reduction", self.reduction) | |||
| validator.check_value_type("zero_infinity", zero_infinity, [bool], self.name) | |||
| self.add_prim_attr("zero_infinity", zero_infinity) | |||
| @@ -8640,7 +8670,7 @@ class Conv3DTranspose(PrimitiveWithInfer): | |||
| other is for the width of the kernel. | |||
| mode (int): Modes for different convolutions. Default is 1. It is currently not used. | |||
| pad_mode (str): Specifies padding mode. The optional values are | |||
| "same", "valid", "pad". Default: "valid". | |||
| "same", "valid", "pad", not case sensitive. Default: "valid". | |||
| - same: Adopts the way of completion. The depth, height and width of the output will be the same as | |||
| the input. The total number of padding will be calculated in depth, horizontal and vertical | |||
| @@ -8736,7 +8766,7 @@ class Conv3DTranspose(PrimitiveWithInfer): | |||
| if isinstance(pad, int): | |||
| pad = (pad,) * 6 | |||
| if len(pad) != 6: | |||
| raise ValueError(f"For `Conv3DTranspose` attr 'pad' should be an positive int number or a tuple of " | |||
| raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " | |||
| f"six positive int numbers, but got `{len(pad)}`.") | |||
| self.pad_list = pad | |||
| validator.check_value_type('pad_mode', pad_mode, [str], self.name) | |||
| @@ -8744,7 +8774,8 @@ class Conv3DTranspose(PrimitiveWithInfer): | |||
| self.add_prim_attr('pad_mode', self.pad_mode) | |||
| if self.pad_mode != 'pad' and pad != (0, 0, 0, 0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode should be set as 'pad'.") | |||
| raise ValueError(f"For '{self.name}', the 'pad' must be (0, 0, 0, 0, 0, 0) when 'pad_mode' is not \"pad\", " | |||
| f"but got 'pad' is {pad} and 'pad_mode' is {self.pad_mode}.") | |||
| if self.pad_mode == 'pad': | |||
| for item in self.pad_list: | |||
| @@ -8760,7 +8791,9 @@ class Conv3DTranspose(PrimitiveWithInfer): | |||
| allow_five=False, ret_five=True, greater_zero=False) | |||
| output_padding = (self.output_padding[2], self.output_padding[3], self.output_padding[4]) | |||
| if self.pad_mode != 'pad' and output_padding != (0, 0, 0): | |||
| raise ValueError(f"For '{self.name}', when output_padding is not 0, pad_mode should be set as 'pad'.") | |||
| raise ValueError(f"For '{self.name}', the 'output_padding' must be (0, 0, 0) " | |||
| f"when 'pad_mode' is not \"pad\", " | |||
| f"but got 'output_padding' is {output_padding} and 'pad_mode' is {self.pad_mode}.") | |||
| validator.check_int_range(self.kernel_size[0] * self.kernel_size[1] * self.kernel_size[2], 1, 343, Rel.INC_BOTH, | |||
| 'The product of height, width and depth of kernel_size belonging [1, 343]', self.name) | |||
| validator.check_int_range(self.stride[0] * self.stride[1] * self.stride[2], 1, 343, Rel.INC_BOTH, | |||
| @@ -8777,7 +8810,7 @@ class Conv3DTranspose(PrimitiveWithInfer): | |||
| def __infer__(self, x, w, b=None): | |||
| args = {'x': x['dtype'], 'w': w['dtype']} | |||
| if b is not None: | |||
| raise ValueError("Bias currently only support None.") | |||
| raise ValueError(f"For '{self.name}', the 'bias' currently only support None, but got {b}.") | |||
| valid_dtypes = [mstype.float16, mstype.float32] | |||
| validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, self.name) | |||
| @@ -552,10 +552,11 @@ class CheckBprop(PrimitiveWithInfer): | |||
| return xshapes | |||
| def infer_dtype(self, xdtypes, ydtypes): | |||
| validator.check_value_type('grads', xdtypes, (tuple,), self.name) | |||
| validator.check_value_type('params', ydtypes, (tuple,), self.name) | |||
| tips = f'Bprop of {self.prim_to_check}' | |||
| validator.check_value_type('grads', xdtypes, (tuple,), tips) | |||
| validator.check_value_type('params', ydtypes, (tuple,), tips) | |||
| if len(xdtypes) < len(ydtypes): | |||
| raise ValueError(f"For '{self.name}', the size of 'xdtypes' should not be less than {len(ydtypes)}," | |||
| raise ValueError(f"{tips}, the size of output should be {len(ydtypes)}," | |||
| f" but got {len(xdtypes)}.") | |||
| checking_range = len(ydtypes) | |||
| for i in range(checking_range): | |||
| @@ -565,11 +566,11 @@ class CheckBprop(PrimitiveWithInfer): | |||
| continue | |||
| if isinstance(ydtype, mstype.function_type): | |||
| if not isinstance(xdtype, mstype.env_type_type): | |||
| raise TypeError(f"For '{self.name}', the dtype of {i}th 'xdtypes' should be {mstype.env_type_type}," | |||
| raise TypeError(f"{tips}, the dtype of {i}th output should be {mstype.env_type_type}," | |||
| f" but got {xdtype}.") | |||
| continue | |||
| if xdtype != ydtype: | |||
| raise TypeError(f"For '{self.name}', the shape of {i}th 'xdtypes' should be {ydtype}," | |||
| raise TypeError(f"{tips}, the dtype of {i}th output should be {ydtype}," | |||
| f" but got {xdtype}.") | |||
| return xdtypes | |||
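The `tips` prefix reframes these messages around the bprop being checked: a custom bprop must return at least one gradient per forward input, each with the input's dtype. A minimal standalone restatement of that contract, with illustrative names:

```python
# Hypothetical sketch of the CheckBprop dtype contract.
def check_bprop_output(grad_dtypes, param_dtypes, prim_name):
    tips = f"Bprop of {prim_name}"
    if len(grad_dtypes) < len(param_dtypes):
        raise ValueError(f"{tips}, the size of output should not be less than "
                         f"{len(param_dtypes)}, but got {len(grad_dtypes)}.")
    for i, (g, p) in enumerate(zip(grad_dtypes, param_dtypes)):
        if g != p:
            raise TypeError(f"{tips}, the dtype of {i}th output should be {p}, "
                            f"but got {g}.")

check_bprop_output(('float32', 'float32'), ('float32', 'float32'), 'MyOp')  # passes
```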
| @@ -69,7 +69,7 @@ class StandardNormal(PrimitiveWithInfer): | |||
| def __infer__(self, shape): | |||
| shape_v = shape["value"] | |||
| if shape_v is None: | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None, but got {shape}.") | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None.") | |||
| Validator.check_value_type("shape", shape_v, [tuple], self.name) | |||
| for i, shape_i in enumerate(shape_v): | |||
| Validator.check_positive_int(shape_i, f'shape[{i}]', self.name) | |||
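These `__infer__` methods run at graph-compile time, so `shape` must resolve to a constant tuple of positive ints; a `None` value means it could not be folded to a constant, which is why no "but got" value is printed. A sketch of the check, independent of MindSpore (the same pattern repeats for StandardLaplace, Gamma, Poisson and UniformInt below):

```python
# Hypothetical standalone version of the constant-shape validation.
def check_const_shape(shape_v, prim_name):
    if shape_v is None:
        raise ValueError(f"For '{prim_name}', the 'shape' cannot be None.")
    if not isinstance(shape_v, tuple):
        raise TypeError(f"For '{prim_name}', the 'shape' should be a tuple.")
    for i, dim in enumerate(shape_v):
        if not isinstance(dim, int) or dim <= 0:
            raise ValueError(f"For '{prim_name}', shape[{i}] should be a positive "
                             f"int, but got {dim}.")

check_const_shape((4, 4), 'StandardNormal')   # ok
```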
| @@ -126,7 +126,7 @@ class StandardLaplace(PrimitiveWithInfer): | |||
| def __infer__(self, shape): | |||
| shape_v = shape["value"] | |||
| if shape_v is None: | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None, but got {shape}.") | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None.") | |||
| Validator.check_value_type("shape", shape_v, [tuple], self.name) | |||
| for i, shape_i in enumerate(shape_v): | |||
| Validator.check_positive_int(shape_i, f'shape[{i}]', self.name) | |||
| @@ -189,7 +189,7 @@ class Gamma(PrimitiveWithInfer): | |||
| def __infer__(self, shape, alpha, beta): | |||
| shape_v = shape["value"] | |||
| if shape_v is None: | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None, but got {shape}.") | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None.") | |||
| Validator.check_value_type("shape", shape_v, [tuple], self.name) | |||
| for i, shape_i in enumerate(shape_v): | |||
| Validator.check_positive_int(shape_i, f'shape[{i}]', self.name) | |||
| @@ -253,7 +253,7 @@ class Poisson(PrimitiveWithInfer): | |||
| def __infer__(self, shape, mean): | |||
| shape_v = shape["value"] | |||
| if shape_v is None: | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None, but got {shape}.") | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None.") | |||
| Validator.check_value_type("shape", shape_v, [tuple], self.name) | |||
| for i, shape_i in enumerate(shape_v): | |||
| Validator.check_positive_int(shape_i, f'shape[{i}]', self.name) | |||
| @@ -325,7 +325,7 @@ class UniformInt(PrimitiveWithInfer): | |||
| def __infer__(self, shape, minval, maxval): | |||
| shape_v = shape["value"] | |||
| if shape_v is None: | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None, but got {shape}.") | |||
| raise ValueError(f"For '{self.name}', the 'shape' cannot be None.") | |||
| Validator.check_value_type("shape", shape_v, [tuple], self.name) | |||
| for i, shape_i in enumerate(shape_v): | |||
| Validator.check_positive_int(shape_i, f'shape[{i}]', self.name) | |||
| @@ -564,7 +564,7 @@ class Multinomial(PrimitiveWithInfer): | |||
| Validator.check_tensor_dtype_valid('inputs', inputs['dtype'], [mstype.float32], self.name) | |||
| num_samples_value = num_samples["value"] | |||
| if num_samples_value is None: | |||
| raise ValueError(f"For '{self.name}', the 'num_samples' cannot be None, but got {num_samples}.") | |||
| raise ValueError(f"For '{self.name}', the 'num_samples' cannot be None.") | |||
| Validator.check_value_type("num_samples", num_samples_value, (int,), self.name) | |||
| Validator.check_positive_int(num_samples_value, "num_samples") | |||
| y_shape = (num_samples_value,) | |||
| @@ -107,9 +107,9 @@ def test_reverse_v2_invalid_axis(): | |||
| with pytest.raises(ValueError) as info: | |||
| reverse_v2_net = ReverseV2Net((0, 1, 2, 1)) | |||
| _ = reverse_v2_net(x) | |||
| assert "axis cannot contain duplicate dimensions" in str(info.value) | |||
| assert "'axis' cannot contain duplicate dimensions" in str(info.value) | |||
| with pytest.raises(ValueError) as info: | |||
| reverse_v2_net = ReverseV2Net((-2, -1, 3)) | |||
| _ = reverse_v2_net(x) | |||
| assert "axis cannot contain duplicate dimensions" in str(info.value) | |||
| assert "'axis' cannot contain duplicate dimensions" in str(info.value) | |||
| @@ -1204,7 +1204,7 @@ def test_tensor_slice_reduce_out_of_bounds_neg(): | |||
| net = NetWork() | |||
| with pytest.raises(IndexError) as ex: | |||
| net(input_tensor) | |||
| assert "begin should be in [-6, 6), but got stride: 1, begin: -7." in str(ex.value) | |||
| assert "'begin' should be in [-6, 6) when shrink axis, but got 'strides': 1, 'begin': -7." in str(ex.value) | |||
| @pytest.mark.level1 | |||
| @@ -1226,7 +1226,7 @@ def test_tensor_slice_reduce_out_of_bounds_positive(): | |||
| net = NetWork() | |||
| with pytest.raises(IndexError) as ex: | |||
| net(input_tensor) | |||
| assert "begin should be in [-6, 6), but got stride: 1, begin: 6." in str(ex.value) | |||
| assert "'begin' should be in [-6, 6) when shrink axis, but got 'strides': 1, 'begin': 6." in str(ex.value) | |||
| @pytest.mark.level0 | |||
| @@ -386,7 +386,7 @@ def test_grad_args_type_error1(): | |||
| try: | |||
| GradNetWrtX(Net())(x, y) | |||
| except TypeError as e: | |||
| assert "For 'GradOperation', the arg 'get_all' should be bool, but got" in str(e) | |||
| assert "For 'GradOperation', the 'get_all' should be bool, but got" in str(e) | |||
| def test_grad_args_type_error2(): | |||
| @@ -412,7 +412,7 @@ def test_grad_args_type_error2(): | |||
| try: | |||
| GradNetWrtX(Net())(x, y) | |||
| except TypeError as e: | |||
| assert "For 'GradOperation', the arg 'get_by_list' should be bool, but got" in str(e) | |||
| assert "For 'GradOperation', the 'get_by_list' should be bool, but got" in str(e) | |||
| def test_grad_args_type_error3(): | |||
| @@ -438,7 +438,7 @@ def test_grad_args_type_error3(): | |||
| try: | |||
| GradNetWrtX(Net())(x, y) | |||
| except TypeError as e: | |||
| assert "For 'GradOperation', the arg 'sens_param' should be bool, but got" in str(e) | |||
| assert "For 'GradOperation', the 'sens_param' should be bool, but got" in str(e) | |||
| def test_grad_net_is_none(): | |||
| @@ -53,11 +53,11 @@ def test_invert_int_tensor(): | |||
| context.set_context(mode=context.PYNATIVE_MODE) | |||
| with pytest.raises(TypeError) as err: | |||
| net(input_x) | |||
| assert "For 'LogicalNot or '~' operator', the type of `x` should be subclass of Tensor[Bool], " \ | |||
| assert "For 'LogicalNot or '~' operator', the type of 'x' should be Tensor[Bool], " \ | |||
| "but got Tensor[Int32]" in str(err.value) | |||
| context.set_context(mode=context.GRAPH_MODE) | |||
| with pytest.raises(TypeError) as err: | |||
| net(input_x) | |||
| assert "For 'LogicalNot or '~' operator', the type of `x` should be subclass of Tensor[Bool], " \ | |||
| assert "For 'LogicalNot or '~' operator', the type of 'x' should be Tensor[Bool], " \ | |||
| "but got Tensor[Int32]" in str(err.value) | |||