From: @yepei6
Reviewed-by: @yanglf1121, @kingxian
Signed-off-by: @kingxian
pull/15436/MERGE
@@ -125,10 +125,6 @@ class Tensor(Tensor_):
             return Tensor_.__repr__(self)
         return ''

-    def __add__(self, other):
-        out = tensor_operator_registry.get('__add__')(self, other)
-        return out
-
     def __eq__(self, other):
         if not isinstance(other, (int, float, Tensor)):
             return False
@@ -150,10 +146,6 @@ class Tensor(Tensor_):
     def __hash__(self):
         return hash(id(self))

-    def __mul__(self, other):
-        out = tensor_operator_registry.get('__mul__')(self, other)
-        return out
-
     def __neg__(self):
         out = tensor_operator_registry.get('__neg__')(self)
         return out
@@ -187,38 +179,59 @@ class Tensor(Tensor_):
     def __pos__(self):
         return self

+    def __add__(self, other):
+        return tensor_operator_registry.get('__add__')(self, other)
+
+    def __radd__(self, other):
+        return self.__add__(other)
+
     def __iadd__(self, other):
         return self.__add__(other)

-    def __radd__(self, other):
-        out = tensor_operator_registry.get('__add__')(self, other)
-        return out
+    def __sub__(self, other):
+        return tensor_operator_registry.get('__sub__')(self, other)

-    def __imul__(self, other):
-        return self.__mul__(other)
+    def __rsub__(self, other):
+        return tensor_operator_registry.get('__sub__')(other, self)
+
+    def __isub__(self, other):
+        return self.__sub__(other)
+
+    def __mul__(self, other):
+        return tensor_operator_registry.get('__mul__')(self, other)

     def __rmul__(self, other):
-        out = tensor_operator_registry.get('__mul__')(self, other)
-        return out
+        return self.__mul__(other)
+
+    def __imul__(self, other):
+        return self.__mul__(other)

     def __truediv__(self, other):
-        out = tensor_operator_registry.get('__truediv__')(self, other)
-        return out
+        return tensor_operator_registry.get('__truediv__')(self, other)

     def __rtruediv__(self, other):
-        out = tensor_operator_registry.get('__truediv__')(other, self)
-        return out
+        return tensor_operator_registry.get('__truediv__')(other, self)

-    def __sub__(self, other):
-        out = tensor_operator_registry.get('__sub__')(self, other)
-        return out
+    def __mod__(self, other):
+        return tensor_operator_registry.get('__mod__')(self, other)

-    def __isub__(self, other):
-        return self.__sub__(other)
+    def __rmod__(self, other):
+        return tensor_operator_registry.get('__mod__')(other, self)

-    def __rsub__(self, other):
-        out = tensor_operator_registry.get('__sub__')(other, self)
-        return out
+    def __imod__(self, other):
+        return self.__mod__(other)
+
+    def __pow__(self, other):
+        return tensor_operator_registry.get('__pow__')(self, other)
+
+    def __floordiv__(self, other):
+        return tensor_operator_registry.get('__floordiv__')(self, other)
+
+    def __rfloordiv__(self, other):
+        return tensor_operator_registry.get('__floordiv__')(other, self)
+
+    def __ifloordiv__(self, other):
+        return self.__floordiv__(other)

     def __lt__(self, other):
         out = tensor_operator_registry.get('__lt__')(self, other)
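All arithmetic dunders now delegate to `tensor_operator_registry` instead of carrying their own bodies, so `tensor.py` never imports the ops packages directly; the concrete implementations are registered later from `_compile_utils`. A minimal sketch of this late-binding pattern (illustrative names only, not the MindSpore classes):

```python
# Minimal sketch of the late-binding registry pattern (illustrative names,
# not the MindSpore implementation).
class OperatorRegistry:
    def __init__(self):
        self._ops = {}

    def register(self, name, fn):
        self._ops[name] = fn

    def get(self, name):
        return self._ops[name]


registry = OperatorRegistry()


class MyTensor:
    def __init__(self, value):
        self.value = value

    def __add__(self, other):
        # The dunder only dispatches; the concrete op is registered later,
        # which breaks the circular import between the tensor class and ops.
        return registry.get('__add__')(self, other)


registry.register('__add__', lambda x, y: MyTensor(x.value + y.value))
print((MyTensor(1) + MyTensor(2)).value)  # 3
```

Note that `__radd__`, `__iadd__`, and the other reflected/in-place variants reuse the forward method, so each operator needs only one registration.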
@@ -229,8 +242,6 @@ class Tensor(Tensor_):
         return out

     def __getitem__(self, index):
-        if isinstance(index, int) and not isinstance(index, bool) and self.shape and index >= self.shape[0]:
-            raise IndexError("index {} is out of bounds for axis 0 with size {}".format(index, self.shape[0]))
         out = tensor_operator_registry.get('__getitem__')(self, index)
         return out
@@ -253,27 +264,6 @@ class Tensor(Tensor_):
             return out[0]
         raise TypeError("Not support len of a 0-D tensor")

-    def __mod__(self, other):
-        return tensor_operator_registry.get('__mod__')(self, other)
-
-    def __imod__(self, other):
-        return self.__mod__(other)
-
-    def __rmod__(self, other):
-        return tensor_operator_registry.get('__mod__')(other, self)
-
-    def __pow__(self, other):
-        return tensor_operator_registry.get('__pow__')(self, other)
-
-    def __floordiv__(self, other):
-        return tensor_operator_registry.get('__floordiv__')(self, other)
-
-    def __ifloordiv__(self, other):
-        return self.__floordiv__(other)
-
-    def __rfloordiv__(self, other):
-        return tensor_operator_registry.get('__floordiv__')(other, self)
-
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
@@ -28,22 +28,31 @@ stack = P.Stack(axis=-1)
 def _tensor_getitem(self, index):
     """Handle tensor getitem"""
+    if isinstance(index, Tensor):
+        return tensor_index_by_tensor(self, index)
     if isinstance(index, list):
         return tensor_index_by_list(self, index)
     if isinstance(index, tuple):
         return tensor_index_by_tuple(self, index)
-    if isinstance(index, (Tensor, int, slice)) or index in (None, ...):
-        return tensor_index_by_tuple(self, (index,))
+    if isinstance(index, bool):
+        return _tensor_index_by_bool(self, index)
+    if isinstance(index, int):
+        return _tensor_index_by_integer(self, index)
+    if isinstance(index, slice):
+        return tensor_index_by_slice(self, index)
+    if index is None:
+        return F.expand_dims(self, 0)
+    if index is ...:
+        return self
     raise IndexError(f"Only support integers, slices(`:`), ellipsis(`...`), None, bool, tensor with int, "
                      f"list and tuple, but got {index} with type {type(index)}.")
+
+
+tensor_operator_registry.register("__getitem__", _tensor_getitem)
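The rewritten dispatcher resolves each index kind directly instead of funneling everything through the tuple path. The intended behavior mirrors NumPy basic indexing; a NumPy parity check (illustrative, not MindSpore code):

```python
# NumPy parity check for the new _tensor_getitem branches (illustrative).
import numpy as np

a = np.arange(6).reshape(2, 3)
assert a[None].shape == (1, 2, 3)   # index is None -> expand_dims(self, 0)
assert a[True].shape == (1, 2, 3)   # index is True -> expand_dims(self, 0)
assert a[...].shape == a.shape      # index is ...  -> data returned as-is
assert a[1].shape == (3,)           # int   -> strided slice, axis 0 shrunk
assert a[0:1].shape == (1, 3)       # slice -> strided slice on axis 0
```

Checking `bool` before `int` matters here, since `isinstance(True, int)` is true in Python.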
 def _tensor_setitem(self, index, value):
     """Handle tensor setitem"""
     if not isinstance(value, (int, float, bool, list, tuple, Tensor)):
         raise ValueError(f"only support numbers, Tensor, tuple, list as value, "
                          f"but got {value} with type {type(value)}.")
     if isinstance(index, list):
         index = format_list_indices(index, self.shape[0])
     if isinstance(index, Tensor):
@@ -62,10 +71,75 @@ def _tensor_setitem(self, index, value):
     if index is ...:
         return tensor_setitem_by_ellipsis(self, index, value)

-    raise IndexError("Tensor setitem index only support integers, slices(`:`), ellipsis(`...`), None, bool\
-                     and tensor with int32, got {} with type{}".format(index, type(index)))
+    raise IndexError(f"Tensor setitem index only support integers, slices(`:`), ellipsis(`...`), bool, tensor, "
+                     f"list and tuple, but got {index} with type {type(index)}")

-tensor_operator_registry.register("__getitem__", _tensor_getitem)
 tensor_operator_registry.register("__setitem__", _tensor_setitem)
+def _tensor_add(self, other):
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.add(self, other)
+
+
+def _tensor_sub(self, other):
+    if isinstance(self, (tuple, list)):
+        self = sequence_to_tensor(self, F.dtype(other))
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.sub(self, other)
+
+
+def _tensor_mul(self, other):
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.mul(self, other)
+
+
+def _tensor_div(self, other):
+    if isinstance(self, (tuple, list)):
+        self = sequence_to_tensor(self, F.dtype(other))
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.div(self, other)
+
+
+def _tensor_mod(self, other):
+    if isinstance(self, (tuple, list)):
+        self = sequence_to_tensor(self, F.dtype(other))
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.floormod(self, other)
+
+
+def _tensor_pow(self, other):
+    if isinstance(self, (tuple, list)):
+        self = sequence_to_tensor(self, F.dtype(other))
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.tensor_pow(self, other)
+
+
+def _tensor_floordiv(self, other):
+    if isinstance(self, (tuple, list)):
+        self = sequence_to_tensor(self, F.dtype(other))
+    if isinstance(other, (tuple, list)):
+        other = sequence_to_tensor(other, F.dtype(self))
+    return F.floordiv(self, other)
+
+
+tensor_operator_registry.register('__add__', _tensor_add)
+tensor_operator_registry.register('__sub__', _tensor_sub)
+tensor_operator_registry.register('__mul__', _tensor_mul)
+tensor_operator_registry.register('__truediv__', _tensor_div)
+tensor_operator_registry.register('__mod__', _tensor_mod)
+tensor_operator_registry.register('__pow__', _tensor_pow)
+tensor_operator_registry.register('__floordiv__', _tensor_floordiv)
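Each helper first promotes a tuple/list operand to a tensor with the other operand's dtype via `sequence_to_tensor`, then calls the elementwise primitive; note that `%` is routed to `F.floormod`, i.e. Python-style floored modulo. A NumPy sketch of the same contract (illustrative):

```python
# NumPy sketch of the sequence-promotion contract (illustrative).
import numpy as np

x = np.array([10.0, 20.0, 30.0], dtype=np.float32)
other = np.asarray((1, 2, 3), dtype=x.dtype)   # sequence_to_tensor analogue
assert np.array_equal(x + other, np.array([11.0, 22.0, 33.0], np.float32))

# Floored modulo: the result takes the sign of the divisor, matching
# Python's '%' rather than C-style truncated remainder.
assert (np.array([-3.0]) % np.array([2.0]))[0] == 1.0
```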
 def _broadcast(broadcast_shape, x):
     """Broadcast tensor to the required shape."""
     if F.shape(x) == broadcast_shape:
@@ -120,7 +194,9 @@ def _expand_data_dims(data, tuple_index):
             tuple_index_new += (const_utils.make_empty_slice(),)
             expand_positions += (i,)
         elif const_utils.judge_index_type(index_type, mstype.bool_):
-            tuple_index_new += (const_utils.make_tensor([0] if index else [], mstype.int64),)
+            if not index:
+                const_utils.raise_index_error("Does not support 'False'.")
+            tuple_index_new += (const_utils.make_tensor([0], mstype.int64),)
             expand_positions += (i,)
         else:
             tuple_index_new += (index,)
@@ -131,47 +207,66 @@ def _expand_data_dims(data, tuple_index):
     return data, tuple_index_new

+
+def tensor_index_by_slice(data, slice_index):
+    """Tensor getitem by a slice."""
+    min_data_dim, max_data_dim = 1, 8
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
+    data_shape = F.shape(data)
+    begin_strides, end_strides, step_strides = const_utils.get_stride_info_from_slice(data_shape, slice_index)
+    return F.strided_slice(data, begin_strides, end_strides, step_strides)
 def tensor_index_by_number(data, number_index):
     """Tensor getitem by a Number which may be integer/float/bool value"""
-    data_type = F.typeof(data)
-    if const_utils.judge_index_type(data_type, mstype.tensor_type):
-        data_shape = F.shape(data)
-        data_rank = len(data_shape)
-        min_data_rank, max_data_rank = 0, 8
-        const_utils.judge_data_rank(data_rank, min_data_rank, max_data_rank)
     number_type = const_utils.check_number_index_type(number_index)
     if number_type == const_utils.BOOL_:
-        return tensor_index_by_tuple(data, (number_index,))
+        return _tensor_index_by_bool(data, number_index)
     if number_type == const_utils.INT_:
         return _tensor_index_by_integer(data, number_index)
     return const_utils.raise_index_error("Only support integers, slices(`:`), ellipsis(`...`), None and bool.")
+def _tensor_index_by_bool(data, bool_value):
+    """Tensor getitem by a single bool value"""
+    min_data_dim, max_data_dim = 0, 7
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
+    if bool_value:
+        return F.expand_dims(data, 0)
+    return const_utils.raise_index_error("When tensor is indexed by a bool object, the value only supports 'True'.")
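A single `True` index behaves like prepending an axis of length 1, while `False`, which would produce an empty result, raises instead of returning a zero-length tensor. NumPy comparison (illustrative):

```python
# NumPy comparison for single-bool indexing (illustrative).
import numpy as np

a = np.arange(6).reshape(2, 3)
assert a[True].shape == (1, 2, 3)   # matches F.expand_dims(data, 0)
assert a[False].shape == (0, 2, 3)  # NumPy yields a zero-length axis;
                                    # this PR raises IndexError instead
```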
 def _tensor_index_by_integer(data, int_index):
     """Tensor getitem by a single integer number"""
-    if const_utils.judge_index_type(F.typeof(data), mstype.tensor_type):
+    min_data_dim, max_data_dim = 1, 8
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
     data_shape = F.shape(data)
-    data_rank = len(data_shape)
-    if data_rank == 0:
-        return const_utils.raise_type_error("When tensor is indexed by an integer, the dimension of the tensor "
-                                            "cannot be 0.")
     transformed_number = const_utils.check_and_transform_int_index(int_index, data_shape[0], const_utils.TENSOR_GETITEM)
     begin_strides, end_strides, step_strides = const_utils.get_stride_info_from_integer(data_shape, transformed_number)
     shrink_axis_mask = 1
     return P.StridedSlice(0, 0, 0, 0, shrink_axis_mask)(data, begin_strides, end_strides, step_strides)
+def tensor_index_by_tensor(data, tensor_index):
+    """Tensor getitem by a single tensor"""
+    min_data_dim, max_data_dim = 0, 7
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
+    const_utils.check_type_valid(F.dtype(tensor_index), mstype.int_type, const_utils.TENSOR_GETITEM)
+    return F.gather(data, tensor_index, 0)
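Indexing by an integer tensor is implemented as a gather along axis 0 after validating the index dtype. An illustrative NumPy equivalent of `F.gather(data, tensor_index, 0)`:

```python
# Illustrative NumPy equivalent of F.gather(data, tensor_index, 0).
import numpy as np

data = np.arange(12).reshape(4, 3)
idx = np.array([2, 0, 2])
assert np.array_equal(data[idx], np.take(data, idx, axis=0))
```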
 def tensor_index_by_list(data, list_index):
     """Tensor getitem by list of int and bool"""
+    min_data_dim, max_data_dim = 1, 8
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
     data_shape = F.shape(data)
     indexes_types = hyper_map(F.typeof, list_index)
     if const_utils.judge_indexes_types(indexes_types, mstype.int_type + (mstype.bool_,)):
         sub_tuple_index = const_utils.transform_sequence_index(list_index, data_shape[0], const_utils.TENSOR_GETITEM)
         if not sub_tuple_index:
-            data_rank = len(data_shape)
-            if data_rank == 1:
-                return const_utils.make_tensor([], data.dtype, ())
-            return const_utils.make_tensor([], data.dtype, data_shape[1:])
+            const_utils.raise_index_error("Getitem does not support an empty list, which would produce a dimension of size 0.")
         tensor_index = const_utils.make_tensor(sub_tuple_index, mstype.int64)
         return F.gather(data, tensor_index, 0)
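A list of ints/bools is normalized to integer positions by `transform_sequence_index`, converted to an int64 tensor, and gathered; an empty result now raises instead of returning a shape-0 tensor. Illustrative NumPy analogue:

```python
# Illustrative NumPy analogue of list indexing along axis 0.
import numpy as np

data = np.arange(12).reshape(4, 3)
assert np.array_equal(data[[1, 3]], np.take(data, [1, 3], axis=0))
# A full-length bool list acts as a row mask, i.e. selects rows 0, 2, 3:
assert np.array_equal(data[[True, False, True, True]], data[[0, 2, 3]])
```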
@@ -183,18 +278,15 @@ def tensor_index_by_list(data, list_index):
 def tensor_index_by_tuple(data, tuple_index):
     """Tensor getitem by tuple of various types with None"""
-    tuple_index_len = len(tuple_index)
-    if tuple_index_len == 0:
+    if not tuple_index:
         return data

     op_name = const_utils.TENSOR_GETITEM
     tuple_index = _transform_ellipsis_to_slice(data, tuple_index, op_name)
     data, tuple_index = _expand_data_dims(data, tuple_index)

-    data_shape = F.shape(data)
-    data_rank = len(data_shape)
-    min_data_rank, max_data_rank = 0, 8
-    const_utils.judge_data_rank(data_rank, min_data_rank, max_data_rank)
+    min_data_dim, max_data_dim = 1, 8
+    const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)

     indexes_types = hyper_map(F.typeof, tuple_index)
     contain_type = const_utils.tuple_index_type_cnt(indexes_types, op_name)
@@ -382,21 +474,27 @@ def _generate_updates_from_scalar(data, indices, value, op_type):
     return const_utils.convert_scalar_to_tensor(data_shape, data_dtype, indices_shape, value, op_type)

-def _generate_updates_from_sequence(data, index, value, op_type):
+def sequence_to_tensor(value, dtype):
     """Generate an updates tensor from a tuple, can only handle 1-D tensor/non-tensor mixtures."""
     value_types = hyper_map(F.typeof, value)
     value_elements_type = const_utils.check_value_elements(value_types)

     if value_elements_type == const_utils.ALL_TENSOR:
-        value = F.stack(value).astype(data.dtype)
+        value = F.stack(value).astype(dtype)
     elif value_elements_type == const_utils.NO_TENSOR:
-        value = const_utils.make_tensor(value, data.dtype)
+        value = const_utils.make_tensor(value, dtype)
     else:
         new_value = ()
         for ele in value:
             ele = ele if isinstance(ele, Tensor) else const_utils.make_tensor(ele)
             new_value += (ele,)
-        value = F.stack(new_value).astype(data.dtype)
+        value = F.stack(new_value).astype(dtype)
+    return value
+
+
+def _generate_updates_from_sequence(data, index, value, op_type):
+    """Generate an updates tensor from a tuple, can only handle 1-D tensor/non-tensor mixtures."""
+    value = sequence_to_tensor(value, F.dtype(data))
     if op_type == const_utils.SET_ITEM_BY_NON_TENSOR:
         return value
     return _generate_updates_from_tensor(data, index, value, op_type)
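Factoring the conversion out as `sequence_to_tensor` lets the arithmetic helpers and the setitem update generation share one code path. A sketch of its three cases with NumPy stand-ins (`np.asarray` for `make_tensor`, `np.stack` for `F.stack`; illustrative only):

```python
# Sketch of the three sequence_to_tensor cases (NumPy stand-ins, illustrative).
import numpy as np

def sequence_to_array(value, dtype):
    if all(isinstance(v, np.ndarray) for v in value):       # ALL_TENSOR
        return np.stack(value).astype(dtype)
    if not any(isinstance(v, np.ndarray) for v in value):   # NO_TENSOR
        return np.asarray(value, dtype)
    wrapped = tuple(v if isinstance(v, np.ndarray) else np.asarray(v)
                    for v in value)                          # mixed case
    return np.stack(wrapped).astype(dtype)

print(sequence_to_array((1, np.asarray(2), 3), np.float32))  # [1. 2. 3.]
```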
@@ -157,10 +157,10 @@ def make_tensor(a, dtype=mstype.int32, data_shape=None):
 @constexpr
-def judge_data_rank(data_rank, min_data_rank=0, max_data_rank=8):
-    if data_rank < min_data_rank or data_rank > max_data_rank:
-        raise ValueError(f"The input data's rank should in the range of[{min_data_rank}, "
-                         f"{max_data_rank}], bug actually is '{data_rank}'")
+def judge_data_dim(data_dim, min_data_dim=0, max_data_dim=8):
+    if data_dim < min_data_dim or data_dim > max_data_dim:
+        raise ValueError(f"The input data's dim should be in the range of [{min_data_dim}, "
+                         f"{max_data_dim}], but actually is '{data_dim}'")

 @constexpr
@@ -636,15 +636,6 @@ def transform_slice_to_ele_list(slice_index, dim_len):
     return slice_ele_list

-@constexpr
-def check_tuple_index_len(data_rank, tuple_index_len, op_name):
-    """Check if the number of index tensor exceeds the dimension of the operated tensor."""
-    if tuple_index_len <= data_rank:
-        return True
-    raise IndexError(f"For '{op_name}', the number {tuple_index_len} of tuple_index size"
-                     f"is greater than the dimension {data_rank} of the operated tensor.")

 @constexpr
 def generate_index_info_from_tuple_of_mixed_tensors(tensor_positions, tensor_indexes_shapes,
                                                     slice_shapes, op_name):
@@ -669,6 +660,7 @@ def generate_index_info_from_tuple_of_mixed_tensors(tensor_positions, tensor_indexes_shapes,
     return broadcast_shape, index_tensor_new_shape, final_shape, fancy_position

+
 def _judge_order_continuous(order_sequence):
     if not order_sequence:
         return False

@@ -710,6 +702,20 @@ def check_number_index_type(number):
                     .format(number, type(number)))
+@constexpr
+def get_stride_info_from_slice(data_shape, slice_index):
+    """Get stride info from a python slice"""
+    begin, end, step = get_slice_stride(data_shape[0], slice_index)
+    begin_strides = [begin]
+    end_strides = [end]
+    step_strides = [step]
+    for end in data_shape[1:]:
+        begin_strides.append(0)
+        end_strides.append(end)
+        step_strides.append(1)
+    return tuple(begin_strides), tuple(end_strides), tuple(step_strides)
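The slice only constrains axis 0; every remaining axis takes its full range with step 1. A pure-Python sketch, assuming `get_slice_stride` resolves the slice like Python's `slice.indices()`:

```python
# Pure-Python sketch of get_stride_info_from_slice, assuming get_slice_stride
# behaves like slice.indices() on the first dimension.
def stride_info_from_slice(data_shape, slice_index):
    begin, end, step = slice_index.indices(data_shape[0])
    begin_strides, end_strides, step_strides = [begin], [end], [step]
    for dim in data_shape[1:]:   # all trailing axes: full range, step 1
        begin_strides.append(0)
        end_strides.append(dim)
        step_strides.append(1)
    return tuple(begin_strides), tuple(end_strides), tuple(step_strides)

assert stride_info_from_slice((4, 5, 6), slice(1, None, None)) == \
    ((1, 0, 0), (4, 5, 6), (1, 1, 1))
```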
 @constexpr
 def get_stride_info_from_integer(data_shape, number):
     """Get stride info from an integer"""

@@ -741,7 +747,7 @@ def get_stride_info_from_tuple(data_shape, tuple_index):
     """Get stride info from a tuple"""
     begin_strides, end_strides, step_strides = [], [], []
     tuple_index_len = len(tuple_index)
-    data_rank = len(data_shape)
+    data_dim = len(data_shape)
     shrink_axis, index_count, ellipsis_count = 0, 0, 0
     for idx, item in enumerate(tuple_index):
         if isinstance(item, slice):

@@ -760,7 +766,7 @@ def get_stride_info_from_tuple(data_shape, tuple_index):
             ellipsis_count = ellipsis_count + 1
             if ellipsis_count > 1:
                 raise IndexError("An index can have only one ellipsis (...)")
-            ellipsis_range_size = data_rank - tuple_index_len + 1
+            ellipsis_range_size = data_dim - tuple_index_len + 1
             begin_strides.extend([0] * (ellipsis_range_size))
             end_strides.extend(
                 [shape for shape in data_shape[index_count: index_count + ellipsis_range_size]])

@@ -769,7 +775,7 @@ def get_stride_info_from_tuple(data_shape, tuple_index):
         else:
             raise IndexError("Not supported index data type, got ",
                              item, " type is ", type(item))
-    for item in range(index_count, data_rank):
+    for item in range(index_count, data_dim):
         begin_strides.append(0)
         end_strides.append(data_shape[item])
         step_strides.append(1)
@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `add` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -114,6 +115,70 @@ def _tensor_add_scalar(x, y):
     return F.add(x, y)
+@add.register("Tuple", "Tensor")
+def _tuple_add_tensor(x, y):
+    """
+    Tuple is added to tensor.
+
+    Args:
+        x (Tuple): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_add(x, y)
+
+
+@add.register("Tensor", "Tuple")
+def _tensor_add_tuple(x, y):
+    """
+    Tensor is added to tuple.
+
+    Args:
+        x (Tensor): x
+        y (Tuple): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_add(x, y)
+
+
+@add.register("List", "Tensor")
+def _list_add_tensor(x, y):
+    """
+    List is added to tensor.
+
+    Args:
+        x (List): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_add(x, y)
+
+
+@add.register("Tensor", "List")
+def _tensor_add_list(x, y):
+    """
+    Tensor is added to list.
+
+    Args:
+        x (Tensor): x
+        y (List): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_add(x, y)
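With these handlers in place, a tuple or list operand works on either side of `+`. A hedged usage sketch (assumes this PR is applied; in Python code the call goes through `Tensor.__add__`/`__radd__`, inside compiled graphs through the handlers above):

```python
# Hedged usage sketch; assumes this PR is applied.
import numpy as np
from mindspore import Tensor

x = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
print(x + (1, 1, 1))  # Tensor + tuple -> [2., 3., 4.]
print([1, 1, 1] + x)  # list + Tensor -> [2., 3., 4.]
```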
 @add.register("Tensor", "Tensor")
 def _tensor_add_tensor(x, y):
     """

@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `div` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -84,3 +85,67 @@ def _tensor_div_scalar(x, y):
         Tensor, has the same dtype as x.
     """
     return F.tensor_div(x, y)
+@div.register("Tuple", "Tensor")
+def _tuple_div_tensor(x, y):
+    """
+    Tuple divided by tensor.
+
+    Args:
+        x (Tuple): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_div(x, y)
+
+
+@div.register("Tensor", "Tuple")
+def _tensor_div_tuple(x, y):
+    """
+    Tensor divided by tuple.
+
+    Args:
+        x (Tensor): x
+        y (Tuple): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_div(x, y)
+
+
+@div.register("List", "Tensor")
+def _list_div_tensor(x, y):
+    """
+    List divided by tensor.
+
+    Args:
+        x (List): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_div(x, y)
+
+
+@div.register("Tensor", "List")
+def _tensor_div_list(x, y):
+    """
+    Tensor divided by list.
+
+    Args:
+        x (Tensor): x
+        y (List): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_div(x, y)
@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `floordiv` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -48,3 +49,31 @@ def _tensor_floordiv_scalar(x, y):
 def _scalar_floordiv_tensor(x, y):
     """Returns x // y where x is a scalar and y is a tensor. x and y should have same dtype."""
     return F.tensor_floordiv(x, y)
+@floordiv.register("Tuple", "Tensor")
+def _tuple_floordiv_tensor(x, y):
+    """Returns x // y where x is a tuple and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_floordiv(x, y)
+
+
+@floordiv.register("Tensor", "Tuple")
+def _tensor_floordiv_tuple(x, y):
+    """Returns x // y where x is a tensor and y is a tuple."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_floordiv(x, y)
+
+
+@floordiv.register("List", "Tensor")
+def _list_floordiv_tensor(x, y):
+    """Returns x // y where x is a list and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_floordiv(x, y)
+
+
+@floordiv.register("Tensor", "List")
+def _tensor_floordiv_list(x, y):
+    """Returns x // y where x is a tensor and y is a list."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_floordiv(x, y)
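The same four-way pattern (Tuple/Tensor, Tensor/Tuple, List/Tensor, Tensor/List) repeats for `%`, `*`, `**`, and `-` below; the sequence is always promoted to the tensor's dtype first. An illustrative NumPy check of the resulting `//` semantics on floats:

```python
# Illustrative NumPy check: float floor division floors toward -inf.
import numpy as np

x = np.array([7.0, -7.0], dtype=np.float32)
y = np.asarray((2, 2), dtype=x.dtype)   # sequence_to_tensor analogue
assert np.array_equal(x // y, np.array([3.0, -4.0], np.float32))
```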
@@ -163,7 +163,7 @@ def _tensor_getitem_by_number(data, number_index):

 @getitem.register("Tensor", "None")
-def _tensor_getitem_by_none(data, none_index):
+def _tensor_getitem_by_none(data, _):
     """
     For none indexing, expand data with one dim.

@@ -174,7 +174,7 @@ def _tensor_getitem_by_none(data, none_index):
     Outputs:
         Tensor, element type is as same as the element type of data.
     """
-    return compile_utils.tensor_index_by_tuple(data, (none_index,))
+    return F.expand_dims(data, 0)

 @getitem.register("Tensor", "Slice")
@@ -189,7 +189,7 @@ def _tensor_getitem_by_slice(data, slice_index):
     Outputs:
         Tensor, element type is the same as the element type of data.
     """
-    return compile_utils.tensor_index_by_tuple(data, (slice_index,))
+    return compile_utils.tensor_index_by_slice(data, slice_index)

 @getitem.register("Tensor", "Tensor")
@@ -204,11 +204,11 @@ def _tensor_getitem_by_tensor(data, tensor_index):
     Outputs:
         Tensor, element type is the same as the element type of data.
     """
-    return compile_utils.tensor_index_by_tuple(data, (tensor_index,))
+    return compile_utils.tensor_index_by_tensor(data, tensor_index)

 @getitem.register("Tensor", "Ellipsis")
-def _tensor_getitem_by_ellipsis(data, ellipsis_index):
+def _tensor_getitem_by_ellipsis(data, _):
     """
     Getting item of tensor by Ellipsis.

@@ -219,7 +219,7 @@ def _tensor_getitem_by_ellipsis(data, ellipsis_index):
     Outputs:
         Tensor, same as data.
     """
-    return compile_utils.tensor_index_by_tuple(data, (ellipsis_index,))
+    return data
 @getitem.register("Tensor", "List")

@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `mod` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -48,3 +49,31 @@ def _tensor_mod_scalar(x, y):
 def _scalar_mod_tensor(x, y):
     """Returns x % y where x is a scalar and y is a tensor. x and y should have same dtype."""
     return F.tensor_mod(x, y)
+@mod.register("Tuple", "Tensor")
+def _tuple_mod_tensor(x, y):
+    """Returns x % y where x is a tuple and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_mod(x, y)
+
+
+@mod.register("Tensor", "Tuple")
+def _tensor_mod_tuple(x, y):
+    """Returns x % y where x is a tensor and y is a tuple."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_mod(x, y)
+
+
+@mod.register("List", "Tensor")
+def _list_mod_tensor(x, y):
+    """Returns x % y where x is a list and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_mod(x, y)
+
+
+@mod.register("Tensor", "List")
+def _tensor_mod_list(x, y):
+    """Returns x % y where x is a tensor and y is a list."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_mod(x, y)
@@ -16,6 +16,7 @@
 """Implementation for internal polymorphism `mul` operations."""

 from . import _constexpr_utils as const_utils
+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F
@@ -82,26 +83,26 @@ def _list_mul_scalar(x, y):
     return const_utils.sequence_mul_int(x, y)

-@mul.register("Tuple", "Number")
-def _tuple_mul_scalar(x, y):
+@mul.register("Number", "List")
+def _scalar_mul_list(x, y):
     """
-    Returns x * y where x is a tuple and y is a number. y must be integer.
+    Returns x * y where x is a number and y is a list. x must be integer.

     Outputs:
-        Tuple.
+        List.
     """
-    return const_utils.sequence_mul_int(x, y)
+    return const_utils.sequence_mul_int(y, x)

-@mul.register("Number", "List")
-def _scalar_mul_list(x, y):
+@mul.register("Tuple", "Number")
+def _tuple_mul_scalar(x, y):
     """
-    Returns x * y where x is a number and y is a list. x must be integer.
+    Returns x * y where x is a tuple and y is a number. y must be integer.

     Outputs:
-        List.
+        Tuple.
     """
-    return const_utils.sequence_mul_int(y, x)
+    return const_utils.sequence_mul_int(x, y)

 @mul.register("Number", "Tuple")
@@ -113,3 +114,67 @@ def _scalar_mul_tuple(x, y):
         Tuple.
     """
     return const_utils.sequence_mul_int(y, x)
+@mul.register("Tensor", "Tuple")
+def _tensor_mul_tuple(x, y):
+    """
+    Returns x * y where x is a tensor and y is a tuple.
+
+    Args:
+        x (Tensor): x
+        y (Tuple): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_mul(x, y)
+
+
+@mul.register("Tuple", "Tensor")
+def _tuple_mul_tensor(x, y):
+    """
+    Returns x * y where x is a tuple and y is a tensor.
+
+    Args:
+        x (Tuple): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_mul(x, y)
+
+
+@mul.register("Tensor", "List")
+def _tensor_mul_list(x, y):
+    """
+    Returns x * y where x is a tensor and y is a list.
+
+    Args:
+        x (Tensor): x
+        y (List): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_mul(x, y)
+
+
+@mul.register("List", "Tensor")
+def _list_mul_tensor(x, y):
+    """
+    Returns x * y where x is a list and y is a tensor.
+
+    Args:
+        x (List): x
+        y (Tensor): The dtype is same as x.
+
+    Returns:
+        Tensor, has the same dtype as x.
+    """
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_mul(x, y)
@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `pow` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -48,3 +49,31 @@ def _tensor_pow_scalar(x, y):
 def _scalar_pow_tensor(x, y):
     """Returns x ** y where x is a scalar and y is a tensor. x and y should have same dtype."""
     return F.tensor_pow(x, y)
+@pow_.register("Tuple", "Tensor")
+def _tuple_pow_tensor(x, y):
+    """Returns x ** y where x is a tuple and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_pow(x, y)
+
+
+@pow_.register("Tensor", "Tuple")
+def _tensor_pow_tuple(x, y):
+    """Returns x ** y where x is a tensor and y is a tuple."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_pow(x, y)
+
+
+@pow_.register("List", "Tensor")
+def _list_pow_tensor(x, y):
+    """Returns x ** y where x is a list and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_pow(x, y)
+
+
+@pow_.register("Tensor", "List")
+def _tensor_pow_list(x, y):
+    """Returns x ** y where x is a tensor and y is a list."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_pow(x, y)
@@ -15,6 +15,7 @@
 """Implementation for internal polymorphism `sub` operations."""

+from . import _compile_utils as utils
 from ...composite import base
 from ... import functional as F

@@ -48,3 +49,31 @@ def _scalar_sub_tensor(x, y):
 def _tensor_sub_scalar(x, y):
     """Returns x - y where x is a tensor and y is a scalar. x and y should have same dtype."""
     return F.tensor_sub(x, y)
+@sub.register("Tuple", "Tensor")
+def _tuple_sub_tensor(x, y):
+    """Returns x - y where x is a tuple and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_sub(x, y)
+
+
+@sub.register("Tensor", "Tuple")
+def _tensor_sub_tuple(x, y):
+    """Returns x - y where x is a tensor and y is a tuple."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_sub(x, y)
+
+
+@sub.register("List", "Tensor")
+def _list_sub_tensor(x, y):
+    """Returns x - y where x is a list and y is a tensor."""
+    x = utils.sequence_to_tensor(x, y.dtype)
+    return F.tensor_sub(x, y)
+
+
+@sub.register("Tensor", "List")
+def _tensor_sub_list(x, y):
+    """Returns x - y where x is a tensor and y is a list."""
+    y = utils.sequence_to_tensor(y, x.dtype)
+    return F.tensor_sub(x, y)
@@ -131,11 +131,13 @@ scatter_update = P.ScatterUpdate()
 scatter_nd_update = P.ScatterNdUpdate()
 stack = P.Stack()

+
 def pack(x):
     print("WARNING: 'pack' is deprecated from version 1.1 and will be removed in a future version, use 'stack' instead"
           ".")
     return stack(x)

+
 partial = P.Partial()
 # depend: mount a node to another node
 depend = P.Depend()
@@ -221,13 +223,6 @@ sparse_tensor_get_values = Primitive('SparseTensorGetValues')
 sparse_tensor_get_indices = Primitive('SparseTensorGetIndices')
 sparse_tensor_get_dense_shape = Primitive('SparseTensorGetDenseShape')

-tensor_operator_registry.register('__add__', tensor_add)
-tensor_operator_registry.register('__sub__', tensor_sub)
-tensor_operator_registry.register('__mul__', tensor_mul)
-tensor_operator_registry.register('__truediv__', tensor_div)
-tensor_operator_registry.register('__mod__', tensor_mod)
-tensor_operator_registry.register('__pow__', tensor_pow)
-tensor_operator_registry.register('__floordiv__', tensor_floordiv)
 tensor_operator_registry.register('all', P.ReduceAll)
 tensor_operator_registry.register('any', P.ReduceAny)
 tensor_operator_registry.register('abs', P.Abs)
@@ -0,0 +1,651 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+""" test_tensor_setitem """
+import numpy as np
+import pytest
+
+from mindspore import Tensor, context
+from mindspore import dtype as mstype
+
+
+def setup_module():
+    context.set_context(mode=context.PYNATIVE_MODE)
+
+
+# GPU: does not support op "FloorMod"
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_tensor_augassign_by_slice():
+    input_np_3d = np.arange(120).reshape(4, 5, 6).astype(np.float32)
+    input_tensor_3d = Tensor(input_np_3d, mstype.float32)
+
+    index_slice_1 = slice(1, None, None)
+    index_slice_2 = slice(None, 4, None)
+    index_slice_3 = slice(-3, 4, None)
+    index_slice_4 = slice(2, -1, None)
+    index_slice_7 = slice(1, 5, None)
+    index_slice_8 = slice(-5, 3, None)
+
+    value_number = 3
+    value_list_1_ele = [2]
+    value_list_mul_ele = [10, 20, 30, 40, 50, 60]
+    value_list_much_ele = [10, 20, 30, 40, 50, 60, 70]
+
+    input_tensor_3d[index_slice_1] += value_number
+    input_np_3d[index_slice_1] += value_number
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_slice_2] -= value_list_1_ele
+    input_np_3d[index_slice_2] -= value_list_1_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_slice_3] *= value_list_mul_ele
+    input_np_3d[index_slice_3] *= value_list_mul_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_slice_4] /= value_number
+    input_np_3d[index_slice_4] /= value_number
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_slice_7] /= value_number
+    input_np_3d[index_slice_7] /= value_number
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_slice_8] += value_number
+    input_np_3d[index_slice_8] += value_number
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+
+    with pytest.raises(ValueError):
+        input_tensor_3d[index_slice_8] /= value_list_much_ele
+# GPU: does not support op "FloorMod"
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_tensor_augassign_by_ellipsis():
+    input_np_3d = np.arange(24).reshape(2, 3, 4).astype(np.float32)
+    input_tensor_3d = Tensor(input_np_3d, mstype.float32)
+
+    value_number_1, value_number_2 = 1, 2.0
+
+    value_np_1 = np.array([1])
+    value_np_2 = np.array([1, 2, 3, 4])
+    value_np_3 = np.arange(12).reshape(3, 4)
+    value_tensor_1 = Tensor(value_np_1)
+    value_tensor_2 = Tensor(value_np_2)
+    value_tensor_3 = Tensor(value_np_3)
+
+    value_tuple_1_ele = (0.5,)
+    value_tuple_4_ele = (0.1, 0.2, 0.3, 0.4)
+    value_list_1_ele = [1.5]
+    value_list_4_ele = [1.1, 1.2, 1.3, 1.4]
+
+    input_tensor_3d[...] += value_number_1
+    input_np_3d[...] += value_number_1
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] -= value_number_2
+    input_np_3d[...] -= value_number_2
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] *= value_tensor_1
+    input_np_3d[...] *= value_np_1
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] /= value_tensor_2
+    input_np_3d[...] /= value_np_2
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] /= value_tensor_3
+    input_np_3d[...] /= value_np_3
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] -= value_tuple_1_ele
+    input_np_3d[...] -= value_tuple_1_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] *= value_tuple_4_ele
+    input_np_3d[...] *= value_tuple_4_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] -= value_list_1_ele
+    input_np_3d[...] -= value_list_1_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[...] *= value_list_4_ele
+    input_np_3d[...] *= value_list_4_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+# GPU: does not support op "FloorMod"
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_tensor_augassign_by_bool():
+    input_np_3d = np.arange(120).reshape(4, 5, 6).astype(np.float32)
+    input_tensor_3d = Tensor(input_np_3d, mstype.float32)
+
+    index_bool_1 = True
+    index_bool_2 = False
+
+    value_number = 1
+
+    value_np_1 = np.array([1], np.float32)
+    value_np_2 = np.array([1, 2, 3, 4, 5, 6], np.float32)
+    value_np_3 = np.arange(1, 31).astype(np.float32).reshape(5, 6)
+    value_np_4 = np.arange(1, 121).astype(np.float32).reshape(4, 5, 6)
+    value_tensor_1 = Tensor(value_np_1, mstype.float32)
+    value_tensor_2 = Tensor(value_np_2, mstype.float32)
+    value_tensor_3 = Tensor(value_np_3, mstype.float32)
+    value_tensor_4 = Tensor(value_np_4, mstype.float32)
+
+    value_tuple_1_ele = (0.5,)
+    value_tuple_6_ele = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6)
+    value_list_1_ele = [1.5]
+    value_list_6_ele = [1.1, 1.2, 1.3, 1.4, 1.5, 1.6]
+
+    input_tensor_3d[index_bool_1] += value_number
+    input_np_3d[index_bool_1] += value_number
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] -= value_tensor_1
+    input_np_3d[index_bool_1] -= value_np_1
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] *= value_tensor_2
+    input_np_3d[index_bool_1] *= value_np_2
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] -= value_tensor_3
+    input_np_3d[index_bool_1] -= value_np_3
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] //= value_tensor_4
+    input_np_3d[index_bool_1] //= value_np_4
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] %= value_tuple_1_ele
+    input_np_3d[index_bool_1] %= value_tuple_1_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] %= value_tuple_6_ele
+    input_np_3d[index_bool_1] %= value_tuple_6_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] %= value_list_1_ele
+    input_np_3d[index_bool_1] %= value_list_1_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+    input_tensor_3d[index_bool_1] -= value_list_6_ele
+    input_np_3d[index_bool_1] -= value_list_6_ele
+    assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001)
+
+    with pytest.raises(IndexError):
+        input_tensor_3d[index_bool_2] *= value_tensor_2
| # GPU: does not supported op "FloorMod" | |||||
| @pytest.mark.level0 | |||||
| @pytest.mark.platform_arm_ascend_training | |||||
| @pytest.mark.platform_x86_ascend_training | |||||
| @pytest.mark.env_onecard | |||||
| def test_tesnsor_augassign_by_number(): | |||||
| input_np_1d = np.arange(4).astype(np.float32) | |||||
| input_tensor_1d = Tensor(input_np_1d, mstype.float32) | |||||
| input_np_3d = np.arange(80).reshape(4, 5, 4).astype(np.float32) | |||||
| input_tensor_3d = Tensor(input_np_3d, mstype.float32) | |||||
| number_index_1, number_index_2, number_index_3, number_index_4 = 0, 3, 4, 3.4 | |||||
| value_number = 2 | |||||
| value_np_scalar = np.array(5) | |||||
| value_np_1_ele = np.array([1]) | |||||
| value_np_1d = np.array([1, 2, 3, 4]) | |||||
| value_np_2d = np.arange(20).reshape(5, 4) | |||||
| value_tensor_scalar = Tensor(value_np_scalar, mstype.float32) | |||||
| value_tensor_1_ele = Tensor(value_np_1_ele, mstype.float32) | |||||
| value_tensor_1d = Tensor(value_np_1d, mstype.float32) | |||||
| value_tensor_2d = Tensor(value_np_2d, mstype.float32) | |||||
| value_tuple_1_ele = (100,) | |||||
| value_tuple_mul_ele = (10, 20, 30, 40) | |||||
| value_tuple_much_ele = (10, 20, 30, 40, 10) | |||||
| value_tuple_empty = () | |||||
| value_list_1_ele = [101] | |||||
| value_list_mul_ele = [11, 21, 31, 41] | |||||
| value_list_much_ele = [12, 22, 33, 43, 18] | |||||
| value_list_empty = [] | |||||
| input_tensor_1d[number_index_1] += value_number | |||||
| input_np_1d[number_index_1] += value_number | |||||
| assert np.allclose(input_tensor_1d.asnumpy(), input_np_1d, 0.0001, 0.0001) | |||||
| input_tensor_1d[number_index_2] -= value_number | |||||
| input_np_1d[number_index_2] -= value_number | |||||
| assert np.allclose(input_tensor_1d.asnumpy(), input_np_1d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] *= value_number | |||||
| input_np_3d[number_index_1] *= value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_2] /= value_number | |||||
| input_np_3d[number_index_2] /= value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_1d[number_index_1] //= value_tensor_scalar | |||||
| input_np_1d[number_index_1] //= value_np_scalar | |||||
| assert np.allclose(input_tensor_1d.asnumpy(), input_np_1d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] *= value_tensor_scalar | |||||
| input_np_3d[number_index_1] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_2] %= value_tensor_1_ele | |||||
| input_np_3d[number_index_2] %= value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] += value_tensor_1d | |||||
| input_np_3d[number_index_1] += value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_2] -= value_tensor_2d | |||||
| input_np_3d[number_index_2] -= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_1d[number_index_1] += value_tuple_1_ele | |||||
| input_np_1d[number_index_1] += value_tuple_1_ele | |||||
| assert np.allclose(input_tensor_1d.asnumpy(), input_np_1d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] -= value_tuple_1_ele | |||||
| input_np_3d[number_index_1] -= value_tuple_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] *= value_tuple_mul_ele | |||||
| input_np_3d[number_index_1] *= value_tuple_mul_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_1d[number_index_2] += value_list_1_ele | |||||
| input_np_1d[number_index_2] += value_list_1_ele | |||||
| assert np.allclose(input_tensor_1d.asnumpy(), input_np_1d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_1] -= value_list_1_ele | |||||
| input_np_3d[number_index_1] -= value_list_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[number_index_2] *= value_list_mul_ele | |||||
| input_np_3d[number_index_2] *= value_list_mul_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
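| # Error cases: out-of-range or float indices raise IndexError; sequence values of mismatched length raise ValueError; an empty tuple raises RuntimeError. | |||||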
| with pytest.raises(IndexError): | |||||
| input_tensor_1d[number_index_3] += value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[number_index_3] -= value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_1d[number_index_4] *= value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[number_index_4] /= value_number | |||||
| with pytest.raises(ValueError): | |||||
| input_tensor_1d[number_index_1] *= value_tuple_mul_ele | |||||
| with pytest.raises(ValueError): | |||||
| input_tensor_3d[number_index_1] *= value_tuple_much_ele | |||||
| with pytest.raises(RuntimeError): | |||||
| input_tensor_1d[number_index_1] /= value_tuple_empty | |||||
| with pytest.raises(ValueError): | |||||
| input_tensor_3d[number_index_2] //= value_list_much_ele | |||||
| with pytest.raises(ValueError): | |||||
| input_tensor_3d[number_index_2] *= value_list_empty | |||||
| # GPU: does not support op "FloorMod" | |||||
| @pytest.mark.level0 | |||||
| @pytest.mark.platform_arm_ascend_training | |||||
| @pytest.mark.platform_x86_ascend_training | |||||
| @pytest.mark.env_onecard | |||||
| def test_tensor_augassign_by_tensor(): | |||||
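| """Augmented assignment with int32 tensor indices of various ranks, verified against equivalent NumPy fancy indexing.""" | |||||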
| input_np_3d = np.arange(120).reshape(4, 5, 6).astype(np.float32) | |||||
| input_tensor_3d = Tensor(input_np_3d, mstype.float32) | |||||
| index_np_1d_1ele = np.random.randint(4, size=1) | |||||
| index_np_1d = np.random.randint(4, size=6) | |||||
| index_np_2d = np.random.randint(4, size=(5, 6)) | |||||
| index_np_3d = np.random.randint(4, size=(4, 5, 6)) | |||||
| index_tensor_1d_1ele = Tensor(index_np_1d_1ele, mstype.int32) | |||||
| index_tensor_1d = Tensor(index_np_1d, mstype.int32) | |||||
| index_tensor_2d = Tensor(index_np_2d, mstype.int32) | |||||
| index_tensor_3d = Tensor(index_np_3d, mstype.int32) | |||||
| value_number = 1 | |||||
| value_np_1 = np.array([1]) | |||||
| value_np_2 = np.array([1, 2, 3, 4, 5, 6]) | |||||
| value_np_3 = np.arange(1, 31).reshape(5, 6) | |||||
| value_np_4 = np.arange(1, 181).reshape(6, 5, 6) | |||||
| value_tensor_1 = Tensor(value_np_1) | |||||
| value_tensor_2 = Tensor(value_np_2) | |||||
| value_tensor_3 = Tensor(value_np_3) | |||||
| value_tensor_4 = Tensor(value_np_4) | |||||
| value_tuple_1_ele = (0.5,) | |||||
| value_tuple_6_ele = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6) | |||||
| value_list_1_ele = [1.5] | |||||
| value_list_6_ele = [1.1, 1.2, 1.3, 1.4, 1.5, 1.6] | |||||
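| # Tensor indices and their NumPy counterparts are kept in sync so the updated arrays can be compared element-wise. | |||||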
| input_tensor_3d[index_tensor_1d_1ele] += value_number | |||||
| input_np_3d[index_np_1d_1ele] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d_1ele] -= value_tensor_2 | |||||
| input_np_3d[index_np_1d_1ele] -= value_np_2 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d_1ele] /= value_tuple_6_ele | |||||
| input_np_3d[index_np_1d_1ele] /= value_tuple_6_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d_1ele] *= value_list_1_ele | |||||
| input_np_3d[index_np_1d_1ele] *= value_list_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d] += value_number | |||||
| input_np_3d[index_np_1d] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d] -= value_tensor_1 | |||||
| input_np_3d[index_np_1d] -= value_np_1 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d] /= value_tuple_1_ele | |||||
| input_np_3d[index_np_1d] /= value_tuple_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_1d] += value_list_6_ele | |||||
| input_np_3d[index_np_1d] += value_list_6_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_2d] -= value_number | |||||
| input_np_3d[index_np_2d] -= value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_2d] *= value_tensor_2 | |||||
| input_np_3d[index_np_2d] *= value_np_2 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_2d] /= value_tensor_4 | |||||
| input_np_3d[index_np_2d] /= value_np_4 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_2d] += value_tuple_6_ele | |||||
| input_np_3d[index_np_2d] += value_tuple_6_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_2d] -= value_list_1_ele | |||||
| input_np_3d[index_np_2d] -= value_list_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_3d] *= value_number | |||||
| input_np_3d[index_np_3d] *= value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_3d] /= value_tensor_1 | |||||
| input_np_3d[index_np_3d] /= value_np_1 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_3d] += value_tensor_3 | |||||
| input_np_3d[index_np_3d] += value_np_3 | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_3d] /= value_tuple_1_ele | |||||
| input_np_3d[index_np_3d] /= value_tuple_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tensor_3d] -= value_list_6_ele | |||||
| input_np_3d[index_np_3d] -= value_list_6_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| # GPU: does not support op "FloorMod" | |||||
| @pytest.mark.level0 | |||||
| @pytest.mark.platform_arm_ascend_training | |||||
| @pytest.mark.platform_x86_ascend_training | |||||
| @pytest.mark.env_onecard | |||||
| def test_tensor_augassign_by_list(): | |||||
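| """Augmented assignment with list indices (int, bool, and mixed), verified against NumPy; invalid lists raise IndexError.""" | |||||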
| input_np_3d = np.arange(120).reshape(4, 5, 6).astype(np.float32) | |||||
| input_tensor_3d = Tensor(input_np_3d, mstype.float32) | |||||
| list_index_empty = [] | |||||
| list_index_int_1 = [2] | |||||
| list_index_int_2 = [3, 1] | |||||
| list_index_int_overflow = [4, 2] | |||||
| list_index_bool_1 = [False, False, False, False] | |||||
| list_index_bool_2 = [True, True, True, True] | |||||
| list_index_bool_3 = [True, False, True, False] | |||||
| list_index_mix_1 = [True, 0] | |||||
| list_index_mix_2 = [3, False] | |||||
| value_number = 2 | |||||
| value_np_scalar = np.array(100) | |||||
| value_np_1_ele = np.array([1]) | |||||
| value_np_1d = np.array([1, 2, 3, 4, 5, 6]) | |||||
| value_np_2d = np.arange(1, 31).reshape(5, 6) | |||||
| value_np_3d = np.arange(1, 61).reshape(2, 5, 6) | |||||
| value_tensor_scalar = Tensor(value_np_scalar, mstype.float32) | |||||
| value_tensor_1_ele = Tensor(value_np_1_ele, mstype.float32) | |||||
| value_tensor_1d = Tensor(value_np_1d, mstype.float32) | |||||
| value_tensor_2d = Tensor(value_np_2d, mstype.float32) | |||||
| value_tensor_3d = Tensor(value_np_3d, mstype.float32) | |||||
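| # The same set of in-place operators is exercised for int, bool, and mixed list indices below. | |||||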
| input_tensor_3d[list_index_int_1] += value_number | |||||
| input_np_3d[list_index_int_1] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_1] += value_tensor_scalar | |||||
| input_np_3d[list_index_int_1] += value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_1] -= value_tensor_1_ele | |||||
| input_np_3d[list_index_int_1] -= value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_1] *= value_tensor_1d | |||||
| input_np_3d[list_index_int_1] *= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_1] /= value_tensor_2d | |||||
| input_np_3d[list_index_int_1] /= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] += value_number | |||||
| input_np_3d[list_index_int_2] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] //= value_tensor_scalar | |||||
| input_np_3d[list_index_int_2] //= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] *= value_tensor_1_ele | |||||
| input_np_3d[list_index_int_2] *= value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] %= value_tensor_1d | |||||
| input_np_3d[list_index_int_2] %= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] += value_tensor_2d | |||||
| input_np_3d[list_index_int_2] += value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_int_2] -= value_tensor_3d | |||||
| input_np_3d[list_index_int_2] -= value_np_3d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_2] += value_number | |||||
| input_np_3d[list_index_bool_2] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_2] *= value_tensor_scalar | |||||
| input_np_3d[list_index_bool_2] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_2] /= value_tensor_1_ele | |||||
| input_np_3d[list_index_bool_2] /= value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_2] //= value_tensor_1d | |||||
| input_np_3d[list_index_bool_2] //= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_2] %= value_tensor_2d | |||||
| input_np_3d[list_index_bool_2] %= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] += value_number | |||||
| input_np_3d[list_index_bool_3] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] *= value_tensor_scalar | |||||
| input_np_3d[list_index_bool_3] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] += value_tensor_1_ele | |||||
| input_np_3d[list_index_bool_3] += value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] -= value_tensor_1d | |||||
| input_np_3d[list_index_bool_3] -= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] *= value_tensor_2d | |||||
| input_np_3d[list_index_bool_3] *= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_bool_3] /= value_tensor_3d | |||||
| input_np_3d[list_index_bool_3] /= value_np_3d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] += value_number | |||||
| input_np_3d[list_index_mix_1] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] *= value_tensor_scalar | |||||
| input_np_3d[list_index_mix_1] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] += value_tensor_1_ele | |||||
| input_np_3d[list_index_mix_1] += value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] -= value_tensor_1d | |||||
| input_np_3d[list_index_mix_1] -= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] *= value_tensor_2d | |||||
| input_np_3d[list_index_mix_1] *= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_1] /= value_tensor_3d | |||||
| input_np_3d[list_index_mix_1] /= value_np_3d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] += value_number | |||||
| input_np_3d[list_index_mix_2] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] *= value_tensor_scalar | |||||
| input_np_3d[list_index_mix_2] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] += value_tensor_1_ele | |||||
| input_np_3d[list_index_mix_2] += value_np_1_ele | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] -= value_tensor_1d | |||||
| input_np_3d[list_index_mix_2] -= value_np_1d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] *= value_tensor_2d | |||||
| input_np_3d[list_index_mix_2] *= value_np_2d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[list_index_mix_2] /= value_tensor_3d | |||||
| input_np_3d[list_index_mix_2] /= value_np_3d | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
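| # An empty list, an out-of-range int list, and an all-False bool list are invalid indices and are expected to raise IndexError. | |||||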
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[list_index_empty] += value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[list_index_int_overflow] += value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[list_index_bool_1] += value_number | |||||
| # GPU: does not support op "FloorMod" | |||||
| @pytest.mark.level0 | |||||
| @pytest.mark.platform_arm_ascend_training | |||||
| @pytest.mark.platform_x86_ascend_training | |||||
| @pytest.mark.env_onecard | |||||
| def test_tensor_augassign_by_tuple(): | |||||
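| """Augmented assignment with tuple indices mixing slices, ellipsis, ints, lists, and bools, verified against NumPy.""" | |||||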
| input_np_3d = np.arange(120).reshape(4, 5, 6).astype(np.float32) | |||||
| input_tensor_3d = Tensor(input_np_3d, mstype.float32) | |||||
| index_tuple_1 = (slice(1, 3, 1), ..., [1, 3, 2]) | |||||
| index_tuple_2 = (2, 3, 4) | |||||
| index_tuple_4 = ([2, 3], True) | |||||
| index_tuple_5 = (False, 3) | |||||
| index_tuple_6 = (False, slice(3, 1, -1)) | |||||
| index_tuple_7 = (..., slice(None, 6, 2)) | |||||
| value_number = 2 | |||||
| value_np_scalar = np.array(100) | |||||
| value_tensor_scalar = Tensor(value_np_scalar, mstype.float32) | |||||
| input_tensor_3d[index_tuple_1] += value_number | |||||
| input_np_3d[index_tuple_1] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tuple_1] -= Tensor(np.ones((2, 5, 3)), mstype.float32) | |||||
| input_np_3d[index_tuple_1] -= np.ones((2, 5, 3)) | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tuple_2] *= value_tensor_scalar | |||||
| input_np_3d[index_tuple_2] *= value_np_scalar | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tuple_4] //= value_number | |||||
| input_np_3d[index_tuple_4] //= value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
| input_tensor_3d[index_tuple_7] += value_number | |||||
| input_np_3d[index_tuple_7] += value_number | |||||
| assert np.allclose(input_tensor_3d.asnumpy(), input_np_3d, 0.0001, 0.0001) | |||||
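| # A tuple index containing False (index_tuple_5, index_tuple_6) is invalid and is expected to raise IndexError. | |||||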
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[index_tuple_5] *= value_number | |||||
| with pytest.raises(IndexError): | |||||
| input_tensor_3d[index_tuple_6] %= value_number | |||||
| @@ -470,8 +470,8 @@ def test_tensor_operation(): | |||||
| assert np.all(res.asnumpy() == np.ones((3, 3))) | assert np.all(res.asnumpy() == np.ones((3, 3))) | ||||
| x %= 3 | x %= 3 | ||||
| assert np.all(x.asnumpy() == np.ones((3, 3))) | assert np.all(x.asnumpy() == np.ones((3, 3))) | ||||
| with pytest.raises(ValueError): | |||||
| res = x * (2, 3) | |||||
| res = x * (2, 3, 4) | |||||
| assert np.all(res.asnumpy() == np.ones((3, 3)) * (2, 3, 4)) | |||||
| res = 5 % x | res = 5 % x | ||||
| assert np.all(x.asnumpy() == np.ones((3, 3))) | assert np.all(x.asnumpy() == np.ones((3, 3))) | ||||
| res = 5 // x | res = 5 // x | ||||
| @@ -117,17 +117,17 @@ def test_float_tensor_and_str_add(): | |||||
| def test_float_tensor_and_tuple_add(): | def test_float_tensor_and_tuple_add(): | ||||
| x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32)) | x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32)) | ||||
| y = (1, 2, 3) | y = (1, 2, 3) | ||||
| with pytest.raises(TypeError) as er: | |||||
| ret = x + y | |||||
| assert "For 'Add', the 1th input is a not support implicit conversion type: tuple" in str(er.value) | |||||
| ret_actual = x + y | |||||
| ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [1.4, 2.5, 3.6]], dtype=np.float32)) | |||||
| assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all() | |||||
| def test_float_tensor_and_list_add(): | def test_float_tensor_and_list_add(): | ||||
| x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32)) | x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32)) | ||||
| y = [1, 2, 3] | y = [1, 2, 3] | ||||
| with pytest.raises(TypeError) as er: | |||||
| ret = x + y | |||||
| assert "For 'Add', the 1th input is a not support implicit conversion type: list" in str(er.value) | |||||
| ret_actual = x + y | |||||
| ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [1.4, 2.5, 3.6]], dtype=np.float32)) | |||||
| assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all() | |||||
| def test_float_tensor_and_bool_tensors_add_grad(): | def test_float_tensor_and_bool_tensors_add_grad(): | ||||