From: @yanglf1121 Reviewed-by: @kingxian, @kisnwang Signed-off-by: @kingxian pull/15746/MERGE
| @@ -2464,7 +2464,7 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0, | |||
| end_values (Union[tuple, list, int], optional): Used in 'linear_ramp'. The values | |||
| used for the ending value of the linear_ramp and that will form the edge of | |||
| the padded `arr`. :class:`((before_1, after_1), ... (before_N, after_N))` | |||
| unique end values for each axis. :class`((before, after),)` yields same before | |||
| unique end values for each axis. :class:`((before, after),)` yields same before | |||
| and after end values for each axis. :class:`(constant,)` or :class:`constant` | |||
| is a shortcut for :class:`before = after = constant` for all axes. Default is 0. | |||
| reflect_type(string, optional) can choose between \'even\' and \'odd\'. Used in | |||
| @@ -1445,16 +1445,22 @@ def _split(x, indices_or_sections, opname, axis=0): | |||
| should be integer, tuple(int) or list(int), but got", indices_or_sections) | |||
| return res | |||
| @constexpr | |||
| def convert_neg_indices(indices, ndim): | |||
| """converts negative values in tuple/list indices""" | |||
| def canonicalizer(ax): | |||
| return ax + ndim if ax < 0 else ax | |||
| indices = tuple([canonicalizer(axis) for axis in indices]) | |||
| return indices | |||
| def _split_sub_tensors(x, indices, axis): | |||
| """ | |||
| Splits the input tensor `x` into multiple sub-tensors | |||
| along the axis according to the given indices. | |||
| """ | |||
| if isinstance(indices, list): | |||
| indices.append(x.shape[axis]) | |||
| elif isinstance(indices, tuple): | |||
| indices += (x.shape[axis],) | |||
| length_along_dim = x.shape[axis] | |||
| indices = convert_neg_indices(indices, length_along_dim) | |||
| indices += (length_along_dim,) | |||
| sub_tensors = [] | |||
| strides = _list_comprehensions(x.ndim, 1, True) | |||
| @@ -2202,17 +2208,6 @@ def choose(a, choices, mode='clip'): | |||
| [[ 10 -10 10] | |||
| [-10 10 -10] | |||
| [ 10 -10 10]] | |||
| >>> a = np.array([0, 1]).reshape((2,1,1)) | |||
| >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) | |||
| >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) | |||
| >>> print(np.choose(a, (c1, c2))) | |||
| [[[ 1 1 1 1 1] | |||
| [ 2 2 2 2 2] | |||
| [ 3 3 3 3 3]] | |||
| [[-1 -2 -3 -4 -5] | |||
| [-1 -2 -3 -4 -5] | |||
| [-1 -2 -3 -4 -5]]] | |||
| """ | |||
| a = _to_tensor(a) | |||
| if isinstance(choices, (tuple, list)): | |||
| @@ -2346,11 +2341,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): | |||
| [[[1 0 0] | |||
| [0 2 0] | |||
| [0 0 3]] | |||
| [[4 0 0] | |||
| [0 5 0] | |||
| [0 0 6]] | |||
| [[7 0 0] | |||
| [0 8 0] | |||
| [0 0 9]]] | |||
| @@ -2516,6 +2516,8 @@ def nanmax(a, axis=None, dtype=None, keepdims=False): | |||
| [3. 2.] | |||
| """ | |||
| a = _to_tensor(a) | |||
| if not isinstance(keepdims, int): | |||
| _raise_type_error("integer argument expected, got", keepdims) | |||
| nan_mask = _isnan(a) | |||
| a = F.select(nan_mask, full(F.shape(a), -sys.maxsize - 1, F.dtype(a)), a) | |||
| reduce_fn = _reduce_max_keepdims if keepdims else _reduce_max_default | |||
| @@ -2561,6 +2563,8 @@ def nanmin(a, axis=None, dtype=None, keepdims=False): | |||
| [1. 2.] | |||
| """ | |||
| a = _to_tensor(a) | |||
| if not isinstance(keepdims, int): | |||
| _raise_type_error("integer argument expected, got", keepdims) | |||
| nan_mask = _isnan(a) | |||
| a = F.select(nan_mask, full(F.shape(a), sys.maxsize, F.dtype(a)), a) | |||
| reduce_fn = _reduce_min_keepdims if keepdims else _reduce_min_default | |||
| @@ -3859,7 +3863,7 @@ def corrcoef(x, y=None, rowvar=True, dtype=None): | |||
| multiple variables and observations. Each row of `x` represents a variable, | |||
| and each column a single observation of all those variables. Also see rowvar below. | |||
| y (Union[int, float, bool, tuple, list, Tensor], optional): An additional set | |||
| of variables and observations. `y` has the same shape as `x`. | |||
| of variables and observations. | |||
| rowvar (bool, optional): If rowvar is `True` (default), then each row represents | |||
| a variable, with observations in the columns. Otherwise, the relationship | |||
| is transposed: each column represents a variable, while the rows contain observations. | |||
| @@ -4252,6 +4256,7 @@ def argmax(a, axis=None): | |||
| >>> print(np.argmax(b)) | |||
| 1 | |||
| """ | |||
| a = _to_tensor(a) | |||
| return a.argmax(axis) | |||
| @@ -4292,6 +4297,7 @@ def argmin(a, axis=None): | |||
| >>> print(np.argmin(b)) | |||
| 0 | |||
| """ | |||
| a = _to_tensor(a) | |||
| return a.argmin(axis) | |||
| @@ -5716,7 +5722,7 @@ def invert(x, dtype=None): | |||
| For signed integer inputs, the two’s complement is returned. In a two’s-complement system | |||
| negative numbers are represented by the two’s complement of the absolute value. This is | |||
| the most common method of representing signed integers on computers | |||
| `[1] <https://en.wikipedia.org/wiki/Two%27s_complement>`. An N-bit two’s-complement system | |||
| `[1] <https://en.wikipedia.org/wiki/Two%27s_complement>`_. An N-bit two’s-complement system | |||
| can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``. | |||
| Note: | |||
| @@ -215,10 +215,9 @@ def tensor_index_by_slice(data, slice_index): | |||
| def tensor_index_by_number(data, number_index): | |||
| """Tensor getitem by a Number which may be integer/float/bool value""" | |||
| number_type = const_utils.check_number_index_type(number_index) | |||
| if number_type == const_utils.BOOL_: | |||
| if isinstance(number_index, bool): | |||
| return _tensor_index_by_bool(data, number_index) | |||
| if number_type == const_utils.INT_: | |||
| if isinstance(number_index, int): | |||
| return _tensor_index_by_integer(data, number_index) | |||
| return const_utils.raise_index_error("Only support integers, slices(`:`), ellipsis(`...`), None and bool.") | |||
| @@ -234,9 +233,8 @@ def _tensor_index_by_bool(data, bool_value): | |||
| def _tensor_index_by_integer(data, int_index): | |||
| """Tensor getitem by a single integer number""" | |||
| if const_utils.judge_index_type(F.typeof(data), mstype.tensor_type): | |||
| min_data_dim, max_data_dim = 1, 8 | |||
| const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim) | |||
| if data.ndim < 1 or data.ndim > 8: | |||
| const_utils.raise_value_error("Expect Tensor to have dimension between 1 and 8.") | |||
| data_shape = F.shape(data) | |||
| transformed_number = const_utils.check_range(int_index, data_shape[0]) | |||
| @@ -78,7 +78,7 @@ def make_empty_slice(): | |||
| def _deep_list(array_like, ndim=-1): | |||
| """convert nested tuple/list mixtures to pure nested list""" | |||
| if ndim != -1: | |||
| check_range(array_like, ndim) | |||
| array_like = check_range(array_like, ndim) | |||
| if isinstance(array_like, (list, tuple)): | |||
| return list(map(lambda x: _deep_list(x, ndim), array_like)) | |||
| return array_like | |||
| @@ -151,7 +151,7 @@ def make_tensor(a, dtype=mstype.int64, data_shape=None, ndim=-1): | |||
| raise TypeError("input data must be `int`, `float`, `bool`, `list` or `tuple`") | |||
| if ndim != -1: | |||
| check_range(a, ndim) | |||
| a = check_range(a, ndim) | |||
| if isinstance(a, (list, tuple)): | |||
| # Convert all tuple/nested tuples to lists | |||