Browse Source

Add mindnumpy to mindspore

tags/v1.1.0
yanglf1121 yanglf1121 5 years ago
parent
commit
59e5afd2bb
9 changed files with 2346 additions and 5 deletions
  1. +22
    -5
      mindspore/_extends/parse/standard_method.py
  2. +44
    -0
      mindspore/numpy/__init__.py
  3. +1022
    -0
      mindspore/numpy/array_ops.py
  4. +96
    -0
      mindspore/numpy/dtypes.py
  5. +160
    -0
      mindspore/numpy/math_ops.py
  6. +316
    -0
      mindspore/numpy/utils.py
  7. +21
    -0
      tests/ut/python/numpy_native/__init__.py
  8. +577
    -0
      tests/ut/python/numpy_native/test_array_ops.py
  9. +88
    -0
      tests/ut/python/numpy_native/test_math_ops.py

+ 22
- 5
mindspore/_extends/parse/standard_method.py View File

@@ -68,13 +68,22 @@ def any_(x, axis=(), keep_dims=False):
return reduce_any(x, axis)


def transpose(x, *axis):
    """Implementation of `transpose`.

    With no `axis` the dimension order is fully reversed (numpy default);
    a single list/tuple argument gives an explicit order, as does one
    integer per dimension.
    """
    new_order = None
    shape = F.shape(x)
    length = F.tuple_len(shape)
    if not axis:
        # Default numpy behaviour: reverse all dimensions.
        perm = F.make_range(0, length)
        new_order = F.tuple_reversed(perm)

    elif len(axis) == 1:
        # transpose(x, (2, 0, 1)) or transpose(x, [2, 0, 1])
        new_order = convert_list_to_tuple(axis[0])

    elif len(axis) == length:
        # transpose(x, 2, 0, 1)
        new_order = axis

    # NOTE(review): an axis count matching none of the branches leaves
    # new_order as None and trans() will fail downstream — consider an
    # explicit constexpr check.
    out = trans(x, new_order)
    return out


@@ -194,7 +203,7 @@ def check_type_same(x_type, base_type):

@constexpr
def check_is_tensor(x):
    """check whether x is tensor."""
    # isinstance already yields the bool the callers expect.
    return isinstance(x, mstype.tensor_type)
@@ -250,6 +259,14 @@ def check_view_shape(x):
x = x[0]
return x

@constexpr
def convert_list_to_tuple(shp):
    """Normalize a shape given as a list or tuple to a tuple."""
    if not isinstance(shp, (list, tuple)):
        raise ValueError(f"The shape variable should be a list or tuple, but got {type(shp)}")
    # tuple() is a no-op for tuples and converts lists.
    return tuple(shp)

def tensor_bool(x):
"""tensor as conditon, if is constant, return immediate bool value"""


+ 44
- 0
mindspore/numpy/__init__.py View File

@@ -0,0 +1,44 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Numpy-like interfaces in mindspore.

Examples:
>>> import mindspore.numpy as np

Note:
- array_ops.py defines all the array generation and operation interfaces.
- math_ops.py defines all the math operations on tensors.
- dtypes.py defines all the mindspore.numpy dtypes (mainly redirected from mindspore).
- random/ defines all the random operations.
"""

from .array_ops import (array, asarray, asfarray, ones, zeros, full, arange,
linspace, logspace, eye, identity, transpose, expand_dims,
squeeze, rollaxis, swapaxes, reshape, ravel, concatenate)
from .array_ops import copy_ as copy
from .dtypes import (int_, int8, int16, int32, int64, uint, uint8, uint16,
uint32, uint64, float_, float16, float32, float64, bool_, inf,
numeric_types)
from .math_ops import mean, inner


# Public API surface: array creation/manipulation names, math names, and the
# dtype aliases declared in dtypes.py.
array_ops_module = ['array', 'asarray', 'asfarray', 'copy', 'ones', 'zeros', 'arange',
                    'linspace', 'logspace', 'eye', 'identity', 'transpose', 'expand_dims',
                    'squeeze', 'rollaxis', 'swapaxes', 'reshape', 'ravel', 'concatenate']

math_module = ['mean', 'inner']

__all__ = array_ops_module + math_module + numeric_types

+ 1022
- 0
mindspore/numpy/array_ops.py
File diff suppressed because it is too large
View File


+ 96
- 0
mindspore/numpy/dtypes.py View File

@@ -0,0 +1,96 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dtypes and utilities"""

from mindspore import (int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
float16, float32, float64, bool_)

# original numpy has int->int64, float->float64, uint->uint64 mapping. we map
# them to 32 bit, since 64 bit calculation is not supported from mindspore
# backend for now.

inf = float('inf')

# Original numpy maps int -> int64, float -> float64 and uint -> uint64.
# MindSpore backends do not fully support 64-bit calculation yet, so these
# aliases deliberately map to the 32-bit variants.
int_ = int32
uint = uint32
float_ = float32

# Names re-exported through mindspore.numpy.__all__.
numeric_types = [
    'int_', 'int8', 'int16', 'int32', 'int64',
    'uint8', 'uint16', 'uint32', 'uint64',
    'float_', 'float16', 'float32', 'float64',
    'bool_']

# Every dtype object accepted by utils._check_dtype.
dtype_tuple = (
    int_, int8, int16, int32, int64,
    uint, uint8, uint16, uint32, uint64,
    float_, float16, float32, float64, bool_)

# String spelling (lower-case) -> mindspore dtype.
dtype_map = {
    'int': int_, 'int8': int8, 'int16': int16, 'int32': int32, 'int64': int64,
    'uint': uint, 'uint8': uint8, 'uint16': uint16, 'uint32': uint32,
    'uint64': uint64,
    'float': float_, 'float16': float16, 'float32': float32, 'float64': float64,
    'bool': bool_
}

# Spellings used when reporting allowed dtypes in error messages.
all_types = [
    'np.int', 'np.int8', 'np.int16', 'np.int32', 'np.int64',
    'np.uint', 'np.uint8', 'np.uint16', 'np.uint32', 'np.uint64',
    'np.float', 'np.float16', 'np.float32', 'np.float64', 'np.bool']

+ 160
- 0
mindspore/numpy/math_ops.py View File

@@ -0,0 +1,160 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""math operations, the function docs are adapted from Numpy API."""
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from .array_ops import squeeze
from .utils import _infer_out_shape, _is_scalar, _check_axis_valid, _get_device_compile, \
_check_shape_aligned


def mean(a, axis=None, keepdims=False):
    """
    Compute the arithmetic mean along the specified axis.

    Returns the average of the array elements, taken over the flattened
    array by default, otherwise over the specified axis.

    Note:
        Numpy arguments dtype and out are not supported.
        On GPU and CPU, the supported dtypes are np.float16 and np.float32.

    Args:
        a (Tensor): input tensor containing numbers whose mean is desired.
        axis (None or int or tuple of ints): optional. Axis or axes along
            which the means are computed. Defaults to the mean of the
            flattened array; a tuple of ints reduces multiple axes.
        keepdims (bool): optional. If True, reduced axes are kept in the
            result as dimensions with size one, so the result broadcasts
            correctly against the input tensor.

    Returns:
        Tensor or scalar, an array containing the mean values.

    Raises:
        ValueError: if axes are out of the range of [-a.ndim, a.ndim), or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> a = np.arange(6, dtype='float32')
        >>> output = np.mean(a, 0)
        >>> print(output)
        2.5
    """
    axes = _check_axis_valid(axis, P.Rank()(a))
    if _is_scalar(a.shape):
        # Reducing a scalar-like tensor: only the shape can change.
        return a if keepdims else squeeze(a)
    # ReduceMean takes keep_dims at primitive-construction time.
    return P.ReduceMean(keepdims)(a, axes)


def inner(a, b):
    """
    Inner product of two tensors.

    Ordinary inner product of vectors for 1-D tensors (without complex
    conjugation), in higher dimensions a sum product over the last
    axes.

    Note:
        Numpy argument out is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtype is np.float32.

    Args:
        a (Tensor): input tensor. If a and b are nonscalar, their last
            dimensions must match.
        b (Tensor): input tensor. If a and b are nonscalar, their last
            dimensions must match.

    Returns:
        Tensor or scalar, out.shape = a.shape[:-1] + b.shape[:-1].

    Raises:
        ValueError: if x1.shape[-1] != x2.shape[-1].

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> a = np.ones((5, 3))
        >>> b = np.ones((2, 7, 3))
        >>> output = np.inner(a, b)
        >>> print(output.shape)
        (5, 2, 7)
    """
    if P.Rank()(a) == 0 or P.Rank()(b) == 0:
        # Scalar case reduces to an elementwise multiply; keep the scalar
        # as the second operand for the broadcast helper.
        if _is_scalar(a.shape):
            a, b = b, a
        return _apply_bin_op(P.Mul(), a, b)

    _ = _check_shape_aligned(a.shape, b.shape)
    # Collapse all leading dimensions so a single 2-D MatMul (with b
    # transposed) computes the sum product over the last axes.
    aligned_shape_a = (F.shape_mul(a.shape[:-1]), a.shape[-1])
    aligned_shape_b = (F.shape_mul(b.shape[:-1]), b.shape[-1])
    a_aligned = P.Reshape()(a, aligned_shape_a)
    b_aligned = P.Reshape()(b, aligned_shape_b)

    res = P.MatMul(False, True)(a_aligned, b_aligned)
    res = P.Reshape()(res, a.shape[:-1] + b.shape[:-1])
    return res


def _expand(x, ndim):
    """Prepend size-1 dimensions until x has at least `ndim` dimensions."""
    rank, expand_dims = P.Rank(), P.ExpandDims()
    while rank(x) < ndim:
        x = expand_dims(x, 0)
    return x


def _apply_bin_op(fn, x1, x2):
    """Apply the broadcasting binary primitive `fn` to x1 and x2."""
    device = _get_device_compile()
    out_shape = _infer_out_shape(device, x1.shape, x2.shape)
    if device == 'CPU':
        # built-in operations on CPU do not support operands with
        # dimensions of size 1 or with shape 0: squeeze both operands,
        # then promote any resulting scalar back to rank 1.
        x1, x2 = squeeze(x1), squeeze(x2)
        x1, x2 = _expand(x1, 1), _expand(x2, 1)
    return P.Reshape()(fn(x1, x2), out_shape)

+ 316
- 0
mindspore/numpy/utils.py View File

@@ -0,0 +1,316 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""internal utility functions"""
from functools import partial

import numpy as onp

import mindspore
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.primitive import constexpr
from mindspore.common import dtype as mstype

from .dtypes import dtype_tuple, all_types, dtype_map

@constexpr
def _check_shape_compile(shape):
    """Normalize a numpy-style shape argument to a tuple, inside the graph."""
    if isinstance(shape, int):
        return (shape,)
    if isinstance(shape, (list, tuple)):
        return tuple(shape)
    raise TypeError(
        f"only int, tuple and list are allowed for shape, but got {type(shape)}")


@constexpr
def _check_is_int(x):
    """Return True when x is an int; raise TypeError otherwise."""
    if not isinstance(x, int):
        raise TypeError(f"integer argument expected, but got {type(x)}.")
    return True


@constexpr
def _check_start_normalize(start, ndim):
    """Map a possibly-negative rollaxis `start` into the range [0, ndim]."""
    if not -ndim <= start <= ndim:
        raise ValueError(
            f"For rollaxis, start {start} is out of bounds. Ranging from {-ndim} to {ndim} is allowed.")
    return start + ndim if start < 0 else start


@constexpr
def _check_axes_range(axes, ndim):
    """
    Check axes are within the number of dimensions of tensor x and normalize the negative axes.

    Args:
        axes (Union[int, tuple(int), list(int)]): Axes of the tensor.
        ndim (int): The number of dimensions of the tensor.

    Return:
        Axes (Union[int, tuple(int)]). An int input yields an int, otherwise a tuple.
    """
    if not isinstance(axes, (int, tuple, list)):
        raise TypeError(
            f"int, tuple(int) or list(int) expected, but got {type(axes)}.")
    low, up = -ndim, ndim - 1
    if low > up:
        raise ValueError(
            f"Lower bound {low} and upper bound {up} of axes are not allowed.")
    # NOTE(review): out-of-range axes raise TypeError here while
    # _check_axis_in_range raises ValueError — behaviour kept as-is.
    if isinstance(axes, int):
        if axes < low or axes > up:
            raise TypeError(
                f"axis {axes} is out of bounds for tensor of dimension {ndim}.")
        return axes if axes >= 0 else axes + ndim
    new_axes = []
    for item in axes:
        if not isinstance(item, int):
            raise TypeError(
                f"int in tuple or list expected, but got {type(item)}.")
        if item < low or item > up:
            raise TypeError(
                f"axis {item} in {axes} is out of bounds for tensor of dimension {ndim}.")
        new_axes.append(item if item >= 0 else item + ndim)
    return tuple(new_axes)


def _check_shape_contain_zero(shp):
"""Check whether shape contains 0"""
if isinstance(shp, int):
return shp == 0
if isinstance(shp, (list, tuple)):
for s in shp:
if s == 0:
return True
return False


def _check_shape(shape):
"""check the shape param to match the numpy style outside the graph"""
if not isinstance(shape, (int, tuple, list)):
raise TypeError(
f"only int, tuple and list are allowed for shape, but got {type(shape)}")
if isinstance(shape, int):
shape = (shape,)
if isinstance(shape, list):
shape = tuple(shape)
return shape


def _check_dtype(dtype):
    """Resolve a string / python builtin / mindspore dtype to a mindspore dtype."""
    if isinstance(dtype, str):
        dtype = dtype_map[dtype.lower()]
    elif isinstance(dtype, type):
        # Python builtins map onto the 32-bit mindspore defaults.
        if dtype is int:
            dtype = mindspore.int32
        elif dtype is float:
            dtype = mindspore.float32
        elif dtype is bool:
            dtype = mindspore.bool_
    if dtype not in dtype_tuple:
        raise TypeError(
            f"only {all_types} are allowed for dtype, but got {type(dtype)}")
    return dtype


def _check_input_for_asarray(array_like):
    """Check whether `array_like` is a valid type for np.asarray conversion.

    Returns True on success, raises TypeError otherwise.
    """
    if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):
        return True
    # Fix: the two message halves previously concatenated without a space
    # ("`tuple`or numpy.ndarray").
    raise TypeError(
        "input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple` " + \
        f"or numpy.ndarray, but got {type(array_like)}")


def _cast_to(array, dtype):
    """Cast `array` to the specified mindspore `dtype`."""
    return P.Cast()(array, dtype)


def _is_scalar(shape):
    """Return True when `shape` describes a scalar-like (size-1) tensor."""
    return F.shape_mul(shape) == 1


@constexpr
def _get_device_compile():
    """Get the current device (`GPU`, `CPU`, `Ascend`), folded at graph-compile time."""
    return context.get_context('device_target')


def _get_device():
    """Get the current device (`GPU`, `CPU`, `Ascend`) outside the graph."""
    return context.get_context('device_target')


def _get_mode():
    """Get the current execution mode (0 is Graph mode, 1 is PyNative mode)."""
    return context.get_context('mode')


@constexpr
def _reverse_index(idx, arr):
    """Return arr[-1 - idx], or 1 when arr has fewer than idx + 1 entries.

    The filler value 1 stands in for a missing dimension, which is always
    broadcast-compatible — it covers both a literal size-1 dimension and a
    tensor of lower rank than idx requires.
    """
    return 1 if len(arr) <= idx else arr[-1 - idx]


@constexpr
def _infer_out_shape(device, *shapes):
    """
    Return the shape of the output after broadcasting `shapes`.

    Raises:
        ValueError: when the shapes cannot be broadcast together, or on CPU
            when non-scalar operands differ in shape (these CPU kernels do
            not broadcast).
    """
    shapes_unbroadcastable = False
    cpu_shapes_different = False
    contains_scalar = any(_is_scalar(shape) for shape in shapes)
    ndim_max = max(map(len, shapes))
    shape_out = [0] * ndim_max
    # (dead `i = 0` pre-assignment removed: i is only used inside the loop)
    for i in range(ndim_max):
        shape_out[-1 - i] = max(map(partial(_reverse_index, i), shapes))
        for shape in shapes:
            if _reverse_index(i, shape) != shape_out[-1 - i]:
                if _reverse_index(i, shape) != 1:
                    shapes_unbroadcastable = True
                if device == 'CPU' and not contains_scalar:
                    cpu_shapes_different = True
    if not shapes_unbroadcastable and not cpu_shapes_different:
        return tuple(shape_out)
    if shapes_unbroadcastable:
        raise ValueError(
            f'operands could not be broadcast together with shapes {*shapes,}')
    # Fix: the two message halves previously joined without a space
    # ("Non-scalaroperands").
    raise ValueError('broadcasting is currently not supported on CPU. Non-scalar ' + \
                     f'operands must have the same shape, but got {*shapes,}')


@constexpr
def _check_axis_in_range(axis, ndim):
    """Return True when -ndim <= axis < ndim; raise ValueError otherwise."""
    if not -ndim <= axis < ndim:
        raise ValueError(
            f'axis {axis} is out of bounds for array of dimension {ndim}')
    return True


@constexpr
def _check_axis_valid(axes, ndim):
    """
    Check axes are valid given ndim, and return axes that can be passed
    to the built-in reduce operators (a tuple of non-negative ints).

    Raises:
        ValueError: on out-of-range axes, duplicate axes, or an unsupported
            axes type.
    """
    if axes is None:
        # Reduce over every dimension.
        return F.make_range(ndim)
    if isinstance(axes, int):
        _ = _check_axis_in_range(axes, ndim)
        return (axes % ndim,)
    if isinstance(axes, tuple):
        for axis in axes:
            _ = _check_axis_in_range(axis, ndim)
        axes = tuple(map(lambda x: x % ndim, axes))
        if any(axes.count(el) > 1 for el in axes):
            # Fix: previously this message could also fire for unsupported
            # axes types (e.g. list), which was misleading.
            raise ValueError('duplicate value in \'axis\'')
        return axes
    raise ValueError(
        f"axes should be None, int or tuple of ints, but got {type(axes)}")


@constexpr
def _check_shape_aligned(shape1, shape2):
    """Ensure the last dimensions of the two shapes agree (inner-product precondition)."""
    if shape1[-1] != shape2[-1]:
        raise ValueError(
            f'shapes {shape1} {shape2} not aligned: {shape1[-1]} (dim 0) != {shape2[-1]} (dim 0)')
    return True


@constexpr
def _check_dim_cpu(shape, bound):
    """Check `shape` is scalar-like or has at most `bound` dimensions (CPU limit)."""
    ndim = len(shape)
    if _is_scalar(shape) or ndim <= bound:
        return True
    raise ValueError(
        f'dimension {ndim} larger than {bound} is not supported on CPU')


@constexpr
def _tile_size(shape, out_shape, ndim):
    """Return the per-axis repeat counts so that shape * tile_size = out_shape."""
    size = [1] * ndim
    for axis, (dim_in, dim_out) in enumerate(zip(shape, out_shape)):
        if dim_in != dim_out:
            size[axis] = dim_out
    return tuple(size)


@constexpr
def _check_core_match(shape1, shape2):
    """Check shape1[-1] matches shape2[-2] (matmul core-dimension precondition)."""
    ndim1, ndim2 = len(shape1), len(shape2)
    if ndim1 >= 1 and ndim2 >= 2 and shape1[-1] != shape2[-2]:
        raise ValueError(f'mismatch in core dimension of input operands (size {shape1[-1]} ' +
                         f'is different from {shape2[-2]})')
    # Low-rank operands are accepted without a core-dimension check.
    return True


@constexpr
def _cpu_not_support(name):
    """Raise when function `name`, unsupported on CPU, runs on the CPU device."""
    if _get_device() == 'CPU':
        raise ValueError(f'{name} is not supported on CPU')
    return True


@constexpr
def _check_is_tuple(obj):
    """Return True when `obj` is a mindspore tuple type."""
    return isinstance(obj, mstype.tuple_type)


@constexpr
def _check_is_list(obj):
    """Return True when `obj` is a mindspore list type."""
    return isinstance(obj, mstype.list_type)


@constexpr
def _check_is_tensor(obj):
    """Return True when `obj` is a mindspore tensor type."""
    return isinstance(obj, mstype.tensor_type)

+ 21
- 0
tests/ut/python/numpy_native/__init__.py View File

@@ -0,0 +1,21 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""setup for pytest in mindspore.numpy"""
import mindspore.context as context


# pylint: disable=unused-argument
def setup_module(module):
    """Force graph mode for every test in this package."""
    context.set_context(mode=context.GRAPH_MODE)

+ 577
- 0
tests/ut/python/numpy_native/test_array_ops.py View File

@@ -0,0 +1,577 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for array operations"""

import functools

import pytest
import numpy as onp

import mindspore.context as context
import mindspore.numpy as mnp
from mindspore.nn import Cell

from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
import pipeline_for_compile_forward_ge_graph_for_case_by_case_config


class Cases():
    """Shared fixtures: shapes, paired onp/mnp dtypes, and sample inputs."""

    def __init__(self):
        self.all_shapes = [
            0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
        ]
        # onp_dtypes[i] and mnp_dtypes[i] must describe the same dtype.
        self.onp_dtypes = [onp.int32, 'int32', int,
                           onp.float32, 'float32', float,
                           onp.uint32, 'uint32',
                           onp.bool_, 'bool', bool]

        self.mnp_dtypes = [mnp.int32, 'int32', int,
                           mnp.float32, 'float32', float,
                           mnp.uint32, 'uint32',
                           mnp.bool_, 'bool', bool]

        # Fix: onp.bool was a deprecated alias of the builtin `bool`
        # (removed in numpy >= 1.24); onp.bool_ is the supported spelling.
        self.array_sets = [1, 1.1, True, [1, 0, True], [1, 1.0, 2], (1,),
                           [(1, 2, 3), (4, 5, 6)],
                           onp.random.random((100, 100)).astype(onp.float32),
                           onp.random.random((100, 100)).astype(onp.bool_)]


def match_array(actual, expected, error=0):
    """Assert two array-likes are equal, to `error` decimal places when given."""
    if error <= 0:
        onp.testing.assert_equal(actual.tolist(), expected.tolist())
    else:
        onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(),
                                        decimal=error)


def check_all_results(onp_results, mnp_results):
    """Compare every numpy result against the matching mindspore result."""
    for idx, expected in enumerate(onp_results):
        match_array(expected, mnp_results[idx].asnumpy())


def test_asarray():
    """mnp.asarray matches onp.asarray, up to the 32-bit default dtypes."""
    test_case = Cases()
    for array in test_case.array_sets:
        actual = onp.asarray(array)
        expected = mnp.asarray(array).asnumpy()
        # mindspore defaults to 32-bit types where numpy defaults to 64-bit,
        # so widen the dtype comparison accordingly.
        if actual.dtype is onp.dtype('float64'):
            assert expected.dtype == onp.dtype('float32')
        elif actual.dtype is onp.dtype('int64'):
            assert expected.dtype == onp.dtype('int32')
        else:
            assert actual.dtype == expected.dtype
        match_array(actual, expected, error=7)

        for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
            match_array(onp.asarray(array, onp_dtype),
                        mnp.asarray(array, mnp_dtype).asnumpy(), error=7)


def test_array():
    """mnp.array honours the `copy` argument (asarray never copies a Tensor)."""
    test_case = Cases()
    for array in test_case.array_sets:
        arr1 = mnp.asarray(array)
        assert arr1 is mnp.array(arr1, copy=False)
        assert arr1 is not mnp.array(arr1)
        arr4 = mnp.asarray(array, dtype='int32')
        assert arr4 is mnp.asarray(arr4, dtype=mnp.int32)


def test_asfarray():
    """mnp.asfarray matches onp.asfarray, up to the float32 default dtype."""
    test_case = Cases()
    for array in test_case.array_sets:
        actual = onp.asfarray(array)
        expected = mnp.asfarray(array).asnumpy()
        # mindspore defaults to float32 where numpy defaults to float64.
        if actual.dtype is onp.dtype('float64'):
            assert expected.dtype == onp.dtype('float32')
        else:
            assert actual.dtype == expected.dtype
        match_array(actual, expected, error=7)

        for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
            match_array(onp.asfarray(array, onp_dtype),
                        mnp.asfarray(array, mnp_dtype).asnumpy(), error=7)


def test_zeros():
    """mnp.zeros matches onp.zeros across all fixture shapes and dtypes."""
    test_case = Cases()
    for shape in test_case.all_shapes:
        for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
            match_array(onp.zeros(shape, onp_dtype),
                        mnp.zeros(shape, mnp_dtype).asnumpy())
        match_array(onp.zeros(shape), mnp.zeros(shape).asnumpy())


def test_ones():
    """mnp.ones matches onp.ones across all fixture shapes and dtypes."""
    test_case = Cases()
    for shape in test_case.all_shapes:
        for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
            match_array(onp.ones(shape, onp_dtype),
                        mnp.ones(shape, mnp_dtype).asnumpy())
        match_array(onp.ones(shape), mnp.ones(shape).asnumpy())


def test_full():
    """mnp.full matches onp.full for array, inf, bool and float fill values."""
    cases = [((2, 2), [1, 2]),
             ((2, 0), onp.inf),  # mnp.inf == onp.inf == float('inf')
             ((2, 3), True),
             ((3, 4, 5), 7.5)]
    for shape, fill in cases:
        match_array(onp.full(shape, fill), mnp.full(shape, fill).asnumpy())


def test_eye():
    """mnp.eye matches onp.eye for combinations of m, n, k and dtype."""
    test_case = Cases()
    for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
        for m in range(1, 5):
            match_array(onp.eye(m, dtype=onp_dtype),
                        mnp.eye(m, dtype=mnp_dtype).asnumpy())
            for n in range(1, 5):
                for k in range(0, 5):
                    match_array(onp.eye(m, n, k, dtype=onp_dtype),
                                mnp.eye(m, n, k, dtype=mnp_dtype).asnumpy())


def test_identity():
    """mnp.identity matches onp.identity for every fixture dtype."""
    test_case = Cases()
    for onp_dtype, mnp_dtype in zip(test_case.onp_dtypes, test_case.mnp_dtypes):
        for m in range(1, 5):
            match_array(onp.identity(m, dtype=onp_dtype),
                        mnp.identity(m, dtype=mnp_dtype).asnumpy())


def test_arange():
    """mnp.arange matches onp.arange for int and float steps."""
    cases = [((10,), {}, 0),
             ((0, 10), {}, 0),
             ((), {'start': 10}, 0),
             ((), {'start': 10, 'step': 0.1}, 6),  # float steps compared to 6 decimals
             ((10,), {'step': 0.1}, 6),
             ((0.1, 9.9), {}, 6)]
    for args, kwargs, err in cases:
        match_array(onp.arange(*args, **kwargs),
                    mnp.arange(*args, **kwargs).asnumpy(), error=err)


def test_linspace():
    """mnp.linspace matches onp.linspace (float32), including retstep and broadcast stops."""
    for kwargs in ({}, {'num': 5}, {'num': 5, 'endpoint': False}):
        match_array(onp.linspace(2.0, 3.0, dtype=onp.float32, **kwargs),
                    mnp.linspace(2.0, 3.0, **kwargs).asnumpy(), error=7)

    # retstep=True returns a (samples, step) pair.
    actual = onp.linspace(2.0, 3.0, num=5, retstep=True, dtype=onp.float32)
    expected = mnp.linspace(2.0, 3.0, num=5, retstep=True)
    match_array(actual[0], expected[0].asnumpy())
    assert actual[1] == expected[1]

    # Broadcast stop values.
    match_array(onp.linspace(2.0, [3, 4, 5], num=5,
                             endpoint=False, dtype=onp.float32),
                mnp.linspace(2.0, [3, 4, 5], num=5, endpoint=False).asnumpy())


def test_logspace():
    """mnp.logspace matches onp.logspace (float32), including broadcast stops."""
    for kwargs in ({}, {'num': 5}, {'num': 5, 'endpoint': False}):
        match_array(onp.logspace(2.0, 3.0, dtype=onp.float32, **kwargs),
                    mnp.logspace(2.0, 3.0, **kwargs).asnumpy())

    # Broadcast stop values.
    match_array(onp.logspace(2.0, [3, 4, 5], num=5,
                             endpoint=False, dtype=onp.float32),
                mnp.logspace(2.0, [3, 4, 5], num=5, endpoint=False).asnumpy())


# Test np.transpose and np.ndarray.transpose


def mnp_transpose(input_tensor):
    """Exercise mnp.transpose with tuple, list and default axis orders."""
    return (mnp.transpose(input_tensor, (0, 2, 1)),
            mnp.transpose(input_tensor, [2, 1, 0]),
            mnp.transpose(input_tensor, (1, 0, 2)),
            mnp.transpose(input_tensor))


def onp_transpose(input_array):
    """numpy reference for mnp_transpose: tuple, list and default axis orders."""
    return (onp.transpose(input_array, (0, 2, 1)),
            onp.transpose(input_array, [2, 1, 0]),
            onp.transpose(input_array, (1, 0, 2)),
            onp.transpose(input_array))

# Test np.expand_dims


def mnp_expand_dims(input_tensor):
    """Exercise mnp.expand_dims with positive, negative and keyword axes."""
    return (mnp.expand_dims(input_tensor, 0),
            mnp.expand_dims(input_tensor, -1),
            mnp.expand_dims(input_tensor, axis=2),
            mnp.expand_dims(input_tensor, axis=-2))


def onp_expand_dims(input_array):
    """numpy reference for mnp_expand_dims."""
    return (onp.expand_dims(input_array, 0),
            onp.expand_dims(input_array, -1),
            onp.expand_dims(input_array, axis=2),
            onp.expand_dims(input_array, axis=-2))

# Test np.squeeze


def mnp_squeeze(input_tensor):
    """Exercise mnp.squeeze with default, int, None, negative and tuple axes."""
    return (mnp.squeeze(input_tensor),
            mnp.squeeze(input_tensor, 0),
            mnp.squeeze(input_tensor, axis=None),
            mnp.squeeze(input_tensor, axis=-3),
            mnp.squeeze(input_tensor, (2,)),
            mnp.squeeze(input_tensor, (0, 2)))


def onp_squeeze(input_array):
    """numpy reference for mnp_squeeze."""
    return (onp.squeeze(input_array),
            onp.squeeze(input_array, 0),
            onp.squeeze(input_array, axis=None),
            onp.squeeze(input_array, axis=-3),
            onp.squeeze(input_array, (2,)),
            onp.squeeze(input_array, (0, 2)))

# Test np.rollaxis


def mnp_rollaxis(input_tensor):
    """Exercise mnp.rollaxis with explicit and default start positions."""
    return (mnp.rollaxis(input_tensor, 0, 1),
            mnp.rollaxis(input_tensor, 0, 2),
            mnp.rollaxis(input_tensor, 2, 1),
            mnp.rollaxis(input_tensor, 2, 2),
            mnp.rollaxis(input_tensor, 0),
            mnp.rollaxis(input_tensor, 1))


def onp_rollaxis(input_array):
    """numpy reference for mnp_rollaxis."""
    return (onp.rollaxis(input_array, 0, 1),
            onp.rollaxis(input_array, 0, 2),
            onp.rollaxis(input_array, 2, 1),
            onp.rollaxis(input_array, 2, 2),
            onp.rollaxis(input_array, 0),
            onp.rollaxis(input_array, 1))

# Test np.swapaxes


def mnp_swapaxes(input_tensor):
    """Exercise mnp.swapaxes, including no-op swaps of an axis with itself."""
    return (mnp.swapaxes(input_tensor, 0, 1),
            mnp.swapaxes(input_tensor, 1, 0),
            mnp.swapaxes(input_tensor, 1, 1),
            mnp.swapaxes(input_tensor, 2, 1),
            mnp.swapaxes(input_tensor, 1, 2),
            mnp.swapaxes(input_tensor, 2, 2))


def onp_swapaxes(input_array):
    """numpy reference for mnp_swapaxes."""
    return (onp.swapaxes(input_array, 0, 1),
            onp.swapaxes(input_array, 1, 0),
            onp.swapaxes(input_array, 1, 1),
            onp.swapaxes(input_array, 2, 1),
            onp.swapaxes(input_array, 1, 2),
            onp.swapaxes(input_array, 2, 2))

# Test np.reshape


def mnp_reshape(input_tensor):
    """Exercise mnp.reshape with tuples, lists, -1 wildcards and a bare int."""
    return (mnp.reshape(input_tensor, (3, 8)),
            mnp.reshape(input_tensor, [3, -1]),
            mnp.reshape(input_tensor, (-1, 12)),
            mnp.reshape(input_tensor, (-1,)),
            mnp.reshape(input_tensor, 24),
            mnp.reshape(input_tensor, [2, 4, -1]))


def onp_reshape(input_array):
    """numpy reference for mnp_reshape."""
    return (onp.reshape(input_array, (3, 8)),
            onp.reshape(input_array, [3, -1]),
            onp.reshape(input_array, (-1, 12)),
            onp.reshape(input_array, (-1,)),
            onp.reshape(input_array, 24),
            onp.reshape(input_array, [2, 4, -1]))

# Test np.ravel


def mnp_ravel(input_tensor):
    """Flatten a tensor with mnp.ravel."""
    return mnp.ravel(input_tensor)


def onp_ravel(input_array):
    """numpy reference for mnp_ravel."""
    return onp.ravel(input_array)

# Test np.concatenate


def mnp_concatenate(input_tensor):
    """Exercise mnp.concatenate on every axis and on the flattened (None) case."""
    return (mnp.concatenate(input_tensor, None),
            mnp.concatenate(input_tensor, 0),
            mnp.concatenate(input_tensor, 1),
            mnp.concatenate(input_tensor, 2))


def onp_concatenate(input_array):
    """Concatenate the input along axis None (flatten-all), 0, 1 and 2."""
    return tuple(onp.concatenate(input_array, axis)
                 for axis in (None, 0, 1, 2))


def test_transpose():
    """mnp.transpose variants must match onp.transpose on the same data."""
    base = onp.random.random((3, 4, 5)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_transpose(base)
    actual = mnp_transpose(tensor)
    check_all_results(expected, actual)


def test_expand_dims():
    """mnp.expand_dims variants must match onp.expand_dims on the same data."""
    base = onp.random.random((3, 4, 5)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_expand_dims(base)
    actual = mnp_expand_dims(tensor)
    check_all_results(expected, actual)


def test_squeeze():
    """mnp.squeeze must match onp.squeeze, including the all-ones shape."""
    for shape in ((1, 3, 1, 4, 2), (1, 1, 1, 1, 1)):
        base = onp.random.random(shape).astype('float32')
        tensor = mnp.asarray(base)
        expected = onp_squeeze(base)
        actual = mnp_squeeze(tensor)
        check_all_results(expected, actual)


def test_rollaxis():
    """mnp.rollaxis variants must match onp.rollaxis on the same data."""
    base = onp.random.random((3, 4, 5)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_rollaxis(base)
    actual = mnp_rollaxis(tensor)
    check_all_results(expected, actual)


def test_swapaxes():
    """mnp.swapaxes variants must match onp.swapaxes on the same data."""
    base = onp.random.random((3, 4, 5)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_swapaxes(base)
    actual = mnp_swapaxes(tensor)
    check_all_results(expected, actual)


def test_reshape():
    """mnp.reshape variants must match onp.reshape on the same data."""
    base = onp.random.random((2, 3, 4)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_reshape(base)
    actual = mnp_reshape(tensor)
    check_all_results(expected, actual)


def test_ravel():
    """mnp.ravel must match onp.ravel on the same data."""
    base = onp.random.random((2, 3, 4)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_ravel(base)
    actual = mnp_ravel(tensor).asnumpy()
    match_array(expected, actual)


def test_concatenate():
    """mnp.concatenate variants must match onp.concatenate on the same data."""
    base = onp.random.random((5, 4, 3, 2)).astype('float32')
    tensor = mnp.asarray(base)
    expected = onp_concatenate(base)
    actual = mnp_concatenate(tensor)
    check_all_results(expected, actual)


class ReshapeExpandSqueeze(Cell):
    """Cell chaining mnp.expand_dims -> mnp.reshape -> mnp.squeeze."""

    def __init__(self):
        super(ReshapeExpandSqueeze, self).__init__()

    def construct(self, x):
        expanded = mnp.expand_dims(x, 2)
        reshaped = mnp.reshape(expanded, (1, 2, 3, 4, 1, 1))
        return mnp.squeeze(reshaped)


class TransposeConcatRavel(Cell):
    """Cell that transposes two inputs, concatenates with a third, then flattens."""

    def __init__(self):
        super(TransposeConcatRavel, self).__init__()

    def construct(self, x1, x2, x3):
        # exercise both the free-function and the method form of transpose
        first = mnp.transpose(x1, [0, 2, 1])
        second = x2.transpose(0, 2, 1)
        merged = mnp.concatenate((first, second, x3), -1)
        return mnp.ravel(merged)


class RollSwap(Cell):
    """Cell that rolls axis 2 to the front, then swaps the first two axes."""

    def __init__(self):
        super(RollSwap, self).__init__()

    def construct(self, x):
        rolled = mnp.rollaxis(x, 2)
        return mnp.swapaxes(rolled, 0, 1)


# Case table consumed by the mindspore_test pipeline below: each entry is a
# (name, {'block': Cell, 'desc_inputs': [...]}) pair compiled and run forward.
test_case_array_ops = [
    ('ReshapeExpandSqueeze', {
        'block': ReshapeExpandSqueeze(),
        'desc_inputs': [mnp.ones((2, 3, 4))]}),

    ('TransposeConcatRavel', {
        'block': TransposeConcatRavel(),
        # two (2, 3, 4) inputs are transposed to (2, 4, 3) before being
        # concatenated with the (2, 4, 1) input along the last axis
        'desc_inputs': [mnp.ones((2, 3, 4)),
                        mnp.ones((2, 3, 4)),
                        mnp.ones((2, 4, 1))]}),

    ('RollSwap', {
        'block': RollSwap(),
        'desc_inputs': [mnp.ones((2, 3, 4))]})
]

test_case_lists = [test_case_array_ops]
# Flatten all case lists into one list consumed by the test_exec pipeline.
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select a certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    """Compile and forward-run every entry of test_exec_case in graph mode."""
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


# Expected-exception case table (same shape as test_case_array_ops).
raise_set = [
    ('Expand_dims_Error', {
        # NOTE(review): this lambda returns the mnp.expand_dims function object
        # without calling it, so no ValueError can ever be raised here, and the
        # second desc_input (0) is never consumed -- confirm the intended call,
        # e.g. mnp.expand_dims(x, axis).
        'block': (lambda x: mnp.expand_dims, {'exception': ValueError}),
        'desc_inputs': [mnp.ones((2, 3, 4)), 0]}),
]


def expand_dims_exception(input_tensor):
    """Call expand_dims with a float axis; callers expect a TypeError."""
    non_integer_axis = 1.2
    return mnp.expand_dims(input_tensor, non_integer_axis)


def test_expand_dims_exception():
    """A non-integer axis passed to expand_dims must raise TypeError."""
    tensor = mnp.ones((3, 3))
    with pytest.raises(TypeError):
        expand_dims_exception(tensor)


def test_asarray_exception():
    """A set is not a supported asarray input and must raise TypeError."""
    unsupported_input = {1, 2, 3}
    with pytest.raises(TypeError):
        mnp.asarray(unsupported_input)


def swapaxes_exception(input_tensor):
    """Call swapaxes with an out-of-range axis; callers expect an error."""
    out_of_range_axis = 10
    return mnp.swapaxes(input_tensor, 1, out_of_range_axis)


def test_swapaxes_exception():
    """An out-of-range axis passed to swapaxes must raise TypeError."""
    tensor = mnp.ones((3, 3))
    with pytest.raises(TypeError):
        swapaxes_exception(tensor)

+ 88
- 0
tests/ut/python/numpy_native/test_math_ops.py View File

@@ -0,0 +1,88 @@

import pytest
import numpy as onp

import mindspore.context as context
import mindspore.numpy as mnp


def rand_int(*shape):
    """Return a random array of the given shape with values in [1, 5).

    Arrays are cast to float32; with no shape the raw randint result is
    returned unchanged when it is not an ndarray.
    """
    drawn = onp.random.randint(low=1, high=5, size=shape)
    if not isinstance(drawn, onp.ndarray):
        return drawn
    return drawn.astype(onp.float32)


class Cases():
    """Shared random-input fixture for the math-op comparison tests."""

    def __init__(self):
        self.device_cpu = context.get_context('device_target') == 'CPU'

        # full arrays of increasing rank
        self.arrs = [rand_int(*shp) for shp in
                     [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]]

        # scalars expanded across the 0th dimension
        self.scalars = [rand_int(*shp) for shp in
                        [(), (1,), (1, 1), (1, 1, 1)]]

        # arrays with last dimension aligned
        self.aligned_arrs = [rand_int(*shp) for shp in
                             [(2, 3), (1, 4, 3), (5, 1, 2, 3), (4, 2, 1, 1, 3)]]


# Built once at import time so every test reuses the same random fixtures.
test_case = Cases()


def mnp_inner(a, b):
    """Thin wrapper so the comparison harness can call mnp.inner."""
    return mnp.inner(a, b)


def onp_inner(a, b):
    """Thin wrapper so the comparison harness can call onp.inner."""
    return onp.inner(a, b)


def test_inner():
    """mnp.inner must match onp.inner for aligned arrays and scalar-likes."""
    for lhs in test_case.aligned_arrs:
        for rhs in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, lhs, rhs)

    for lhs in test_case.scalars:
        for rhs in test_case.scalars:
            match_res(mnp_inner, onp_inner, lhs, rhs)


# Check that the outputs of the mnp and onp functions, applied to the same arrays, match.


def match_res(mnp_fn, onp_fn, arr1, arr2):
    """Run both implementations on the same data and compare the results."""
    mnp_out = mnp_fn(mnp.asarray(arr1, dtype='float32'),
                     mnp.asarray(arr2, dtype='float32'))
    onp_out = onp_fn(arr1, arr2)
    match_array(mnp_out.asnumpy(), onp_out)


def match_array(actual, expected, error=5):
    """Assert elementwise (near-)equality of two array-likes.

    With error > 0, compares to that many decimal places; otherwise an
    exact comparison is performed.
    """
    if error <= 0:
        onp.testing.assert_equal(actual.tolist(), expected.tolist())
        return
    onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(),
                                    decimal=error)


def test_exception_innner():
    """inner must reject operands whose last dimensions do not align."""
    lhs = mnp.asarray(test_case.arrs[0])
    rhs = mnp.asarray(test_case.arrs[1])
    with pytest.raises(ValueError):
        mnp.inner(lhs, rhs)

Loading…
Cancel
Save