You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

math_ops.py 208 kB

4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
4 years ago
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """math operations, the function docs are adapted from Numpy API."""
  16. import operator
  17. import functools
  18. import itertools
  19. import sys
  20. from numpy import dtype as nptype
  21. from ..ops import operations as P
  22. from ..ops import functional as F
  23. from ..ops import composite as C
  24. from ..ops.primitive import constexpr
  25. from ..common import dtype as mstype
  26. from ..common import Tensor
  27. from .._c_expression import typing
  28. from .dtypes import nan, pi, dtype_map, inf
  29. from .array_creations import asarray_const, ones, zeros, empty, full, full_like, diag, \
  30. arange, histogram_bin_edges, eye
  31. from .array_ops import where as where_
  32. from .array_ops import ravel, expand_dims, moveaxis, concatenate, flip, stack, atleast_1d, \
  33. split
  34. from .utils_const import _infer_out_shape, _check_axis_valid, _get_device, \
  35. _check_shape_aligned, _raise_type_error, _check_same_type, _check_is_float, \
  36. _raise_value_error, _promote, _check_axis_type, _canonicalize_axis, \
  37. _is_shape_empty, _check_is_int, _expanded_shape, _check_axis_in_range, \
  38. _check_dtype, _list_comprehensions, _tuple_setitem, _add_unit_axes, _seq_prod, \
  39. _make_tensor, _promote_for_trigonometric, _raise_runtime_error, _max, _type_convert, \
  40. _raise_unimplemented_error, _abs, _in, _tuple_slice, _check_is_inf
  41. from .utils import _expand, _broadcast_to, _broadcast_to_shape, _check_input_tensor, \
  42. _to_tensor, _to_tensor_origin_dtype, _isnan
# Shared constant and cached primitive instances, created once at import time
# and reused by the math functions below.
ZERO_TENSOR = asarray_const(0)

# ReduceMean with keep_dims=True.
_mean_keepdims = P.ReduceMean(True)
# MatMul variants: plain, and with the second operand transposed.
_matmul = P.MatMul(False, False)
_matmul_t = P.MatMul(False, True)
# ReduceSum / ReduceMin / ReduceMax, each with and without keep_dims.
_reduce_sum_default = P.ReduceSum()
_reduce_sum_keepdims = P.ReduceSum(True)
_reduce_min_default = P.ReduceMin()
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_default = P.ReduceMax()
_reduce_max_keepdims = P.ReduceMax(True)
_cumsum_default = P.CumSum()
# Concatenate along the last axis.
_concat = P.Concat(-1)
_cumprod_default = P.CumProd()
_round = P.Round()
_rint = P.Rint()
  58. def absolute(x, dtype=None):
  59. """
  60. Calculates the absolute value element-wise.
  61. Note:
  62. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  63. not supported.
  64. Currently the backend kernel only supports float calculation, if the input
  65. is not a `float`, then it will be casted to :class:`mstype.float32` and casted back.
  66. Args:
  67. x (Tensor): Tensor to be used for calculation.
  68. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  69. output Tensor.
  70. Returns:
  71. Tensor.
  72. Raises:
  73. TypeError: If input arguments have types not specified above.
  74. Supported Platforms:
  75. ``Ascend`` ``GPU`` ``CPU``
  76. Examples:
  77. >>> import mindspore.numpy as np
  78. >>> x = np.asarray([1, 2, 3, -4, -5], np.float32)
  79. >>> output = np.absolute(x)
  80. >>> print(output)
  81. [1. 2. 3. 4. 5.]
  82. """
  83. original_dtype = x.dtype
  84. allowed_types = None
  85. if _get_device() == "Ascend":
  86. allowed_types = (mstype.float16, mstype.float32)
  87. else:
  88. allowed_types = (mstype.int32, mstype.float16, mstype.float32, mstype.float64)
  89. if original_dtype not in allowed_types and dtype is None:
  90. x = x.astype(mstype.float32)
  91. return _apply_tensor_op(F.absolute, x, dtype=dtype).astype(original_dtype)
  92. return _apply_tensor_op(F.absolute, x, dtype=dtype)
  93. def count_nonzero(x, axis=None, keepdims=False):
  94. """
  95. Counts the number of non-zero values in the tensor `x`.
  96. Args:
  97. x (Tensor): The tensor for which to count non-zeros.
  98. axis (Union[int,tuple], optional): Axis or tuple of axes along which to
  99. count non-zeros. Default is None, meaning that non-zeros will be counted
  100. along a flattened version of `x`.
  101. keepdims (bool, optional): If this is set to True, the axes that are counted
  102. are left in the result as dimensions with size one. With this option,
  103. the result will broadcast correctly against `x`.
  104. Returns:
  105. Tensor, indicating number of non-zero values in the `x` along a given axis.
  106. Otherwise, the total number of non-zero values in `x` is returned.
  107. Raises:
  108. TypeError: If axis is not int or tuple.
  109. ValueError: If axis is not in range [-x.ndim, x.ndim)
  110. Supported Platforms:
  111. ``Ascend`` ``GPU`` ``CPU``
  112. Examples:
  113. >>> import mindspore.numpy as np
  114. >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
  115. >>> output = np.count_nonzero(x)
  116. >>> print(output)
  117. 6
  118. """
  119. if _is_shape_empty(x.shape):
  120. return ZERO_TENSOR
  121. if axis is None:
  122. axis = ()
  123. return C.count_nonzero(x=x, axis=axis, keep_dims=keepdims)
  124. def clip(x, xmin, xmax, dtype=None):
  125. """
  126. Clips (limits) the values in an array.
  127. Given an interval, values outside the interval are clipped to the interval edges.
  128. For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
  129. and values larger than 1 become 1.
  130. Args:
  131. x (Tensor): Tensor containing elements to clip.
  132. xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
  133. on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
  134. xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
  135. on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
  136. If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
  137. to match their shapes.
  138. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  139. output Tensor.
  140. Returns:
  141. Tensor, a tensor with the elements of `x`, but where values
  142. < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
  143. Raises:
  144. TypeError: If inputs have types not specified above.
  145. ValueError: If the shapes of `x1` and `x2` cannot broadcast, or both `xmin` and `xmax` are `None`.
  146. Supported Platforms:
  147. ``Ascend`` ``GPU`` ``CPU``
  148. Examples:
  149. >>> import mindspore.numpy as np
  150. >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
  151. >>> output = np.clip(x, 0, 2)
  152. >>> print(output)
  153. [1 2 2 0 0 2 2 0]
  154. """
  155. if xmin is None and xmax is None:
  156. _raise_value_error("One of max or min must be given.")
  157. if xmin is not None:
  158. x = maximum(x, xmin, dtype=dtype)
  159. if xmax is not None:
  160. x = minimum(x, xmax, dtype=dtype)
  161. return x
  162. def deg2rad(x, dtype=None):
  163. """
  164. Converts angles from degrees to radians.
  165. Args:
  166. x (Tensor): Angles in degrees.
  167. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  168. output Tensor.
  169. Returns:
  170. Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
  171. is a tensor scalar.
  172. Raises:
  173. TypeError: if `x` is not a tensor.
  174. Supported Platforms:
  175. ``Ascend`` ``GPU`` ``CPU``
  176. Examples:
  177. >>> import mindspore.numpy as np
  178. >>> x = np.asarray([1, 2, 3, -4, -5])
  179. >>> output = np.deg2rad(x)
  180. >>> print(output)
  181. [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647]
  182. """
  183. _check_input_tensor(x)
  184. def convert(a):
  185. return a * pi / 180.0
  186. return _apply_tensor_op(convert, x, dtype=dtype)
  187. def rad2deg(x, dtype=None):
  188. """
  189. Converts angles from radians to degrees.
  190. Args:
  191. x (Tensor): Angles in radians.
  192. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  193. output Tensor.
  194. Returns:
  195. Tensor, the corresponding angle in degrees. This is a tensor scalar if `x`
  196. is a tensor scalar.
  197. Supported Platforms:
  198. ``Ascend`` ``GPU`` ``CPU``
  199. Examples:
  200. >>> import mindspore.numpy as np
  201. >>> x = np.asarray([1, 2, 3, -4, -5])
  202. >>> output = np.rad2deg(x)
  203. >>> print(output)
  204. [ 57.295776 114.59155 171.88733 -229.1831 -286.47888 ]
  205. """
  206. _check_input_tensor(x)
  207. def convert(a):
  208. return a * 180.0 / pi
  209. return _apply_tensor_op(convert, x, dtype=dtype)
  210. def add(x1, x2, dtype=None):
  211. """
  212. Adds arguments element-wise.
  213. Note:
  214. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  215. not supported.
  216. Args:
  217. x1 (Tensor): input to be added.
  218. x2 (Tensor): input to be added.
  219. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  220. output Tensor.
  221. Returns:
  222. Tensor or scalar, the sum of `x1` and `x2`, element-wise. This is a scalar
  223. if both `x1` and `x2` are scalars.
  224. Supported Platforms:
  225. ``Ascend`` ``GPU`` ``CPU``
  226. Examples:
  227. >>> import mindspore.numpy as np
  228. >>> x1 = np.full((3, 2), [1, 2])
  229. >>> x2 = np.full((3, 2), [3, 4])
  230. >>> output = np.add(x1, x2)
  231. >>> print(output)
  232. [[4 6]
  233. [4 6]
  234. [4 6]]
  235. """
  236. # broadcast is not fully supported in tensor_add on CPU,
  237. # so we use tensor_sub as a substitute solution
  238. if _get_device() == 'CPU':
  239. return subtract(x1, F.neg_tensor(_to_tensor(x2)), dtype=dtype)
  240. return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)
  241. def subtract(x1, x2, dtype=None):
  242. """
  243. Subtracts arguments, element-wise.
  244. Note:
  245. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  246. not supported.
  247. Args:
  248. x1 (Tensor): the input to be subtracted from.
  249. x2 (Tensor): the input to be subtracted by.
  250. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  251. output Tensor.
  252. Returns:
  253. Tensor or scalar, the difference of `x1` and `x2`, element-wise. This is a
  254. scalar if both `x1` and `x2` are scalars.
  255. Supported Platforms:
  256. ``Ascend`` ``GPU`` ``CPU``
  257. Examples:
  258. >>> import mindspore.numpy as np
  259. >>> x1 = np.full((3, 2), [1, 2])
  260. >>> x2 = np.full((3, 2), [3, 4])
  261. >>> output = np.subtract(x1, x2)
  262. >>> print(output)
  263. [[-2 -2]
  264. [-2 -2]
  265. [-2 -2]]
  266. """
  267. return _apply_tensor_op(F.tensor_sub, x1, x2, dtype=dtype)
  268. def multiply(x1, x2, dtype=None):
  269. """
  270. Multiplies arguments element-wise.
  271. Note:
  272. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  273. not supported.
  274. Args:
  275. x1 (Tensor): input tensor to be multiplied.
  276. x2 (Tensor): input tensor to be multiplied.
  277. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  278. output Tensor.
  279. Returns:
  280. Tensor or scalar, the product of `x1` and `x2`, element-wise. This is a scalar
  281. if both `x1` and `x2` are scalars.
  282. Supported Platforms:
  283. ``Ascend`` ``GPU`` ``CPU``
  284. Examples:
  285. >>> import mindspore.numpy as np
  286. >>> x1 = np.full((3, 2), [1, 2])
  287. >>> x2 = np.full((3, 2), [3, 4])
  288. >>> output = np.multiply(x1, x2)
  289. >>> print(output)
  290. [[3 8]
  291. [3 8]
  292. [3 8]]
  293. """
  294. if _get_device() == 'CPU':
  295. _check_input_tensor(x1, x2)
  296. # broadcast is not fully supported on CPU backend,
  297. # and explicit broadcasting is performed
  298. shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
  299. x1 = _broadcast_to_shape(x1, shape_out)
  300. x2 = _broadcast_to_shape(x2, shape_out)
  301. return _apply_tensor_op(F.tensor_mul, x1, x2, dtype=dtype)
  302. def divide(x1, x2, dtype=None):
  303. """
  304. Returns a true division of the inputs, element-wise.
  305. Instead of the Python traditional "floor division", this returns a true
  306. division.
  307. Note:
  308. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  309. not supported.
  310. Args:
  311. x1 (Tensor): the divident.
  312. x2 (Tensor): the divisor.
  313. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  314. output Tensor.
  315. Returns:
  316. Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.
  317. Supported Platforms:
  318. ``Ascend`` ``GPU`` ``CPU``
  319. Examples:
  320. >>> import mindspore.numpy as np
  321. >>> x1 = np.full((3, 2), [1, 2])
  322. >>> x2 = np.full((3, 2), [3, 4])
  323. >>> output = np.divide(x1, x2)
  324. >>> print(output)
  325. [[0.33333334 0.5 ]
  326. [0.33333334 0.5 ]
  327. [0.33333334 0.5 ]]
  328. """
  329. x1, x2 = _to_tensor(x1, x2)
  330. if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
  331. x1 = F.cast(x1, mstype.float32)
  332. x2 = F.cast(x2, mstype.float32)
  333. return _apply_tensor_op(F.tensor_div, x1, x2, dtype=dtype)
  334. def true_divide(x1, x2, dtype=None):
  335. """
  336. Returns a true division of the inputs, element-wise.
  337. Instead of the Python traditional "floor division", this returns a true
  338. division.
  339. Note:
  340. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  341. not supported.
  342. Args:
  343. x1 (Tensor): the divident.
  344. x2 (Tensor): the divisor.
  345. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  346. output Tensor.
  347. Returns:
  348. Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.
  349. Supported Platforms:
  350. ``Ascend`` ``GPU`` ``CPU``
  351. Examples:
  352. >>> import mindspore.numpy as np
  353. >>> x1 = np.full((3, 2), [1, 2])
  354. >>> x2 = np.full((3, 2), [3, 4])
  355. >>> output = np.true_divide(x1, x2)
  356. >>> print(output)
  357. [[0.33333334 0.5 ]
  358. [0.33333334 0.5 ]
  359. [0.33333334 0.5 ]]
  360. """
  361. return divide(x1, x2, dtype=dtype)
  362. def power(x1, x2, dtype=None):
  363. """
  364. First array elements raised to powers from second array, element-wise.
  365. Raises each base in `x1` to the positionally-corresponding power in `x2`.
  366. Note:
  367. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  368. not supported.
  369. On GPU, the supported dtypes are np.float16, and np.float32.
  370. Args:
  371. x1 (Tensor): the bases.
  372. x2 (Tensor): the exponents.
  373. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  374. output Tensor.
  375. Returns:
  376. Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
  377. is a scalar if both `x1` and `x2` are scalars.
  378. Supported Platforms:
  379. ``Ascend`` ``GPU`` ``CPU``
  380. Examples:
  381. >>> import mindspore.numpy as np
  382. >>> x1 = np.full((3, 2), [1, 2]).astype('float32')
  383. >>> x2 = np.full((3, 2), [3, 4]).astype('float32')
  384. >>> output = np.power(x1, x2)
  385. >>> print(output)
  386. [[ 1. 16.]
  387. [ 1. 16.]
  388. [ 1. 16.]]
  389. """
  390. return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
  391. def float_power(x1, x2, dtype=None):
  392. """
  393. First array elements raised to powers from second array, element-wise.
  394. Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and
  395. `x2` must be broadcastable to the same shape. This differs from the power
  396. function in that integers, float16, and float64 are promoted to floats with
  397. a minimum precision of float32 so that the result is always inexact. The
  398. intent is that the function will return a usable result for negative powers
  399. and seldom overflow for positive powers.
  400. Note:
  401. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  402. not supported.
  403. Integers and floats are promoted to float32 instead of float64.
  404. Args:
  405. x1 (Tensor): the bases.
  406. x2 (Tensor): the exponenets.
  407. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  408. output Tensor.
  409. Returns:
  410. Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
  411. is a scalar if both `x1` and `x2` are scalars.
  412. Supported Platforms:
  413. ``Ascend`` ``GPU`` ``CPU``
  414. Examples:
  415. >>> import mindspore.numpy as np
  416. >>> x1 = np.arange(6)
  417. >>> x2 = np.array(3)
  418. >>> output = np.float_power(x1, x2)
  419. >>> print(output)
  420. [ 0. 1. 8. 27. 64. 125.]
  421. """
  422. if not _check_same_type(F.dtype(x1), mstype.float32):
  423. x1 = F.cast(x1, mstype.float32)
  424. if not _check_same_type(F.dtype(x2), mstype.float32):
  425. x2 = F.cast(x2, mstype.float32)
  426. return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
  427. def minimum(x1, x2, dtype=None):
  428. """
  429. Element-wise minimum of tensor elements.
  430. Compares two tensors and returns a new tensor containing the element-wise minima.
  431. Note:
  432. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  433. not supported.
  434. On Ascend, input arrays containing inf or NaN are not supported.
  435. Args:
  436. x1 (Tensor): first input tensor to be compared.
  437. x2 (Tensor): second input tensor to be compared.
  438. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  439. output Tensor.
  440. Returns:
  441. Tensor, element-wise minimum of `x1` and `x2`.
  442. Raises:
  443. TypeError: If inputs have types not specified above.
  444. ValueError: If the shapes of `x1` and `x2` cannot be broadcast.
  445. Supported Platforms:
  446. ``Ascend`` ``GPU`` ``CPU``
  447. Examples:
  448. >>> import mindspore.numpy as np
  449. >>> a = np.asarray([1, 2])
  450. >>> b = np.asarray([[1, 3],[1, 4]])
  451. >>> print(np.minimum(a, b))
  452. [[1 2]
  453. [1 2]]
  454. """
  455. if isinstance(x1, (int, float, bool, list, tuple)):
  456. x1 = asarray_const(x1)
  457. elif not isinstance(x1, Tensor):
  458. _raise_type_error("Input x1 is expected to be array_like")
  459. if isinstance(x2, (int, float, bool, list, tuple)):
  460. x2 = asarray_const(x2)
  461. elif not isinstance(x2, Tensor):
  462. _raise_type_error("Input x2 is expected to be array_like")
  463. # if both are scalars, expand x1 to 1d tensor, since cpu kernel doesn't support
  464. # comparisons with 2 scalars
  465. if x1.ndim == 0 and x2.ndim == 0:
  466. x1 = expand_dims(x1, 0)
  467. return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype).squeeze()
  468. if x1.ndim == 0:
  469. dtype = x2.dtype
  470. elif x2.ndim == 0:
  471. dtype = x1.dtype
  472. return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype)
  473. def mean(a, axis=None, keepdims=False, dtype=None):
  474. """
  475. Computes the arithmetic mean along the specified axis.
  476. Returns the average of the array elements. The average is taken
  477. over the flattened array by default, otherwise over the specified
  478. axis.
  479. Note:
  480. Numpy arguments `out` is not supported.
  481. On GPU, the supported dtypes are np.float16, and np.float32.
  482. Args:
  483. a (Tensor): input tensor containing numbers whose mean is desired.
  484. If a is not an array, a conversion is attempted.
  485. axis (None or int or tuple of integers, optional): Axis or axes along
  486. which the means are computed. The default is to compute
  487. the mean of the flattened array. If this is a tuple of
  488. ints, a mean is performed over multiple axes.
  489. keepdims (bool, optional): If this is set to True, the axes which
  490. are reduced are left in the result as dimensions with
  491. size one. With this option, the result will broadcast
  492. correctly against the input tensor.
  493. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  494. output Tensor.
  495. Returns:
  496. Tensor or scalar, an array containing the mean values.
  497. Raises:
  498. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  499. if the axes contain duplicates.
  500. Supported Platforms:
  501. ``Ascend`` ``GPU`` ``CPU``
  502. Examples:
  503. >>> import mindspore.numpy as np
  504. >>> a = np.arange(6, dtype='float32')
  505. >>> output = np.mean(a, 0)
  506. >>> print(output)
  507. 2.5
  508. """
  509. return _reduce(a, P.ReduceMean(keepdims), axis=axis, keepdims=keepdims, dtype=dtype)
  510. def inner(a, b):
  511. """
  512. Returns the inner product of two tensors.
  513. Ordinary inner product of vectors for 1-D tensors (without complex
  514. conjugation), in higher dimensions a sum product over the last
  515. axes.
  516. Note:
  517. Numpy argument `out` is not supported.
  518. On GPU, the supported dtypes are np.float16, and np.float32.
  519. On CPU, the supported dtypes are np.float16, np.float32, and
  520. np.float64.
  521. Args:
  522. a (Tensor): input tensor. If `a` and `b` are nonscalar, their last
  523. dimensions must match.
  524. b (Tensor): input tensor. If `a` and `b` are nonscalar, their last
  525. dimensions must match.
  526. Returns:
  527. Tensor or scalar.
  528. Raises:
  529. ValueError: if ``x1.shape[-1] != x2.shape[-1]``.
  530. Supported Platforms:
  531. ``Ascend`` ``GPU`` ``CPU``
  532. Examples:
  533. >>> import mindspore.numpy as np
  534. >>> a = np.ones((5, 3))
  535. >>> b = np.ones((2, 7, 3))
  536. >>> output = np.inner(a, b)
  537. >>> print(output)
  538. [[[3. 3. 3. 3. 3. 3. 3.]
  539. [3. 3. 3. 3. 3. 3. 3.]]
  540. [[3. 3. 3. 3. 3. 3. 3.]
  541. [3. 3. 3. 3. 3. 3. 3.]]
  542. [[3. 3. 3. 3. 3. 3. 3.]
  543. [3. 3. 3. 3. 3. 3. 3.]]
  544. [[3. 3. 3. 3. 3. 3. 3.]
  545. [3. 3. 3. 3. 3. 3. 3.]]
  546. [[3. 3. 3. 3. 3. 3. 3.]
  547. [3. 3. 3. 3. 3. 3. 3.]]]
  548. """
  549. if F.rank(a) == 0 or F.rank(b) == 0:
  550. return F.tensor_mul(a, b)
  551. _check_shape_aligned(F.shape(a), F.shape(b))
  552. aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
  553. aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
  554. a_aligned = F.reshape(a, aligned_shape_a)
  555. b_aligned = F.reshape(b, aligned_shape_b)
  556. res = _matmul_t(a_aligned, b_aligned)
  557. res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
  558. return res
  559. def dot(a, b):
  560. """
  561. Returns the dot product of two arrays.
  562. Specifically,
  563. If both `a` and `b` are 1-D arrays, it is inner product of vectors
  564. (without complex conjugation).
  565. If both `a` and `b` are 2-D arrays, it is matrix multiplication.
  566. If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
  567. If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
  568. over the last axis of `a` and `b`.
  569. If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
  570. sum product over the last axis of `a` and the second-to-last axis of `b`:
  571. ``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``
  572. Note:
  573. Numpy argument `out` is not supported.
  574. On GPU, the supported dtypes are np.float16, and np.float32.
  575. On CPU, the supported dtypes are np.float16, np.float32, and
  576. np.float64.
  577. Args:
  578. a (Tensor): input tensor
  579. b (Tensor): input tensor
  580. Returns:
  581. Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
  582. both scalars or both 1-D arrays then a scalar is returned;
  583. otherwise an array is returned
  584. Raises:
  585. ValueError: If the last dimension of `a` is not the same size
  586. as the second-to-last dimension of `b`.
  587. Supported Platforms:
  588. ``Ascend`` ``GPU`` ``CPU``
  589. Examples:
  590. >>> import mindspore.numpy as np
  591. >>> a = np.full((1, 3), 7).astype('float32')
  592. >>> b = np.full((2, 3, 4), 5).astype('float32')
  593. >>> output = np.dot(a, b)
  594. >>> print(output)
  595. [[[105. 105. 105. 105.]
  596. [105. 105. 105. 105.]]]
  597. """
  598. ndim_a, ndim_b = F.rank(a), F.rank(b)
  599. if ndim_a == 0 or ndim_b == 0:
  600. return F.tensor_mul(a, b)
  601. if ndim_a > 0 and ndim_b >= 2:
  602. perm = F.make_range(ndim_b)
  603. perm = perm[:-2] + (perm[-1],) + (perm[-2],)
  604. b = F.transpose(b, perm)
  605. if F.shape(a)[-1] != F.shape(b)[-1]:
  606. _raise_value_error('shapes are not aligned')
  607. a_aligned = F.reshape(a, (-1, F.shape(a)[-1]))
  608. b_aligned = F.reshape(b, (-1, F.shape(b)[-1]))
  609. res = _matmul_t(a_aligned, b_aligned)
  610. res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
  611. return res
  612. def outer(a, b):
  613. """
  614. Computes the outer product of two vectors.
  615. Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
  616. the outer product is:
  617. ``[[a0*b0 a0*b1 ... a0*bN ]``
  618. ``[a1*b0 . ]``
  619. ``[ ... . ]``
  620. ``[aM*b0 aM*bN ]]``
  621. Note:
  622. Numpy argument ``out`` is not supported.
  623. On GPU, the supported dtypes are np.float16, and np.float32.
  624. On CPU, the supported dtypes are np.float16, np.float32, and
  625. np.float64.
  626. Args:
  627. a (Tensor): first input vector. Input is flattened if not
  628. already 1-dimensional.
  629. b (Tensor): second input vector. Input is flattened if not
  630. already 1-dimensional.
  631. Returns:
  632. Tensor or scalar, ``out[i, j] = a[i] * b[j]``.
  633. Raises:
  634. TypeError: if the input is not a tensor.
  635. Supported Platforms:
  636. ``Ascend`` ``GPU`` ``CPU``
  637. Examples:
  638. >>> import mindspore.numpy as np
  639. >>> a = np.full(7, 2).astype('float32')
  640. >>> b = np.full(4, 3).astype('float32')
  641. >>> output = np.outer(a, b)
  642. >>> print(output)
  643. [[6. 6. 6. 6.]
  644. [6. 6. 6. 6.]
  645. [6. 6. 6. 6.]
  646. [6. 6. 6. 6.]
  647. [6. 6. 6. 6.]
  648. [6. 6. 6. 6.]
  649. [6. 6. 6. 6.]]
  650. """
  651. _check_input_tensor(a, b)
  652. if F.rank(a) != 1:
  653. a = ravel(a)
  654. if F.rank(b) != 1:
  655. b = ravel(b)
  656. a = F.reshape(a, (F.shape(a)[0], 1))
  657. b = _expand(b, 2)
  658. return _matmul(a, b)
  659. def tensordot(a, b, axes=2):
  660. """
  661. Computes tensor dot product along specified axes.
  662. Given two tensors, `a` and `b`, and an array_like object containing two array_like
  663. objects, `(a_axes, b_axes)`, sum the products of `a`'s and `b`'s elements (components)
  664. over the axes specified by `a_axes` and `b_axes`. The third argument can be a single
  665. non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of
  666. `a` and the first `N` dimensions of `b` are summed over.
  667. Three common use cases are:
  668. - ``axes = 0`` : tensor product
  669. - ``axes = 1`` : tensor dot product
  670. - ``axes = 2`` : (default) tensor double contraction
  671. When axes is integer_like, the sequence for evaluation will be: first the `-Nth`
  672. axis in `a` and 0th axis in `b`, and the -1th axis in `a` and `Nth` axis in `b` last.
  673. When there is more than one axis to sum over - and they are not the last (first)
  674. axes of `a` `(b)` - the argument axes should consist of two sequences of the same
  675. length, with the first axis to sum over given first in both sequences, the second
  676. axis second, and so forth.
  677. The shape of the result consists of the non-contracted axes of the first tensor,
  678. followed by the non-contracted axes of the second.
  679. Note:
  680. On CPU, the supported dypes are np.float16 and np.float32.
  681. On GPU, the supported dypes are np.float16 and np.float32.
  682. Args:
  683. a (Tensor): Tensor to "dot".
  684. b (Tensor): Tensor to "dot".
  685. axes (int or sequence of ints):
  686. integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N`
  687. axes of `b` in order. The sizes of the corresponding axes must match.
  688. sequence of ints: Or, a list of axes to be summed over, first sequence
  689. applying to `a`, second to `b`. Both elements `array_like` must be of the same
  690. length.
  691. Returns:
  692. Tensor, or list of tensors, the tensor dot product of the input.
  693. Supported Platforms:
  694. ``Ascend`` ``GPU`` ``CPU``
  695. Examples:
  696. >>> import mindspore.numpy as np
  697. >>> a = np.ones((3, 4, 5))
  698. >>> b = np.ones((4, 3, 2))
  699. >>> output = np.tensordot(a, b, axes=([1,0],[0,1]))
  700. >>> print(output.shape)
  701. (5, 2)
  702. """
  703. if F.rank(a)*F.rank(b) == 0 and axes == 0:
  704. return F.tensor_mul(a, b)
  705. return C.tensor_dot(a, b, axes)
  706. def std(x, axis=None, ddof=0, keepdims=False):
  707. """
  708. Computes the standard deviation along the specified axis.
  709. The standard deviation is the square root of the average of the squared deviations
  710. from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.
  711. Returns the standard deviation, which is computed for the flattened array by default,
  712. otherwise over the specified axis.
  713. Note:
  714. Numpy arguments `dtype`, `out` and `where` are not supported.
  715. Args:
  716. x (Tensor): A Tensor to be calculated.
  717. axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
  718. deviation is computed. Default: `None`.
  719. If `None`, compute the standard deviation of the flattened array.
  720. ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
  721. where :math:`N` represents the number of elements. Default: 0.
  722. keepdims: If this is set to True, the axes which are reduced are left in the result as
  723. dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
  724. If the default value is passed, then keepdims will not be passed through to the std method of
  725. sub-classes of tensor, however any non-default value will be. If the sub-class’ method does not
  726. implement keepdims any exceptions will be raised. Default: `False`.
  727. Returns:
  728. Standard deviation tensor.
  729. Supported Platforms:
  730. ``Ascend`` ``GPU`` ``CPU``
  731. Examples:
  732. >>> import mindspore.numpy as np
  733. >>> input_x = np.array([1., 2., 3., 4.])
  734. >>> output = np.std(input_x)
  735. >>> print(output)
  736. 1.118034
  737. """
  738. x = _to_tensor(x)
  739. return x.std(axis, ddof, keepdims)
  740. def var(x, axis=None, ddof=0, keepdims=False):
  741. """
  742. Computes the variance along the specified axis.
  743. The variance is the average of the squared deviations from the mean, i.e.,
  744. :math:`var = mean(abs(x - x.mean())**2)`.
  745. Returns the variance, which is computed for the flattened array by default,
  746. otherwise over the specified axis.
  747. Note:
  748. Numpy arguments `dtype`, `out` and `where` are not supported.
  749. Args:
  750. x (Tensor): A Tensor to be calculated.
  751. axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
  752. The default is to compute the variance of the flattened array. Default: `None`.
  753. ddof (int): Means Delta Degrees of Freedom. Default: 0.
  754. The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
  755. keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
  756. dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
  757. If the default value is passed, then keepdims will not be passed through to the var method of
  758. sub-classes of tensor, however any non-default value will be. If the sub-class method does not
  759. implement keepdims any exceptions will be raised. Default: `False`.
  760. Supported Platforms:
  761. ``Ascend`` ``GPU`` ``CPU``
  762. Returns:
  763. Standard deviation tensor.
  764. Examples:
  765. >>> import mindspore.numpy as np
  766. >>> input_x = np.array([1., 2., 3., 4.])
  767. >>> output = np.var(input_x)
  768. >>> print(output)
  769. 1.25
  770. """
  771. x = _to_tensor(x)
  772. return x.var(axis, ddof, keepdims)
  773. def ptp(x, axis=None, keepdims=False):
  774. """
  775. Range of values (maximum - minimum) along an axis.
  776. The name of the function comes from the acronym for "peak to peak".
  777. Note:
  778. Numpy arguments `dtype` and `out` are not supported.
  779. Args:
  780. x (Tensor): Input tensor.
  781. axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
  782. The default is to compute the variance of the flattened array. Default: None.
  783. keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
  784. dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
  785. If the default value is passed, then keepdims will not be passed through to the ptp method of
  786. sub-classes of tensor, however any non-default value will be. Default is False.
  787. Returns:
  788. Tensor.
  789. Raises:
  790. TypeError: if inputs have types not specified above.
  791. Supported Platforms:
  792. ``Ascend`` ``GPU`` ``CPU``
  793. Examples:
  794. >>> import mindspore.numpy as np
  795. >>> x = np.array([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]])
  796. >>> print(np.ptp(x, axis=1))
  797. [8. 6.]
  798. >>> print(np.ptp(x, axis=0))
  799. [2. 0. 5. 2.]
  800. """
  801. _check_input_tensor(x)
  802. return x.ptp(axis, keepdims)
def average(x, axis=None, weights=None, returned=False):
    """
    Computes the weighted average along the specified axis.

    Args:
        x (Tensor): A Tensor to be averaged.
        axis (Union[None, int, tuple(int)]): Axis along which to average `x`. Default: `None`.
            If the axis is `None`, it will average over all of the elements of the tensor `x`.
            If the axis is negative, it counts from the last to the first axis.
        weights (Union[None, Tensor]): Weights associated with the values in `x`. Default: `None`.
            If `weights` is `None`, all the data in `x` are assumed to have a weight equal to one.
            If `weights` is 1-D tensor, the length must be the same as the given axis.
            Otherwise, `weights` should have the same shape as `x`.
        returned (bool): Default: `False`.
            If `True`, the tuple (average, sum_of_weights) is returned.
            If `False`, only the average is returned.

    Returns:
        Averaged Tensor. If returned is `True`, return tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([[1., 2.], [3., 4.]])
        >>> output = np.average(input_x, axis=0, weights=input_x, returned=True)
        >>> print(output)
        (Tensor(shape=[2], dtype=Float32, value= [ 2.50000000e+00, 3.33333325e+00]),
        Tensor(shape=[2], dtype=Float32, value= [ 4.00000000e+00, 6.00000000e+00]))
    """
    _check_input_tensor(x)
    if axis is not None:
        # Validate axis (int or tuple of int) and normalize negative axes.
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)
    # Placeholder initialization; overwritten on every reachable path below.
    x_avg = full((), nan, F.dtype(x))
    sum_of_weights = None
    if weights is None:
        # Unweighted case: plain mean; the "sum of weights" is the element count.
        x_avg = mean(x, axis)
        sum_of_weights = compute_weights_for_mean(x, x_avg, axis)
    else:
        _check_input_tensor(weights)
        if x.shape == weights.shape:
            # Element-wise weights matching x exactly.
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        elif F.rank(weights) == 1:
            # 1-D weights: only valid with a single integer axis; reshape so the
            # weights broadcast along that axis of x.
            if not isinstance(axis, int):
                _raise_type_error("Axis must be specified when shapes of x and weights differ.")
            perm = _expanded_shape(x.ndim, weights.shape[0], axis)
            weights = weights.reshape(perm)
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        else:
            _raise_type_error("Weights should be None, 1-D or the same shape as input x.")
    if returned:
        # Match numpy.average: sum_of_weights is broadcast to the shape of the average.
        if x_avg.shape != sum_of_weights.shape:
            sum_of_weights = _broadcast_to(sum_of_weights, sum_of_weights.shape, x_avg.shape, x_avg.ndim)
        return (x_avg, sum_of_weights)
    return x_avg
  856. def compute_weights_for_mean(x, x_avg, axis):
  857. """Computes weights for np.average."""
  858. if axis is None:
  859. sum_of_weights = full((), x.size, F.dtype(x))
  860. else:
  861. fill_value = 1
  862. if isinstance(axis, int) or (isinstance(axis, tuple) and F.tuple_len(axis) == 1):
  863. fill_value = x.shape[axis] if isinstance(axis, int) else x.shape[axis[0]]
  864. elif axis is None:
  865. for sh in x.shape:
  866. fill_value *= sh
  867. else:
  868. for ax in axis:
  869. fill_value *= x.shape[ax]
  870. sum_of_weights = full_like(x_avg, fill_value, F.dtype(x))
  871. return sum_of_weights
  872. def comput_avg(x, axis, weights):
  873. """Computes average value of input x with given parameters."""
  874. axis = () if axis is None else axis
  875. x_mul = F.tensor_mul(x, weights)
  876. x_sum = _reduce_sum_default(x_mul, axis)
  877. sum_of_weights = _reduce_sum_default(weights, axis)
  878. x_avg = F.tensor_div(x_sum, sum_of_weights)
  879. return x_avg, sum_of_weights
  880. def matmul(x1, x2, dtype=None):
  881. """
  882. Returns the matrix product of two arrays.
  883. Note:
  884. Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  885. not supported.
  886. On GPU, the supported dtypes are np.float16 and np.float32.
  887. On CPU, the supported dtypes are np.float16 and np.float32.
  888. Args:
  889. x1 (Tensor): Input tensor, scalar not allowed.
  890. x2 (Tensor): Input tensor, scalar not allowed.
  891. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  892. output Tensor.
  893. Returns:
  894. Tensor or scalar, the matrix product of the inputs. This is a scalar only
  895. when both `x1`, `x2` are 1-d vectors.
  896. Raises:
  897. ValueError: If the last dimension of `x1` is not the same size as the
  898. second-to-last dimension of `x2`, or if a scalar value is passed in.
  899. Supported Platforms:
  900. ``Ascend`` ``GPU`` ``CPU``
  901. Examples:
  902. >>> import mindspore.numpy as np
  903. >>> x1 = np.arange(2*3*4).reshape(2, 3, 4).astype('float32')
  904. >>> x2 = np.arange(4*5).reshape(4, 5).astype('float32')
  905. >>> output = np.matmul(x1, x2)
  906. >>> print(output)
  907. [[[ 70. 76. 82. 88. 94.]
  908. [ 190. 212. 234. 256. 278.]
  909. [ 310. 348. 386. 424. 462.]]
  910. [[ 430. 484. 538. 592. 646.]
  911. [ 550. 620. 690. 760. 830.]
  912. [ 670. 756. 842. 928. 1014.]]]
  913. """
  914. return C.matmul(x1, x2, dtype=dtype)
  915. def square(x, dtype=None):
  916. """
  917. Returns the element-wise square of the input.
  918. Note:
  919. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  920. not supported.
  921. On GPU, the supported dtypes are np.float16 and np.float32.
  922. Args:
  923. x (Tensor): Input data.
  924. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  925. output Tensor.
  926. Returns:
  927. Tensor or scalar, element-wise ``x*x``, of the same shape and dtype as `x`.
  928. This is a scalar if `x` is a scalar..
  929. Supported Platforms:
  930. ``Ascend`` ``GPU`` ``CPU``
  931. Examples:
  932. >>> import mindspore.numpy as np
  933. >>> x = np.square(np.arange(6).reshape(2, 3).astype('float32'))
  934. >>> print(x)
  935. [[ 0. 1. 4.]
  936. [ 9. 16. 25.]]
  937. """
  938. return _apply_tensor_op(F.square, x, dtype=dtype)
  939. def sqrt(x, dtype=None):
  940. """
  941. Returns the non-negative square-root of an array, element-wise.
  942. Note:
  943. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  944. not supported.
  945. On GPU, the supported dtypes are np.float16 and np.float32.
  946. Args:
  947. x (Tensor): The values whose square-roots are required.
  948. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  949. output Tensor.
  950. Returns:
  951. Tensor or scalar, an array of the same shape as `x`, containing the positive
  952. square-root of each element in `x`. For negative elements, nan is returned.
  953. This is a scalar if `x` is a scalar.
  954. Supported Platforms:
  955. ``Ascend`` ``GPU`` ``CPU``
  956. Examples:
  957. >>> import mindspore.numpy as np
  958. >>> x = np.arange(6).reshape(2, 3).astype('float32')
  959. >>> x_squared = np.square(x)
  960. >>> output = np.sqrt(x_squared)
  961. >>> print(output)
  962. [[ 0. 1. 2.]
  963. [ 3. 4. 5.]]
  964. """
  965. return _apply_tensor_op(F.sqrt, x, dtype=dtype)
  966. def reciprocal(x, dtype=None):
  967. """
  968. Returns the reciprocal of the argument, element-wise.
  969. Calculates ``1/x``.
  970. Note:
  971. Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
  972. not supported.
  973. When `where` is provided, `out` must have a tensor value. `out` is not supported
  974. for storing the result, however it can be used in combination with `where` to set
  975. the value at indices for which `where` is set to False.
  976. Args:
  977. x (Tensor): Input array. For integer arguments with absolute value larger
  978. than 1 the result is always zero because of the way Python handles
  979. integer division. For integer zero the result is an overflow.
  980. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  981. output Tensor.
  982. Returns:
  983. Tensor or scalar, this is a scalar if `x` is a scalar.
  984. Supported Platforms:
  985. ``Ascend`` ``GPU`` ``CPU``
  986. Examples:
  987. >>> import mindspore.numpy as np
  988. >>> x = np.arange(1, 7).reshape(2, 3).astype('float32')
  989. >>> output = np.reciprocal(x)
  990. >>> print(output)
  991. [[1. 0.5 0.33333334]
  992. [0.25 0.2 0.16666667]]
  993. """
  994. return _apply_tensor_op(lambda x: F.tensor_div(1, x), x, dtype=dtype)
  995. def log(x, dtype=None):
  996. """
  997. Returns the natural logarithm, element-wise.
  998. The natural logarithm log is the inverse of the exponential function, so that
  999. ``log(exp(x)) = x``. The natural logarithm is logarithm in base e.
  1000. Note:
  1001. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1002. not supported.
  1003. On GPU, the supported dtypes are np.float16, and np.float32.
  1004. On CPU, the supported dtypes are np.float16, np.float32, and np.float64.
  1005. Args:
  1006. x (Tensor): Input array.
  1007. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1008. output Tensor.
  1009. Returns:
  1010. Tensor or scalar, the natural logarithm of `x`, element-wise. This is a
  1011. scalar if `x` is a scalar.
  1012. Supported Platforms:
  1013. ``Ascend`` ``GPU`` ``CPU``
  1014. Examples:
  1015. >>> import mindspore.numpy as np
  1016. >>> x = np.array([2, 3, 4]).astype('float32')
  1017. >>> output = np.log(x)
  1018. >>> print(output)
  1019. [0.69314575 1.09861 1.3862929 ]
  1020. """
  1021. return _apply_tensor_op(F.log, x, dtype=dtype)
  1022. def _prop_nan(fn, x1, x2):
  1023. """Selects NaN if either element is NaN"""
  1024. has_nan = F.logical_or(_isnan(x1), _isnan(x2))
  1025. nan_tensor = F.fill(_promote(F.dtype(x1), F.dtype(x2)), F.shape(has_nan), nan)
  1026. res = fn(x1, x2)
  1027. return F.select(has_nan, nan_tensor, res)
def maximum(x1, x2, dtype=None):
    """
    Returns the element-wise maximum of array elements.

    Compares two arrays and returns a new array containing the element-wise maxima.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On Ascend, input arrays containing inf or NaN are not supported.

    Args:
        x1 (Tensor): Input array
        x2 (Tensor): The array holding the elements to be compared. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the maximum of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
        >>> print(output)
        [2 5 4]
    """
    # Coerce python scalars/sequences to Tensors; anything else is rejected.
    if isinstance(x1, (int, float, bool, list, tuple)):
        x1 = asarray_const(x1)
    elif not isinstance(x1, Tensor):
        _raise_type_error("Input x1 is expected to be array_like")

    if isinstance(x2, (int, float, bool, list, tuple)):
        x2 = asarray_const(x2)
    elif not isinstance(x2, Tensor):
        _raise_type_error("Input x2 is expected to be array_like")

    # F.maximum does not support when both operands are scalar
    if x1.ndim == 0 and x2.ndim == 0:
        # Temporarily promote to 1-d, then squeeze back to a scalar result.
        x1 = expand_dims(x1, 0)
        return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype).squeeze()
    # NOTE(review): when exactly one operand is 0-d, `dtype` is overwritten with the
    # other operand's dtype, which discards a caller-supplied `dtype` — looks
    # intentional as a scalar-broadcast workaround, but confirm against callers.
    if x1.ndim == 0:
        dtype = x2.dtype
    elif x2.ndim == 0:
        dtype = x1.dtype
    # _prop_nan ensures NaN inputs propagate to the output (numpy.maximum semantics).
    return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype)
def heaviside(x1, x2, dtype=None):
    """
    Computes the Heaviside step function.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input values.
        x2 (Tensor): The value of the function when `x1` is 0. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the output array, element-wise Heaviside step function
        of `x1`. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(0.5))
        >>> print(output)
        [0. 0.5 1. ]
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(1))
        >>> print(output)
        [0. 1. 1.]
    """
    def _heaviside(x1, x2):
        """Computes heaviside without passing keyword arguments"""
        # performs type promotion
        dtype1 = F.dtype(x1)
        dtype2 = F.dtype(x2)
        dtype_out = _promote(dtype1, dtype2)
        if not _check_same_type(dtype1, dtype_out):
            x1 = F.cast(x1, dtype_out)
        if not _check_same_type(dtype2, dtype_out):
            x2 = F.cast(x2, dtype_out)
        # performs broadcast
        shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
        x1 = _broadcast_to_shape(x1, shape_out)
        x2 = _broadcast_to_shape(x2, shape_out)
        # Two masked overwrites of x2: first 0 where x1 < 0, then 1 where x1 > 0.
        # Positions where x1 == 0 fail both comparisons and keep the original x2,
        # which is exactly the Heaviside definition. Order of the selects matters
        # only in that both must run before returning.
        x2 = F.select(x1 < 0, zeros(shape_out, dtype_out), x2)
        x2 = F.select(x1 > 0, ones(shape_out, dtype_out), x2)
        return x2
    return _apply_tensor_op(_heaviside, x1, x2, dtype=dtype)
  1116. def amax(a, axis=None, keepdims=False, initial=None, where=True):
  1117. """
  1118. Returns the maximum of an array or maximum along an axis.
  1119. Note:
  1120. Numpy argument `out` is not supported.
  1121. On GPU, the supported dtypes are np.float16, and np.float32.
  1122. Args:
  1123. a (Tensor): Input data.
  1124. axis (None or int or tuple of integers, optional): defaults to None. Axis or
  1125. axes along which to operate. By default, flattened input is used. If
  1126. this is a tuple of integers, the maximum is selected over multiple axes,
  1127. instead of a single axis or all the axes as before.
  1128. keepdims (boolean, optional): defaults to False.
  1129. If this is set to True, the axes which are reduced are left in the
  1130. result as dimensions with size one. With this option, the result will
  1131. broadcast correctly against the input array.
  1132. initial (scalar, optional): defaults to None.
  1133. The minimum value of an output element. Must be present to allow
  1134. computation on empty slice.
  1135. where (boolean Tensor, optional): defaults to True.
  1136. A boolean array which is broadcasted to match the dimensions of array,
  1137. and selects elements to include in the reduction. If non-default value
  1138. is passed, initial must also be provided.
  1139. Returns:
  1140. Tensor or scalar, maximum of `a`. If `axis` is None, the result is a scalar
  1141. value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
  1142. Raises:
  1143. TypeError: if the input is not a tensor.
  1144. Supported Platforms:
  1145. ``Ascend`` ``GPU`` ``CPU``
  1146. Examples:
  1147. >>> import mindspore.numpy as np
  1148. >>> a = np.arange(4).reshape((2,2)).astype('float32')
  1149. >>> output = np.amax(a)
  1150. >>> print(output)
  1151. 3.0
  1152. >>> output = np.amax(a, axis=0)
  1153. >>> print(output)
  1154. [2. 3.]
  1155. >>> output = np.amax(a, axis=1)
  1156. >>> print(output)
  1157. [1. 3.]
  1158. >>> output = np.amax(a, where=np.array([False, True]), initial=-1, axis=0)
  1159. >>> print(output)
  1160. [-1. 3.]
  1161. """
  1162. return a.max(axis, keepdims, initial, where)
  1163. def amin(a, axis=None, keepdims=False, initial=None, where=True):
  1164. """
  1165. Returns the minimum of an array or minimum along an axis.
  1166. Note:
  1167. Numpy argument `out` is not supported.
  1168. On GPU, the supported dtypes are np.float16, and np.float32.
  1169. Args:
  1170. a (Tensor): Input data.
  1171. axis (None or int or tuple of integers, optional): defaults to None. Axis or
  1172. axes along which to operate. By default, flattened input is used. If
  1173. this is a tuple of integers, the minimum is selected over multiple axes,
  1174. instead of a single axis or all the axes as before.
  1175. keepdims (bool, optional): defaults to False.
  1176. If this is set to True, the axes which are reduced are left in the
  1177. result as dimensions with size one. With this option, the result will
  1178. broadcast correctly against the input array.
  1179. initial (Number, optional): defaults to None.
  1180. The maximum value of an output element. Must be present to allow
  1181. computation on empty slice.
  1182. where (bool Tensor, optional): defaults to True.
  1183. A boolean array which is broadcasted to match the dimensions of array,
  1184. and selects elements to include in the reduction. If non-default value
  1185. is passed, initial must also be provided.
  1186. Returns:
  1187. Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
  1188. value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
  1189. Raises:
  1190. TypeError: if the input is not a tensor.
  1191. Supported Platforms:
  1192. ``Ascend`` ``GPU`` ``CPU``
  1193. Examples:
  1194. >>> import mindspore.numpy as np
  1195. >>> a = np.arange(4).reshape((2,2)).astype('float32')
  1196. >>> output = np.amin(a)
  1197. >>> print(output)
  1198. 0.0
  1199. >>> output = np.amin(a, axis=0)
  1200. >>> print(output)
  1201. [0. 1.]
  1202. >>> output = np.amin(a, axis=1)
  1203. >>> print(output)
  1204. [0. 2.]
  1205. >>> output = np.amin(a, where=np.array([False, True]), initial=10, axis=0)
  1206. >>> print(output)
  1207. [10. 1.]
  1208. """
  1209. return a.min(axis, keepdims, initial, where)
def hypot(x1, x2, dtype=None):
    """
    Given the "legs" of a right triangle, returns its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like
    (i.e., unambiguously cast-able to a scalar type), it is broadcast for use
    with each element of the other argument. (See Examples)

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x1 (Tensor): Leg of the triangle(s).
        x2 (Tensor): Leg of the triangle(s). If ``x1.shape != x2.shape``, they
            must be broadcastable to a common shape (which becomes the shape of
            the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the hypotenuse of the triangle(s). This is a scalar if
        both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
        >>> output = np.hypot(3*np.ones((3, 3)), np.array([4.0]))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
    """
    def _hypot(x1, x2):
        """Computes hypotenuse without passing keyword arguments"""
        if _get_device() == 'CPU':
            # broadcast is not fully supported in tensor_add on CPU,
            # so we use tensor_sub as a substitute solution
            # a - (-b) == a + b, so this still computes sqrt(x1^2 + x2^2).
            return F.sqrt(F.tensor_sub(F.square(x1), F.neg_tensor(F.square(x2))))
        return F.sqrt(F.tensor_add(F.square(x1), F.square(x2)))
    return _apply_tensor_op(_hypot, x1, x2, dtype=dtype)
  1254. def floor(x, dtype=None):
  1255. """
  1256. Returns the floor of the input, element-wise.
  1257. The floor of the scalar `x` is the largest integer `i`, such that ``i <= x``.
  1258. Note:
  1259. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1260. not supported.
  1261. On GPU, the supported dtypes are np.float16 and np.float32.
  1262. On CPU, the supported dtypes are np.float16, np.float32, and np.float64.
  1263. Args:
  1264. x (Tensor): input data.
  1265. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1266. output Tensor.
  1267. Returns:
  1268. Tensor or scalar, the floor of each element in `x`. This is a scalar if `x`
  1269. is a scalar.
  1270. Supported Platforms:
  1271. ``Ascend`` ``GPU`` ``CPU``
  1272. Examples:
  1273. >>> import mindspore.numpy as np
  1274. >>> output = np.floor(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
  1275. >>> print(output)
  1276. [-2. -2. -1. 0. 1. 1. 2.]
  1277. """
  1278. return _apply_tensor_op(F.floor, x, dtype=dtype)
  1279. def floor_divide(x1, x2, dtype=None):
  1280. """
  1281. Returns the largest integer smaller or equal to the division of the inputs.
  1282. It is equivalent to the Python // operator and pairs with the
  1283. Python % (remainder), function so that ``a = a % b + b * (a // b)`` up to roundoff.
  1284. Note:
  1285. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1286. not supported.
  1287. Args:
  1288. x1 (Tensor): Input array.
  1289. x2 (Tensor): Input array.
  1290. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1291. output Tensor.
  1292. Returns:
  1293. Tensor or scalar.
  1294. Supported Platforms:
  1295. ``Ascend`` ``GPU`` ``CPU``
  1296. Examples:
  1297. >>> import mindspore.numpy as np
  1298. >>> output = np.floor_divide(np.array([1., 2., 3., 4.]), np.array(2.5))
  1299. >>> print(output)
  1300. [0. 0. 1. 1.]
  1301. """
  1302. return _apply_tensor_op(F.tensor_floordiv, x1, x2, dtype=dtype)
def _remainder(x1, x2, c_style=False):
    """Computes remainder without applying keyword arguments.

    With ``c_style=False`` the quotient is floored (Python ``%`` semantics,
    remainder has the sign of the divisor); with ``c_style=True`` the quotient
    is truncated towards zero (C ``fmod`` semantics, remainder has the sign of
    the dividend).
    """
    # Promoted dtype of the ORIGINAL inputs; the result is cast back to it at the end.
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(dtype):
        # Integer inputs are computed in float32 to avoid integer-division truncation.
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    quotient = F.tensor_div(x1, x2)
    if c_style:
        # Truncate towards zero (fmod semantics).
        quotient = fix(quotient)
    else:
        # Floor (Python % semantics).
        quotient = F.floor(quotient)
    prod = F.tensor_mul(x2, quotient)
    res = F.tensor_sub(x1, prod)
    if _check_is_int(dtype):
        # For integer dtypes, define x % 0 == 0 instead of returning the
        # float-division artifact produced above.
        zeros_tensor = zeros(F.shape(quotient), F.dtype(quotient))
        x2_zeros = F.equal(x2, zeros_tensor)
        res = F.select(x2_zeros, zeros_tensor, res)
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
  1323. def remainder(x1, x2, dtype=None):
  1324. """
  1325. Returns element-wise remainder of division.
  1326. Computes the remainder complementary to the floor_divide function. It is
  1327. equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
  1328. as the divisor `x2`. The MATLAB function equivalent to np.remainder is mod.
  1329. Note:
  1330. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1331. not supported.
  1332. Args:
  1333. x1 (Tensor): input array.
  1334. x2 (Tensor): input array.
  1335. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1336. output Tensor.
  1337. Returns:
  1338. Tensor or scalar, the element-wise remainder of the quotient
  1339. ``floor_divide(x1, x2)``. This is a scalar if both `x1` and `x2` are scalars.
  1340. Supported Platforms:
  1341. ``Ascend`` ``GPU`` ``CPU``
  1342. Examples:
  1343. >>> import mindspore.numpy as np
  1344. >>> output = np.remainder(np.array([4, 7]), np.array([2, 3]))
  1345. >>> print(output)
  1346. [0 1]
  1347. >>> output = np.remainder(np.arange(7), np.array(5))
  1348. >>> print(output)
  1349. [0 1 2 3 4 0 1]
  1350. """
  1351. return _apply_tensor_op(_remainder, x1, x2, dtype=dtype)
def fix(x):
    """
    Rounds to nearest integer towards zero.

    Rounds an array of floats element-wise to nearest integer towards zero. The
    rounded values are returned as floats.

    Note:
        Numpy argument `out` is not supported.

    Args:
        x (Tensor): An array of floats to be rounded.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.fix(np.array([2.1, 2.9, -2.1, -2.9]))
        >>> print(output)
        [ 2. 2. -2. -2.]
    """
    _check_input_tensor(x)
    if not _check_is_float(F.dtype(x)):
        # floor/select below require a float dtype.
        x = F.cast(x, mstype.float32)
    floored = F.floor(x)
    # change to F.ceil once supported on CPU.
    # ceil(x) == -floor(-x): emulate ceil via double negation.
    ceiled = F.neg_tensor(F.floor(F.neg_tensor(x)))
    is_neg = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
    # Truncation towards zero: ceil for negative values, floor for the rest.
    return F.select(is_neg, ceiled, floored)
  1381. def fmod(x1, x2, dtype=None):
  1382. """
  1383. Returns the element-wise remainder of division.
  1384. This is the NumPy implementation of the C library function fmod, the remainder
  1385. has the same sign as the dividend `x1`. It is equivalent to the Matlab(TM) rem
  1386. function and should not be confused with the Python modulus operator ``x1 % x2``.
  1387. Note:
  1388. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1389. not supported.
  1390. Args:
  1391. x1 (Tensor): the first input arrays.
  1392. x2 (Tensor): the second input arrays.
  1393. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1394. output Tensor.
  1395. Returns:
  1396. Tensor or scalar, the remainder of the division of `x1` by `x2`. This is a
  1397. scalar if both `x1` and `x2` are scalars.
  1398. Supported Platforms:
  1399. ``Ascend`` ``GPU`` ``CPU``
  1400. Examples:
  1401. >>> import mindspore.numpy as np
  1402. >>> output = np.fmod(np.array([-3, -2, -1, 1, 2, 3]), np.array(2))
  1403. >>> print(output)
  1404. [-1 0 -1 1 0 1]
  1405. """
  1406. return _apply_tensor_op(lambda x1, x2: _remainder(x1, x2, c_style=True), x1, x2, dtype=dtype)
  1407. def trunc(x, dtype=None):
  1408. """
  1409. Returns the truncated value of the input, element-wise.
  1410. The truncated value of the scalar `x` is the nearest integer `i` which is closer to zero
  1411. than `x` is. In short, the fractional part of the signed number `x` is discarded.
  1412. Note:
  1413. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1414. not supported.
  1415. Args:
  1416. x (Tensor): input data.
  1417. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1418. output Tensor.
  1419. Returns:
  1420. Tensor or scalar, the truncated value of each element in `x`. This is a scalar if `x` is
  1421. a scalar.
  1422. Supported Platforms:
  1423. ``Ascend`` ``GPU`` ``CPU``
  1424. Examples:
  1425. >>> import mindspore.numpy as np
  1426. >>> output = np.trunc(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
  1427. >>> print(output)
  1428. [-1. -1. -0. 0. 1. 1. 2.]
  1429. """
  1430. return _apply_tensor_op(fix, x, dtype=dtype)
  1431. def exp(x, dtype=None):
  1432. """
  1433. Calculates the exponential of all elements in the input array.
  1434. Note:
  1435. Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
  1436. not supported.
  1437. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1438. for storing the result, however it can be used in combination with `where` to set
  1439. the value at indices for which `where` is set to False.
  1440. On GPU, the supported dtypes are np.float16, and np.float32.
  1441. On CPU, the supported dtypes are np.float16, np.float32, np.float64.
  1442. Args:
  1443. x (Tensor): input data.
  1444. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1445. output Tensor.
  1446. Returns:
  1447. Tensor or scalar, element-wise exponential of `x`. This is a scalar if both
  1448. `x1` and `x2` are scalars.
  1449. Supported Platforms:
  1450. ``Ascend`` ``GPU`` ``CPU``
  1451. Examples:
  1452. >>> import mindspore.numpy as np
  1453. >>> output = np.exp(np.arange(5).astype(np.float32))
  1454. >>> print(output)
  1455. [ 1. 2.718282 7.3890557 20.085537 54.598145 ]
  1456. """
  1457. return _apply_tensor_op(F.tensor_exp, x, dtype=dtype)
def expm1(x, dtype=None):
    """
    Calculates ``exp(x) - 1`` for all elements in the array.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise exponential minus one, ``out = exp(x) - 1``.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.expm1(np.arange(5).astype(np.float32))
        >>> print(output)
        [ 0. 1.7182819 6.389056 19.085537 53.59815 ]
    """
    return _apply_tensor_op(F.tensor_expm1, x, dtype=dtype)
  1482. def divmod_(x1, x2, dtype=None):
  1483. """
  1484. Returns element-wise quotient and remainder simultaneously.
  1485. Args:
  1486. x1(Union[Tensor]): Dividend tensor.
  1487. x2(Union[Tensor, int, float, bool]): Divisor. If ``x1.shape != x2.shape``,
  1488. they must be broadcastable to a common shape.
  1489. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1490. output Tensor.
  1491. Returns:
  1492. Element-wise quotient and remainder from floor division, in format of (quotient, remainder)
  1493. Raises:
  1494. TypeError: if `x1` and `x2` are not Tensor or scalar.
  1495. Supported Platforms:
  1496. ``Ascend`` ``GPU`` ``CPU``
  1497. Examples:
  1498. >>> import mindspore.numpy as np
  1499. >>> a = np.array([1, 2, 3, 4, 5])
  1500. >>> print(np.divmod(a, 1.5))
  1501. (Tensor(shape=[5], dtype=Float32,
  1502. value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 3.00000000e+00]),
  1503. Tensor(shape=[5], dtype=Float32,
  1504. value= [ 1.00000000e+00, 5.00000000e-01, 0.00000000e+00, 1.00000000e+00, 5.00000000e-01]))
  1505. """
  1506. q = F.tensor_floordiv(x1, x2)
  1507. r = remainder(x1, x2)
  1508. if dtype is not None:
  1509. q = q.astype(dtype)
  1510. r = r.astype(dtype)
  1511. return (q, r)
  1512. def _handle_prepend_append(combined, tensor, additional_tensor, axis):
  1513. """Concatenates prepend or append to tensor."""
  1514. if isinstance(additional_tensor, (int, float, bool)):
  1515. additional_tensor = asarray_const(additional_tensor)
  1516. elif not isinstance(additional_tensor, Tensor):
  1517. _raise_type_error("prepend must be scalar or Tensor, but got ", additional_tensor)
  1518. additional_shape = tensor.shape
  1519. additional_shape = _tuple_setitem(additional_shape, axis, 1)
  1520. additional_tensor = _broadcast_to_shape(additional_tensor, additional_shape)
  1521. combined += (additional_tensor,)
  1522. return combined
def diff(a, n=1, axis=-1, prepend=None, append=None):
    """
    Calculates the n-th discrete difference along the given axis.

    The first difference is given by :math:`out[i] = a[i+1] - a[i]` along the given axis,
    higher differences are calculated by using `diff` iteratively.

    Note:
        Since zero-shaped Tensor is not supported in MindSpore, a value error is raised if
        an empty Tensor is encountered.

    Args:
        a (Tensor): Input tensor.
        n (int, optional): The number of times values are differenced. If zero,
            the input is returned as-is.
        axis (int, optional): The axis along which the difference is taken, default
            is the last axis.
        prepend/append (Tensor, optional): Values to prepend or append to a along
            `axis` prior to performing the difference. Scalar values are expanded to
            arrays with length 1 in the direction of `axis` and the shape of the input
            array in along all other axes. Otherwise the dimension and shape must
            match `a` except along axis.

    Returns:
        The n-th differences. The shape of the output is the same as a except along
        `axis` where the dimension is smaller by `n`. The type of the output is the same
        as the type of the difference between any two elements of `a`. This is the same
        as the type of `a` in most cases.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If ``n < 0``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([1, 3, -1, 0, 4])
        >>> print(np.diff(arr, n=2))
        [-6 5 3]
    """
    # This implementation is inspired by jax.numpy
    _check_input_tensor(a)
    axis = _canonicalize_axis(axis, a.ndim)
    if not isinstance(n, int):
        _raise_type_error("Input n should be int, but got ", n)
    if n < 0:
        _raise_value_error("Input n must > 0.")
    if n == 0:
        return a
    # Optionally concatenate prepend/append (broadcast to length-1 slices along
    # `axis`) onto `a` before differencing.
    combined = ()
    if prepend is not None:
        combined = _handle_prepend_append(combined, a, prepend, axis)
    combined += (a,)
    if append is not None:
        combined = _handle_prepend_append(combined, a, append, axis)
    if combined:
        a = concatenate(combined, axis)
    # if n > maximum length allowed, the tensor is empty, and is not supported
    if n >= a.shape[axis]:
        _raise_value_error("n is bigger then the specified dimension, this will result in an empty tensor.")
    original_dtype = a.dtype
    # will change once F.tensor_slice supports types other than float32
    if not _check_is_float(original_dtype):
        a = a.astype(mstype.float32)
    # move the differencing axis to the end so slice offsets are easy to build
    a = moveaxis(a, axis, -1)
    for _ in F.make_range(n):
        slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
        slice_size = F.shape(a)[:-1] + (F.shape(a)[-1] - 1,)
        # equivalent of a[..., 1:] - a[..., :-1]
        minuend = F.tensor_slice(a, slice_start + (1,), slice_size)
        subtrahend = F.tensor_slice(a, slice_start + (0,), slice_size)
        a = F.tensor_sub(minuend, subtrahend)
    if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
    return moveaxis(a, -1, axis)
  1592. def ediff1d(ary, to_end=None, to_begin=None):
  1593. """
  1594. The differences between consecutive elements of a tensor.
  1595. Args:
  1596. ary (Tensor): If necessary, will be flattened before the differences are taken.
  1597. to_end (Tensor or scalar, optional): Number(s) to append at the end of the
  1598. returned differences.
  1599. to_begin (Tensor or scalar, optional): Number(s) to prepend at the beginning
  1600. of the returned differences.
  1601. Returns:
  1602. The differences.
  1603. Raises:
  1604. TypeError: If inputs have types not specified above.
  1605. Supported Platforms:
  1606. ``Ascend`` ``GPU`` ``CPU``
  1607. Examples:
  1608. >>> import mindspore.numpy as np
  1609. >>> arr = np.array([1, 3, -1, 0, 4])
  1610. >>> print(np.ediff1d(arr))
  1611. [ 2 -4 1 4]
  1612. """
  1613. _check_input_tensor(ary)
  1614. combined = ()
  1615. if to_begin is not None:
  1616. if isinstance(to_begin, Tensor):
  1617. to_begin = to_begin.ravel()
  1618. else:
  1619. to_begin = _to_tensor(to_begin).ravel()
  1620. to_begin = to_begin.astype(ary.dtype)
  1621. combined += (to_begin,)
  1622. combined += (diff(ary.ravel()),)
  1623. if to_end is not None:
  1624. if isinstance(to_end, Tensor):
  1625. to_end = to_end.ravel()
  1626. else:
  1627. to_end = _to_tensor(to_end).ravel()
  1628. to_end = to_end.astype(ary.dtype)
  1629. combined += (to_end,)
  1630. return P.Concat(0)(combined)
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrates along the given axis using the composite trapezoidal rule.

    Integrates `y` (x) along given axis.

    Args:
        y (Tensor): Input array to integrate.
        x (Union[int, float, bool, list, tuple, Tensor], optional): The sample points
            corresponding to the `y` values. If `x` is None, the sample points are
            assumed to be evenly spaced `dx` apart. The default is None.
        dx (scalar, optional): The spacing between sample points when `x` is None. The
            default is 1.0.
        axis (int, optional): The axis along which to integrate. Defaults to -1.

    Returns:
        Tensor of float, definite integral as approximated by trapezoidal rule.

    Raises:
        ValueError: If axis is out of range of ``[-y.ndim, y.ndim)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6).reshape(2, 3)
        >>> output = np.trapz(a, x=[-2, 1, 2], axis=1)
        >>> print(output)
        [ 3. 15.]
        >>> output = np.trapz(a, dx=3, axis=0)
        >>> print(output)
        [ 4.5 7.5 10.5]
    """
    y = _to_tensor(y)
    ndim = F.rank(y)
    _check_axis_in_range(axis, ndim)
    axis = axis + ndim if axis < 0 else axis
    # zero-offset padding tuples on either side of `axis`, used to build
    # the begin coordinates for F.tensor_slice below
    y_start_axis_left = _list_comprehensions(axis, 0, True)
    y_start_axis_right = _list_comprehensions(ndim - axis - 1, 0, True)
    shape = F.shape(y)
    # shape of y with one fewer sample along `axis` (the sliced operands)
    y_slice_size = _tuple_setitem(shape, axis, shape[axis] - 1)
    if x is not None:
        # spacing comes from consecutive sample points
        x = _to_tensor(x)
        dx = diff(x)
    else:
        dx = _to_tensor(dx)
    # align dx with the trailing dims so it broadcasts against the slices
    dx = _expand(dx, ndim - axis, axis=-1)
    dx = _broadcast_to_shape(dx, y_slice_size)
    if not _check_is_float(F.dtype(y)):
        # trapz returns float
        y = F.cast(y, mstype.float32)
    dx = F.cast(dx, F.dtype(y))
    # product of dx and y with the last column removed
    y_slice_left = F.tensor_slice(y, y_start_axis_left + (0,) + y_start_axis_right, y_slice_size)
    prod_left = F.tensor_mul(y_slice_left, dx)
    # product of dx and y with the first column removed
    y_slice_right = F.tensor_slice(y, y_start_axis_left + (1,) + y_start_axis_right, y_slice_size)
    prod_right = F.tensor_mul(y_slice_right, dx)
    # trapezoid area: (left + right) * dx / 2, summed along `axis`
    prod_sum = F.tensor_div(F.tensor_add(prod_left, prod_right), _to_tensor(2.0).astype(F.dtype(y)))
    return F.reduce_sum(prod_sum, axis)
def _gcd(x1, x2):
    """Calculates gcd without applying keyword arguments."""
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(dtype):
        # F.reduce_sum only supports float
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    # Element-wise Euclidean algorithm on |x1| and |x2| driven by masks.
    x1 = F.absolute(x1)
    x2 = F.absolute(x2)
    cond_ge = F.tensor_ge(x1, x2)
    # `a` holds the larger operand per element, `b` the smaller one
    a = where_(cond_ge, x1, x2)
    b = where_(cond_ge, x2, x1)
    # gcd(a, 0) = a: substitute a for zero divisors so their remainder is 0
    b = where_(F.equal(b, ZERO_TENSOR), a, b)
    r = _remainder(a, b)
    # iterate until every element has terminated, i.e. all remainders are zero
    while F.tensor_gt(F.reduce_sum(r), ZERO_TENSOR):
        r = _remainder(a, b)
        has_terminated = F.equal(r, ZERO_TENSOR)
        # finished elements keep their (a, b); the rest step to (b, r)
        a = where_(has_terminated, a, b)
        b = where_(has_terminated, b, r)
    if not _check_same_type(F.dtype(b), dtype):
        # restore the promoted input dtype
        b = F.cast(b, dtype)
    return b
def gcd(x1, x2, dtype=None):
    """
    Returns the greatest common divisor of ``|x1|`` and ``|x2|``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input data.
        x2 (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the greatest common divisor of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.gcd(np.arange(6), np.array(20))
        >>> print(output)
        [20 1 2 1 4 5]
    """
    # delegate broadcasting/dtype handling to the shared ufunc wrapper
    return _apply_tensor_op(_gcd, x1, x2, dtype=dtype)
  1731. def lcm(x1, x2, dtype=None):
  1732. """
  1733. Returns the lowest common multiple of ``|x1|`` and ``|x2|``.
  1734. Note:
  1735. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  1736. not supported.
  1737. Args:
  1738. x1 (Tensor): input data.
  1739. x2 (Tensor): input data.
  1740. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1741. output Tensor.
  1742. Returns:
  1743. Tensor or scalar, the lowest common multiple of the absolute value of the inputs.
  1744. This is a scalar if both `x1` and `x2` are scalars.
  1745. Supported Platforms:
  1746. ``Ascend`` ``GPU`` ``CPU``
  1747. Examples:
  1748. >>> import mindspore.numpy as np
  1749. >>> output = np.lcm(np.arange(6), np.array(20))
  1750. >>> print(output)
  1751. [ 0 20 20 60 20 20]
  1752. """
  1753. def _lcm(x1, x2):
  1754. """Calculates lcm without applying keyword arguments"""
  1755. common_divisor = _gcd(x1, x2)
  1756. dtype = _promote(F.dtype(x1), F.dtype(x2))
  1757. x1 = x1.astype(mstype.float32)
  1758. x2 = x2.astype(mstype.float32)
  1759. q1 = F.tensor_div(x1, common_divisor)
  1760. q2 = F.tensor_div(x2, common_divisor)
  1761. res = F.tensor_mul(F.tensor_mul(q1, q2), common_divisor)
  1762. has_zero = F.equal(multiply(x1, x2), ZERO_TENSOR)
  1763. res = where_(has_zero, ZERO_TENSOR, res)
  1764. return F.absolute(res).astype(dtype)
  1765. return _apply_tensor_op(_lcm, x1, x2, dtype=dtype)
  1766. def convolve(a, v, mode='full'):
  1767. """
  1768. Returns the discrete, linear convolution of two one-dimensional sequences.
  1769. Note:
  1770. If `v` is longer than `a`, the tensors are swapped before computation.
  1771. Args:
  1772. a (Union[list, tuple, Tensor]): First one-dimensional input tensor.
  1773. v (Union[list, tuple, Tensor]): Second one-dimensional input tensor.
  1774. mode (str, optional): By default, mode is `\'full\'`. This returns the
  1775. convolution at each point of overlap, with an output shape of :math:`(N+M-1,)`.
  1776. At the end-points of the convolution, the signals do not overlap completely,
  1777. and boundary effects may be seen.
  1778. If `mode` is `\'same\'`, it returns output of length :math:`max(M, N)`. Boundary
  1779. effects are still visible.
  1780. If `mode` is `\'valid\'`, it returns output of length :math:`max(M, N) - min(M, N) + 1`.
  1781. The convolution product is only given for points where the signals overlap
  1782. completely. Values outside the signal boundary have no effect.
  1783. Returns:
  1784. Tensor, discrete, linear convolution of a and v.
  1785. Raises:
  1786. TypeError: if the inputs have types not specified above.
  1787. ValueError: if a and v are empty or have wrong dimensions
  1788. Supported Platforms:
  1789. ``GPU`` ``CPU``
  1790. Examples:
  1791. >>> import mindspore.numpy as np
  1792. >>> output = np.convolve([1., 2., 3., 4., 5.], [2., 3.], mode="valid")
  1793. >>> print(output)
  1794. [ 7. 12. 17. 22.]
  1795. """
  1796. if not isinstance(a, Tensor):
  1797. a = asarray_const(a)
  1798. if not isinstance(v, Tensor):
  1799. v = asarray_const(v)
  1800. a_size = F.shape_mul(a.shape)
  1801. v_size = F.shape_mul(v.shape)
  1802. if a_size == 0 or v_size == 0:
  1803. _raise_value_error("Inputs cannot be empty.")
  1804. a = _expand(a, 1)
  1805. v = _expand(v, 1)
  1806. final_dtype = _promote(a.dtype, v.dtype)
  1807. a = a.astype("float32")
  1808. v = v.astype("float32")
  1809. if a.ndim != 1 or v.ndim != 1:
  1810. _raise_value_error("a and v must be 1-D tensor.")
  1811. if a_size < v_size:
  1812. a, v = v, a
  1813. a_size, v_size = v_size, a_size
  1814. v = v[::-1]
  1815. return _compute_1d_conv(a, v, mode).astype(final_dtype)
  1816. def _handle_weights(weights, num_samples):
  1817. """Checks fweight and aweight in np.cov."""
  1818. weights = asarray_const(weights)
  1819. if not _check_is_int(weights.dtype):
  1820. _raise_type_error("weights must be integer")
  1821. weights = weights.astype("float32")
  1822. if weights.ndim > 1:
  1823. _raise_runtime_error("cannot handle multidimensional weights")
  1824. if weights.shape[0] != num_samples:
  1825. _raise_runtime_error("incompatible numbers of samples and weights")
  1826. return absolute(weights)
  1827. def _handle_inputs(cov_input, rowvar):
  1828. """Checks input arrays for np.cov."""
  1829. if not isinstance(cov_input, Tensor):
  1830. cov_input = asarray_const(cov_input)
  1831. if cov_input.ndim > 2:
  1832. _raise_value_error("input array has dimension more than 2.")
  1833. cov_input = cov_input.astype("float32")
  1834. cov_input = _expand(cov_input, 2)
  1835. if not isinstance(rowvar, bool):
  1836. _raise_type_error("input rowvar should be boolean.")
  1837. if not rowvar and cov_input.shape[0] != 1:
  1838. cov_input = cov_input.T
  1839. return cov_input
  1840. def _handle_facts(w, m, ddof, aweights):
  1841. """Computes facts for np.cov"""
  1842. fact = None
  1843. if w is None:
  1844. fact = m.shape[1] - ddof
  1845. else:
  1846. w_sum = _reduce_sum_default(w, -1)
  1847. if ddof == 0:
  1848. fact = w_sum
  1849. elif aweights is None:
  1850. fact = w_sum - ddof
  1851. else:
  1852. fact = w_sum - ddof * F.reduce_sum(w * aweights) / w_sum
  1853. return fact
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None, dtype=None):
    """
    Estimates a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together. If we examine
    N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix
    element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element
    :math:`C_{ii}` is the variance of :math:`x_i`.

    Note:
        `fweights` and `aweights` must be all positive, in Numpy if negative values
        are detected, a value error will be raised, in MindSpore we converts all values
        to positive instead.

    Args:
        m (Union[Tensor, list, tuple]): A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and each column
            represents a single observation of all those variables. Also see `rowvar` below.
        y (Union[Tensor, list, tuple], optional): An additional set of variables
            and observations. `y` has the same form as that of `m`, default is ``None``.
        rowvar(bool, optional): If `rowvar` is ``True`` (default), then each row represents
            a variable, with observations in the columns. Otherwise, the relationship
            is transposed: each column represents a variable, while the rows contain
            observations.
        bias (bool, optional): Default Normalization (``False``) is by :math:`(N - 1)`, where
            :math:`N` is the number of observations given (unbiased estimate). If bias is
            ``True``, then Normalization is by `N`. These values can be overridden by
            using the keyword `ddof`.
        ddof (int, optional): If not ``None``, the default value implied by `bias` is
            overridden. Note that :math:`ddof=1` will return the unbiased estimate, even
            if both fweights and aweights are specified, and :math:`ddof=0` will return
            the simple average. See the notes for the details. The default value
            is ``None``.
        fweights (Union[Tensor, list, tuple], optional): 1-D tensor of integer
            frequency weights; the number of times each observation vector should
            be repeated. The default value is ``None``.
        aweights (Union[Tensor, list, tuple], optional): 1-D tensor of observation
            vector weights. These relative weights are typically larger for observations
            considered more important and smaller for observations considered less
            important. If :math:`ddof=0` the tensor of weights can be used to assign probabilities
            to observation vectors. The default value is ``None``.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Data-type of the
            result. By default, the return data-type will have mstype.float32 precision.

    Returns:
        Tensor, the covariance matrix of the variables.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `m` and `y` have wrong dimensions.
        RuntimeError: if `aweights` and `fweights` have dimensions > 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.cov([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]])
        >>> print(output)
        [[1.6666666 2.1666667 1.6666666]
        [2.1666667 2.9166667 2.1666667]
        [1.6666666 2.1666667 1.6666666]]
    """
    # This implementation was inspired by original numpy implementation.
    m = _handle_inputs(m, rowvar)
    if m.shape[0] == 0:
        # no variables: return an empty covariance matrix
        return empty((0, 0), dtype="float32")
    if y is not None:
        # stack the extra variables below those of m
        y = _handle_inputs(y, rowvar)
        m = concatenate((m, y), axis=0)
    if ddof is None:
        # default degrees of freedom are implied by `bias`
        if not bias:
            ddof = 1
        else:
            ddof = 0
    # Handle fweights and aweights
    w = _handle_weights(fweights, m.shape[1]) if fweights is not None else None
    if aweights is not None:
        aweights = _handle_weights(aweights, m.shape[1])
        # combined weight is the element-wise product when both are given
        w = aweights if w is None else w * aweights
    avg = average(m, axis=1, weights=w)
    # Determine the Normalization
    fact = _handle_facts(w, m, ddof, aweights)
    # center each variable around its (weighted) mean
    m = m - F.expand_dims(avg, -1)
    if w is None:
        m_t = m.T
    else:
        m_t = (m * w).T
    res = true_divide(dot(m, m_t), fact).squeeze()
    if dtype is not None:
        return res.astype(dtype)
    return res
@constexpr
def _real_axes(ndim_orig, ndim_out, axes_orig):
    """
    Returns the real axes to be reduced after performing broadcast.

    Broadcasting may grow the array's rank from `ndim_orig` to `ndim_out`;
    the newly prepended leading dimensions are always reduced, and the
    originally requested axes are shifted right by the rank difference.
    """
    _diff = ndim_out - ndim_orig
    # all leading axes introduced by broadcasting are reduced
    axes = F.make_range(_diff)
    # shift the original axes past the prepended dimensions
    axes_orig = map(functools.partial(operator.add, _diff), axes_orig)
    return axes + tuple(axes_orig)
  1946. @constexpr
  1947. def _shape_reduced_keepdims(shape, axes):
  1948. """
  1949. Reduces dimensions corresponding to argument axes while
  1950. keeping the number of dimensions unchanged.
  1951. """
  1952. ndim_out = F.tuple_len(shape)
  1953. shape_out = [1]*ndim_out
  1954. for i in range(ndim_out):
  1955. if not i in axes:
  1956. shape_out[i] = shape[i]
  1957. return tuple(shape_out)
  1958. @constexpr
  1959. def _shape_reduced(shape, axes):
  1960. """Removes dimensions corresponding to argument axes"""
  1961. ndim_orig = F.tuple_len(shape)
  1962. ndim_out = ndim_orig - F.tuple_len(axes)
  1963. shape_out = [0]*ndim_out
  1964. idx_out = 0
  1965. for i in range(ndim_orig):
  1966. if not i in axes:
  1967. shape_out[idx_out] = shape[i]
  1968. idx_out += 1
  1969. return tuple(shape_out)
def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None, where=True, dtype=None):
    """
    Applies comparison based on cmp_fn and reduction based on reduce_fn.
    If cmp_fn is None, only reduction is performed.
    """
    a = _to_tensor(a)
    shape = F.shape(a)
    ndim = F.rank(a)
    if dtype is None:
        dtype = F.dtype(a)
    axes = _check_axis_valid(axis, ndim)
    if initial is not None:
        # `initial` must be a scalar (python scalar or 0-d Tensor)
        if ((isinstance(initial, Tensor) and F.rank(initial) > 0) or
                not isinstance(initial, (int, float, bool, Tensor))):
            _raise_type_error('initial should be scalar')
    if _is_shape_empty(shape):
        # zero-size input: the result is fully determined by shapes and `initial`
        if not axes:
            return a
        if keepdims:
            shape_out = _shape_reduced_keepdims(shape, axes)
        else:
            shape_out = _shape_reduced(shape, axes)
        if _is_shape_empty(shape_out):
            return empty(shape_out, dtype)
        if initial is None:
            if cmp_fn is None:
                # sum-like reductions over empty input fill with nan
                initial = nan
            else:
                _raise_value_error('initial value must be provided for zero-size arrays')
        return full(shape_out, initial, dtype)
    if initial is not None:
        # fold `initial` into the data before reducing
        initial = full(shape, initial, dtype)
        a = cmp_fn(a, initial)
    if isinstance(where, Tensor):
        # masked reduction: replace excluded elements by `initial`
        if initial is None:
            _raise_value_error('initial value must be provided for where masks')
        ndim_orig = F.rank(a)
        a = where_(where, a, initial)
        # broadcasting against the mask may have grown the rank; remap axes
        axes = _real_axes(ndim_orig, F.rank(a), axes)
    return reduce_fn(a, axes).astype(dtype)
  2010. def nanmax(a, axis=None, dtype=None, keepdims=False):
  2011. """
  2012. Return the maximum of an array or maximum along an axis, ignoring any NaNs.
  2013. Note:
  2014. Numpy arguments `out` is not supported.
  2015. For all NaN slices, a very small negative number is returned instead of NaN.
  2016. Args:
  2017. a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose maximum
  2018. is desired. If `a` is not an array, a conversion is attempted.
  2019. axis (Union[int, tuple of int, None], optional): Axis or axes along which the maximum is
  2020. computed. The default is to compute the maximum of the flattened array.
  2021. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2022. output Tensor.
  2023. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2024. are reduced are left in the result as dimensions with size one. With this option,
  2025. the result will broadcast correctly against the original `a`.
  2026. Returns:
  2027. Tensor.
  2028. Raises:
  2029. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2030. if the axes contain duplicates.
  2031. Supported Platforms:
  2032. ``GPU`` ``CPU``
  2033. Examples:
  2034. >>> import mindspore.numpy as np
  2035. >>> a = np.array([[1, 2], [3, np.nan]])
  2036. >>> output = np.nanmax(a)
  2037. >>> print(output)
  2038. 3.0
  2039. >>> output = np.nanmax(a, axis=0)
  2040. >>> print(output)
  2041. [3. 2.]
  2042. """
  2043. a = _to_tensor(a)
  2044. if not isinstance(keepdims, int):
  2045. _raise_type_error("integer argument expected, got", keepdims)
  2046. nan_mask = _isnan(a)
  2047. a = F.select(nan_mask, full(F.shape(a), -sys.maxsize - 1, F.dtype(a)), a)
  2048. reduce_fn = _reduce_max_keepdims if keepdims else _reduce_max_default
  2049. return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)
  2050. def nanmin(a, axis=None, dtype=None, keepdims=False):
  2051. """
  2052. Returns the minimum of array elements over a given axis, ignoring any NaNs.
  2053. Note:
  2054. Numpy arguments `out` is not supported.
  2055. For all-NaN slices, a very large number is returned instead of NaN.
  2056. On Ascend, since checking for NaN is currently not supported, it is not recommended to
  2057. use np.nanmin. If the array does not contain NaN, np.min should be used instead.
  2058. Args:
  2059. a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose minimum
  2060. is desired. If `a` is not an array, a conversion is attempted.
  2061. axis (Union[int, tuple of int, None], optional): Axis or axes along which the minimum is
  2062. computed. The default is to compute the minimum of the flattened array.
  2063. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2064. output Tensor.
  2065. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2066. are reduced are left in the result as dimensions with size one. With this option,
  2067. the result will broadcast correctly against the original `a`.
  2068. Returns:
  2069. Tensor.
  2070. Raises:
  2071. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2072. if the axes contain duplicates.
  2073. Supported Platforms:
  2074. ``GPU`` ``CPU``
  2075. Examples:
  2076. >>> import mindspore.numpy as np
  2077. >>> a = np.array([[1, 2], [3, np.nan]])
  2078. >>> output = np.nanmin(a)
  2079. >>> print(output)
  2080. 1.0
  2081. >>> output = np.nanmin(a, axis=0)
  2082. >>> print(output)
  2083. [1. 2.]
  2084. """
  2085. a = _to_tensor(a)
  2086. if not isinstance(keepdims, int):
  2087. _raise_type_error("integer argument expected, got", keepdims)
  2088. nan_mask = _isnan(a)
  2089. a = F.select(nan_mask, full(F.shape(a), sys.maxsize, F.dtype(a)), a)
  2090. reduce_fn = _reduce_min_keepdims if keepdims else _reduce_min_default
  2091. return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)
  2092. def _reduce_nansum(x, axis, keepdims=False):
  2093. """Computes reduce sum treating NaNs as zeros."""
  2094. x = F.select(_isnan(x), zeros(F.shape(x), F.dtype(x)), x)
  2095. if keepdims:
  2096. return _reduce_sum_keepdims(x, axis)
  2097. return _reduce_sum_default(x, axis)
  2098. def nansum(a, axis=None, dtype=None, keepdims=False):
  2099. """
  2100. Returns the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero.
  2101. Note:
  2102. Numpy arguments `out` is not supported.
  2103. Args:
  2104. a (Union[int, float, list, tuple, Tensor]): Array containing numbers
  2105. whose sum is desired. If `a` is not an array, a conversion is attempted.
  2106. axis (Union[int, tuple of int, None], optional): Axis or axes along which the sum is
  2107. computed. The default is to compute the sum of the flattened array.
  2108. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2109. output Tensor.
  2110. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2111. are reduced are left in the result as dimensions with size one. With this option,
  2112. the result will broadcast correctly against the original `a`.
  2113. Returns:
  2114. Tensor.
  2115. Raises:
  2116. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2117. if the axes contain duplicates.
  2118. Supported Platforms:
  2119. ``GPU`` ``CPU``
  2120. Examples:
  2121. >>> import mindspore.numpy as np
  2122. >>> a = np.array([[1, 1], [1, np.nan]])
  2123. >>> output = np.nansum(a)
  2124. >>> print(output)
  2125. 3.0
  2126. >>> output = np.nansum(a, axis=0)
  2127. >>> print(output)
  2128. [2. 1.]
  2129. """
  2130. a = _to_tensor(a)
  2131. nan_mask = _isnan(a)
  2132. a = F.select(nan_mask, zeros(F.shape(a), F.dtype(a)), a)
  2133. return _reduce(a, functools.partial(_reduce_nansum, keepdims=keepdims), axis=axis,
  2134. keepdims=keepdims, dtype=dtype)
  2135. def _count_nonnan(a, axis, keepdims=False):
  2136. """Counts the number of elements excluding NaNs."""
  2137. nonnan_mask = F.select(_isnan(a), zeros(F.shape(a), F.dtype(a)), ones(F.shape(a), F.dtype(a)))
  2138. if keepdims:
  2139. return _reduce_sum_keepdims(nonnan_mask, axis)
  2140. return _reduce_sum_default(nonnan_mask, axis)
  2141. def nanmean(a, axis=None, dtype=None, keepdims=False):
  2142. """
  2143. Computes the arithmetic mean along the specified axis, ignoring NaNs.
  2144. Returns the average of the array elements. The average is taken over the flattened
  2145. array by default, otherwise over the specified axis. float32 intermediate and
  2146. return values are used for integer inputs.
  2147. Note:
  2148. Numpy arguments `out` is not supported.
  2149. Args:
  2150. a (Union[int, float, list, tuple, Tensor]): Array containing numbers
  2151. whose mean is desired. If `a` is not an array, a conversion is attempted.
  2152. axis (Union[int, tuple of int, None], optional): Axis or axes along which the mean is
  2153. computed. The default is to compute the mean of the flattened array.
  2154. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2155. output Tensor.
  2156. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2157. are reduced are left in the result as dimensions with size one. With this option,
  2158. the result will broadcast correctly against the original `a`.
  2159. Returns:
  2160. Tensor.
  2161. Raises:
  2162. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2163. if the axes contain duplicates.
  2164. Supported Platforms:
  2165. ``GPU`` ``CPU``
  2166. Examples:
  2167. >>> import mindspore.numpy as np
  2168. >>> a = np.array([[1, np.nan], [3, 4]])
  2169. >>> output = np.nanmean(a)
  2170. >>> print(output)
  2171. 2.6666667
  2172. >>> output = np.nanmean(a, axis=0)
  2173. >>> print(output)
  2174. [2. 4.]
  2175. >>> output = np.nanmean(a, axis=1)
  2176. >>> print(output)
  2177. [1. 3.5]
  2178. """
  2179. if dtype is None:
  2180. dtype = mstype.float32
  2181. a = _to_tensor(a)
  2182. axis = _check_axis_valid(axis, F.rank(a))
  2183. sum_a = nansum(a, axis=axis, dtype=dtype, keepdims=keepdims)
  2184. return F.tensor_div(sum_a, _count_nonnan(a, axis, keepdims))
  2185. def _nanvar(a, axis, ddof=0, keepdims=False):
  2186. """Computes nanvar without applying keyword arguments."""
  2187. mean_a = nanmean(a, axis=axis, keepdims=True)
  2188. pow_a = F.tensor_pow(F.tensor_sub(a, mean_a), 2)
  2189. sum_a = _reduce_nansum(pow_a, axis, keepdims)
  2190. count = _count_nonnan(a, axis, keepdims)
  2191. return divide(sum_a, F.tensor_sub(count, ddof))
  2192. def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
  2193. """
  2194. Computes the variance along the specified axis, while ignoring NaNs.
  2195. Returns the variance of the array elements, a measure of the spread of a distribution. The
  2196. variance is computed for the flattened array by default, otherwise over the specified axis.
  2197. Note:
  2198. Numpy arguments `out` is not supported.
  2199. On GPU, the supported dtypes are np.float16, and np.float32.
  2200. Args:
  2201. a (Union[int, float, list, tuple, Tensor]): Array containing numbers
  2202. whose variance is desired. If `a` is not an array, a conversion is attempted.
  2203. axis (Union[int, tuple of int, None], optional): Axis or axes along which the variance is
  2204. computed. The default is to compute the variance of the flattened array.
  2205. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2206. output Tensor.
  2207. ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
  2208. ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
  2209. is zero.
  2210. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2211. are reduced are left in the result as dimensions with size one. With this option,
  2212. the result will broadcast correctly against the original `a`.
  2213. Returns:
  2214. Tensor.
  2215. Raises:
  2216. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2217. if the axes contain duplicates.
  2218. Supported Platforms:
  2219. ``GPU`` ``CPU``
  2220. Examples:
  2221. >>> import mindspore.numpy as np
  2222. >>> a = np.array([[1, np.nan], [3, 4]])
  2223. >>> output = np.nanvar(a)
  2224. >>> print(output)
  2225. 1.5555557
  2226. >>> output = np.nanvar(a, axis=0)
  2227. >>> print(output)
  2228. [1. 0.]
  2229. >>> output = np.nanvar(a, axis=1)
  2230. >>> print(output)
  2231. [0. 0.25]
  2232. """
  2233. if dtype is None:
  2234. dtype = mstype.float32
  2235. return _reduce(a, functools.partial(_nanvar, ddof=ddof, keepdims=keepdims), axis=axis,
  2236. keepdims=keepdims, dtype=dtype)
  2237. def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
  2238. """
  2239. Computes the standard deviation along the specified axis, while ignoring NaNs.
  2240. Returns the standard deviation, a measure of the spread of a distribution, of the non-NaN
  2241. array elements. The standard deviation is computed for the flattened array by default,
  2242. otherwise over the specified axis.
  2243. Note:
  2244. Numpy arguments `out` is not supported.
  2245. On GPU, the supported dtypes are np.float16, and np.float32.
  2246. Args:
  2247. a (Union[int, float, list, tuple, Tensor]): Calculates the standard deviation of the non-NaN values.
  2248. axis (Union[int, tuple of int, None], optional): Axis or axes along which the standard
  2249. deviation is computed. The default is to compute the standard deviation of the
  2250. flattened array.
  2251. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2252. output Tensor.
  2253. ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
  2254. ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
  2255. is zero.
  2256. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
  2257. are reduced are left in the result as dimensions with size one. With this option,
  2258. the result will broadcast correctly against the original `a`.
  2259. Returns:
  2260. Tensor.
  2261. Raises:
  2262. ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
  2263. if the axes contain duplicates.
  2264. Supported Platforms:
  2265. ``GPU`` ``CPU``
  2266. Examples:
  2267. >>> import mindspore.numpy as np
  2268. >>> a = np.array([[1, np.nan], [3, 4]])
  2269. >>> output = np.nanstd(a)
  2270. >>> print(output)
  2271. 1.2472192
  2272. >>> output = np.nanstd(a, axis=0)
  2273. >>> print(output)
  2274. [1. 0.]
  2275. >>> output = np.nanstd(a, axis=1)
  2276. >>> print(output)
  2277. [0. 0.5]
  2278. """
  2279. if dtype is None:
  2280. dtype = mstype.float32
  2281. return _reduce(a, lambda a, axis: F.sqrt(_nanvar(a, axis, ddof=ddof, keepdims=keepdims)),
  2282. axis=axis, keepdims=keepdims, dtype=dtype)
  2283. def exp2(x, dtype=None):
  2284. """
  2285. Calculates ``2**p`` for all p in the input array.
  2286. Note:
  2287. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2288. not supported.
  2289. On GPU, the supported dtypes are np.float16, and np.float32.
  2290. Args:
  2291. x (Tensor): input values.
  2292. dtype (:class:`mindspore.dtype`, optional): defaults to :class:`None`. Overrides the dtype of the
  2293. output Tensor.
  2294. Returns:
  2295. Tensor or scalar, element-wise 2 to the power `x`.
  2296. Supported Platforms:
  2297. ``Ascend`` ``GPU`` ``CPU``
  2298. Examples:
  2299. >>> import mindspore.numpy as np
  2300. >>> x = np.array([2, 3]).astype(np.float32)
  2301. >>> output = np.exp2(x)
  2302. >>> print(output)
  2303. [4. 8.]
  2304. """
  2305. return _apply_tensor_op(lambda x: F.tensor_pow(2, x), x, dtype=dtype)
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the second
    array scaled by the first.

    Note:
        Booleans are not supported.

    Args:
        a (Union[int, float, list, tuple, Tensor]): input values.
        b (Union[int, float, list, tuple, Tensor]): input values.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.kron([1,10,100], [5,6,7])
        >>> print(output)
        [ 5 6 7 50 60 70 500 600 700]
        >>> output = np.kron([5,6,7], [1,10,100])
        >>> print(output)
        [ 5 50 500 6 60 600 7 70 700]
        >>> output = np.kron(np.eye(2), np.ones((2,2)))
        >>> print(output)
        [[1. 1. 0. 0.]
        [1. 1. 0. 0.]
        [0. 0. 1. 1.]
        [0. 0. 1. 1.]]
    """
    a, b = _to_tensor(a, b)
    ndim = _max(F.rank(a), F.rank(b))
    # Scalar-scalar case degenerates to plain multiplication.
    if ndim == 0:
        return F.tensor_mul(a, b)
    # Pad both operands with leading unit axes to a common rank.
    a = _expand(a, ndim)
    b = _expand(b, ndim)
    shape_a = F.shape(a)
    shape_b = F.shape(b)
    # scales a by the shape of b
    # Output shape is the element-wise product of the two shapes.
    kron_shape = _seq_prod(shape_a, shape_b)
    # Insert unit axes so each element of `a` gets a (1, ..., 1) slot,
    # then tile by shape_b to repeat `a`'s elements block-wise.
    a = F.reshape(a, _add_unit_axes(shape_a, 2*ndim, True))
    a = F.tile(a, _add_unit_axes(shape_b, 2*ndim, False))
    # Interleave the `a` axes with the tiled `b`-sized axes so a final
    # reshape yields the block layout of the Kronecker product.
    a = moveaxis(a, F.make_range(ndim, 2*ndim), F.make_range(1, 2*ndim, 2))
    a = F.reshape(a, kron_shape)
    # scales b by the shape of a
    # Tiling `b` by shape_a aligns it with the expanded `a` for the
    # element-wise multiply below.
    b = F.tile(b, shape_a)
    return F.tensor_mul(a, b)
def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
    """
    Returns the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both
    `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the
    last axis of `a` and `b` by default, and these axes can have dimensions 2 or 3.
    Where the dimension of either `a` or `b` is 2, the third component of the input
    vector is assumed to be zero and the cross product calculated accordingly. In cases
    where both input vectors have dimension 2, the z-component of the cross product is
    returned.

    Args:
        a (Union[list, tuple, Tensor]): Components of the first vector(s).
        b (Union[list, tuple, Tensor]): Components of the second vector(s).
        axisa (int, optional): Axis of `a` that defines the vector(s). By default, the last
            axis.
        axisb (int, optional): Axis of `b` that defines the vector(s). By default, the last
            axis.
        axisc (int, optional): Axis of `c` containing the cross product vector(s). Ignored
            if both input vectors have dimension 2, as the return is scalar. By default,
            the last axis.
        axis (int, optional): If defined, the axis of `a`, `b` and `c` that defines the
            vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`.

    Returns:
        Tensor, vector cross product(s).

    Raises:
        ValueError: when the dimensions of the vector(s) in `a` and/or `b` does not equal 2
            or 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3], [4,5,6]])
        >>> y = np.array([[4,5,6], [1,2,3]])
        >>> output = np.cross(x, y)
        >>> print(output)
        [[-3 6 -3]
        [ 3 -6 3]]
        >>> output = np.cross(x, y, axisc=0)
        >>> print(output)
        [[-3 3]
        [ 6 -6]
        [-3 3]]
    """
    a, b = _to_tensor(a, b)
    # `axis` overrides the three per-array axis arguments.
    if axis is not None:
        axisa, axisb, axisc = axis, axis, axis
    _check_axis_in_range(axisa, F.rank(a))
    _check_axis_in_range(axisb, F.rank(b))
    # Normalize so the vector components always live on the last axis.
    a = moveaxis(a, axisa, -1)
    b = moveaxis(b, axisb, -1)
    shape_a = F.shape(a)
    shape_b = F.shape(b)
    if F.shape(a)[-1] not in (2, 3) or F.shape(b)[-1] not in (2, 3):
        _raise_value_error('incompatible dimensions for cross product (dimension must be 2 or 3)')
    a_has_z = shape_a[-1] == 3
    b_has_z = shape_b[-1] == 3
    # Broadcast the batch dimensions; the result keeps a 3-component axis
    # only if at least one input has a z component.
    shape_out = _infer_out_shape(shape_a[:-1], shape_b[:-1])
    if a_has_z or b_has_z:
        shape_out += (3,)
    _check_axis_in_range(axisc, len(shape_out))
    dtype = _promote(F.dtype(a), F.dtype(b))
    if _get_device() == 'CPU':
        # F.tensor_slice only supports float on CPU
        if not _check_is_float(F.dtype(a)):
            a = F.cast(a, mstype.float32)
        if not _check_is_float(F.dtype(b)):
            b = F.cast(b, mstype.float32)
    # Slice descriptors that select a single component along the last axis.
    a_slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
    a_slice_size = shape_a[:-1] + (1,)
    b_slice_start = _list_comprehensions(F.rank(b) - 1, 0, True)
    b_slice_size = shape_b[:-1] + (1,)

    def _get_slice_product(idx_a, idx_b):
        # Product of component idx_a of `a` with component idx_b of `b`.
        return multiply(F.tensor_slice(a, a_slice_start + (idx_a,), a_slice_size),
                        F.tensor_slice(b, b_slice_start + (idx_b,), b_slice_size))

    cz = F.tensor_sub(_get_slice_product(0, 1), _get_slice_product(1, 0)) # ax*by - ay*bx
    # Both inputs 2-D vectors: only the z component exists; return it as a scalar field.
    if not a_has_z and not b_has_z:
        return F.reshape(cz, shape_out).astype(dtype)
    if a_has_z and b_has_z:
        cx = F.tensor_sub(_get_slice_product(1, 2), _get_slice_product(2, 1)) # ay*bz - az*by
        cy = F.tensor_sub(_get_slice_product(2, 0), _get_slice_product(0, 2)) # az*bx - ax*bz
    elif a_has_z:
        # b's z component is implicitly zero.
        cx = F.neg_tensor(_get_slice_product(2, 1)) # -az*by
        cy = _get_slice_product(2, 0) # az*bx
    else: # b_has_z
        # a's z component is implicitly zero.
        cx = _get_slice_product(1, 2) # ay*bz
        cy = F.neg_tensor(_get_slice_product(0, 2)) # -ax*bz
    res = _concat((cx, cy, cz)).reshape(shape_out)
    # Move the component axis to the requested output position.
    return moveaxis(res, -1, axisc).astype(dtype)
def ceil(x, dtype=None):
    """
    Returns the ceiling of the input, element-wise.

    The ceil of the scalar `x` is the smallest integer `i`, such that ``i >= x``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
        `extobj` are not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the ceiling of each element in `x`. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
        >>> output = np.ceil(a)
        >>> print(output)
        [-1. -1. -0. 1. 2. 2. 2.]
    """
    # Uses the identity ceil(x) == -floor(-x), since only floor is available here.
    return _apply_tensor_op(lambda x: F.neg_tensor(F.floor(F.neg_tensor(x.astype(mstype.float32)))),
                            x, dtype=dtype)
  2465. def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
  2466. """Infers the shape of the last two dimensions after performing matmul."""
  2467. shape_rem = ()
  2468. if ndim1 >= 2:
  2469. shape_rem += (shape1[-2],)
  2470. if transpose_b:
  2471. if ndim2 >= 2:
  2472. shape_rem += (shape2[-2],)
  2473. else:
  2474. if ndim1 >= 1:
  2475. shape_rem += (shape2[-1],)
  2476. return shape_rem
  2477. def positive(a, dtype=None):
  2478. """
  2479. Numerical positive, element-wise.
  2480. Note:
  2481. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2482. not supported.
  2483. Args:
  2484. a (Tensor): Input tensor.
  2485. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2486. output Tensor.
  2487. Returns:
  2488. Tensor.
  2489. Supported Platforms:
  2490. ``Ascend`` ``GPU`` ``CPU``
  2491. Examples:
  2492. >>> import mindspore.numpy as np
  2493. >>> a = np.asarray([1, -1]).astype('float32')
  2494. >>> output = np.positive(a)
  2495. >>> print(output)
  2496. [1. -1.]
  2497. """
  2498. _check_input_tensor(a)
  2499. neg_tensor = F.neg_tensor(a)
  2500. return _apply_tensor_op(F.neg_tensor, neg_tensor, dtype=dtype)
  2501. def negative(a, dtype=None):
  2502. """
  2503. Numerical negative, element-wise.
  2504. Note:
  2505. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2506. not supported.
  2507. Args:
  2508. a (Tensor): Input tensor.
  2509. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  2510. output Tensor.
  2511. Returns:
  2512. Tensor.
  2513. Supported Platforms:
  2514. ``Ascend`` ``GPU`` ``CPU``
  2515. Examples:
  2516. >>> import mindspore.numpy as np
  2517. >>> a = np.asarray([1, -1]).astype('float32')
  2518. >>> output = np.negative(a)
  2519. >>> print(output)
  2520. [-1. 1.]
  2521. """
  2522. return _apply_tensor_op(F.neg_tensor, a, dtype=dtype)
  2523. def cumsum(a, axis=None, dtype=None):
  2524. """
  2525. Returns the cumulative sum of the elements along a given axis.
  2526. Note:
  2527. If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
  2528. `dtype` will be elevated to :class:`int32`.
  2529. Args:
  2530. a (Tensor): Input tensor.
  2531. axis (int, optional): Axis along which the cumulative sum is computed. The
  2532. default (None) is to compute the cumsum over the flattened array.
  2533. dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
  2534. unless `a` has an integer dtype with a precision less than that of the
  2535. default platform integer. In that case, the default platform integer
  2536. is used.
  2537. Returns:
  2538. Tensor.
  2539. Raises:
  2540. TypeError: If input arguments have types not specified above.
  2541. ValueError: If axis is out of range.
  2542. Supported Platforms:
  2543. ``Ascend`` ``GPU`` ``CPU``
  2544. Examples:
  2545. >>> import mindspore.numpy as np
  2546. >>> output = np.cumsum(np.ones((3,3)), axis=0)
  2547. >>> print(output)
  2548. [[1. 1. 1.]
  2549. [2. 2. 2.]
  2550. [3. 3. 3.]]
  2551. """
  2552. _check_input_tensor(a)
  2553. return a.cumsum(axis, dtype)
  2554. def nancumsum(a, axis=None, dtype=None):
  2555. """
  2556. Return the cumulative sum of array elements over a given axis treating Not a Numbers (NaNs)
  2557. as zero. The cumulative sum does not change when NaNs are encountered and leading NaNs are
  2558. replaced by zeros.
  2559. Zeros are returned for slices that are all-NaN or empty.
  2560. Note:
  2561. If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
  2562. `dtype` will be elevated to :class:`int32`.
  2563. Args:
  2564. a (Tensor): Input tensor.
  2565. axis (int, optional): Axis along which the cumulative sum is computed. The
  2566. default (None) is to compute the cumsum over the flattened array.
  2567. dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
  2568. unless `a` has an integer dtype with a precision less than that of the
  2569. default platform integer. In that case, the default platform integer
  2570. is used.
  2571. Returns:
  2572. Tensor.
  2573. Raises:
  2574. TypeError: If input arguments have types not specified above.
  2575. ValueError: If axis is out of range.
  2576. Supported Platforms:
  2577. ``GPU`` ``CPU``
  2578. Examples:
  2579. >>> import mindspore.numpy as np
  2580. >>> a = np.array([[1, 2], [3, np.nan]])
  2581. >>> output = np.nancumsum(a)
  2582. >>> print(output)
  2583. [1. 3. 6. 6.]
  2584. >>> output = np.nancumsum(a, axis=0)
  2585. >>> print(output)
  2586. [[1. 2.]
  2587. [4. 2.]]
  2588. >>> output = np.nancumsum(a, axis=1)
  2589. >>> print(output)
  2590. [[1. 3.]
  2591. [3. 3.]]
  2592. """
  2593. a = F.select(_isnan(a), zeros(F.shape(a), F.dtype(a)), a)
  2594. return a.cumsum(axis, dtype)
def cbrt(x, dtype=None):
    """
    Returns the cube-root of a tensor, element-wise.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1, 3, -8, 64])
        >>> output = np.cbrt(a)
        >>> print(output)
        [ 1. -1. 1.4422495 -2. 4. ]
    """
    def _cbrt(x):
        # Promote to at least float32 so pow/abs behave on integer inputs.
        compute_type = promote_types(x.dtype, "float32")
        x = x.astype(compute_type)
        # TODO: use P.Sign() once gpu support is added
        # Recover the sign via |x| / x so the root of a negative value is
        # computed as -(|x| ** (1/3)); a direct pow of a negative base would
        # not give the real cube root.
        # NOTE(review): for x == 0 this divides 0/0 and presumably yields NaN
        # instead of 0 — confirm whether zero inputs are expected here.
        abs_x = F.absolute(x)
        sign_x = abs_x / x
        return sign_x * F.tensor_pow(abs_x, 1. / 3.)
    return _apply_tensor_op(_cbrt, x, dtype=dtype)
  2624. def log1p(x, dtype=None):
  2625. """
  2626. Returns the natural logarithm of one plus the input array, element-wise.
  2627. Calculates ``log(1 + x)``.
  2628. Note:
  2629. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2630. not supported.
  2631. Args:
  2632. x (Tensor): Input array.
  2633. dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
  2634. output Tensor.
  2635. Returns:
  2636. Tensor or scalar. This is a scalar if `x` is a scalar.
  2637. Supported Platforms:
  2638. ``Ascend`` ``GPU`` ``CPU``
  2639. Examples:
  2640. >>> import mindspore.numpy as np
  2641. >>> x = np.array([1, 2, 3]).astype('float16')
  2642. >>> output = np.log1p(x)
  2643. >>> print(output)
  2644. [0.6934 1.099 1.387 ]
  2645. """
  2646. return _apply_tensor_op(lambda x: F.log(x + 1), x, dtype=dtype)
  2647. def logaddexp(x1, x2, dtype=None):
  2648. """
  2649. Logarithm of the sum of exponentiations of the inputs.
  2650. Calculates ``log(exp(x1) + exp(x2))``. This function is useful in statistics where the
  2651. calculated probabilities of events may be so small as to exceed the range of normal
  2652. floating point numbers. In such cases the logarithm of the calculated probability is
  2653. stored. This function allows adding probabilities stored in such a fashion.
  2654. Note:
  2655. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2656. not supported.
  2657. Args:
  2658. x1 (Tensor): Input array.
  2659. x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be broadcastable to
  2660. a common shape (which becomes the shape of the output).
  2661. dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
  2662. output Tensor.
  2663. Returns:
  2664. Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.
  2665. Supported Platforms:
  2666. ``Ascend`` ``GPU`` ``CPU``
  2667. Examples:
  2668. >>> import mindspore.numpy as np
  2669. >>> x1 = np.array([1, 2, 3]).astype('float16')
  2670. >>> x2 = np.array(2).astype('float16')
  2671. >>> output = np.logaddexp(x1, x2)
  2672. >>> print(output)
  2673. [2.312 2.693 3.312]
  2674. """
  2675. def _logaddexp(x1, x2):
  2676. return F.log(F.tensor_add(F.tensor_exp(x1), F.tensor_exp(x2)))
  2677. return _apply_tensor_op(_logaddexp, x1, x2, dtype=dtype)
  2678. def log2(x, dtype=None):
  2679. """
  2680. Base-2 logarithm of `x`.
  2681. Note:
  2682. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2683. not supported.
  2684. Args:
  2685. x (Tensor): Input tensor.
  2686. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2687. output Tensor.
  2688. Returns:
  2689. Tensor or scalar. This is a scalar if `x` is a scalar.
  2690. Supported Platforms:
  2691. ``Ascend`` ``GPU`` ``CPU``
  2692. Examples:
  2693. >>> import mindspore.numpy as np
  2694. >>> x = np.array([2, 4, 8]).astype('float16')
  2695. >>> output = np.log2(x)
  2696. >>> print(output)
  2697. [1. 2. 3.]
  2698. """
  2699. tensor_2 = _make_tensor(2, x.dtype)
  2700. def _log2(x):
  2701. return F.log(x) / F.log(tensor_2)
  2702. return _apply_tensor_op(_log2, x, dtype=dtype)
  2703. def logaddexp2(x1, x2, dtype=None):
  2704. """
  2705. Logarithm of the sum of exponentiations of the inputs in base of 2.
  2706. Calculates ``log2(2**x1 + 2**x2)``.
  2707. This function is useful in machine learning when the calculated probabilities of events
  2708. may be so small as to exceed the range of normal floating point numbers.
  2709. In such cases the base-2 logarithm of the calculated probability can be used instead.
  2710. This function allows adding probabilities stored in such a fashion.
  2711. Note:
  2712. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2713. not supported.
  2714. Args:
  2715. x1 (Tensor): Input tensor.
  2716. x2 (Tensor): Input tensor. If ``x1.shape != x2.shape``, they must be broadcastable to
  2717. a common shape (which becomes the shape of the output).
  2718. dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
  2719. output Tensor.
  2720. Returns:
  2721. Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.
  2722. Supported Platforms:
  2723. ``Ascend`` ``GPU`` ``CPU``
  2724. Examples:
  2725. >>> import mindspore.numpy as np
  2726. >>> x1 = np.array([2, 4, 8]).astype('float16')
  2727. >>> x2 = np.array(2).astype('float16')
  2728. >>> output = np.logaddexp2(x1, x2)
  2729. >>> print(output)
  2730. [3. 4.32 8.02]
  2731. """
  2732. _check_input_tensor(x1, x2)
  2733. add_exp = F.tensor_add(F.tensor_pow(2, x1), F.tensor_pow(2, x2))
  2734. return log2(add_exp, dtype=dtype)
  2735. def log10(x, dtype=None):
  2736. """
  2737. Base-10 logarithm of `x`.
  2738. Note:
  2739. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2740. not supported.
  2741. Args:
  2742. x (Tensor): Input tensor.
  2743. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2744. output Tensor.
  2745. Returns:
  2746. Tensor or scalar. This is a scalar if `x` is a scalar.
  2747. Supported Platforms:
  2748. ``Ascend`` ``GPU`` ``CPU``
  2749. Examples:
  2750. >>> import mindspore.numpy as np
  2751. >>> x = np.array([10, 100, 1000]).astype('float16')
  2752. >>> output = np.log10(x)
  2753. >>> print(output)
  2754. [1. 2. 3.]
  2755. """
  2756. tensor_10 = _make_tensor(10, x.dtype)
  2757. def _log10(x):
  2758. return F.log(x) / F.log(tensor_10)
  2759. return _apply_tensor_op(_log10, x, dtype=dtype)
  2760. def _cast_type_for_trigonometric(x):
  2761. _check_input_tensor(x)
  2762. if x.dtype != mstype.float16 or x.dtype != mstype.float32 or x.dtype != mstype.float64:
  2763. dtype = _promote_for_trigonometric(x.dtype)
  2764. x = F.cast(x, dtype)
  2765. return x
  2766. def sin(x, dtype=None):
  2767. """
  2768. Trigonometric sine, element-wise.
  2769. Note:
  2770. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2771. not supported.
  2772. Args:
  2773. x (Tensor): Input tensor.
  2774. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2775. output Tensor.
  2776. Returns:
  2777. Tensor or scalar. This is a scalar if `x` is a scalar.
  2778. Supported Platforms:
  2779. ``Ascend`` ``GPU`` ``CPU``
  2780. Examples:
  2781. >>> import mindspore.numpy as np
  2782. >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
  2783. >>> output = np.sin(x)
  2784. >>> print(output)
  2785. [ 0.9589243 -0.84147096 0. 0.9092974 -0.7568025 -0.50636566]
  2786. """
  2787. x = _cast_type_for_trigonometric(x)
  2788. return _apply_tensor_op(F.sin, x, dtype=dtype)
  2789. def cos(x, dtype=None):
  2790. """
  2791. Cosine element-wise.
  2792. Note:
  2793. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2794. not supported.
  2795. Args:
  2796. x (Tensor): Input tensor.
  2797. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2798. output Tensor.
  2799. Returns:
  2800. Tensor or scalar. This is a scalar if `x` is a scalar.
  2801. Supported Platforms:
  2802. ``Ascend`` ``GPU`` ``CPU``
  2803. Examples:
  2804. >>> import mindspore.numpy as np
  2805. >>> x = np.arange(5).astype('float32')
  2806. >>> print(np.cos(x))
  2807. [ 1. 0.5403023 -0.41614684 -0.9899925 -0.6536436 ]
  2808. """
  2809. x = _cast_type_for_trigonometric(x)
  2810. return _apply_tensor_op(F.cos, x, dtype=dtype)
  2811. def tan(x, dtype=None):
  2812. """
  2813. Computes tangent element-wise.
  2814. Equivalent to :math:`np.sin(x)/np.cos(x)` element-wise.
  2815. Note:
  2816. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2817. not supported.
  2818. Args:
  2819. x (Tensor): Input tensor.
  2820. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2821. output Tensor.
  2822. Returns:
  2823. Tensor or scalar. This is a scalar if `x` is a scalar.
  2824. Raises:
  2825. TypeError: If the input is not a tensor or is :class:`tensor.dtype` is :class:`mindspore.float64`.
  2826. Supported Platforms:
  2827. ``Ascend`` ``CPU``
  2828. Examples:
  2829. >>> import mindspore.numpy as np
  2830. >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
  2831. >>> print(np.tan(x))
  2832. [ 3.380515 -1.5574077 0. -2.1850398 1.1578213 -0.58721393]
  2833. """
  2834. x = _cast_type_for_trigonometric(x)
  2835. return _apply_tensor_op(F.tan, x, dtype=dtype)
  2836. def arcsin(x, dtype=None):
  2837. """
  2838. Inverse sine, element-wise.
  2839. Note:
  2840. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2841. not supported.
  2842. Args:
  2843. x (Tensor): Input tensor. y-coordinate on the unit circle.
  2844. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2845. output Tensor.
  2846. Returns:
  2847. Output Tensor.
  2848. Raises:
  2849. TypeError: If the input is not a tensor.
  2850. Supported Platforms:
  2851. ``Ascend`` ``GPU`` ``CPU``
  2852. Examples:
  2853. >>> import mindspore.numpy as np
  2854. >>> x = np.asarray([1, -1], np.float32)
  2855. >>> output = np.arcsin(x)
  2856. >>> print(output)
  2857. [ 1.5707964 -1.5707964]
  2858. """
  2859. x = _cast_type_for_trigonometric(x)
  2860. return _apply_tensor_op(F.asin, x, dtype=dtype)
  2861. def arccos(x, dtype=None):
  2862. """
  2863. Trigonometric inverse cosine, element-wise.
  2864. Note:
  2865. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2866. not supported.
  2867. Args:
  2868. x (Tensor): Input tensor. x-coordinate on the unit circle.
  2869. For real arguments, the domain is :math:`[-1, 1]`.
  2870. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2871. output Tensor.
  2872. Returns:
  2873. Tensor.
  2874. Raises:
  2875. TypeError: If the input is not a tensor.
  2876. Supported Platforms:
  2877. ``Ascend`` ``GPU`` ``CPU``
  2878. Examples:
  2879. >>> import mindspore.numpy as np
  2880. >>> x = np.asarray([1, -1], np.float32)
  2881. >>> output = np.arccos(x)
  2882. >>> print(output)
  2883. [0. 3.1415927]
  2884. """
  2885. x = _cast_type_for_trigonometric(x)
  2886. return _apply_tensor_op(F.acos, x, dtype=dtype)
  2887. def arctan(x, dtype=None):
  2888. """
  2889. Trigonometric inverse tangent, element-wise.
  2890. The inverse of tan, so that if :math:`y = tan(x)` then :math:`x = arctan(y)`.
  2891. Note:
  2892. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2893. not supported.
  2894. Args:
  2895. x (Tensor): Input tensor.
  2896. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2897. output Tensor.
  2898. Returns:
  2899. Tensor or scalar. This is a scalar if `x` is a scalar.
  2900. Supported Platforms:
  2901. ``Ascend`` ``GPU`` ``CPU``
  2902. Examples:
  2903. >>> import mindspore.numpy as np
  2904. >>> x = np.arange(5).astype('float32')
  2905. >>> print(np.arctan(x))
  2906. [0. 0.7853982 1.1071488 1.2490457 1.3258177]
  2907. """
  2908. x = _cast_type_for_trigonometric(x)
  2909. return _apply_tensor_op(F.atan, x, dtype=dtype)
  2910. def sinh(x, dtype=None):
  2911. """
  2912. Hyperbolic sine, element-wise.
  2913. Note:
  2914. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2915. not supported.
  2916. Args:
  2917. x (Tensor): Input tensor.
  2918. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2919. output Tensor.
  2920. Returns:
  2921. Tensor or scalar. This is a scalar if `x` is a scalar.
  2922. Supported Platforms:
  2923. ``Ascend`` ``CPU``
  2924. Examples:
  2925. >>> import mindspore.numpy as np
  2926. >>> x = np.arange(5).astype('float32')
  2927. >>> print(np.sinh(x))
  2928. [ 0. 1.1752012 3.6268604 10.017875 27.289917 ]
  2929. """
  2930. x = _cast_type_for_trigonometric(x)
  2931. return _apply_tensor_op(F.sinh, x, dtype=dtype)
  2932. def cosh(x, dtype=None):
  2933. """
  2934. Hyperbolic cosine, element-wise.
  2935. Note:
  2936. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2937. not supported.
  2938. Args:
  2939. x (Tensor): Input tensor.
  2940. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2941. output Tensor.
  2942. Returns:
  2943. Tensor or scalar. This is a scalar if `x` is a scalar.
  2944. Supported Platforms:
  2945. ``Ascend`` ``CPU``
  2946. Examples:
  2947. >>> import mindspore.numpy as np
  2948. >>> x = np.arange(5).astype('float32')
  2949. >>> print(np.cosh(x))
  2950. [ 1. 1.5430807 3.7621956 10.067662 27.308233 ]
  2951. """
  2952. x = _cast_type_for_trigonometric(x)
  2953. return _apply_tensor_op(F.cosh, x, dtype=dtype)
  2954. def tanh(x, dtype=None):
  2955. """
  2956. Computes hyperbolic tangent element-wise.
  2957. Note:
  2958. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2959. not supported.
  2960. Args:
  2961. x (Tensor): Input tensor.
  2962. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2963. output Tensor.
  2964. Returns:
  2965. Tensor or scalar. This is a scalar if `x` is a scalar.
  2966. Supported Platforms:
  2967. ``Ascend`` ``GPU`` ``CPU``
  2968. Examples:
  2969. >>> import mindspore.numpy as np
  2970. >>> x = np.arange(5).astype('float32')
  2971. >>> print(np.tanh(x))
  2972. [0. 0.7615942 0.9640276 0.9950548 0.9993293]
  2973. """
  2974. x = _cast_type_for_trigonometric(x)
  2975. return _apply_tensor_op(F.tanh, x, dtype=dtype)
  2976. def arcsinh(x, dtype=None):
  2977. """
  2978. Inverse hyperbolic sine element-wise.
  2979. Note:
  2980. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  2981. not supported.
  2982. Args:
  2983. x (Tensor): Input tensor.
  2984. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  2985. output Tensor.
  2986. Returns:
  2987. Tensor or scalar. This is a scalar if `x` is a scalar.
  2988. Supported Platforms:
  2989. ``Ascend`` ``GPU`` ``CPU``
  2990. Examples:
  2991. >>> import mindspore.numpy as np
  2992. >>> x = np.arange(5).astype('float32')
  2993. >>> print(np.arcsinh(x))
  2994. [0. 0.8813736 1.4436355 1.8184465 2.0947125]
  2995. """
  2996. x = _cast_type_for_trigonometric(x)
  2997. return _apply_tensor_op(F.asinh, x, dtype=dtype)
  2998. def arccosh(x, dtype=None):
  2999. """
  3000. Inverse hyperbolic cosine, element-wise.
  3001. Note:
  3002. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  3003. not supported.
  3004. Args:
  3005. x (Tensor): Input tensor.
  3006. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  3007. output Tensor.
  3008. Returns:
  3009. Tensor or scalar. This is a scalar if `x` is a scalar.
  3010. Supported Platforms:
  3011. ``Ascend`` ``GPU`` ``CPU``
  3012. Examples:
  3013. >>> import mindspore.numpy as np
  3014. >>> x = np.arange(1, 5).astype('float32')
  3015. >>> print(np.arccosh(x))
  3016. [0. 1.316958 1.7627472 2.063437 ]
  3017. """
  3018. x = _cast_type_for_trigonometric(x)
  3019. return _apply_tensor_op(F.acosh, x, dtype=dtype)
  3020. def arctanh(x, dtype=None):
  3021. """
  3022. Inverse hyperbolic tangent element-wise.
  3023. Note:
  3024. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  3025. not supported.
  3026. Args:
  3027. x (Tensor): Input tensor.
  3028. dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
  3029. output Tensor.
  3030. Returns:
  3031. Tensor or scalar. This is a scalar if `x` is a scalar.
  3032. Supported Platforms:
  3033. ``Ascend`` ``CPU``
  3034. Examples:
  3035. >>> import mindspore.numpy as np
  3036. >>> x = np.array([-0.99, -0.75, -0.5, 0, 0.5]).astype('float32')
  3037. >>> print(np.arctanh(x))
  3038. [-2.646653 -0.97295505 -0.54930615 0. 0.54930615]
  3039. """
  3040. x = _cast_type_for_trigonometric(x)
  3041. return _apply_tensor_op(F.atanh, x, dtype=dtype)
  3042. def arctan2(x1, x2, dtype=None):
  3043. """
  3044. Element-wise arc tangent of :math:`x1/x2` choosing the quadrant correctly.
  3045. Note:
  3046. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  3047. not supported.
  3048. Args:
  3049. x1 (Tensor): input tensor.
  3050. x2 (Tensor): input tensor.
  3051. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  3052. output Tensor.
  3053. Returns:
  3054. Tensor or scalar, the sum of `x1` and `x2`, element-wise. This is a scalar
  3055. if both `x1` and `x2` are scalars.
  3056. Supported Platforms:
  3057. ``Ascend`` ``CPU`` ``GPU``
  3058. Examples:
  3059. >>> import mindspore.numpy as np
  3060. >>> x1 = np.array([-1, +1, +1, -1])
  3061. >>> x2 = np.array([-1, -1, +1, +1])
  3062. >>> output = np.arctan2(x1, x2)
  3063. >>> print(output)
  3064. [-2.3561945 2.3561945 0.78539819 -0.78539819]
  3065. """
  3066. x1 = _cast_type_for_trigonometric(x1)
  3067. x2 = _cast_type_for_trigonometric(x2)
  3068. return _apply_tensor_op(F.atan2, x1, x2, dtype=dtype)
  3069. def promote_types(type1, type2):
  3070. """
  3071. Returns the data type with the smallest size and smallest scalar kind.
  3072. Note:
  3073. The promotion rule is slightly different from original Numpy, but more like
  3074. jax, due to the preference on ``32-bit`` over ``64-bit`` data types.
  3075. Args:
  3076. type1 (Union[:class:`mindspore.dtype`, str]): First data type.
  3077. type2 (Union[:class:`mindspore.dtype`, str]): Second data type.
  3078. Returns:
  3079. The promoted data type.
  3080. Raises:
  3081. TypeError: if the input are not valid :class:`mindspore.dtype` input.
  3082. Supported Platforms:
  3083. ``Ascend`` ``GPU`` ``CPU``
  3084. Examples:
  3085. >>> import mindspore.numpy as np
  3086. >>> output = np.promote_types(np.float32, np.float64)
  3087. >>> print(output)
  3088. Float64
  3089. """
  3090. type1 = _check_dtype(type1)
  3091. type2 = _check_dtype(type2)
  3092. return _promote(type1, type2)
def corrcoef(x, y=None, rowvar=True, dtype=None):
    r"""
    Returns Pearson product-moment correlation coefficients.

    Please refer to the documentation for cov for more detail. The relationship
    between the correlation coefficient matrix, R, and the covariance matrix, C, is
    :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
    The values of R are between -1 and 1, inclusive.

    Note:
        Currently, complex numbers are not supported.

    Args:
        x (Union[int, float, bool, tuple, list, Tensor]): A 1-D or 2-D array containing
            multiple variables and observations. Each row of `x` represents a variable,
            and each column a single observation of all those variables. Also see rowvar below.
        y (Union[int, float, bool, tuple, list, Tensor], optional): An additional set
            of variables and observations.
        rowvar (bool, optional): If rowvar is `True` (default), then each row represents
            a variable, with observations in the columns. Otherwise, the relationship
            is transposed: each column represents a variable, while the rows contain observations.
        dtype (:class:`mindspore.dtype`, optional): Data-type of the result. By default,
            the return data-type will have at least float32 precision.

    Returns:
        Tensor, The correlation coefficient matrix of the variables.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `x` and `y` have wrong dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.corrcoef([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]])
        >>> print(output)
        [[1. 0.9827076 1. ]
        [0.9827077 0.99999994 0.9827077 ]
        [1. 0.9827076 1. ]]
    """
    # This implementation was adapted from original Numpy.
    c = cov(x, y, rowvar)
    if not c.shape:
        # 0-d covariance (single scalar variable): c/c yields 1 (or nan for c == 0).
        return F.tensor_div(c, c)
    d = diag(c)
    # Standard deviations are the square roots of the covariance diagonal.
    stddev = sqrt(d)
    # Normalize rows then columns: R_ij = C_ij / (stddev_i * stddev_j).
    c /= F.expand_dims(stddev, -1)
    c /= F.expand_dims(stddev, 0)
    # Clamp floating-point rounding noise into the mathematically valid range.
    c = clip(c, -1, 1)
    if dtype is not None:
        return c.astype(dtype)
    return c
def _slice_along_axis(f, axis, slice_start, slice_end):
    """
    Slice a tensor along a given axis, a helper function for gradient.

    Args:
        f (Tensor): Input Tensor.
        axis (int): Specified axis.
        slice_start (int): The start of the slice.
        slice_end (int): The end of the slice (exclusive).

    Returns:
        Sliced tensor.
    """
    slice_size = slice_end - slice_start
    # Start at index 0 on every axis, full extent on every axis ...
    index_start = (0,) * f.ndim
    index_end = f.shape
    # ... then narrow only the requested axis to [slice_start, slice_start + slice_size).
    index_start = _tuple_setitem(index_start, axis, slice_start)
    # F.tensor_slice takes per-axis sizes (not end indices), hence slice_size.
    index_end = _tuple_setitem(index_end, axis, slice_size)
    return F.tensor_slice(f, index_start, index_end)
def _gradient_along_axis(f, h, axis):
    """compute the gradients of `f` along a given axis, a helper function of gradient."""
    end = f.shape[axis]
    # One-sided first-order differences at the two boundaries.
    upper_edge = _slice_along_axis(f, axis, 1, 2) - _slice_along_axis(f, axis, 0, 1)
    lower_edge = _slice_along_axis(f, axis, end-1, end) - _slice_along_axis(f, axis, end-2, end-1)
    if end <= 2:
        # Too few samples for central differences: only the two edges exist.
        a_grad = concatenate((upper_edge, lower_edge), axis)
    else:
        # Second-order central differences for the interior: (f[i+1] - f[i-1]) / 2.
        middle = (_slice_along_axis(f, axis, 2, end) - _slice_along_axis(f, axis, 0, end-2)) * 0.5
        a_grad = concatenate((upper_edge, middle, lower_edge), axis)
    # Divide by the (uniform) sample spacing.
    return a_grad / h
def check_gradient_arguments(f, axis, edge_order):
    """Check and normalize the arguments for gradient: returns (tensor f, tuple axis, edge_order)."""
    if edge_order != 1:
        # Only first-order one-sided boundary differences are implemented.
        _raise_unimplemented_error("edge_order != 1 not implemented")
    if not isinstance(f, Tensor):
        f = asarray_const(f)
    if f.dtype != mstype.float64:
        # Promote any non-float64 input to float32 so differences are floating point.
        f = f.astype(mstype.float32)
    if axis is None:
        # Default: differentiate along every axis.
        axis = F.make_range(f.ndim)
    else:
        _check_axis_type(axis, True, True, True)
        # Resolve negative axes and normalize a single int to a 1-tuple.
        axis = _canonicalize_axis(axis, f.ndim)
        axis = (axis,) if isinstance(axis, int) else axis
    return f, axis, edge_order
def gradient(f, *varargs, axis=None, edge_order=1):
    """
    Returns the gradient of a N-dimensional array.
    The gradient is computed using second order accurate central differences
    in the interior points and either first or second order accurate one-sides
    (forward or backwards) differences at the boundaries.
    The returned gradient hence has the same shape as the input array.

    Note:
        Currently we only support `edge_order` =1 and uniform spacing of `varargs`.

    Args:
        f (Union[tuple, list, Tensor]): An N-dimensional array containing samples of
            a scalar function.
        varargs (Union[tuple[number], tuple[tensor scalar]], optional):
            Spacing between f values. Default unitary spacing for all dimensions.
            Spacing can be specified using:

            1. single scalar to specify a sample distance for all dimensions.
            2. N scalars to specify a constant sample distance for each dimension.
        edge_order (int): Gradient is calculated using N-th order accurate differences
            at the boundaries. Default: 1.
        axis (Union[None, int, tuple(int), list(int)], optional): Gradient is calculated
            only along the given axis or axes. The default :class:`(axis = None)` is to calculate
            the gradient for all the axes of the input tensor. `axis` may be negative,
            in which case it counts from the last to the first `axis`.

    Returns:
        gradient, a list of tensors (or a single tensor if there is only one dimension
        to be calculated). Each derivative has the same shape as f.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `axis` values out of bounds, or shape of `f` has entries < 1.
        NotImplementedError: if `edge_order` != 1, or `varargs` contains non-scalar entries.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.gradient([[1, 2, 6], [3, 4, 5]], axis=-1)
        >>> print(output)
        [[1. 2.5 4. ]
        [1. 1. 1. ]]
    """
    # This implementation was adapted from Numpy and jax.numpy
    f, axis, edge_order = check_gradient_arguments(f, axis, edge_order)
    len_axes = len(axis)
    n = len(varargs)
    dx = None
    # check varargs and make varags the same length as axis
    if n == 0 or varargs is None:
        # no spacing: assume unit spacing on every requested axis
        dx = (1,) * len_axes
    elif n == 1:
        # single value for all axes
        dx = varargs * len_axes
    elif n == len_axes:
        # one spacing per requested axis
        dx = varargs
    else:
        _raise_type_error("Invalid number of arguments")
    a_grad = []
    for idx in F.make_range(len_axes):
        h = dx[idx]
        ax = axis[idx]
        if f.shape[ax] < 2:
            _raise_value_error("Shape of array too small to calculate a numerical gradient, "
                               "at least 2 elements are required.")
        # if h is not scalar (a Python number or a 0-d tensor), we cannot handle it yet
        if not (isinstance(h, (int, float, bool)) or (isinstance(h, Tensor) and h.ndim == 0)):
            _raise_unimplemented_error("Non-constant spacing not implemented")
        a_grad.append(_gradient_along_axis(f, h, ax))
    if len(axis) == 1:
        # Single axis: return the tensor directly rather than a one-element list.
        return a_grad[0]
    return a_grad
  3252. def sum_(a, axis=None, dtype=None, keepdims=False, initial=None):
  3253. """
  3254. Returns sum of array elements over a given axis.
  3255. Note:
  3256. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
  3257. `extobj` are not supported.
  3258. Args:
  3259. x (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
  3260. axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
  3261. If None, sum all of the elements of the input array.
  3262. If axis is negative it counts from the last to the first axis.
  3263. If axis is a tuple of integers, a sum is performed on all of the axes specified in the tuple
  3264. instead of a single axis or all the axes as before.
  3265. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  3266. output Tensor.
  3267. keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
  3268. dimensions with size one. With this option, the result will broadcast correctly against the input array.
  3269. If the default value is passed, then keepdims will not be passed through to the sum method of
  3270. sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
  3271. implement keepdims any exceptions will be raised. Default: `False`.
  3272. initial (scalar): Starting value for the sum, if `None`, which refers to the first element of the reduction.
  3273. Default: `None`.
  3274. Returns:
  3275. Tensor. An array with the same shape as a, with the specified axis removed.
  3276. If a is a 0-d array, or if axis is None, a scalar is returned.
  3277. If an output array is specified, a reference to out is returned.
  3278. Raises:
  3279. TypeError: If input is not array_like or `axis` is not int or tuple of integers or
  3280. `keepdims` is not integer or `initial` is not scalar.
  3281. ValueError: If any axis is out of range or duplicate axes exist.
  3282. Supported Platforms:
  3283. ``Ascend`` ``GPU`` ``CPU``
  3284. Examples:
  3285. >>> import mindspore.numpy as np
  3286. >>> print(np.sum([0.5, 1.5]))
  3287. 2.0
  3288. >>> x = np.arange(10).reshape(2, 5).astype('float32')
  3289. >>> print(np.sum(x, axis=1))
  3290. [10. 35.]
  3291. """
  3292. a = _to_tensor(a)
  3293. return a.sum(axis, dtype, keepdims, initial)
@constexpr
def _min_cost_chain_matmul(dims):
    """
    Returns indices of splits that has the minimal cost for matmul.
    s[i, j] holds the index of the split with minimal cost for arrays[i, i + 1, ... j]

    Classic O(n^3) dynamic programming solution to the matrix-chain-multiplication
    ordering problem; evaluated at compile time via @constexpr.
    """
    dims = tuple(dims)
    # n matrices; matrix i has shape (dims[i], dims[i + 1]).
    n = len(dims) - 1
    # m[i][j]: minimal scalar-multiplication cost for the product of arrays[i..j].
    m = [[0]*n for _ in range(n)]
    s = [[0]*n for _ in range(n)]
    # pos is the chain length minus one; single matrices (pos == 0) cost 0.
    for pos in range(1, n):
        for i in range(n - pos):
            j = i + pos
            m[i][j] = sys.maxsize
            # Try every split point k: (arrays[i..k]) x (arrays[k+1..j]).
            for k in range(i, j):
                cost = m[i][k] + m[k + 1][j] + dims[i]*dims[k + 1]*dims[j + 1]
                if cost < m[i][j]:
                    m[i][j] = cost
                    s[i][j] = k
    return s
  3314. @constexpr
  3315. def _get_dims(shapes):
  3316. """
  3317. Returns the chain of the dimensions in arrays.
  3318. dims[i] == arrays[i - 1].shape[1] == arrays[i].shape[0]
  3319. """
  3320. shapes = tuple(shapes)
  3321. if any(len(shape) != 2 for shape in shapes):
  3322. raise ValueError('Array must be 2 dimensional')
  3323. dims = tuple(map(operator.itemgetter(0), shapes))
  3324. if any(shape[1] != dim for shape, dim in zip(shapes[:-1], dims[1:])):
  3325. raise ValueError(f'shapes not aligned')
  3326. return dims + (shapes[-1][1],)
def _multi_dot(arrays, i, j, order):
    """Computes multi dot recursively using minimal cost."""
    # Base case: a chain of one matrix is itself.
    if i == j:
        return arrays[i]
    # order[i][j] is the optimal split point from _min_cost_chain_matmul:
    # multiply (arrays[i..k]) by (arrays[k+1..j]).
    return dot(_multi_dot(arrays, i, order[i][j], order),
               _multi_dot(arrays, order[i][j] + 1, j, order))
def multi_dot(arrays):
    """
    Computes the dot product of two or more arrays in a single function call, while automatically
    selecting the fastest evaluation order.

    multi_dot chains numpy.dot and uses optimal parenthesization of the matrices
    `[1] <en.wikipedia.org/wiki/Matrix_chain_multiplication>`. Depending on the shapes of the
    matrices, this can speed up the multiplication a lot.
    If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it
    is treated as a column vector. The other arguments must be 2-D.

    Note:
        Numpy argument `out` is not supported.

    Args:
        arrays (sequence of array_like): If the first argument is 1-D it is treated as row
            vector. If the last argument is 1-D it is treated as column vector. The other
            arguments must be 2-D.

    Returns:
        Tensor, the dot product of the supplied arrays.

    Raises:
        ValueError: arrays are not 2-D.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.ones((10000, 100))
        >>> B = np.ones((100, 1000))
        >>> C = np.ones((1000, 5))
        >>> D = np.ones((5, 333))
        >>> output = np.multi_dot([A, B, C, D])
        >>> print(output)
        [[500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        ...
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]]
    """
    if len(arrays) < 2:
        _raise_value_error('Expecting at least 2 arrays')
    if isinstance(arrays, (tuple, list)):
        arrays = _to_tensor(*arrays)
    else:
        # A single stacked tensor: flatten the leading dims and split it back
        # into a sequence of matrices.
        arrays = _to_tensor(arrays)
        num = len(arrays)
        arrays = F.reshape(arrays, (-1,) + _tuple_slice(F.shape(arrays), 2, None))
        arrays = split(arrays, num)
    if len(arrays) == 2:
        # Two matrices: only one possible order, no DP needed.
        return dot(*arrays)
    shape_out = ()
    arrs = []
    for arr in arrays:
        arrs.append(arr)
    # Promote a 1-D first argument to a row vector; otherwise its row count
    # contributes to the output shape.
    if F.rank(arrs[0]) == 1:
        arrs[0] = F.reshape(arrs[0], (1, arrs[0].size))
    else:
        shape_out += (F.shape(arrs[0])[0],)
    # Promote a 1-D last argument to a column vector; otherwise its column count
    # contributes to the output shape.
    if F.rank(arrs[-1]) == 1:
        arrs[-1] = F.reshape(arrs[-1], (arrs[-1].size, 1))
    else:
        shape_out += (F.shape(arrs[-1])[1],)
    shapes = []
    for arr in arrs:
        shapes.append(F.shape(arr))
    # Compute the optimal multiplication order, then evaluate recursively.
    dims = _get_dims(shapes)
    order = _min_cost_chain_matmul(dims)
    res = _multi_dot(arrs, 0, len(arrs) - 1, order)
    # Drop the dimensions introduced by the 1-D -> 2-D promotions above.
    return F.reshape(res, shape_out)
  3400. def argmax(a, axis=None):
  3401. """
  3402. Returns the indices of the maximum values along an axis.
  3403. Note:
  3404. Numpy argument `out` is not supported.
  3405. On Ascend, in case of multiple occurrences of the maximum values, the return
  3406. indices may not necessarily correspond to the first occurrence.
  3407. Args:
  3408. a (Union[int, float, bool, list, tuple, Tensor]): Input array.
  3409. axis (int, optional): By default, the index is into
  3410. the flattened array, otherwise along the specified axis.
  3411. Returns:
  3412. Tensor, array of indices into the array. It has the same
  3413. shape as a.shape with the dimension along axis removed.
  3414. Raises:
  3415. ValueError: if axis is out of range.
  3416. Supported Platforms:
  3417. ``Ascend`` ``GPU`` ``CPU``
  3418. Examples:
  3419. >>> import mindspore.numpy as np
  3420. >>> a = np.arange(10, 16).reshape(2, 3)
  3421. >>> print(np.argmax(a))
  3422. 5
  3423. >>> print(np.argmax(a, axis=0))
  3424. [1 1 1]
  3425. >>> print(np.argmax(a, axis=1))
  3426. [2 2]
  3427. """
  3428. a = _to_tensor(a)
  3429. return a.argmax(axis)
  3430. def argmin(a, axis=None):
  3431. """
  3432. Returns the indices of the minimum values along an axis.
  3433. Note:
  3434. Numpy argument `out` is not supported.
  3435. Args:
  3436. a (Union[int, float, bool, list, tuple, Tensor]): Input array.
  3437. axis (int, optional): By default, the index is into
  3438. the flattened array, otherwise along the specified axis.
  3439. Returns:
  3440. Tensor, array of indices into the array. It has the same
  3441. shape as a.shape with the dimension along axis removed.
  3442. Raises:
  3443. ValueError: if axis is out of range.
  3444. Supported Platforms:
  3445. ``Ascend`` ``GPU`` ``CPU``
  3446. Examples:
  3447. >>> import mindspore.numpy as np
  3448. >>> a = np.arange(10, 16).reshape(2, 3)
  3449. >>> print(np.argmin(a))
  3450. 0
  3451. >>> print(np.argmin(a, axis=0))
  3452. [0 0 0]
  3453. >>> print(np.argmin(a, axis=1))
  3454. [0 0]
  3455. """
  3456. a = _to_tensor(a)
  3457. return a.argmin(axis)
@constexpr
def _get_sort_range(size):
    """Returns the range for number of searches (log2(size)) on a sorted array with the given size."""
    # NOTE(review): builds range(ceil(log2(size + 1))) — the number of halving steps a
    # binary search needs. Computed with tensor ops so it can fold inside @constexpr;
    # presumably ceil/log2 here are the tensor-aware wrappers from this module — verify.
    return tuple(range(ceil(log2(_to_tensor(size + 1).astype(mstype.float32))).astype(mstype.int32)))
  3462. def searchsorted(a, v, side='left', sorter=None):
  3463. """
  3464. Finds indices where elements should be inserted to maintain order.
  3465. Finds the indices into a sorted array `a` such that, if the corresponding elements
  3466. in `v` were inserted before the indices, the order of `a` would be preserved.
  3467. Args:
  3468. a (Union[list, tuple, Tensor]): 1-D input array. If `sorter` is
  3469. None, then it must be sorted in ascending order, otherwise `sorter` must be
  3470. an array of indices that sort it.
  3471. v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
  3472. side ('left', 'right', optional): If 'left', the index of the first suitable
  3473. location found is given. If 'right', return the last such index. If there is
  3474. no suitable index, return either 0 or N (where N is the length of `a`).
  3475. sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
  3476. integer indices that sort array `a` into ascending order. They are typically
  3477. the result of argsort.
  3478. Returns:
  3479. Tensor, array of insertion points with the same shape as `v`.
  3480. Raises:
  3481. ValueError: if argument for `side` or `sorter` is invalid.
  3482. Supported Platforms:
  3483. ``Ascend`` ``GPU`` ``CPU``
  3484. Examples:
  3485. >>> from mindspore import numpy as np
  3486. >>> print(np.searchsorted([1,2,3,4,5], 3))
  3487. 2
  3488. >>> print(np.searchsorted([1,2,3,4,5], 3, side='right'))
  3489. 3
  3490. >>> print(np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]))
  3491. [0 5 1 2]
  3492. """
  3493. if side not in ('left', 'right'):
  3494. _raise_value_error('invalid value for keyword "side"')
  3495. a = _to_tensor(a).astype(mstype.float32)
  3496. if F.rank(a) != 1:
  3497. _raise_value_error('`a` should be 1-D array')
  3498. v = _to_tensor(v)
  3499. shape = F.shape(v)
  3500. if sorter is not None:
  3501. if F.rank(sorter) != 1 or sorter.size != a.size:
  3502. _raise_value_error('sorter must be 1-D array with the same size as `a`')
  3503. sorter = _to_tensor(sorter)
  3504. sorter = F.expand_dims(sorter, -1)
  3505. a = F.gather_nd(a, sorter)
  3506. less_op = F.tensor_le if side == 'left' else F.tensor_lt
  3507. i = F.fill(mstype.int32, shape, 0)
  3508. j = F.fill(mstype.int32, shape, a.size)
  3509. two = F.fill(mstype.int32, shape, 2)
  3510. for _ in _get_sort_range(a.size):
  3511. mid = floor_divide(add(i, j), two)
  3512. mask = less_op(v, F.gather_nd(a, F.expand_dims(mid, -1)))
  3513. i = F.select(mask, i, mid)
  3514. j = F.select(mask, mid, j)
  3515. return j
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation for monotonically increasing sample points.
    Returns the one-dimensional piecewise linear interpolant to a function with given
    discrete data points `(xp, fp)`, evaluated at `x`.

    Note:
        Numpy argument `period` is not supported.
        Complex values are not supported.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): The x-coordinates at which
            to evaluate the interpolated values.
        xp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
            x-coordinates of the data points, must be increasing.
        fp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
            y-coordinates of the data points, same length as `xp`.
        left (float, optional): Value to return for ``x < xp[0]``, default is ``fp[0]``
            once obtained.
        right (float, optional): Value to return for ``x > xp[-1]``, default is ``fp[-1]``
            once obtained.

    Returns:
        Tensor, the interpolated values, same shape as `x`.

    Raises:
        ValueError: if `xp` or `fp` is not one-dimensional, or if `xp` and `fp` do not have
            the same length.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> xp = [1, 2, 3]
        >>> fp = [3, 2, 0]
        >>> print(np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp))
        [3. 3. 2.5 0.55999994 0. ]
        >>> UNDEF = -99.0
        >>> print(np.interp(3.14, xp, fp, right=UNDEF))
        -99.0
    """
    # implement period once sort is supported
    x, xp, fp = _to_tensor(x, xp, fp)
    if F.rank(xp) != 1 or F.rank(fp) != 1:
        _raise_value_error('xp and fp must be 1-d sequences')
    size = xp.size
    if fp.size != size:
        _raise_value_error('the y-coordinates must have the same length as `xp`')
    xp = xp.astype(mstype.float32)
    fp = fp.astype(mstype.float32)
    # For each query point locate the surrounding sample interval [x_0, x_1],
    # clipping so out-of-range queries still index valid samples.
    indices_1 = clip(searchsorted(xp, x), 0, size - 1)
    indices_0 = clip(indices_1 - _to_tensor(1), 0, size - 1)
    indices_0 = F.expand_dims(indices_0, -1)
    indices_1 = F.expand_dims(indices_1, -1)
    x_0 = F.gather_nd(xp, indices_0)
    x_1 = F.gather_nd(xp, indices_1)
    y_0 = F.gather_nd(fp, indices_0)
    y_1 = F.gather_nd(fp, indices_1)
    # Linear interpolation between (x_0, y_0) and (x_1, y_1).
    res = (y_0*(x_1 - x) + y_1*(x - x_0))/(x_1 - x_0)
    # Guard against division by zero where both interval endpoints coincide.
    res = F.select(F.equal(x_0, x_1), y_0, res)
    idx_0 = _to_tensor([0])
    idx_last = _to_tensor([size - 1])
    # Fill values for out-of-range queries default to the first/last sample.
    if left is None:
        left = F.gather_nd(fp, idx_0)
    left = full(F.shape(x), left, mstype.float32)
    if right is None:
        right = F.gather_nd(fp, idx_last)
    right = full(F.shape(x), right, mstype.float32)
    res = F.select(F.tensor_lt(x, F.gather_nd(xp, idx_0)), left, res)
    res = F.select(F.tensor_gt(x, F.gather_nd(xp, idx_last)), right, res)
    return res
  3581. def _apply_tensor_op(fn, *args, dtype=None):
  3582. """Applies tensor operations based on fn"""
  3583. args = _to_tensor(*args)
  3584. if isinstance(args, Tensor):
  3585. res = fn(args)
  3586. else:
  3587. res = fn(*args)
  3588. if dtype is not None and not _check_same_type(F.dtype(res), dtype):
  3589. res = F.cast(res, dtype)
  3590. return res
def sign(x, dtype=None):
    """
    Returns an element-wise indication of the sign of a number.
    The sign function returns `-1 if x < 0, 0 if x == 0, 1 if x > 0`. nan is returned for nan inputs.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Complex inputs are not supported now.
        On Ascend, integer inputs are not supported.

    Args:
        x (Union[int, float, list, tuple, Tensor]): Input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        The sign of x. This is a tensor or a scalar when x is a scalar.

    Raises:
        TypeError: if dtype of the input is not in the given types or
            the input can not be converted to tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.sign(np.array([-1., 0., 1., 1.2]))
        >>> print(output)
        [-1. 0. 1. 1.]
    """
    if not isinstance(x, (int, float, list, tuple, Tensor)):
        _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x)
    x = _to_tensor(x)
    if _check_same_type(F.dtype(x), mstype.bool_):
        _raise_type_error("sign does not accept dtype bool.")
    # x / |x| is +-1 for non-zero x (and nan for x == 0, masked out below).
    _non_zero_sign = x / absolute(x)
    _zero = _broadcast_to_shape(_make_tensor(0, x.dtype), x.shape)
    is_zero = F.equal(x, 0)
    # Replace the undefined 0/0 positions with an explicit 0.
    res = F.select(is_zero, _zero, _non_zero_sign)
    if dtype is not None and not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
  3629. def copysign(x1, x2, dtype=None):
  3630. """
  3631. Changes the sign of `x1` to that of `x2`, element-wise.
  3632. If `x2` is a scalar, its sign will be copied to all elements of `x1`.
  3633. Note:
  3634. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  3635. not supported.
  3636. Complex inputs are not supported now.
  3637. Args:
  3638. x1 (Union[int, float, list, tuple, Tensor]): Values to change the sign of.
  3639. x2 (Union[int, float, list, tuple, Tensor]): The sign of x2 is copied to x1. If `x1.shape != x2.shape`,
  3640. they must be broadcastable to a common shape (which becomes the shape of the output).
  3641. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  3642. output Tensor.
  3643. Returns:
  3644. Tensor or scalar. The values of `x1` with the sign of `x2`. This is a scalar if both `x1` and `x2` are scalars.
  3645. Raises:
  3646. TypeError: if dtype of the input is not in the given types or
  3647. the input can not be converted to tensor.
  3648. Supported Platforms:
  3649. ``Ascend`` ``GPU`` ``CPU``
  3650. Examples:
  3651. >>> import mindspore.numpy as np
  3652. >>> output = np.copysign(np.array([1, -1, -1]), np.array([-1, 1, -1]))
  3653. >>> print(output)
  3654. [-1 1 -1]
  3655. """
  3656. if not isinstance(x1, (int, float, list, tuple, Tensor)):
  3657. _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x1)
  3658. if not isinstance(x2, (int, float, list, tuple, Tensor)):
  3659. _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x2)
  3660. x1, x2 = _to_tensor(x1, x2)
  3661. shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
  3662. x1 = _broadcast_to_shape(x1, shape_out)
  3663. x2 = _broadcast_to_shape(x2, shape_out)
  3664. if _check_same_type(F.dtype(x1), mstype.bool_) or _check_same_type(F.dtype(x2), mstype.bool_):
  3665. _raise_type_error("sign does not accept dtype bool.")
  3666. original_dtype = x1.dtype
  3667. if not _check_is_float(original_dtype):
  3668. pos_tensor = F.absolute(x1.astype('float32')).astype(original_dtype)
  3669. else:
  3670. pos_tensor = F.absolute(x1)
  3671. neg_tensor = F.neg_tensor(pos_tensor)
  3672. less_zero = F.less(x2, 0)
  3673. res = F.select(less_zero, neg_tensor, pos_tensor)
  3674. if dtype is not None and not _check_same_type(F.dtype(res), dtype):
  3675. res = F.cast(res, dtype)
  3676. return res
def digitize(x, bins, right=False):
    """
    Returns the indices of the bins to which each value in input array belongs.
    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is returned
    as appropriate.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): Input array to be binned.
        bins (Union[list, tuple, Tensor]): Array of bins. It has to
            be 1-dimensional and monotonic.
        right (boolean, optional): Indicating whether the intervals include the right
            or the left bin edge. Default behavior is ``(right==False)`` indicating
            that the interval does not include the right edge. The left bin end is
            open in this case, i.e., ``bins[i-1] <= x < bins[i]`` is the default
            behavior for monotonically increasing bins.

    Returns:
        Tensor of ints, output array of indices, of same shape as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
        >>> bins = np.array([0, 5, 10, 15, 20])
        >>> inds = np.digitize(x, bins)
        >>> print(inds)
        [1 3 3 4 5]
    """
    x, bins = _to_tensor(x, bins)
    if F.rank(bins) != 1:
        _raise_value_error('bins should be 1-dimensional')
    if x.size == 0:
        # Empty input: nothing to bin, return it unchanged.
        return x
    if bins.size == 0:
        # No bin edges: every value maps to index 0.
        return zeros(F.shape(x), mstype.int32)
    # digitize's `right` semantics are the mirror of searchsorted's `side`.
    side = 'left' if right else 'right'
    first_bin = bins[0]
    last_bin = bins[_type_convert(int, bins.size) - 1]
    # Monotonically increasing bins use searchsorted directly; decreasing bins
    # search the flipped array and mirror the resulting indices.
    cond = first_bin <= last_bin
    incr = searchsorted(bins, x, side)
    decr = _to_tensor(bins.size) - searchsorted(flip(bins), x, side)
    return where_(cond, incr, decr)
def bincount(x, weights=None, minlength=0, length=None):
    """
    Count number of occurrences of each value in array of non-negative ints.
    The number of bins (of size 1) is one larger than the largest value in `x`.
    If `minlength` is specified, there will be at least this number of bins in the
    output array (though it will be longer if necessary, depending on the contents
    of `x`). Each bin gives the number of occurrences of its index value in `x`. If
    `weights` is specified the input array is weighted by it, i.e. if a value `n`
    is found at position `i`, ``out[n] += weight[i]`` instead of ``out[n] += 1``.

    Note:
        The additional argument `length` specifies the number of bins (overriding
        ``x.max() + 1``), which must be provided in graph mode.
        If `x` contains negative values, no error will be raised, and negative values
        are treated as zeros instead.

    Args:
        x (Union[list, tuple, Tensor]): 1-d input array.
        weights (Union[int, float, bool, list, tuple, Tensor], optional): Weights,
            array of the same shape as `x`. Defaults to None.
        minlength (int, optional): A minimum number of bins for the output array.
            Defaults to 0.
        length (int, optional): Number of bins. Defaults to None.

    Returns:
        Tensor, the result of binning the input array. The length of out is equal to
        ``np.amax(x)+1``.

    Raises:
        ValueError: if `x` is not one-dimensional, or if `x` and `weights` do not have
            the same shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.bincount(np.arange(5)))
        [1 1 1 1 1]
        >>> print(np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])))
        [1 3 1 1 0 0 0 1]
        >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
        >>> x = np.array([0, 1, 1, 2, 2, 2])
        >>> print(np.bincount(x, weights=w))
        [0.3 0.7 1.1]
    """
    x = _to_tensor(x)
    if F.rank(x) != 1:
        _raise_value_error('`x` should be one-dimensional')
    if not _check_is_int(F.dtype(x)):
        _raise_type_error('`x` should be an array of ints')
    # Negative values are clipped to zero instead of raising (see Note above).
    x = clip(x, 0, None)
    if length is None:
        if F.isconstant(x):
            # Constant (pynative) input: infer the number of bins from the data,
            # honoring minlength as the lower bound.
            length = int(maximum(F.reduce_max(x.astype(mstype.float32)), minlength - 1).asnumpy()) + 1
        else:
            _raise_value_error('argument `length` must be provided in graph mode')
    # One-hot style mapping: row i of idx_mapping is True where x == i.
    idx = arange(length).reshape(length, 1)
    idx_mapping = F.equal(x, idx)
    if weights is not None:
        weights = _to_tensor(weights)
        if F.shape(x) != F.shape(weights):
            _raise_value_error('`x` and `weights` must have the same length')
        idx_mapping *= weights
    # Summing each row counts (or weight-sums) the occurrences of that index.
    return F.reduce_sum(idx_mapping.astype(mstype.float32), 1).ravel()
def histogram(a, bins=10, range=None, weights=None, density=False):  # pylint: disable=redefined-builtin
    """
    Computes the histogram of a dataset.

    Note:
        String values for `bins` is not supported.
        Deprecated numpy argument `normed` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
            is computed over the flattened array.
        bins (Union[int, tuple, list, Tensor], optional): If `bins` is an int, it
            defines the number of equal-width bins in the given range (10, by
            default). If `bins` is a sequence, it defines the bin edges, including
            the rightmost edge, allowing for non-uniform bin widths.
        range((float, float), optional): The lower and upper range of the bins. If
            not provided, `range` is simply ``(a.min(), a.max())``. Values outside
            the range are ignored. The first element of the range must be less than
            or equal to the second.
        weights (Union[int, float, bool, list, tuple, Tensor], optional): An array
            of weights, of the same shape as `a`. If density is True, the weights
            are normalized, so that the integral of the density over the range
            remains 1.
        density (boolean, optional): If False, the result will contain the number of
            samples in each bin. If True, the result is the value of the probability
            density function at the bin, normalized such that the integral over the
            range is 1. Note that the sum of the histogram values will not be equal
            to 1 unless bins of unity width are chosen; it is not a probability mass
            function.

    Returns:
        (Tensor, Tensor), the values of the histogram and the bin edges.

    Raises:
        ValueError: if `x` and `weights` do not have the same size.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> print(np.histogram([1, 2, 1], bins=[0, 1, 2, 3]))
        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 2.00000000e+00, 1.00000000e+00]),
        Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3]))
        >>> print(np.histogram(np.arange(4), bins=np.arange(5), density=True))
        (Tensor(shape=[4], dtype=Float32, value=
        [ 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01]),
        Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]))
        >>> print(np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]))
        (Tensor(shape=[3], dtype=Float32, value= [ 1.00000000e+00, 4.00000000e+00, 1.00000000e+00]),
        Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3]))
    """
    a = _to_tensor(a)
    if weights is not None:
        weights = _to_tensor(weights)
        if F.shape(a) != F.shape(weights):
            _raise_value_error('weights should have the same shape as a')
        weights = weights.ravel()
    # Histogram is computed over the flattened array.
    a = a.ravel()
    bin_edges = histogram_bin_edges(a, bins, range, weights)
    # Map each sample to a bin index; 'right' makes each bin half-open on the left.
    data_to_bins = searchsorted(bin_edges, a, 'right')
    bin_size = _type_convert(int, bin_edges.size)
    # Samples equal to the last edge belong to the last (closed) bin.
    data_to_bins = where_(a == bin_edges[-1], _to_tensor(bin_size - 1), data_to_bins)
    # Index 0 counts out-of-range samples below the first edge; drop it with [1:].
    count = bincount(data_to_bins, weights, length=bin_size)[1:]
    if count.size == 0:
        return count, bin_edges
    if density:
        # Normalize so the integral of the density over the range equals 1.
        count = F.cast(count, mstype.float32)
        count = count/diff(bin_edges)/F.reduce_sum(count)
    return count, bin_edges
  3840. @constexpr
  3841. def _factor_flattened_hist(nbin):
  3842. """Returns the factor that will be applied to the histogram to be flattened."""
  3843. factor = list((itertools.accumulate(nbin[1:][::-1], operator.mul)))[::-1]
  3844. factor.append(1)
  3845. return factor
def _get_histogramdd_count(ndim, bin_edges, sample, weights):
    """Returns count for histogramdd."""
    data_indices = []
    nbin = ()
    flattened_bin_size = 1
    for i in F.make_range(ndim):
        # Bin index of each sample along dimension i.
        data_to_bins = searchsorted(bin_edges[i], sample[:, i], 'right')
        bin_size = _type_convert(int, bin_edges[i].size)
        # Samples equal to the last edge belong to the last (closed) bin.
        data_to_bins = where_(sample[:, i] == bin_edges[i][-1], _to_tensor(bin_size - 1), data_to_bins)
        data_indices.append(data_to_bins)
        # +1 accounts for the extra out-of-range bin on each axis.
        nbin += (bin_size + 1,)
        flattened_bin_size *= (bin_size + 1)
    # Mixed-radix encoding: combine the per-dimension indices into one flat index.
    factor = F.reshape(_to_tensor(_factor_flattened_hist(nbin)), (ndim, 1))
    stacked_indices = stack(data_indices) * factor
    if _get_device() == 'Ascend':
        # NOTE(review): cast appears to work around an Ascend backend dtype limitation.
        stacked_indices = F.cast(stacked_indices, mstype.float32)
    flattened_hist = F.reduce_sum(stacked_indices.astype(mstype.float32), 0)
    count = bincount(flattened_hist.astype(mstype.int32), weights, length=flattened_bin_size)
    count = F.reshape(count, nbin)
    # Trim the outlier bins (below first edge / above last edge) on every axis.
    slices = _list_comprehensions(ndim, F.make_slice(1, -1, 1), True)
    count = count[slices]
    return count
def histogramdd(sample, bins=10, range=None, weights=None, density=False):  # pylint: disable=redefined-builtin
    """
    Computes the multidimensional histogram of some data.

    Note:
        Deprecated numpy argument `normed` is not supported.

    Args:
        sample (Union[list, tuple, Tensor]): The data to be histogrammed, either `(N, D)`
            array, or `(D, N)` array_like. Note the unusual interpretation of sample
            when an array_like:
            When an array, each row is a coordinate in a `D-dimensional` space, such as
            ``histogramdd(np.array([p1, p2, p3]))``.
            When an array_like, each element is the list of values for single coordinate,
            such as ``histogramdd((X, Y, Z))``.
            The first form should be preferred.
        bins (Union[int, tuple, list], optional): The bin specification:
            A sequence of arrays describing the monotonically increasing bin edges along
            each dimension.
            The number of bins for each dimension ``(nx, ny, … =bins)``
            The number of bins for all dimensions ``(nx=ny=…=bins)``.
        range(Union[list, tuple], optional): A sequence of length `D`, each an optional
            ``(lower, upper)`` tuple giving the outer bin edges to be used if the edges
            are not given explicitly in bins. An entry of None in the sequence results in
            the minimum and maximum values being used for the corresponding dimension.
            The default, None, is equivalent to passing a tuple of `D` None values.
        weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
            `w_i` weighing each sample ``(x_i, y_i, z_i, …)``.
        density (boolean, optional): If False, the default, returns the number of samples
            in each bin. If True, returns the probability density function at the bin,
            ``bin_count / sample_count / bin_volume``.

    Returns:
        (Tensor, list of Tensor), the values of the histogram and the bin edges.

    Raises:
        ValueError: if `range` does not have the same size as the number of samples.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> sample = np.arange(15).reshape(5, 3)
        >>> print(sample)
        [[ 0 1 2]
        [ 3 4 5]
        [ 6 7 8]
        [ 9 10 11]
        [12 13 14]]
        >>> print(np.histogramdd(sample, bins=(2, 3, 4)))
        (Tensor(shape=[2, 3, 4], dtype=Float32, value=
        [[[ 1.00000000e+00, 1.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]],
        [[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
        [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.00000000e+00]]]),
        [Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 6.00000000e+00, 1.20000000e+01]),
        Tensor(shape=[4], dtype=Float32, value=
        [ 1.00000000e+00, 5.00000000e+00, 9.00000000e+00, 1.30000000e+01]),
        Tensor(shape=[5], dtype=Float32, value=
        [ 2.00000000e+00, 5.00000000e+00, 8.00000000e+00, 1.10000000e+01, 1.40000000e+01])])
    """
    if isinstance(sample, (tuple, list)):
        # (D, N) array_like form: convert every coordinate array, then stack to (N, D).
        sample = _to_tensor(*sample)
        sample = stack(sample, -1)
    elif not isinstance(sample, Tensor):
        _raise_type_error('sample should be (N, D) array, or (D, N) array_like')
    if F.rank(sample) != 2:
        _raise_value_error('when an array, sample should be 2-dimensional')
    ndim = F.shape(sample)[1]
    if isinstance(bins, int):
        # A single int means the same number of bins along every dimension.
        bins = _list_comprehensions(ndim, bins)
    if isinstance(bins, (tuple, list, Tensor)):
        if len(bins) != ndim:
            _raise_value_error('The dimension of bins must be equal to the dimension of the sample')
    else:
        _raise_type_error('bins should be int or sequence')
    if range is None:
        # None expands to a tuple of D None entries (auto range per dimension).
        range = _list_comprehensions(ndim, None, False, True)
    else:
        if len(range) != ndim:
            _raise_value_error('range argument must have one entry per dimension')
    bin_edges = []
    dedges = []
    for i in F.make_range(ndim):
        # Per-dimension edges and bin widths.
        edges = histogram_bin_edges(sample[:, i], bins[i], range[i], weights)
        bin_edges.append(edges)
        dedges.append(diff(edges))
    count = _get_histogramdd_count(ndim, bin_edges, sample, weights)
    if density:
        # Normalize: bin_count / sample_count / bin_volume.
        s = F.reduce_sum(count.astype(mstype.float32))
        for i in F.make_range(ndim):
            shape = _expanded_shape(ndim, dedges[i].size, i)
            count /= _to_tensor(dedges[i]).reshape(shape)
        count /= s
    return count, bin_edges
  3960. def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
  3961. """
  3962. Computes the multidimensional histogram of some data.
  3963. Note:
  3964. Deprecated numpy argument `normed` is not supported.
  3965. Args:
  3966. x (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the x
  3967. coordinates of the points to be histogrammed.
  3968. y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y
  3969. coordinates of the points to be histogrammed.
  3970. bins (Union[int, tuple, list], optional): The bin specification:
  3971. If int, the number of bins for the two dimensions ``(nx=ny=bins)``.
  3972. If array_like, the bin edges for the two dimensions ``(x_edges=y_edges=bins)``.
  3973. If [int, int], the number of bins in each dimension ``(nx, ny = bins)``.
  3974. If [array, array], the bin edges in each dimension ``(x_edges, y_edges = bins)``.
  3975. A combination [int, array] or [array, int], where int is the number of bins and
  3976. array is the bin edges.
  3977. range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost
  3978. edges of the bins along each dimension (if not specified explicitly in the bins
  3979. parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
  3980. will be considered outliers and not tallied in the histogram.
  3981. weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
  3982. `w_i` weighing each sample `(x_i, y_i)`.
  3983. density (boolean, optional): If False, the default, returns the number of samples
  3984. in each bin. If True, returns the probability density function at the bin,
  3985. ``bin_count / sample_count / bin_volume``.
  3986. Returns:
  3987. (Tensor, Tensor, Tensor), the values of the bi-directional histogram and the bin edges
  3988. along the first and second dimensions.
  3989. Raises:
  3990. ValueError: if `range` does not have the same size as the number of samples.
  3991. Supported Platforms:
  3992. ``Ascend`` ``GPU`` ``CPU``
  3993. Examples:
  3994. >>> from mindspore import numpy as np
  3995. >>> x = np.arange(5)
  3996. >>> y = np.arange(2, 7)
  3997. >>> print(np.histogram2d(x, y, bins=(2, 3)))
  3998. (Tensor(shape=[2, 3], dtype=Float32, value=
  3999. [[ 2.00000000e+00, 0.00000000e+00, 0.00000000e+00],
  4000. [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]]),
  4001. Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 2.00000000e+00, 4.00000000e+00]),
  4002. Tensor(shape=[4], dtype=Float32, value=
  4003. [ 2.00000000e+00, 3.33333349e+00, 4.66666698e+00, 6.00000000e+00]))
  4004. """
  4005. count, bin_edges = histogramdd((x, y), bins=bins, range=range, weights=weights, density=density)
  4006. return count, bin_edges[0], bin_edges[1]
  4007. def matrix_power(a, n):
  4008. """
  4009. Raises a square matrix to the (integer) power `n`.
  4010. For positive integers `n`, the power is computed by repeated matrix squarings and
  4011. matrix multiplications.
  4012. If :math:`n == 0`, the identity matrix of the same shape as `M` is returned.
  4013. Note:
  4014. Stacks of object matrices are not currently supported and
  4015. :math:`n < 0` is not supported.
  4016. Args:
  4017. a (Union[int, float, bool, list, tuple, Tensor]): Input matrix.
  4018. n (int): The exponent can be any integer or long integer, positive or zero.
  4019. Returns:
  4020. Tensor.
  4021. Raises:
  4022. TypeError: if the input can not be converted to a tensor or
  4023. the exponent is not integer.
  4024. ValueError: if the input includes less than 2 dimensions or
  4025. the last 2 dimensions are not square.
  4026. Supported Platforms:
  4027. ``Ascend`` ``GPU`` ``CPU``
  4028. Examples:
  4029. >>> from mindspore import numpy as np
  4030. >>> a = np.arange(16).reshape(4, 4).astype('float32')
  4031. >>> print(np.matrix_power(a, 2))
  4032. [[ 56. 62. 68. 74.]
  4033. [152. 174. 196. 218.]
  4034. [248. 286. 324. 362.]
  4035. [344. 398. 452. 506.]]
  4036. """
  4037. a = _to_tensor(a)
  4038. if not isinstance(n, int):
  4039. _raise_type_error("exponent must be an integer")
  4040. if a.ndim < 2:
  4041. _raise_value_error("Array must be at least two-dimensional")
  4042. if a.shape[-2] != a.shape[-1]:
  4043. _raise_value_error("Last 2 dimensions of the array must be square")
  4044. if n < 0:
  4045. _raise_value_error("n < 0 is not supported now.")
  4046. if n == 0:
  4047. return _broadcast_to_shape(eye(a.shape[-1], a.shape[-1], dtype=a.dtype), a.shape)
  4048. if n == 1:
  4049. return a
  4050. res = a
  4051. while n > 1:
  4052. res = C.matmul(res, a)
  4053. n = n - 1
  4054. return res
  4055. def around(a, decimals=0):
  4056. """
  4057. Evenly round to the given number of decimals.
  4058. Note:
  4059. Numpy argument `out` is not supported.
  4060. Complex numbers are not supported.
  4061. Args:
  4062. a (Union[int, float, list, tuple, Tensor]): Input data.
  4063. decimals (int): Number of decimal places to round to. Default: 0.
  4064. Returns:
  4065. Tensor. A tensor of the same type as a, containing the rounded values.
  4066. The result of rounding a float is a float.
  4067. Raises:
  4068. TypeError: if the input can not be converted to a tensor or
  4069. the `decimals` argument is not integer.
  4070. Supported Platforms:
  4071. ``Ascend`` ``GPU`` ``CPU``
  4072. Examples:
  4073. >>> import mindspore.numpy as np
  4074. >>> a = np.array([-1.3, 0.0, 0.5, 1.5, 2.5])
  4075. >>> print(np.around(a))
  4076. [-1. 0. 0. 2. 2.]
  4077. """
  4078. a = _to_tensor_origin_dtype(a)
  4079. if not isinstance(decimals, int):
  4080. _raise_type_error("decimals must be an integer")
  4081. if decimals < 0:
  4082. _raise_value_error("decimals < 0 is not supported now.")
  4083. if decimals == 0:
  4084. return _round(a)
  4085. return F.tensor_div(_round(a * 10**decimals), 10**decimals)
def _to_poly1d(x):
    """Convert `x` to a 1-d tensor of polynomial coefficients; reject higher-rank input."""
    x = atleast_1d(_to_tensor(x))
    if F.rank(x) > 1:
        _raise_value_error('input array must be scalar or 1-d sequence')
    return x
  4091. def polyadd(a1, a2):
  4092. """
  4093. Finds the sum of two polynomials.
  4094. Returns the polynomial resulting from the sum of two input polynomials.
  4095. Note:
  4096. Numpy object poly1d is currently not supported.
  4097. Args:
  4098. a1 (Union[int, float, list, tuple, Tensor): Input polynomial.
  4099. a2 (Union[int, float, list, tuple, Tensor): Input polynomial.
  4100. Returns:
  4101. Tensor, the sum of the inputs.
  4102. Raises:
  4103. ValueError: if the input array has more than 1 dimensions.
  4104. Supported Platforms:
  4105. ``Ascend`` ``GPU`` ``CPU``
  4106. Examples:
  4107. >>> import mindspore.numpy as np
  4108. >>> print(np.polyadd([1, 2], [9, 5, 4]))
  4109. [9 6 6]
  4110. """
  4111. a1 = _to_poly1d(a1)
  4112. a2 = _to_poly1d(a2)
  4113. diff_size = a1.size - a2.size
  4114. if diff_size == 0:
  4115. return add(a1, a2)
  4116. if diff_size > 0:
  4117. return concatenate((a1[:diff_size], add(a1[diff_size:], a2)))
  4118. return concatenate((a2[:-diff_size], add(a1, a2[-diff_size:])))
  4119. def polysub(a1, a2):
  4120. """
  4121. Difference (subtraction) of two polynomials.
  4122. Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
  4123. Note:
  4124. Numpy object poly1d is currently not supported.
  4125. Args:
  4126. a1 (Union[int, float, list, tuple, Tensor): Minuend polynomial.
  4127. a2 (Union[int, float, list, tuple, Tensor): Subtrahend polynomial.
  4128. Returns:
  4129. Tensor, the difference of the inputs.
  4130. Raises:
  4131. ValueError: if the input array has more than 1 dimensions.
  4132. Supported Platforms:
  4133. ``Ascend`` ``GPU`` ``CPU``
  4134. Examples:
  4135. >>> import mindspore.numpy as np
  4136. >>> print(np.polysub([2, 10, -2], [3, 10, -4]))
  4137. [-1 0 2]
  4138. """
  4139. return polyadd(a1, F.neg_tensor(_to_tensor(a2)))
def polyval(p, x):
    """
    Evaluates a polynomial at specific values.
    If `p` is of length `N`, this function returns the value:
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
    If `x` is a sequence, then ``p(x)`` is returned for each element of `x`. If `x`
    is another polynomial then the composite polynomial ``p(x(t))`` is returned.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): 1D array of polynomial
            coefficients (including coefficients equal to zero) from highest
            degree to the constant term.
        x (Union[int, float, bool, list, tuple, Tensor): A number, an array of
            numbers, at which to evaluate `p`.

    Returns:
        Tensor.

    Raises:
        ValueError: if `p` has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyval([3.,0.,1.], 5.))
        76.0
    """
    p = _to_poly1d(p)
    x = _to_tensor(x)
    shape = F.shape(x)
    # Exponents [N-1, N-2, ..., 0] matching the coefficient order of `p`.
    exp_p = arange(_type_convert(int, p.size) - 1, -1, -1).astype(mstype.float32)
    # Broadcast x against the exponents: each element of x gets all N powers.
    var_p = (x.reshape(shape + (1,)))**exp_p
    # Dot product of coefficients and powers along the last axis evaluates p(x).
    return F.reduce_sum(p*var_p, -1)
def polyder(p, m=1):
    """
    Returns the derivative of the specified order of a polynomial.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): Polynomial to differentiate.
            A sequence is interpreted as polynomial coefficients.
        m (int, optional): Defaults to 1, order of differentiation.

    Returns:
        Tensor, a new polynomial representing the derivative.

    Raises:
        ValueError: if `p` has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyder([1, 1, 1, 1]))
        [3 2 1]
    """
    p = _to_poly1d(p)
    if m < 0:
        _raise_value_error('Order of derivative must be positive')
    if m >= p.size:
        # Differentiating more times than there are coefficients gives the zero polynomial.
        return _to_tensor([])
    for _ in range(m):
        # One differentiation step: multiply each non-constant coefficient by its
        # exponent [N-1, ..., 1] and drop the constant term.
        coeff = _to_tensor(F.make_range(_type_convert(int, p.size) - 1, 0, -1))
        p = p[:-1]*coeff
    return p
  4201. def polymul(a1, a2):
  4202. """
  4203. Finds the product of two polynomials.
  4204. Note:
  4205. Numpy object poly1d is currently not supported.
  4206. Args:
  4207. a1 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.
  4208. a2 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.
  4209. Returns:
  4210. Tensor, a new polynomial representing the derivative.
  4211. Raises:
  4212. ValueError: if the input array has more than 1 dimensions.
  4213. Supported Platforms:
  4214. ``GPU``
  4215. Examples:
  4216. >>> import mindspore.numpy as np
  4217. >>> print(np.polymul([3, 1, 2], [2, 5]))
  4218. [ 6 17 9 10]
  4219. """
  4220. a1 = _to_poly1d(a1)
  4221. a2 = _to_poly1d(a2)
  4222. return convolve(a1, a2)
  4223. def polyint(p, m=1, k=None):
  4224. """
  4225. Returns an antiderivative (indefinite integral) of a polynomial.
  4226. Note:
  4227. Numpy object poly1d is currently not supported.
  4228. Args:
  4229. p (Union[int, float, bool, list, tuple, Tensor): Polynomial to integrate. A
  4230. sequence is interpreted as polynomial coefficients.
  4231. m (int, optional): Defaults to 1, Order of the antiderivative.
  4232. k (Union[int, list of int]y, optinoal): Integration constants. They are given
  4233. in the order of integration: those corresponding to highest-order terms
  4234. come first. If None (default), all constants are assumed to be zero. If
  4235. ``m = 1``, a single scalar can be given instead of a list.
  4236. Returns:
  4237. Tensor, a new polynomial representing the antiderivative.
  4238. Raises:
  4239. ValueError: if `p` has more than 1 dimensions.
  4240. Supported Platforms:
  4241. ``Ascend`` ``GPU`` ``CPU``
  4242. Examples:
  4243. >>> import mindspore.numpy as np
  4244. >>> print(np.polyint([1, 1, 1]))
  4245. [0.33333334 0.5 1. 0. ]
  4246. """
  4247. p = _to_poly1d(p)
  4248. if m < 0:
  4249. _raise_value_error('Order of derivative must be positive')
  4250. if m == 0:
  4251. return p
  4252. if k is None:
  4253. k = zeros(m, F.dtype(p))
  4254. k = atleast_1d(_to_tensor(k))
  4255. if k.size == 1:
  4256. k = F.tile(k, (m,))
  4257. k = F.expand_dims(k, -1)
  4258. for i in range(m):
  4259. coeff = _to_tensor(F.make_range(_type_convert(int, p.size), 0, -1))
  4260. p = concatenate((true_divide(p, coeff), k[i]))
  4261. return p
@constexpr
def _get_dtype(x):
    """Returns the dtype of x."""
    # bool must be tested before int: in Python, bool is a subclass of int.
    if isinstance(x, bool):
        return mstype.bool_
    if isinstance(x, int):
        return mstype.int32
    if isinstance(x, float):
        return mstype.float32
    if isinstance(x, typing.Number):
        # Already a mindspore dtype object; return it unchanged.
        return x
    if isinstance(x, str):
        t = dtype_map.get(x, None)
        if t is None:
            # Fall back to the canonical numpy spelling of the dtype string.
            t = dtype_map.get(str(nptype(x)))
        return t
    raise TypeError('data type not understood')
  4279. def result_type(*arrays_and_dtypes):
  4280. """
  4281. Returns the type that results from applying the type promotion rules to the arguments.
  4282. Note:
  4283. The promotion rule is slightly different from original Numpy, but more like
  4284. jax, due to the preference on ``32-bit`` over ``64-bit`` data types.
  4285. Complex dtypes are not supported.
  4286. Args:
  4287. *arrays_and_dtypes (Union[int, float, bool, list, tuple, Tensor, :class:`mindspore.dtype`, str]):
  4288. The operands of some operation whose result type is needed.
  4289. Returns:
  4290. :class:`mindspore.dtype`, the result type.
  4291. Raises:
  4292. TypeError: if the input is not a valid data type.
  4293. Supported Platforms:
  4294. ``Ascend`` ``GPU`` ``CPU``
  4295. Examples:
  4296. >>> import mindspore.numpy as np
  4297. >>> print(np.result_type('i2', np.float32, True))
  4298. Float32
  4299. """
  4300. def get_dtype(x):
  4301. if isinstance(x, Tensor):
  4302. return F.dtype(_to_tensor(x))
  4303. return _get_dtype(x)
  4304. dtype_out = get_dtype(arrays_and_dtypes[0])
  4305. for i in arrays_and_dtypes[1:]:
  4306. dtype_out = _promote(dtype_out, get_dtype(i))
  4307. return dtype_out
def unwrap(p, discont=3.141592653589793, axis=-1):
    """
    Unwraps by changing deltas between values to ``2*pi`` complement.
    Unwraps radian phase `p` by changing absolute jumps greater than `discont` to their
    `2*pi` complement along the given axis.

    Note:
        For absolute jumps that are within a very close range to pi, unwrapping may be done
        differently than numpy due to differences in round-off.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): Input array.
        discont (float, optional): Maximum discontinuity between values, default is pi.
        axis (int, optional): Axis along which unwrap will operate, default is -1.

    Returns:
        Tensor.

    Raises:
        ValueError: if the axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> phase = np.add(np.linspace(0, np.pi, num=5), [0, 0, 0, np.pi, np.pi])
        >>> print(phase)
        [0. 0.7853982 1.5707964 5.4977875 6.2831855]
        >>> print(np.unwrap(phase))
        [ 0.0000000e+00 7.8539819e-01 1.5707964e+00 -7.8539848e-01 -4.7683716e-07]
    """
    if not isinstance(discont, (int, float)):
        _raise_type_error('discont should be a float')
    p = _to_tensor(p)
    ndim = F.rank(p)
    axis = _check_axis_in_range(axis, ndim)
    # Differences between consecutive values along `axis`.
    dd = diff(p, axis=axis)
    # Wrap the differences into [-pi, pi).
    ddmod = remainder(add(dd, pi), 2*pi) - pi
    # A wrapped value of -pi with a positive original jump should map to +pi.
    ddmod = where_(F.logical_and(ddmod == -pi, dd > 0), pi, ddmod)
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    ph_correct = where_(absolute(dd) < discont, 0, ph_correct)
    # slice0 selects the first element along `axis`; slice1 selects the rest.
    slice_all = _list_comprehensions(F.rank(p), F.make_slice(None, None, None), True)
    slice0 = _tuple_setitem(slice_all, axis, F.make_slice(0, 1, None))
    slice1 = _tuple_setitem(slice_all, axis, F.make_slice(1, None, None))
    # First element is unchanged; the rest accumulate the running corrections.
    head = p[slice0]
    tail = add(p[slice1], cumsum(ph_correct, axis))
    return concatenate((head, tail), axis=axis)
def cumprod(a, axis=None, dtype=None):
    """
    Returns the cumulative product of elements along a given axis.

    Note:
        Numpy argument `out` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        axis (int, optional): Axis along which the cumulative product is computed.
            By default the input is flattened.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input can not be converted to tensor or `axis` is not integer.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3])
        >>> print(np.cumprod(x))
        [1 2 6]
    """
    a = _to_tensor_origin_dtype(a)
    original_dtype = F.dtype(a)
    if axis is not None and not isinstance(axis, int):
        _raise_type_error("integer axis is expected, but got", axis)
    if axis is None:
        # Default: flatten and accumulate over the single remaining axis.
        a = a.ravel()
        axis = 0
    _check_axis_in_range(axis, a.ndim)
    # Compute in float32 (unless already float64); presumably a backend dtype
    # restriction for the cumprod kernel — verify against _cumprod_default.
    a = a.astype('float32') if original_dtype != mstype.float64 else a
    if dtype is None:
        # Promote small integer/bool inputs to 32-bit outputs, mirroring numpy.
        if original_dtype in [mstype.int8, mstype.int16, mstype.bool_]:
            dtype = mstype.int32
        elif original_dtype in [mstype.uint8, mstype.uint16]:
            dtype = mstype.uint32
        else:
            dtype = original_dtype
    return _cumprod_default(a, axis).astype(dtype, copy=False)
  4391. def _process_index(index, dims, mode='raise'):
  4392. """Generates index (Tensor) according to different modes."""
  4393. if mode == "raise":
  4394. _raise_unimplemented_error("'raise' mode is not implemented")
  4395. if mode not in ['clip', 'wrap']:
  4396. _raise_value_error("invalid mode. Expected 'wrap' or 'clip'")
  4397. ori_shape = index.shape
  4398. tup = ()
  4399. for i, idx in enumerate(index):
  4400. d = dims[i]
  4401. if mode == "clip":
  4402. idx = clip(idx, 0, d - 1)
  4403. elif mode == "wrap":
  4404. idx = remainder(idx, d)
  4405. idx = F.expand_dims(idx, 0) if idx.ndim < 1 else idx
  4406. tup += (idx,)
  4407. return P.Concat(0)(tup).reshape(ori_shape)
  4408. def _get_strides(dims, order='C'):
  4409. """Generates strides (1-D tensor) according to `dims` (1-D tensor)."""
  4410. if order not in ['C', 'F']:
  4411. _raise_value_error("invalid order. Expected 'C' or 'F'")
  4412. tup = (_to_tensor([1]),)
  4413. dims = dims[1:][::-1] if order == 'C' else dims[:-1]
  4414. for d in dims:
  4415. tensor = tup[-1] * d
  4416. if tensor.ndim < 1:
  4417. tensor = F.expand_dims(tensor, 0)
  4418. tup += (tensor,)
  4419. tup = tup[::-1] if order == 'C' else tup
  4420. return P.Concat(0)(tup)
def ravel_multi_index(multi_index, dims, mode='clip', order='C'):
    """
    Converts a tuple of index arrays into an array of flat indices,
    applying boundary modes to the multi-index.

    Note:
        `raise` mode is not supported. Default mode is `clip`.
        The result is computed and returned in float32.

    Args:
        multi_index (tuple of array_like):
            A tuple of integer arrays, one array for each dimension.
        dims (Union[int, tuple of integers]): The shape of array into which the indices from multi_index apply.
        mode ({`wrap`, `clip`}): Specifies how out-of-bounds indices are handled. Default: `clip`.

            - `wrap`: wrap around
            - `clip`: clip to the range

            In `clip` mode, a negative index which would normally wrap will clip to 0 instead.
        order ({`C`, `F`}): Determines whether the multi-index should be viewed as indexing in
            row-major (C-style) or column-major (Fortran-style) order.

    Returns:
        Raveled_indices array. An array of indices into the flattened version of an array of dimensions dims.

    Raises:
        TypeError: If `multi_index` or `dims` can not be converted to tensor or
            `dims` is not a sequence of integer values.
        ValueError: If the length of `multi_index` and that of `dims` are not equal.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([[3, 6, 6], [4, 5, 1]])
        >>> output = np.ravel_multi_index(arr, (7, 6))
        >>> print(output)
        [22. 41. 37.]
        >>> output = np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))
        >>> print(output)
        1621.0
    """
    if isinstance(dims, int):
        dims = (dims,)
    dims = _to_tensor(dims)
    # `dims` must be a 1-D tensor of integers (float/bool dtypes rejected).
    if dims.ndim > 1 or dims.dtype in (mstype.float16, mstype.float32, mstype.float64, mstype.bool_):
        _raise_type_error("only 1-D integer arrays are accepted.")
    multi_index = _to_tensor(multi_index)
    # One index array is expected per dimension of the target shape.
    if len(multi_index) != len(dims):
        _raise_value_error("parameter multi_index must be a sequence of length ", len(dims))
    if multi_index.dtype in (mstype.float16, mstype.float32, mstype.float64):
        _raise_type_error("only int indices permitted")
    # Apply the boundary mode ('clip' or 'wrap') to every index.
    multi_index = _process_index(multi_index, dims, mode)
    # Strides of the flattened array for the requested memory order.
    strides = _get_strides(dims, order)
    # Reshape strides to (ndim, 1, ..., 1) so they broadcast against multi_index.
    s_shape = strides.shape + _list_comprehensions(multi_index.ndim - 1, 1, True)
    strides = _broadcast_to_shape(strides.reshape(s_shape), multi_index.shape)
    # Flat index = sum_i index_i * stride_i, accumulated in float32.
    return sum_((multi_index * strides).astype('float32'), axis=0)
  4470. def _vector_norm(x, _ord, axis, keepdims):
  4471. """Returns norm of a vector."""
  4472. if _in(_ord, ('fro', 'nuc')):
  4473. _raise_value_error('Frobenius norm and nuclear norm are only defined for vectors')
  4474. if _ord is None:
  4475. _ord = 2
  4476. if _ord == inf:
  4477. res = P.ReduceMax(keepdims)(absolute(x), axis)
  4478. elif _ord == -inf:
  4479. res = P.ReduceMin(keepdims)(absolute(x), axis)
  4480. elif _ord == 0:
  4481. res = P.ReduceSum(keepdims)(F.not_equal(x, 0).astype(mstype.float32), axis)
  4482. else:
  4483. res = power(P.ReduceSum(keepdims)(power(absolute(x), _ord), axis), 1./_ord)
  4484. return res
def _matrix_norm(x, _ord, axis, keepdims):
    """
    Returns the norm of a matrix; the two entries of `axis` select the matrix
    dimensions of `x`. Orders None/'fro'/1/-1/inf/-inf are supported; 'nuc'
    and +-2 raise an unimplemented error.
    """
    if _ord == 0:
        _raise_value_error('for 0 axis, norm is defined only for 2-D matrices')
    if _ord == 'nuc':
        _raise_unimplemented_error('nuclear norm is not implemented')
    if _in(_ord, (2, -2)):
        _raise_unimplemented_error('2-norm is not implemented for matrices')
    if _in(_ord, (None, 'fro')):
        # Frobenius norm: sqrt of the sum of squared entries over both axes.
        return F.sqrt(P.ReduceSum(keepdims)(F.square(x), axis))
    axis0, axis1 = axis
    if not keepdims:
        # The reductions below run one axis at a time. When the first
        # reduction drops its dimension, the axis reduced second must be
        # shifted down if it comes after the first in the original layout.
        if _check_is_inf(_abs(_ord)) and axis0 > axis1:
            # +-inf norms reduce axis1 first, so adjust axis0.
            axis0 -= 1
        elif _abs(_ord) == 1 and axis1 > axis0:
            # +-1 norms reduce axis0 first, so adjust axis1.
            axis1 -= 1
    if _check_is_inf(_ord):
        # inf-norm: max over axis0 of the absolute sums along axis1.
        return P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0)
    if _check_is_inf(_ord, True):
        # -inf-norm: min over axis0 of the absolute sums along axis1.
        return P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0)
    if _ord == 1:
        # 1-norm: max over axis1 of the absolute sums along axis0.
        return P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1)
    if _ord == -1:
        # -1-norm: min over axis1 of the absolute sums along axis0.
        return P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1)
    return _raise_value_error('invalid norm order for matrices')
  4510. def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-builtin
  4511. """
  4512. Matrix or vector norm.
  4513. This function is able to return one of eight different matrix norms, or one of an
  4514. infinite number of vector norms (described below), depending on the value of the
  4515. ord parameter.
  4516. Note:
  4517. Nuclear norm and 2-norm are not supported for matrices.
  4518. Args:
  4519. x (Union[int, float, bool, list, tuple, Tensor]): Input array. If `axis` is None,
  4520. `x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None,
  4521. the 2-norm of ``x.ravel`` will be returned.
  4522. ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm.
  4523. inf means numpy’s inf object. The default is None.
  4524. axis (Union[None, int, 2-tuple of integers], optional): If `axis` is an integer, it
  4525. specifies the axis of `x` along which to compute the vector norms. If `axis` is
  4526. a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of
  4527. these matrices are computed. If `axis` is None then either a vector norm (when x
  4528. is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default is None.
  4529. keepdims (boolean, optional): If this is set to True, the axes which are normed over
  4530. are left in the result as dimensions with size one. With this option the result
  4531. will broadcast correctly against the original `x`.
  4532. Returns:
  4533. Tensor, norm of the matrix or vector(s).
  4534. Raises:
  4535. ValueError: If the norm order is not defined.
  4536. Supported Platforms:
  4537. ``Ascend`` ``GPU`` ``CPU``
  4538. Examples:
  4539. >>> import mindspore.numpy as np
  4540. >>> print(np.norm(np.arange(9).astype(np.float32)))
  4541. 14.282857
  4542. """
  4543. if not isinstance(ord, (int, float)) and not _in(ord, (None, 'fro', 'nuc', inf, -inf)):
  4544. _raise_value_error('invalid value for `ord`')
  4545. x = _to_tensor(x)
  4546. ndim = F.rank(x)
  4547. if axis is None:
  4548. if ord is None:
  4549. x = x.ravel()
  4550. if F.rank(x) not in (1, 2):
  4551. _raise_value_error('for None axis, array must a vector or a 2-D matrix')
  4552. axis = F.make_range(F.rank(x))
  4553. axis = _check_axis_valid(axis, F.rank(x))
  4554. if len(axis) == 1:
  4555. res = _vector_norm(x, ord, axis, keepdims)
  4556. elif len(axis) == 2:
  4557. res = _matrix_norm(x, ord, axis, keepdims)
  4558. else:
  4559. return _raise_value_error('invalid number of dimensions to norm')
  4560. if keepdims and ndim > F.rank(res):
  4561. res = _expand(res, ndim)
  4562. return res
  4563. def bitwise_and(x1, x2, dtype=None):
  4564. """
  4565. Computes the bit-wise AND of two arrays element-wise.
  4566. Computes the bit-wise AND of the underlying binary representation of the integers in
  4567. the input arrays. This ufunc implements the C/Python operator &.
  4568. Note:
  4569. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  4570. not supported.
  4571. Args:
  4572. x1 (Tensor): Input array.
  4573. x2 (Tensor): Input array. Only integer and boolean types are handled. If
  4574. ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
  4575. the shape of the output).
  4576. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4577. output Tensor.
  4578. Returns:
  4579. Tensor or scalar, this is a scalar if both x1 and x2 are scalars.
  4580. Supported Platforms:
  4581. ``Ascend``
  4582. Examples:
  4583. >>> import mindspore.numpy as np
  4584. >>> print(np.bitwise_and(13, 17))
  4585. 1
  4586. """
  4587. return _apply_tensor_op(F.bitwise_and, x1, x2, dtype=dtype)
  4588. def bitwise_or(x1, x2, dtype=None):
  4589. r"""
  4590. Computes the bit-wise OR of two arrays element-wise.
  4591. Computes the bit-wise OR of the underlying binary representation of the integers in
  4592. the input arrays. This ufunc implements the C/Python operator \|.
  4593. Note:
  4594. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  4595. not supported.
  4596. Args:
  4597. x1 (Tensor): Input array.
  4598. x2 (Tensor): Input array. Only integer and boolean types are handled. If
  4599. ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
  4600. the shape of the output).
  4601. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4602. output Tensor.
  4603. Returns:
  4604. Tensor or scalar, this is a scalar if both x1 and x2 are scalars.
  4605. Supported Platforms:
  4606. ``Ascend``
  4607. Examples:
  4608. >>> import mindspore.numpy as np
  4609. >>> print(np.bitwise_or(13, 16))
  4610. 29
  4611. """
  4612. return _apply_tensor_op(F.bitwise_or, x1, x2, dtype=dtype)
  4613. def bitwise_xor(x1, x2, dtype=None):
  4614. """
  4615. Computes the bit-wise XOR of two arrays element-wise.
  4616. Computes the bit-wise XOR of the underlying binary representation of the integers in
  4617. the input arrays. This ufunc implements the C/Python operator ^.
  4618. Note:
  4619. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  4620. not supported.
  4621. Args:
  4622. x1 (Tensor): Input array.
  4623. x2 (Tensor): Input array. Only integer and boolean types are handled. If
  4624. ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
  4625. the shape of the output).
  4626. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4627. output Tensor.
  4628. Returns:
  4629. Tensor or scalar, this is a scalar if both x1 and x2 are scalars.
  4630. Supported Platforms:
  4631. ``Ascend``
  4632. Examples:
  4633. >>> import mindspore.numpy as np
  4634. >>> print(np.bitwise_xor(13, 17))
  4635. 28
  4636. """
  4637. return _apply_tensor_op(F.bitwise_xor, x1, x2, dtype=dtype)
  4638. def invert(x, dtype=None):
  4639. """
  4640. Computes bit-wise inversion, or bit-wise NOT, element-wise.
  4641. Computes the bit-wise NOT of the underlying binary representation of the integers in
  4642. the input arrays. This ufunc implements the C/Python operator ~.
  4643. For signed integer inputs, the two's complement is returned. In a two's-complement system
  4644. negative numbers are represented by the two's complement of the absolute value. This is
  4645. the most common method of representing signed integers on computers
  4646. `[1] <https://en.wikipedia.org/wiki/Two's_complement>`_. A N-bit two's-complement system
  4647. can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``.
  4648. Note:
  4649. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  4650. not supported.
  4651. Supported dtypes on Ascend: np.int16, np.uint16.
  4652. Args:
  4653. x (Tensor): Only integer and boolean types are handled.
  4654. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4655. output Tensor.
  4656. Returns:
  4657. Tensor or scalar.
  4658. Supported Platforms:
  4659. ``Ascend``
  4660. Examples:
  4661. >>> import mindspore.numpy as np
  4662. >>> print(np.invert(np.array(13, dtype=np.uint16)))
  4663. 65522
  4664. """
  4665. return _apply_tensor_op(F.invert, x, dtype=dtype)
  4666. def rint(x, dtype=None):
  4667. """
  4668. Rounds elements of the array to the nearest integer.
  4669. Note:
  4670. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
  4671. not supported.
  4672. Ascend does not support dtype `float64` currently.
  4673. Args:
  4674. x (Union[float, list, tuple, Tensor]): Input tensor.
  4675. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4676. output Tensor.
  4677. Returns:
  4678. Output tensor is same shape and type as x. This is a scalar if x is a scalar.
  4679. Raises:
  4680. TypeError: If `x` can not be converted to tensor.
  4681. Supported Platforms:
  4682. ``Ascend`` ``GPU`` ``CPU``
  4683. Examples:
  4684. >>> import mindspore.numpy as np
  4685. >>> x = np.array([-1.7, -1.5, 0.2, 1.5, 1.7, 2.0])
  4686. >>> print(np.rint(x))
  4687. [-2. -2. 0. 2. 2. 2.]
  4688. """
  4689. x = _to_tensor_origin_dtype(x)
  4690. res = _rint(x)
  4691. if dtype is not None and not _check_same_type(F.dtype(res), dtype):
  4692. res = F.cast(res, dtype)
  4693. return res
  4694. def correlate(a, v, mode='valid'):
  4695. """
  4696. Cross-correlation of two 1-dimensional sequences.
  4697. This function computes the correlation as generally defined in signal processing texts:
  4698. :math:`c_{av}[k] = sum_n a[n+k] * conj(v[n])`
  4699. with `a` and `v` sequences being zero-padded where necessary and conj being the conjugate.
  4700. Note:
  4701. Currently, complex numbers are not supported.
  4702. Args:
  4703. a (Union[list, tuple, Tensor]): First input sequence.
  4704. v (Union[list, tuple, Tensor]): Second input sequence.
  4705. mode (str, optional): By default, mode is `\'valid\'`.
  4706. If `mode` is `\'valid\'`, it returns output of length :math:`max(M, N) - min(M, N) + 1`.
  4707. The convolution product is only given for points where the signals overlap
  4708. completely. Values outside the signal boundary have no effect.
  4709. If `mode` is `\'full\'`, it returns the convolution at each point of overlap, with
  4710. an output shape of :math:`(N + M - 1,)`.
  4711. At the end-points of the convolution, the signals do not overlap completely,
  4712. and boundary effects may be seen.
  4713. If `mode` is `\'same\'`, it returns output of length :math:`max(M, N)`. Boundary
  4714. effects are still visible.
  4715. Returns:
  4716. Tensor. Discrete cross-correlation of `a` and `v`.
  4717. Raises:
  4718. TypeError: if the inputs can not be converted to tensor.
  4719. ValueError: if `a` and `v` are empty or have wrong dimensions
  4720. Supported Platforms:
  4721. ``GPU``
  4722. Examples:
  4723. >>> import mindspore.numpy as np
  4724. >>> output = np.correlate([1, 2, 3], [0, 1, 0.5])
  4725. >>> print(output)
  4726. [3.5]
  4727. >>> output = np.correlate([1, 2, 3], [0, 1, 0.5], mode="same")
  4728. >>> print(output)
  4729. [2. 3.5 3. ]
  4730. >>> output = np.correlate([1, 2, 3, 4, 5], [1, 2], mode="same")
  4731. >>> print(output)
  4732. [ 2. 5. 8. 11. 14.]
  4733. """
  4734. a, v = _to_tensor(a, v)
  4735. if a.ndim != 1 or v.ndim != 1:
  4736. _raise_value_error("only support 1-dimensional inputs.")
  4737. if a.size == 0 or v.size == 0:
  4738. _raise_value_error("Inputs cannot be empty.")
  4739. promote_dtype = _promote(a.dtype, v.dtype)
  4740. # P.Conv2D requires that the two tensors have the same data type.
  4741. # If the promote data type is not supported, it will be converted to float32.
  4742. # The supported dtype list may vary in the future.
  4743. if promote_dtype not in [mstype.float32, mstype.float16]:
  4744. promote_dtype = mstype.float32
  4745. a = a.astype(promote_dtype)
  4746. v = v.astype(promote_dtype)
  4747. if a.size < v.size:
  4748. a, v = v, a
  4749. return _compute_1d_conv(a, v, mode)[::-1]
  4750. return _compute_1d_conv(a, v, mode)
  4751. def _compute_1d_conv(a, v, mode):
  4752. """Returns a 1-D sequence which is the cross-correlate of two 1-D sequences (`a` and `v`)."""
  4753. v_size = F.shape_mul(v.shape)
  4754. if mode not in ('same', 'full', 'valid'):
  4755. _raise_value_error("mode must be one of ['full', 'same', 'valid']")
  4756. if v_size > 1:
  4757. if mode == 'same':
  4758. pad_left = _to_tensor(_list_comprehensions(v_size // 2, 0.0, True))
  4759. pad_right = _to_tensor(_list_comprehensions(v_size - v_size // 2 - 1, 0.0, True))
  4760. a = P.Concat(0)((pad_left, a, pad_right))
  4761. elif mode == 'full':
  4762. pad = _to_tensor(_list_comprehensions(v_size - 1, 0.0, True))
  4763. a = P.Concat(0)((pad, a, pad))
  4764. a = a.reshape(1, 1, 1, a.size)
  4765. v = v.reshape(1, 1, 1, v.size)
  4766. _conv = P.Conv2D(1, (1, v.size))
  4767. return _conv(a, v).reshape(-1)
  4768. def radians(x, dtype=None):
  4769. """
  4770. Converts angles from degrees to radians.
  4771. Args:
  4772. x (Tensor): Angles in degrees.
  4773. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  4774. output Tensor.
  4775. Returns:
  4776. Tensor, the corresponding radian values. This is a tensor scalar if `x`
  4777. is a tensor scalar.
  4778. Raises:
  4779. TypeError: if `x` is not a tensor.
  4780. Supported Platforms:
  4781. ``Ascend`` ``GPU`` ``CPU``
  4782. Examples:
  4783. >>> import mindspore.numpy as np
  4784. >>> x = np.asarray([1, 2, 3, -4, -5])
  4785. >>> output = np.radians(x)
  4786. >>> print(output)
  4787. [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647]
  4788. """
  4789. return deg2rad(x, dtype=dtype)