
tensor.py 93 kB

optimize the comment and log description

modified: ops/operations/_inner_ops.py
modified: ops/operations/_quant_ops.py
modified: ops/operations/array_ops.py
modified: ops/operations/comm_ops.py
modified: ops/operations/math_ops.py
modified: ops/operations/quantum_ops.py
modified: ops/operations/rl_ops.py
modified: ops/operations/sponge_ops.py
modified: ops/operations/sponge_update_ops.py
modified: train/__init__.py
modified: common/tensor.py
modified: train/serialization.py
modified: ccsrc/pipeline/jit/parse/parse.h
modified: explainer/benchmark/_attribution/metric.py
modified: ops/composite/multitype_ops/_constexpr_utils.py
modified: ops/operations/comm_ops.py
modified: RELEASE.md
modified: mindspore/_extends/parse/standard_method.py
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc
modified: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc
modified: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc
modified: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc
modified: mindspore/ccsrc/frontend/parallel/strategy.h
modified: mindspore/common/tensor.py
modified: mindspore/core/abstract/prim_arrays.cc
modified: mindspore/core/abstract/prim_nn.cc
modified: mindspore/core/ops/conv2d.cc
modified: mindspore/core/ops/logical_and.h
modified: mindspore/core/ops/logical_not.h
modified: mindspore/core/ops/logical_or.h
modified: mindspore/core/ops/reduce_all.h
modified: mindspore/core/ops/reduce_any.h
modified: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
modified: mindspore/nn/layer/quant.py
modified: mindspore/nn/optim/sgd.py
modified: mindspore/nn/sparse/sparse.py
modified: mindspore/numpy/array_creations.py
modified: mindspore/numpy/array_ops.py
modified: mindspore/numpy/logic_ops.py
modified: mindspore/numpy/math_ops.py
modified: mindspore/ops/operations/_inner_ops.py
modified: mindspore/ops/operations/array_ops.py
modified: mindspore/ops/operations/rl_ops.py
modified: mindspore/train/_utils.py
modified: tests/ut/python/model/test_lenet_core_after_exception.py
modified: mindspore/_extends/parse/standard_method.py
modified: mindspore/ops/operations/rl_ops.py
modified: mindspore/core/abstract/prim_nn.cc
modified: mindspore/core/ops/conv2d.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc
modified: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h
modified: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h
modified: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h
modified: mindspore/ccsrc/fl/server/server.cc
modified: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc
modified: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h
modified: mindspore/ccsrc/frontend/optimizer/irpass/inline.h
modified: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc
modified: mindspore/ccsrc/minddata/dataset/core/tensor.cc
modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc
modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc
modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc
modified: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc
modified: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc
modified: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc
modified: mindspore/ccsrc/pipeline/jit/action.cc
modified: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc
modified: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc
modified: mindspore/compression/quant/quant_utils.py
modified: mindspore/core/abstract/prim_nn.cc
modified: mindspore/dataset/engine/validators.py
modified: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc
modified: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc
modified: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc
modified: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
modified: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc
modified: mindspore/lite/tools/common/graph_util.h
modified: mindspore/lite/tools/optimizer/fisson/fisson_util.cc
modified: mindspore/ops/composite/math_ops.py
modified: mindspore/ops/operations/_inner_ops.py
modified: mindspore/ops/operations/array_ops.py
modified: mindspore/ops/operations/math_ops.py
modified: mindspore/ops/operations/other_ops.py
modified: mindspore/boost/boost_cell_wrapper.py
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc
modified: mindspore/ccsrc/common/trans.cc
modified: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc
modified: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc
modified: mindspore/lite/src/common/log_util.h
modified: mindspore/nn/wrap/loss_scale.py
modified: mindspore/parallel/nn/moe.py
modified: tests/mindspore_test_framework/mindspore_test.py
modified: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc
modified: mindspore/lite/tools/common/graph_util.h
modified: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc
modified: mindspore/core/ops/conv2d.cc
modified: tests/ut/python/model/test_lenet_core_after_exception.py
mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensor implementation."""
import numbers
import numpy as np

from mindspore import log as logger
from mindspore.communication.management import get_rank, get_group_size
from . import dtype as mstype
from ._register_for_tensor import tensor_operator_registry
from .._c_expression import Tensor as Tensor_
from .._c_expression import CSRTensor as CSRTensor_
from .._c_expression import PynativeExecutor_
from .._checkparam import Validator as validator

__all__ = ['Tensor', 'RowTensor', 'SparseTensor', 'CSRTensor']

np_types = (np.int8, np.int16, np.int32, np.int64,
            np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
            np.float32, np.float64, np.bool_, np.complex64, np.complex128)
class Tensor(Tensor_):
    """
    Tensor is used for data storage.

    Tensor inherits the tensor object in C++.
    Some functions are implemented in C++ and some functions are implemented in Python.

    Args:
        input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): Input data of the tensor.
        dtype (:class:`mindspore.dtype`): Input data should be None, bool or numeric type defined in
            `mindspore.dtype`. The argument is used to define the data type of the output tensor. If it is None,
            the data type of the output tensor will be the same as the `input_data`. Default: None.
        shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of
            output. If `input_data` is available, `shape` doesn't need to be set. Default: None.
        init (Initializer): The information of init data.
            'init' is used for delayed initialization in parallel mode. Usually, it is not recommended to use
            'init' interface to initialize parameters in other conditions. If 'init' interface is used to initialize
            parameters, the `Tensor.init_data` API needs to be called to convert `Tensor` to the actual data.

    Outputs:
        Tensor. If `dtype` and `shape` are not set, return a tensor with the same dtype and shape as `input_data`.
        If `dtype` or `shape` is set, the dtype or shape of the output Tensor is consistent with the setting.

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore import Tensor
        >>> from mindspore.common.initializer import One
        >>> # initialize a tensor with numpy.ndarray
        >>> t1 = Tensor(np.zeros([1, 2, 3]), ms.float32)
        >>> print(t1)
        [[[0. 0. 0.]
          [0. 0. 0.]]]
        >>> print(type(t1))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t1.shape)
        (1, 2, 3)
        >>> print(t1.dtype)
        Float32
        >>>
        >>> # initialize a tensor with a float scalar
        >>> t2 = Tensor(0.1)
        >>> print(t2)
        0.1
        >>> print(type(t2))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t2.shape)
        ()
        >>> print(t2.dtype)
        Float32
        >>>
        >>> # initialize a tensor with a tuple
        >>> t3 = Tensor((1, 2))
        >>> print(t3)
        [1 2]
        >>> print(type(t3))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t3.shape)
        (2,)
        >>> print(t3.dtype)
        Int64
        >>>
        >>> # initialize a tensor with init
        >>> t4 = Tensor(shape=(1, 3), dtype=ms.float32, init=One())
        >>> print(t4)
        [[1. 1. 1.]]
        >>> print(type(t4))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t4.shape)
        (1, 3)
        >>> print(t4.dtype)
        Float32
    """
    def __init__(self, input_data=None, dtype=None, shape=None, init=None):
        self.init_finished = False
        # If input data is a numpy number, convert it to a numpy array
        if isinstance(input_data, np_types):
            input_data = np.array(input_data)

        if isinstance(shape, numbers.Number):
            shape = (shape,)

        _check_tensor_input(input_data, dtype, shape, init)

        # Tuple/list/numpy.ndarray input_data is covered by the type check below.
        if init is None:
            validator.check_value_type('input_data', input_data,
                                       (Tensor_, np.ndarray, list, tuple, float, int, bool, complex), 'Tensor')
            valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                            np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
            if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
                    input_data.dtype.kind != 'U':  # Support dtype np.str_
                raise TypeError(f"For Tensor, the input_data is a numpy array, "
                                f"but its data type: {input_data.dtype} is not in the supported list: "
                                f"{list(i.__name__ for i in valid_dtypes)}.")
            if isinstance(input_data, (tuple, list)):
                if np.array(input_data).dtype not in valid_dtypes:
                    raise TypeError(f"For Tensor, the input_data is {input_data} that contains an "
                                    f"unsupported element.")
            if dtype is not None:
                validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")
            else:
                dtype = self._set_default_dtype(input_data, dtype)

            if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
                input_data = np.ascontiguousarray(input_data)
            if dtype is not None:
                Tensor_.__init__(self, input_data, dtype)
            else:
                Tensor_.__init__(self, input_data)
        else:
            Tensor_.__init__(self, dtype, shape)
        self.virtual_flag = False
        self.init = init
        self.init_finished = True
        # If the current Tensor is an indexed view of another Tensor,
        # parent_tensor_ is set to that Tensor and
        # index_of_parent_ is set to the index used.
        self.parent_tensor_ = None
        self.index_of_parent_ = None
    @staticmethod
    def _set_default_dtype(input_data, dtype):
        if isinstance(input_data, (float, list, tuple)):
            if np.array(input_data).dtype == np.float64:
                return mstype.float32
        return dtype

    def __deepcopy__(self, memodict):
        new_obj = Tensor(self)
        new_obj.init = self.init
        new_obj.virtual_flag = self.virtual_flag
        return new_obj

    def __repr__(self):
        if self.init_finished:
            Tensor_.data_sync(self, False)
            return Tensor_.__repr__(self)
        return ''
    def __eq__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return False
        # bool type is not supported for `Equal` operator in backend.
        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
            if isinstance(other, Tensor):
                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
            return Tensor(np.array(self.asnumpy() == other))
        return tensor_operator_registry.get('__eq__')(self, other)

    def __ne__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return True
        # bool type is not supported for `NotEqual` operator in backend.
        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
            # Mirror __eq__ here: `other` may be a plain scalar, which has no asnumpy().
            if isinstance(other, Tensor):
                return Tensor(np.array(self.asnumpy() != other.asnumpy()))
            return Tensor(np.array(self.asnumpy() != other))
        return tensor_operator_registry.get('__ne__')(self, other)
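    # A minimal sketch of the bool-dtype fallback above (values follow numpy's
    # comparison semantics via asnumpy(); `a` and `b` are hypothetical tensors):
    #
    #     a = Tensor(np.array([True, False]))
    #     b = Tensor(np.array([True, True]))
    #     a == b   # expected: [ True False], computed through numpy rather than
    #              # the backend `Equal` kernel, because both operands are bool.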
    def __hash__(self):
        return hash(id(self))

    def __neg__(self):
        out = tensor_operator_registry.get('__neg__')(self)
        return out

    def __invert__(self):
        out = tensor_operator_registry.get('__logical_not__')(self)
        return out

    def __bool__(self):
        data = self.asnumpy()
        if data.shape == ():
            return bool(data)
        if data.shape == (1,):
            return bool(data[0])
        raise ValueError("The truth value of an array with several elements is ambiguous.")

    def __index__(self):
        data = self.asnumpy()
        if not (data.dtype == "int8"
                or data.dtype == "int16"
                or data.dtype == "int32"
                or data.dtype == "int64"
                or data.dtype == "bool"):
            raise ValueError("Only integer tensors of a single element can be converted to an index.")
        if data.shape == ():
            return int(data)
        if data.shape == (1,):
            return int(data[0])
        raise ValueError("Only integer tensors of a single element can be converted to an index.")

    def __pos__(self):
        return self
    def __add__(self, other):
        return tensor_operator_registry.get('__add__')(self, other)

    def __radd__(self, other):
        return self.__add__(other)

    def __iadd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return tensor_operator_registry.get('__sub__')(self, other)

    def __rsub__(self, other):
        return tensor_operator_registry.get('__sub__')(other, self)

    def __isub__(self, other):
        return self.__sub__(other)

    def __mul__(self, other):
        return tensor_operator_registry.get('__mul__')(self, other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __imul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(self, other)

    def __rtruediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(other, self)

    def __mod__(self, other):
        return tensor_operator_registry.get('__mod__')(self, other)

    def __rmod__(self, other):
        return tensor_operator_registry.get('__mod__')(other, self)

    def __imod__(self, other):
        return self.__mod__(other)

    def __pow__(self, other):
        return tensor_operator_registry.get('__pow__')(self, other)

    def __floordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(self, other)

    def __rfloordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(other, self)

    def __ifloordiv__(self, other):
        return self.__floordiv__(other)

    def __lt__(self, other):
        out = tensor_operator_registry.get('__lt__')(self, other)
        return out

    def __le__(self, other):
        out = tensor_operator_registry.get('__le__')(self, other)
        return out
    def __getitem__(self, index):
        out = tensor_operator_registry.get('__getitem__')(self, index)
        if out is not self:
            out.parent_tensor_ = self
            out.index_of_parent_ = index
        return out

    def __setitem__(self, index, value):
        out = tensor_operator_registry.get('__setitem__')(self, index, value)
        self.assign_value(out)
        if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
            self.parent_tensor_.__setitem__(self.index_of_parent_, self)
        return self
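    # How the parent_tensor_ bookkeeping above plays out: writing through an
    # indexed result propagates the update back into the tensor it came from.
    # A hedged sketch (hypothetical values, assuming registry-backed indexing):
    #
    #     t = Tensor(np.zeros((2, 2), np.float32))
    #     row = t[0]          # row.parent_tensor_ is t, row.index_of_parent_ is 0
    #     row[1] = 5.0        # updates row, then re-assigns row into t[0]
    #     # t is now [[0. 5.] [0. 0.]]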
    def __gt__(self, other):
        out = tensor_operator_registry.get('__gt__')(self, other)
        return out

    def __ge__(self, other):
        out = tensor_operator_registry.get('__ge__')(self, other)
        return out

    def __len__(self):
        out = tensor_operator_registry.get('shape')(self)
        if out:
            return out[0]
        raise TypeError("len() is not supported for a 0-D tensor.")

    def __str__(self):
        if self.dtype == mstype.type_none:
            return "Unknown Tensor type!"
        return str(self.asnumpy())
    @property
    def shape(self):
        """Return the shape of the tensor as a tuple."""
        return self._shape

    @property
    def dtype(self):
        """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
        return self._dtype

    @property
    def size(self):
        """Return the total number of elements in the tensor."""
        return self._size

    @property
    def ndim(self):
        """Return the number of tensor dimensions."""
        return len(self._shape)

    @property
    def has_init(self):
        """Whether the tensor was created with delayed initialization (`init`)."""
        return self.init is not None

    @property
    def itemsize(self):
        """Return the length of one tensor element in bytes."""
        return self._itemsize

    @property
    def strides(self):
        """Return the tuple of bytes to step in each dimension when traversing a tensor."""
        return self._strides

    @property
    def nbytes(self):
        """Return the total number of bytes taken by the tensor."""
        return self._nbytes

    @property
    def T(self):
        """Return the transposed tensor."""
        return self.transpose()
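    # A quick doctest-style tour of the read-only properties above (byte counts
    # follow numpy conventions; float32 is assumed to be 4 bytes per element):
    #
    #     x = Tensor(np.ones((2, 3), np.float32))
    #     x.shape      # (2, 3)
    #     x.ndim       # 2
    #     x.size       # 6
    #     x.itemsize   # 4
    #     x.nbytes     # 24
    #     x.T.shape    # (3, 2)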
    @staticmethod
    def from_numpy(array):
        """
        Convert a numpy array to a Tensor without copying its data.

        Args:
            array (numpy.ndarray): The input array.

        Returns:
            Tensor, has the same data type as the input array.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = np.array([1, 2])
            >>> output = Tensor.from_numpy(x)
            >>> print(output)
            [1 2]
        """
        return Tensor(Tensor_.from_numpy(array))
    def assign_value(self, value):
        PynativeExecutor_.get_instance().execute_all_task()
        self.assign_value_cpp(value)
        return self
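    # assign_value is the in-place write primitive used by __setitem__ above: it
    # first drains any pending pynative tasks, then copies `value` into this
    # tensor's storage through the C++ binding. A hedged usage sketch:
    #
    #     a = Tensor(np.zeros(3, np.float32))
    #     a.assign_value(Tensor(np.ones(3, np.float32)))   # a is now [1. 1. 1.]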
    def item(self, index=None):
        """
        Get the item at the specified index of the tensor.

        Note:
            Tensor.item returns a Tensor scalar instead of a Python scalar.

        Args:
            index (Union[None, int, tuple(int)]): The index in Tensor. Default: None.

        Returns:
            A Tensor scalar, dtype is the same as the original Tensor.

        Raises:
            ValueError: If the length of the `index` is not equal to self.ndim.

        Supported Platforms:
            ``Ascend`` ``GPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> x = x.item((0,1))
            >>> print(x)
            2.0
        """
        output = tensor_operator_registry.get('item')(self, index)
        return output
    def itemset(self, *args):
        r"""
        Insert a scalar into a tensor (the scalar is cast to the tensor's dtype, if possible).

        There must be at least one argument, and the last argument is interpreted as the item.
        Then, tensor.itemset(\*args) is equivalent to :math:`tensor[args] = item`.

        Args:
            args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
                specify the index and value. If `args` contains one argument (a scalar),
                it is only used in the case where the tensor is of size 1. If `args` contains
                two arguments, the last argument is the value to be set and must be a
                scalar, and the first argument specifies a single tensor element location.
                It is either an int or a tuple.

        Returns:
            A new tensor that doesn't affect the original tensor, with value set by :math:`tensor[args] = item`.

        Raises:
            ValueError: If the length of the first argument is not equal to self.ndim.
            IndexError: If only one argument is provided, and the original Tensor is not scalar.

        Supported Platforms:
            ``Ascend`` ``GPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> print(x.itemset((0,1), 4))
            [[1. 4. 3.]
             [4. 5. 6.]]
            >>> print(x)
            [[1. 2. 3.]
             [4. 5. 6.]]
        """
        output = tensor_operator_registry.get('itemset')(self, *args)
        return output
    def asnumpy(self):
        """
        Convert the tensor to a numpy array. Returns self tensor as a NumPy ndarray. This tensor and the
        returned ndarray share the same underlying storage. Changes to self tensor will be reflected in the ndarray.

        Returns:
            A numpy ndarray which shares the same underlying storage with the tensor.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
            >>> y = x.asnumpy()
            >>> y[0] = 11
            >>> print(x)
            [11. 2.]
            >>> print(y)
            [11. 2.]
        """
        self._init_check()
        PynativeExecutor_.get_instance().execute_all_task()
        return Tensor_.asnumpy(self)

    def flush_from_cache(self):
        """
        Flush cache data to host if the tensor is cache-enabled.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
            >>> y = x.flush_from_cache()
            >>> print(y)
            None
        """
        self._init_check()
        Tensor_._flush_from_cache(self)
    def all(self, axis=(), keep_dims=False):
        """
        Check all tensor elements along a given axis evaluate to True.

        Args:
            axis (Union[None, int, tuple(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, if all tensor elements along the given axis evaluate to True, its value is True,
            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.any`: Check any tensor element along a given axis evaluate to True.

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([True, True, False])
            >>> output = a.all()
            >>> print(output)
            False
        """
        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('all')(keep_dims)(self, axis)

    def any(self, axis=(), keep_dims=False):
        """
        Check any tensor element along a given axis evaluate to True.

        Args:
            axis (Union[None, int, tuple(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, if any tensor element along the given axis evaluates to True, its value is True,
            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.all`: Check all tensor elements along a given axis evaluate to True.

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([True, True, False])
            >>> output = a.any()
            >>> print(output)
            True
        """
        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('any')(keep_dims)(self, axis)
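    # Axis-wise reductions, for contrast with the whole-tensor examples above
    # (expected values follow numpy's all/any semantics):
    #
    #     m = Tensor(np.array([[True, False], [True, True]]))
    #     m.all(axis=0)   # [ True False]  -- column-wise AND
    #     m.any(axis=1)   # [ True  True]  -- row-wise OR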
    def view(self, *shape):
        """
        Reshape the tensor according to the input shape.

        Args:
            shape (Union[tuple(int), int]): Dimension of the output tensor.

        Returns:
            Tensor, has the same dimension as the input shape.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> a = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
            >>> output = a.view((3, 2))
            >>> print(output)
            [[1. 2.]
             [3. 2.]
             [3. 4.]]
        """
        self._init_check()
        if not shape:
            raise ValueError("The shape variable should not be empty")
        if isinstance(shape[0], tuple):
            if len(shape) != 1:
                raise ValueError(f"Only one tuple is needed, but got {shape}")
            shape = shape[0]
        return tensor_operator_registry.get('reshape')()(self, shape)
    def expand_as(self, x):
        """
        Expand the dimension of the target tensor to the dimension of the input tensor.

        Args:
            x (Tensor): The input tensor. The shape of the input tensor must obey
                the broadcasting rule.

        Returns:
            Tensor, has the same dimension as the input tensor.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> from mindspore import dtype as mstype
            >>> x = Tensor([1, 2, 3], dtype=mstype.float32)
            >>> y = Tensor(np.ones((2, 3)), dtype=mstype.float32)
            >>> output = x.expand_as(y)
            >>> print(output)
            [[1. 2. 3.]
             [1. 2. 3.]]
        """
        self._init_check()
        return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
    def abs(self):
        """
        Return the absolute value element-wise.

        Returns:
            Tensor, with absolute values computed element-wise.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([1.1, -2.1]).astype("float32")
            >>> output = a.abs()
            >>> print(output)
            [1.1 2.1]
        """
        self._init_check()
        return tensor_operator_registry.get('abs')()(self)
    def mean(self, axis=(), keep_dims=False):
        """
        Reduce a dimension of a tensor by averaging all elements in the dimension.

        Args:
            axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, has the same data type as input tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.
            :func:`mindspore.Tensor.var`: Compute the variance along the specified axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1, 2, 3], dtype=np.float32))
            >>> output = input_x.mean()
            >>> print(output)
            2.0
        """
        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('mean')(keep_dims)(self, axis)
    def transpose(self, *axes):
        r"""
        Return a tensor with axes transposed.

        - For a 1-D tensor, this has no effect, as a transposed vector is simply the same vector.
        - For a 2-D tensor, this is a standard matrix transpose.
        - For an n-D tensor, if axes are given, their order indicates how the axes are permuted.
          If axes are not provided and ``tensor.shape = (i[0], i[1], ... i[n-2], i[n-1])``,
          then ``tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.

        Args:
            axes (Union[None, tuple(int), list(int), int], optional): If axes is None or
                blank, the method will reverse the order of the axes. If axes is tuple(int)
                or list(int), tensor.transpose() will transpose the tensor to the new axes order.
                If axes is int, this form is simply intended as a convenience alternative to the
                tuple/list form.

        Returns:
            Tensor, has the same dimension as the input tensor, with axes suitably permuted.

        Raises:
            TypeError: If input arguments have types not specified above.
            ValueError: If the number of `axes` is not equal to Tensor's ndim.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
            >>> x = x.transpose()
            >>> print(x.shape)
            (3, 2, 1)
        """
        self._init_check()
        perm = validator.check_transpose_axis(axes, self.ndim)
        return tensor_operator_registry.get('transpose')()(self, perm)
    def reshape(self, *shape):
        """
        Give a new shape to a tensor without changing its data.

        Args:
            shape (Union[int, tuple(int), list(int)]): The new shape should be compatible
                with the original shape. If an integer, then the result will be a 1-D
                tensor of that length. One shape dimension can be -1. In this case, the
                value is inferred from the length of the tensor and remaining dimensions.

        Returns:
            Tensor, with new specified shape.

        Raises:
            TypeError: If new shape is not integer, list or tuple.
            ValueError: If new shape is not compatible with the original shape.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> from mindspore import dtype as mstype
            >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
            >>> output = x.reshape((3, 2))
            >>> print(output)
            [[-0.1  0.3]
             [ 3.6  0.4]
             [ 0.5 -3.2]]
        """
        self._init_check()
        new_shape = validator.check_reshape_shp(shape)
        return tensor_operator_registry.get('reshape')()(self, new_shape)
    def ravel(self):
        """
        Return a contiguous flattened tensor.

        Returns:
            Tensor, a 1-D tensor, containing the same elements of the input.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
            :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.ravel()
            >>> print(output.shape)
            (24,)
        """
        self._init_check()
        reshape_op = tensor_operator_registry.get('reshape')()
        return reshape_op(self, (-1,))
    def flatten(self, order='C'):
        r"""
        Return a copy of the tensor collapsed into one dimension.

        Args:
            order (str, optional): Can choose between 'C' and 'F'. 'C' means to
                flatten in row-major (C-style) order. 'F' means to flatten in column-major
                (Fortran-style) order. Default: 'C'.

        Returns:
            Tensor, has the same data type as input.

        Raises:
            TypeError: If `order` is not string type.
            ValueError: If `order` is string type, but not 'C' or 'F'.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
            :func:`mindspore.Tensor.ravel`: Return a contiguous flattened tensor.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.flatten()
            >>> print(output.shape)
            (24,)
        """
        self._init_check()
        reshape_op = tensor_operator_registry.get('reshape')()
        trans_op = tensor_operator_registry.get('transpose')()

        order = validator.check_flatten_order(order)
        if order == 'C':
            return reshape_op(self, (-1,))

        perm = tuple(range(self.ndim - 1, -1, -1))
        return reshape_op(trans_op(self, perm), (-1,))
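    # Worked example of the 'F' (column-major) branch above: Fortran-order
    # flattening is implemented as transpose-then-reshape, so
    #
    #     x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], np.float32))
    #     x.flatten()      # [1. 2. 3. 4. 5. 6.]  (row-major)
    #     x.flatten('F')   # [1. 4. 2. 5. 3. 6.]  (column-major)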
    def swapaxes(self, axis1, axis2):
        """
        Interchange two axes of a tensor.

        Args:
            axis1 (int): First axis.
            axis2 (int): Second axis.

        Returns:
            Transposed tensor, has the same data type as the input.

        Raises:
            TypeError: If `axis1` or `axis2` is not integer.
            ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.swapaxes(0, 2)
            >>> print(output.shape)
            (4, 3, 2)
        """
        self._init_check()
        axis1, axis2 = validator.check_swapaxes_axis((axis1, axis2), self.ndim)

        if axis1 == axis2:
            return self
        if axis1 > axis2:
            axis1, axis2 = axis2, axis1

        perm = tuple(range(0, self.ndim))
        if axis2 + 1 < self.ndim:
            new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
                perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
        else:
            new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
                perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]

        return tensor_operator_registry.get('transpose')()(self, new_perm)
    def squeeze(self, axis=None):
        """
        Remove dimensions of size 1 from the Tensor.

        Args:
            axis (Union[None, int, list(int), tuple(int)], optional): Selects a subset of the entries of
                length one in the shape. If an axis is selected with a shape entry greater than one,
                an error is raised. Default is None.

        Returns:
            Tensor, with all or a subset of the dimensions of length 1 removed.

        Raises:
            TypeError: If input arguments have types not specified above.
            ValueError: If the shape entry of a selected axis is greater than one.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.expand_as`: Expand the dimension of target tensor to the dimension of input tensor.
            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,2), dtype=np.float32))
            >>> print(x)
            [[[1. 1.]
              [1. 1.]]]
            >>> print(x.shape)
            (1, 2, 2)
            >>> y = x.squeeze()
            >>> print(y)
            [[1. 1.]
             [1. 1.]]
            >>> print(y.shape)
            (2, 2)
            >>> y = x.squeeze(axis=0)
            >>> print(y)
            [[1. 1.]
             [1. 1.]]
            >>> print(y.shape)
            (2, 2)
        """
        self._init_check()
        if axis is None:
            return tensor_operator_registry.get('squeeze')(self)
        new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
        return tensor_operator_registry.get('reshape')()(self, new_shape)
    def astype(self, dtype, copy=True):
        """
        Return a copy of the tensor, cast to a specified type.

        Args:
            dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
                of :class:`mindspore.dtype.float32` or `float32`.
            copy (bool, optional): By default, astype always returns a newly allocated
                tensor. If this is set to false, the input tensor is returned instead
                of a copy. Default: True.

        Returns:
            Tensor, with the designated dtype.

        Raises:
            TypeError: If the specified dtype cannot be understood.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
            >>> x = x.astype("int32")
            >>> print(x.dtype)
            Int32
        """
        self._init_check()
        dtype = validator.check_astype_dtype(dtype)
        if not copy and dtype == self.dtype:
            return self
        return tensor_operator_registry.get('cast')(self, dtype)
    def argmax(self, axis=None):
        """
        Return the indices of the maximum values along an axis.

        Args:
            axis (int, optional): By default, the index is into
                the flattened tensor, otherwise along the specified axis. Default: None.

        Returns:
            Tensor, indices into the input tensor. It has the same
            shape as self.shape with the dimension along axis removed.

        Raises:
            ValueError: If the axis is out of range.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
            :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
            :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
            >>> print(a.argmax())
            5
        """
        # P.Argmax only supports float
        a = self.astype(mstype.float32)
        if axis is None:
            a = a.ravel()
            axis = 0
        else:
            axis = validator.check_axis_in_range(axis, a.ndim)
        return tensor_operator_registry.get('argmax')(axis)(a)
    def argmin(self, axis=None):
        """
        Return the indices of the minimum values along an axis.

        Args:
            axis (int, optional): By default, the index is into
                the flattened tensor, otherwise along the specified axis. Default: None.

        Returns:
            Tensor, indices into the input tensor. It has the same
            shape as self.shape with the dimension along axis removed.

        Raises:
            ValueError: If the axis is out of range.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
            :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.
            :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
            >>> print(a.argmin())
            0
        """
        # P.Argmin only supports float
        a = self.astype(mstype.float32)
        if axis is None:
            a = a.ravel()
            axis = 0
        else:
            axis = validator.check_axis_in_range(axis, a.ndim)
        # P.Argmin is currently not supported; reuse P.Argmax on the negated
        # tensor, since argmin(a) == argmax(-a).
        return tensor_operator_registry.get('argmax')(axis)(tensor_operator_registry.get('__neg__')(a))
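    # Why the negation trick above is sound: for a = [3, 1, 2],
    # -a = [-3, -1, -2] and argmax(-a) = 1, which is exactly argmin(a).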
    def cumsum(self, axis=None, dtype=None):
        """
        Return the cumulative sum of the elements along a given axis.

        Note:
            If ``self.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
            `dtype` will be elevated to :class:`int32`; :class:`int64` is not supported.

        Args:
            axis (int, optional): Axis along which the cumulative sum is computed. The
                default (None) is to compute the cumsum over the flattened array.
            dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as the original
                tensor, unless it has an integer dtype with a precision less than :class:`float32`.
                In that case, :class:`float32` is used. Default: None.

        Raises:
            ValueError: If the axis is out of range.

        Returns:
            Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.sum`: Return sum of tensor elements over a given axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((3,3)).astype("float32"))
            >>> output = a.cumsum(axis=0)
            >>> print(output)
            [[1. 1. 1.]
             [2. 2. 2.]
             [3. 3. 3.]]
        """
        x = self
        original_dtype = x.dtype
        # If the original tensor is bool or an integer type with precision less than int32,
        # convert to int32 first (uint16 assumed here; the scraped source listed int16 twice).
        if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.uint16):
            x = x.astype(mstype.int32)
        if axis is None:
            x = x.ravel()
            axis = 0
        validator.check_axis_in_range(axis, x.ndim)
        if dtype is not None and original_dtype != dtype:
            return tensor_operator_registry.get('cumsum')()(x, axis).astype(dtype, copy=False)
        return tensor_operator_registry.get('cumsum')()(x, axis)
    def copy(self):
        """
        Return a copy of the tensor.

        Note:
            The current implementation does not support the `order` argument.

        Returns:
            Copied tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((3,3)).astype("float32"))
            >>> output = a.copy()
            >>> print(output)
            [[1. 1. 1.]
             [1. 1. 1.]
             [1. 1. 1.]]
        """
        if self.size == 0:
            return self
        origin_dtype = self.dtype
        x = self
        logical_not_op = tensor_operator_registry.get('logical_not')()
        # A double logical-not yields a fresh bool tensor with the same values.
        if origin_dtype == mstype.bool_:
            return logical_not_op(logical_not_op(x))
        # For other dtypes, dividing by 1.0 forces computation of a new tensor;
        # round-trip through float32 for dtypes the divide kernel does not
        # handle, then cast back to the original dtype.
        if origin_dtype != mstype.float64:
            x = x.astype("float32")
        x = x / 1.0
        x = x.astype(origin_dtype)
        return x
    def max(self, axis=None, keepdims=False, initial=None, where=True):
        """
        Return the maximum of a tensor or maximum along an axis.

        Args:
            axis (Union[None, int, tuple of ints], optional): Axis or
                axes along which to operate. By default, flattened input is used. If
                this is a tuple of ints, the maximum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: None.
            keepdims (bool, optional):
                If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input array. Default: False.
            initial (scalar, optional):
                The minimum value of an output element. Must be present to allow
                computation on an empty slice. Default: None.
            where (bool Tensor, optional):
                A boolean tensor which is broadcasted to match the dimensions of the array,
                and selects elements to include in the reduction. If a non-default value
                is passed, initial must also be provided. Default: True.

        Returns:
            Tensor or scalar, maximum of the input tensor. If `axis` is None, the result is a scalar
            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

        Raises:
            TypeError: If arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
            :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
            :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
            >>> output = a.max()
            >>> print(output)
            3.0
        """
        reduce_ = tensor_operator_registry.get("reduce")
        reduce_max = tensor_operator_registry.get("reduce_max")
        maximum = tensor_operator_registry.get("maximum")
        return reduce_(self, reduce_max(keepdims), cmp_fn=maximum(), axis=axis, keepdims=keepdims,
                       initial=initial, where=where)
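    # Axis-wise variant of the example above (values follow numpy's max semantics):
    #
    #     a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))  # [[0. 1.] [2. 3.]]
    #     a.max(axis=0)   # [2. 3.]  -- column-wise maximum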
    def min(self, axis=None, keepdims=False, initial=None, where=True):
        """
        Return the minimum of a tensor or minimum along an axis.

        Args:
            axis (Union[None, int, tuple of ints], optional): Axis or
                axes along which to operate. By default, flattened input is used. If
                this is a tuple of ints, the minimum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: None.
            keepdims (bool, optional):
                If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input tensor. Default: False.
            initial (scalar, optional):
                The maximum value of an output element. Must be present to allow
                computation on an empty slice. Default: None.
            where (bool Tensor, optional):
                A boolean tensor which is broadcasted to match the dimensions of the tensor,
                and selects elements to include in the reduction. If a non-default value
                is passed, initial must also be provided. Default: True.

        Returns:
            Tensor or scalar, minimum of the input tensor. If the axis is None, the result is a scalar
            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

        Raises:
            TypeError: If arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
            :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
            :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.

        Examples:
            >>> from mindspore import Tensor
            >>> import mindspore.numpy as np
            >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
            >>> output = a.min()
            >>> print(output)
            0.0
        """
        reduce_ = tensor_operator_registry.get("reduce")
        reduce_min = tensor_operator_registry.get("reduce_min")
        minimum = tensor_operator_registry.get("minimum")
        return reduce_(self, reduce_min(keepdims), cmp_fn=minimum(), axis=axis, keepdims=keepdims,
                       initial=initial, where=where)
    def fill(self, value):
        """
        Fill the tensor with a scalar value.

        Note:
            Unlike Numpy, tensor.fill() will always return a new tensor, instead of
            filling the original tensor.

        Args:
            value (Union[None, int, float, bool]): All elements of the tensor will be
                assigned this value.

        Returns:
            Tensor, with the original dtype and shape.

        Raises:
            TypeError: If input arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
            >>> print(a.fill(1.0))
            [[1. 1.]
             [1. 1.]]
        """
        if value is None:
            if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
                                "tensor must be float, but got {}.".format(self.dtype))
            value = Tensor(float('nan')).astype("float32")
            return tensor_operator_registry.get("tile")()(value, self.shape).astype(self.dtype)
        if not isinstance(value, (int, float, bool)):
            raise TypeError("For 'Tensor.fill', the type of the argument 'value' must be int, float or bool, "
                            "but got {}.".format(type(value)))
        return tensor_operator_registry.get("fill")(self.dtype, self.shape, value)
    def ptp(self, axis=None, keepdims=False):
        """
        Return the range of values (maximum - minimum) along an axis.
        The name of the function comes from the acronym for "peak to peak".

        Note:
            Numpy argument `out` is not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
                The default is to compute the range of the flattened tensor. Default: None.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the tensor.
                Default: False.

        Returns:
            Tensor.

        Raises:
            TypeError: If `self` is not a tensor, or `axis` and `keepdims` have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32")
            >>> print(x.ptp(axis=1))
            [8. 6.]
            >>> print(x.ptp(axis=0))
            [2. 0. 5. 2.]
        """
        if not isinstance(keepdims, bool):
            raise TypeError("For 'Tensor.ptp', the type of the argument 'keepdims' must be bool, "
                            "but got {}.".format(type(keepdims)))
        if axis is None:
            axis = ()
        else:
            validator.check_axis_type(axis, True, True, False)
            axis = validator.check_axis_valid(axis, self.ndim)
        return self.max(axis, keepdims) - self.min(axis, keepdims)
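    # Worked example (illustrative, mirroring the docstring values): ptp reduces to
    # max - min along the same axis, so for x = [[4, 9, 2, 10], [6, 9, 7, 12]]:
    #   x.ptp(axis=1) == x.max(axis=1) - x.min(axis=1) == [10, 12] - [2, 6] == [8, 6]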
    def clip(self, xmin, xmax, dtype=None):
        """
        Clips (limits) the values in a Tensor.

        Given an interval, values outside the interval are clipped to the interval edges.
        For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
        and values larger than 1 become 1.

        Note:
            Currently, clip with `xmin=nan` or `xmax=nan` is not supported.

        Args:
            xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
                on the lower interval edge. Not more than one of `xmin` and `xmax` may be None.
            xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
                on the upper interval edge. Not more than one of `xmin` and `xmax` may be None.
                If `xmin` or `xmax` are tensors, then `xmin`, `xmax` and the given tensor
                will be broadcasted to match their shapes.
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default: None.

        Returns:
            Tensor, a tensor with the elements of the input tensor, but where values
            < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.

        Raises:
            TypeError: If inputs have types not specified above.
            ValueError: If the shapes of the tensor, `xmin` and `xmax` cannot broadcast,
                or both `xmin` and `xmax` are `None`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32")
            >>> y = x.clip(0, 2)
            >>> print(y)
            [1. 2. 2. 0. 0. 2. 2. 0.]
            >>> t = Tensor([1, 1, 1, 1, 1, 1, 1, 1])
            >>> y = x.clip(t, 2)
            >>> print(y)
            [1. 2. 2. 1. 1. 2. 2. 1.]
        """
        if xmin is None and xmax is None:
            raise ValueError("For 'Tensor.clip', the arguments 'xmin' and 'xmax' cannot both be None.")
        x = self
        # F.maximum/minimum does not support when both operands are scalar
        if xmin is not None:
            xmin = Tensor(xmin).astype(x.dtype)
            if x.ndim == 0 and xmin.ndim == 0:
                x = tensor_operator_registry.get("maximum")()(x.reshape((1,)), xmin).squeeze()
            else:
                x = tensor_operator_registry.get("maximum")()(x, xmin)
        if xmax is not None:
            xmax = Tensor(xmax).astype(x.dtype)
            if x.ndim == 0 and xmax.ndim == 0:
                x = tensor_operator_registry.get("minimum")()(x.reshape((1,)), xmax).squeeze()
            else:
                x = tensor_operator_registry.get("minimum")()(x, xmax)
        if dtype is not None and dtype != x.dtype:
            return x.astype(dtype)
        return x
    def _init_check(self):
        if self.has_init:
            self.init_data()
        return self
    def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
        """
        Get the tensor format data of this Tensor.

        Note:
            The init_data function can be called only once for the same tensor.

        Args:
            slice_index (int): Slice index of a parameter's slices.
                It is used when initializing a slice of a parameter, and guarantees that
                devices using the same slice can generate the same tensor. Default: None.
            shape (list[int]): Shape of the slice, it is used when initializing a slice of
                the parameter. Default: None.
            opt_shard_group (str): Optimizer shard group which is used in auto or semi auto
                parallel mode to get one shard of a parameter's slice. Default: None.

        Returns:
            Initialized Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> import mindspore.common.initializer as init
            >>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
            >>> out = x.init_data()
            >>> print(out)
            [[1. 1.]
             [1. 1.]]
        """
        if self.init is None:
            raise TypeError("init_data must be set with Tensor.init; init can't be None.")
        if shape is None:
            shape = self.shape
        try:
            arr = np.ndarray(shape, dtype=mstype.dtype_to_nptype(self.dtype))
        except ValueError:
            msg = "Error shape={}".format(shape)
            logger.critical(msg)
            raise ValueError(msg)

        class seed_context:
            """Set and restore seed."""

            def __init__(self, init):
                self.init = init
                from .seed import get_seed
                global_seed = get_seed()
                self._np_seed = np.random.get_state()[1][0]
                self.need_set_seed = ((slice_index is not None) and (global_seed is None))

            def __enter__(self):
                if self.need_set_seed:
                    self.seed = self.init.seed
                    np.random.seed(slice_index)
                    self.init.seed = slice_index

            def __exit__(self, ptype, value, trace):
                if self.need_set_seed:
                    np.random.seed(self._np_seed)
                    self.init.seed = self.seed

        with seed_context(self.init):
            self.init(arr)
        data = np.array(arr)
        if opt_shard_group:
            rank = get_rank(opt_shard_group)
            size = get_group_size(opt_shard_group)
            data = np.split(data, size)[rank]
        self.init = None
        self.assign_value(Tensor(data, dtype=self.dtype))
        return self
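    # Note on seed_context above (a sketch of the intent, not additional behavior):
    # when a parameter is initialized slice by slice and no global seed is set, the
    # numpy seed is pinned to `slice_index` for the duration of the initialization,
    # so every device that owns the same slice draws identical random values; the
    # previous numpy seed and the initializer's own seed are restored on exit.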
    def to_tensor(self, slice_index=None, shape=None, opt_shard_group=None):
        """
        Get the tensor format data of this Tensor by calling init_data().

        Note:
            The usage of `to_tensor` is deprecated. Please use `init_data`.

        Args:
            slice_index (int): Slice index of a parameter's slices.
                It is used when initializing a slice of a parameter, and guarantees that
                devices using the same slice can generate the same tensor. Default: None.
            shape (list[int]): Shape of the slice, it is used when initializing a slice of
                the parameter. Default: None.
            opt_shard_group (str): Optimizer shard group which is used in auto or semi auto
                parallel mode to get one shard of a parameter's slice. Default: None.

        Returns:
            Initialized Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> import mindspore.common.initializer as init
            >>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
            >>> out = x.to_tensor()
            >>> print(out)
            [[1. 1.]
             [1. 1.]]
        """
        logger.warning("WARN_DEPRECATED: The usage of to_tensor is deprecated."
                       " Please use init_data.")
        return self.init_data(slice_index, shape, opt_shard_group)
    def resize(self, *new_shape):
        """
        Changes shape and size of tensor in-place.

        If the shape of the new tensor is larger than the shape of the original tensor,
        the new tensor is padded with zeros. If the shape of the new tensor is smaller
        than the shape of the original tensor, the new tensor is filled with the elements
        of the original tensor in order.

        Note:
            Instead of changing the size of the input tensor in place and returning nothing
            as in numpy, this method returns a new Tensor with the given size.
            Numpy argument `refcheck` is not supported.

        Args:
            new_shape (Union[ints, tuple of ints]): Shape of the resized tensor.

        Returns:
            Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.

            :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
            >>> y = x.resize(3, 3)
            >>> print(y)
            [[1. 2. 3.]
             [4. 5. 6.]
             [0. 0. 0.]]
            >>> y = x.resize(2, 2)
            >>> print(y)
            [[1. 2.]
             [3. 4.]]
        """
        if not new_shape:
            return self
        if len(new_shape) == 1:
            if isinstance(new_shape[0], tuple):
                new_shape = new_shape[0]
        flattened = self.ravel()
        cur_size = flattened.size
        new_size = tensor_operator_registry.get('shape_mul')(new_shape)
        diff_size = new_size - cur_size
        if diff_size > 0:
            pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
            res = tensor_operator_registry.get('concatenate')(0)((flattened, pad_val))
        else:
            res = flattened[:new_size]
        return res.reshape(new_shape)
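    # Resize arithmetic (illustrative): the input is flattened, then zero-padded when
    # new_size > size or truncated when new_size < size, before the final reshape:
    #   [[1, 2, 3], [4, 5, 6]].resize(3, 3) pads to 9 elements -> [[1, 2, 3], [4, 5, 6], [0, 0, 0]]
    #   [[1, 2, 3], [4, 5, 6]].resize(2, 2) keeps the first 4  -> [[1, 2], [3, 4]]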
    def diagonal(self, offset=0, axis1=0, axis2=1):
        """
        Return specified diagonals.

        Args:
            offset (int, optional): Offset of the diagonal from the main diagonal.
                Can be positive or negative. Defaults to main diagonal.
            axis1 (int, optional): Axis to be used as the first axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                first axis (0).
            axis2 (int, optional): Axis to be used as the second axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                second axis (1).

        Returns:
            Tensor, if the Tensor is 2-D, returns a 1-D Tensor containing the diagonal.

        Raises:
            ValueError: If the input tensor has less than two dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.trace`: Return the sum along diagonals of the tensor.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape(2, 2))
            >>> print(a)
            [[0 1]
             [2 3]]
            >>> output = a.diagonal()
            >>> print(output)
            [0 3]
        """
        ndim = self.ndim
        if ndim < 2:
            raise ValueError("For 'Tensor.diagonal', the original tensor requires at least two dimensions, "
                             "but got {}.".format(ndim))
        dtype = self.dtype

        axes = validator.check_axis_valid((axis1, axis2), ndim)
        perm = ()
        for i in range(ndim):
            if i not in axes:
                perm += (i,)
        perm += axes
        a = self.transpose(perm)

        shape = a.shape
        n, m = shape[-2:]

        e = tensor_operator_registry.get('eye')(n, m, dtype)
        if offset >= m or offset <= -n:
            e = tensor_operator_registry.get('fill')(dtype, (n, m), 0)
        elif offset != 0:
            e = e.astype(mstype.float32)
            if offset > 0:
                e_left = tensor_operator_registry.get('fill')(dtype, (n, offset), 0)
                e_right = e[..., 0:m - offset:1]
                e = tensor_operator_registry.get('concatenate')(1)((e_left, e_right)).astype(dtype)
            elif offset < 0:
                e_upper = tensor_operator_registry.get('fill')(dtype, (-offset, m), 0)
                e_lower = e[0:n + offset:1, ...]
                e = tensor_operator_registry.get('concatenate')(0)((e_upper, e_lower)).astype(dtype)
        e = tensor_operator_registry.get('broadcast_to')(shape)(e)

        prod = tensor_operator_registry.get('__mul__')(a, e)
        res = tensor_operator_registry.get('reduce_sum')(prod.astype(mstype.float32), -1)

        begin = ()
        for i in range(ndim - 2):
            begin += (0,)
        last_dim_begin = max(0, -offset)
        begin += (last_dim_begin,)
        size = res.shape[:-1]
        last_dim_end = min(
            shape[-2], max(0, shape[-1] - offset)) - last_dim_begin
        if last_dim_end <= 0:
            return Tensor([])
        size += (last_dim_end,)
        res = tensor_operator_registry.get('tensor_slice')(res, begin, size)
        return res.astype(dtype)
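    # How the mask trick works (illustrative, for the 2 x 2 docstring example): an
    # identity-like matrix `e` (shifted by `offset`) zeroes everything off the wanted
    # diagonal, and reduce_sum collapses the last axis:
    #   a = [[0, 1],   e = [[1, 0],   a * e = [[0, 0],   sum over last axis -> [0, 3]
    #        [2, 3]]        [0, 1]]            [0, 3]]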
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
        """
        Return the sum along diagonals of the tensor.

        Args:
            offset (int, optional): Offset of the diagonal from the main diagonal.
                Can be positive or negative. Defaults to main diagonal.
            axis1 (int, optional): Axis to be used as the first axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                first axis (0).
            axis2 (int, optional): Axis to be used as the second axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                second axis (1).
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default: None.

        Returns:
            Tensor, the sum along diagonals.

        Raises:
            ValueError: If the input tensor has less than two dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.diagonal`: Return specified diagonals.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.eye(3, dtype=np.float32))
            >>> print(x.trace())
            3.0
        """
        d = self.diagonal(offset, axis1=axis1, axis2=axis2)
        shape = d.shape
        if dtype is None:
            dtype = d.dtype
        if shape[-1] == 0:
            return tensor_operator_registry.get('fill')(dtype, shape[:-1], 0)
        res = tensor_operator_registry.get('reduce_sum')(d.astype(mstype.float32), -1)
        return res.astype(dtype)
    def take(self, indices, axis=None, mode='clip'):
        """
        Takes elements from a tensor along an axis.

        Args:
            indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
            axis (int, optional): The axis over which to select values. By default,
                the flattened input tensor is used. Default: `None`.
            mode ('raise', 'wrap', 'clip', optional): Specifies how out-of-bounds
                indices will be treated. Default: 'clip'.

                - raise: Raises an error;
                - wrap: Wraps around;
                - clip: Clips to the range. 'clip' mode means that all indices that are
                  too large are replaced by the index that addresses the last element
                  along that axis. Note that this disables indexing with negative numbers.

        Returns:
            Tensor, the indexed result.

        Raises:
            ValueError: If `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip').

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
            >>> indices = Tensor(np.array([0, 1, 4]))
            >>> output = a.take(indices)
            >>> print(output)
            [4 3 6]
        """
        if mode not in ('raise', 'wrap', 'clip'):
            raise ValueError(f"For 'Tensor.take', the argument 'mode' should be one of ['raise', 'wrap', 'clip'],"
                             f" but got {mode}.")
        if axis is None:
            a = self.ravel()
            axis = 0
        else:
            a = self
        ndim = a.ndim
        validator.check_axis_in_range(axis, ndim)
        axis = axis + ndim if axis < 0 else axis

        shape_a = a.shape
        shape_indices = indices.shape
        size_indices = indices.size
        indices = tensor_operator_registry.get('check_indices')(shape_a[axis], indices, mode)

        # reshapes indices to shape (Ni..., Nj..., Nk)
        shape_ni = shape_a[:axis]
        shape_nk = shape_a[axis + 1:]
        shape_out = shape_ni + shape_indices + shape_nk
        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
        indices = indices.reshape(shape_indices)
        shape_indices = shape_ni + (indices.size,) + shape_nk
        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)(indices)

        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
        return res.reshape(shape_out)
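    # Shape bookkeeping above (illustrative): with a.shape == (Ni..., M, Nk...) and
    # `axis` pointing at M, the indices of shape (Nj...) are flattened, reshaped to
    # put their size on `axis`, broadcast to (Ni..., Nj_size, Nk...) for gather_d,
    # and the gathered result is reshaped to the final (Ni..., Nj..., Nk...).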
    def choose(self, choices, mode='clip'):
        """
        Construct a tensor from an index tensor and a list of tensors to choose from.

        Args:
            choices (Union[tuple, list, Tensor]): Choice tensors. The input tensor and all of the
                `choices` must be broadcasted to the same shape. If `choices` is itself a tensor,
                then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
                is taken as defining the "sequence".
            mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
                ``[0, n-1]`` will be treated. Default: 'clip'.

                - raise: Raises an error;
                - wrap: Wraps around;
                - clip: Clips to the range. 'clip' mode means that values greater than n-1 are
                  mapped to n-1. Note that this disables indexing with negative numbers.

        Returns:
            Tensor, the merged result.

        Raises:
            ValueError: If the input tensor and any of the `choices` cannot be broadcast.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
            >>> x = Tensor(np.array([2, 3, 1, 0]))
            >>> print(x.choose(choices))
            [20 31 12  3]
        """
        if isinstance(choices, Tensor):
            shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)(choices)
        else:
            # broadcasts choices to the same shape if choices is a sequence
            choicelist = []
            shapes = ()
            for choice in choices:
                if not isinstance(choice, Tensor):
                    choice = tensor_operator_registry.get('make_tensor')(choice)
                shapes += (choice.shape,)
                choicelist.append(choice)
            shape_choice = validator.infer_out_shape(self.shape, *shapes)
            tmp = []
            for choice in choicelist:
                tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)(choice))
            choices = tensor_operator_registry.get('stack')(0)(tmp)

        if self.ndim == 0 or choices.ndim == 0:
            raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                             f" Their dimensions should all be > 0, but got the original tensor's dimension "
                             f"{self.ndim}, 'choices' dimension {choices.ndim}.")
        a = tensor_operator_registry.get('broadcast_to')(shape_choice)(self)
        dtype = choices.dtype
        # adjusts dtype for F.tensor_mul and F.gather_nd
        a = a.astype(mstype.int32)
        choices = choices.astype(mstype.int32)
        a = tensor_operator_registry.get('check_indices')(choices.shape[0], a, mode, allow_negative_index=False)

        grids = []
        ndim = len(a.shape)
        for i in range(ndim):
            dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
            dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
            dim_grid = tensor_operator_registry.get('broadcast_to')(a.shape)(dim_grid.reshape(dim_shape))
            grids.append(dim_grid)
        grid = tensor_operator_registry.get('stack')(-1)(grids)
        indices = tensor_operator_registry.get('concatenate')(-1)((a.reshape(a.shape + (1,)), grid))
        return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)
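    # How the gather works (illustrative): `a` holds, per output position, which of
    # the n stacked choices to read, and `grid` restores the positional coordinates,
    # so gather_nd picks choices[a[pos], pos] for every position:
    #   choices.shape == (n,) + shape_choice, indices.shape == shape_choice + (ndim + 1,)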
    def searchsorted(self, v, side='left', sorter=None):
        """
        Finds indices where elements should be inserted to maintain order.

        Args:
            v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into the tensor.
            side ('left', 'right', optional): If 'left', the index of the first suitable
                location found is given. If 'right', return the last such index. If there is
                no suitable index, return either 0 or N (where N is the length of the tensor).
                Default: 'left'.
            sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional tensor of
                integer indices that sort the tensor into ascending order. They are typically
                the result of argsort. Default: None.

        Returns:
            Tensor, array of insertion points with the same shape as `v`.

        Raises:
            ValueError: If the argument for `side` or `sorter` is invalid.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1, 2, 3, 4, 5]))
            >>> print(x.searchsorted(3))
            2
        """
        if side not in ('left', 'right'):
            raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of "
                             f"['left', 'right'], but got {side}.")
        a = self.astype(mstype.float32)
        if not isinstance(v, Tensor):
            v = tensor_operator_registry.get('make_tensor')(v)
        shape = v.shape
        if sorter is not None:
            if not isinstance(sorter, Tensor):
                sorter = tensor_operator_registry.get('make_tensor')(sorter)
            if sorter.ndim != 1 or sorter.size != a.size:
                raise ValueError('sorter must be a 1-D array with the same size as the Tensor')
            sorter = sorter.reshape(sorter.shape + (1,))
            a = tensor_operator_registry.get('gather_nd')(a, sorter)
        less_op = tensor_operator_registry.get('__le__') if side == 'left' else tensor_operator_registry.get('__lt__')
        i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
        j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)

        sort_range = tuple(range(validator.get_log2_size(
            tensor_operator_registry.get('shape_mul')(a.shape) + 1)))
        for _ in sort_range:
            mid = (i + j) // 2
            mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
            i = tensor_operator_registry.get('select')(mask, i, mid)
            j = tensor_operator_registry.get('select')(mask, mid, j)
        return j
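    # The loop above is a vectorized bisection; an illustrative trace for
    # a = [1, 2, 3, 4, 5], v = 3, side='left' (mask tests v <= a[mid]):
    #   i=0, j=5 -> mid=2, a[2]=3, v <= 3, so j=2
    #   i=0, j=2 -> mid=1, a[1]=2, not v <= 2, so i=1
    #   i=1, j=2 -> mid=1, bounds settle; the insertion point returned is j=2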
    def var(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the variance along the specified axis.

        The variance is the average of the squared deviations from the mean, i.e.,
        :math:`var = mean(abs(x - x.mean())**2)`.

        Return the variance, which is computed for the flattened array by default,
        otherwise over the specified axis.

        Note:
            Numpy arguments `dtype`, `out` and `where` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
                The default is to compute the variance of the flattened array. Default: `None`.
            ddof (int): Means Delta Degrees of Freedom. Default: 0.
                The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents
                the number of elements.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. Default: `False`.

        Returns:
            Variance tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.

            :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
            >>> output = input_x.var()
            >>> print(output)
            1.25
        """
        if 0 in self.shape:
            return Tensor(float('nan'), self.dtype)
        if not isinstance(ddof, int):
            raise TypeError("For 'Tensor.var', the type of the argument 'ddof' must be int, but got "
                            "{}.".format(type(ddof)))
        if not isinstance(keepdims, bool):
            raise TypeError("For 'Tensor.var', the type of the argument 'keepdims' must be bool, but "
                            "got {}.".format(type(keepdims)))

        if axis is None:
            axis = ()
        else:
            axis = validator.check_and_canonicalize_axes(axis, self.ndim)
        x_mean = tensor_operator_registry.get('mean')(True)(self, axis)
        x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
        x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
        x_sum = tensor_operator_registry.get('sum')(bool(keepdims))(x_pow, axis)
        nums = 1
        if axis == ():
            nums = self.size
        else:
            for ax in axis:
                nums *= self.shape[ax]
        return tensor_operator_registry.get('__truediv__')(x_sum, nums - ddof)
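    # Worked example (illustrative): for x = [1., 2., 3., 4.], mean = 2.5 and the
    # squared deviations are [2.25, 0.25, 0.25, 2.25], summing to 5.0, so
    # var = 5.0 / (4 - ddof): ddof=0 -> 1.25 (docstring value), ddof=1 -> ~1.6667.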
    def std(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the standard deviation along the specified axis.

        The standard deviation is the square root of the average of the squared deviations
        from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.

        Return the standard deviation, which is computed for the flattened array by default,
        otherwise over the specified axis.

        Note:
            Numpy arguments `dtype`, `out` and `where` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
                deviation is computed. Default: `None`.
                If `None`, compute the standard deviation of the flattened array.
            ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
                where :math:`N` represents the number of elements. Default: 0.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. Default: `False`.

        Returns:
            Standard deviation tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.

            :func:`mindspore.Tensor.var`: Compute the variance along the specified axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1, 2, 3, 4], dtype=np.float32))
            >>> output = input_x.std()
            >>> print(output)
            1.118034
        """
        x_var = self.var(axis, ddof, keepdims)
        return tensor_operator_registry.get('__pow__')(x_var, 0.5)
    def sum(self, axis=None, dtype=None, keepdims=False, initial=None):
        """
        Return sum of tensor elements over a given axis.

        Note:
            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
            `extobj` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed.
                Default: None.
                If None, sum all the elements of the input tensor.
                If the axis is negative, it counts from the last to the first axis.
                If the axis is a tuple of ints, a sum is performed on all the axes specified
                in the tuple instead of a single axis or all the axes as before.
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default: None.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will broadcast
                correctly against the input array. Default: `False`.
            initial (scalar): Starting value for the sum. Default: `None`.

        Returns:
            Tensor. A tensor with the same shape as input, with the specified axis removed.
            If the input tensor is a 0-d array, or if the axis is None, a scalar is returned.

        Raises:
            TypeError: If input is not array_like, or `axis` is not int or tuple of ints,
                or `keepdims` is not integer, or `initial` is not scalar.
            ValueError: If any axis is out of range or duplicate axes exist.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given axis.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([-1, 0, 1]).astype(np.float32))
            >>> print(input_x.sum())
            0.0
            >>> input_x = Tensor(np.arange(10).reshape(2, 5).astype(np.float32))
            >>> print(input_x.sum(axis=1))
            [10. 35.]
        """
        input_x = self.astype(mstype.int32) if self.dtype == mstype.bool_ else self
        dtype = input_x.dtype if dtype is None else dtype
        if not isinstance(keepdims, int):
            raise TypeError("For 'Tensor.sum', the type of the argument 'keepdims' must be int, but "
                            "got {}.".format(type(keepdims)))
        if initial is not None and not isinstance(initial, (int, float, bool)):
            raise TypeError("For 'Tensor.sum', when the argument 'initial' is not None, it must be int, "
                            "float or bool, but got {}.".format(type(initial)))
        if axis is None:
            axis = ()
        else:
            axis = validator.check_and_canonicalize_axes(axis, self.ndim)

        if not validator.check_type_support(input_x.dtype, 'GPU',
                                            (mstype.float64, mstype.float32, mstype.float16)):
            input_x = input_x.astype(mstype.float32)
        if 0 in self.shape:
            input_x = tensor_operator_registry.get('make_tensor')([0], self.dtype)
        res = tensor_operator_registry.get('sum')(bool(keepdims))(input_x, axis)
        if initial is not None:
            res += initial
        return res.astype(dtype)
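    # Behavior notes (illustrative, following the code above): bool tensors are summed
    # as int32, e.g. Tensor(np.array([True, True, False])).sum() -> 2; an empty tensor
    # sums to 0 (plus `initial` when given); unsupported dtypes are lifted to float32
    # before the reduction and the result is cast back to `dtype` at the end.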
    def repeat(self, repeats, axis=None):
        """
        Repeat elements of a tensor.

        Args:
            repeats (Union[int, tuple, list]): The number of repetitions for each element.
                `repeats` is broadcasted to fit the shape of the given axis.
            axis (int, optional): The axis along which to repeat values. By default,
                use the flattened input tensor, and return a flat output tensor. Default: None.

        Returns:
            Tensor, has the same shape as the input tensor except along the given axis.

        Raises:
            ValueError: If the axis is out of range.
            TypeError: If arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        See also:
            :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.

            :func:`mindspore.Tensor.resize`: Changes shape and size of tensor in-place.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array(3))
            >>> print(x.repeat(4))
            [3 3 3 3]
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> print(x.repeat(2))
            [1 1 2 2 3 3 4 4]
            >>> print(x.repeat(3, axis=1))
            [[1 1 1 2 2 2]
             [3 3 3 4 4 4]]
            >>> print(x.repeat([1, 2], axis=0))
            [[1 2]
             [3 4]
             [3 4]]
        """
        if not isinstance(repeats, (tuple, list)):
            repeats = (repeats,)
        for index, element in enumerate(repeats):
            if not isinstance(element, int):
                raise TypeError(f"For 'Tensor.repeat', each element in {repeats} should be int, but got "
                                f"{type(element)} at index {index}.")
        input_x = self
        if axis is None:
            input_x = self.ravel()
            axis = 0
        if axis is not None and not isinstance(axis, int):
            raise TypeError(f"For 'Tensor.repeat', the argument 'axis' should be int, but got {type(axis)}.")
        validator.check_axis_in_range(axis, input_x.ndim)
        axis = axis + input_x.ndim if axis < 0 else axis

        if len(repeats) == 1:
            repeats = repeats[0]
            if repeats == 0:
                return Tensor_(input_x.dtype, (0,))
            return tensor_operator_registry.get('repeat_elements')(input_x, repeats, axis)
        size = input_x.shape[axis]
        if len(repeats) != size:
            raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
                             f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
                             f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
        subs = tensor_operator_registry.get('split')(axis, size)(input_x)
        repeated_subs = []
        for sub, rep in zip(subs, repeats):
            if rep != 0:
                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
        return tensor_operator_registry.get('concatenate')(axis)(repeated_subs)
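    # How the per-element list case works (illustrative): x.repeat([1, 2], axis=0)
    # splits x into one sub-tensor per row, repeats each sub-tensor rep times with
    # repeat_elements (reps of 0 are dropped), then concatenates along `axis`:
    #   [[1, 2], [3, 4]] -> [[1, 2]] * 1 and [[3, 4]] * 2 -> [[1, 2], [3, 4], [3, 4]]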
class RowTensor:
    """
    A sparse representation of a set of tensor slices at given indices.

    A RowTensor is typically used to represent a subset of a larger
    dense tensor of shape [L0, D1, .., DN], where L0 >> D0.

    The values in `indices` are the indices in the first dimension of the slices
    that have been extracted from the larger tensor.

    The dense tensor represented by a RowTensor `slices` has
    `dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]`.

    RowTensor can only be used in the `Cell`'s construct method.
    It is not supported in pynative mode at the moment.

    Args:
        indices (Tensor): A 1-D integer Tensor of shape [D0].
        values (Tensor): A Tensor of any dtype of shape [D0, D1, ..., Dn].
        dense_shape (tuple(int)): An integer tuple which contains the shape
            of the corresponding dense tensor.

    Returns:
        RowTensor, composed of `indices`, `values`, and `dense_shape`.

    Examples:
        >>> import mindspore as ms
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, RowTensor
        >>> class Net(nn.Cell):
        ...     def __init__(self, dense_shape):
        ...         super(Net, self).__init__()
        ...         self.dense_shape = dense_shape
        ...     def construct(self, indices, values):
        ...         x = RowTensor(indices, values, self.dense_shape)
        ...         return x.values, x.indices, x.dense_shape
        >>>
        >>> indices = Tensor([0])
        >>> values = Tensor([[1, 2]], dtype=ms.float32)
        >>> out = Net((3, 2))(indices, values)
        >>> print(out[0])
        [[1. 2.]]
        >>> print(out[1])
        [0]
        >>> print(out[2])
        (3, 2)
    """

    def __init__(self, indices, values, dense_shape):
        """Init RowTensor."""
        self.__indices = indices
        self.__values = values
        self.__dense_shape = dense_shape

    @property
    def indices(self):
        return self.__indices

    @property
    def values(self):
        return self.__values

    @property
    def dense_shape(self):
        return self.__dense_shape
class SparseTensor:
    """
    A sparse representation of a set of nonzero elements from a tensor at given indices.

    SparseTensor can only be used in the `Cell`'s construct method.
    It is not supported in pynative mode at the moment.

    For a tensor dense, its SparseTensor(indices, values, dense_shape) has
    `dense[indices[i]] = values[i]`.

    Args:
        indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
            where N and ndims are the number of `values` and number of dimensions in
            the SparseTensor, respectively.
        values (Tensor): A 1-D tensor of any type and shape `[N]`, which
            supplies the values for each element in `indices`.
        dense_shape (tuple(int)): An integer tuple of size `ndims`,
            which specifies the dense_shape of the sparse tensor.

    Returns:
        SparseTensor, composed of `indices`, `values`, and `dense_shape`.

    Examples:
        >>> import mindspore as ms
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, SparseTensor
        >>> class Net(nn.Cell):
        ...     def __init__(self, dense_shape):
        ...         super(Net, self).__init__()
        ...         self.dense_shape = dense_shape
        ...     def construct(self, indices, values):
        ...         x = SparseTensor(indices, values, self.dense_shape)
        ...         return x.values, x.indices, x.dense_shape
        >>>
        >>> indices = Tensor([[0, 1], [1, 2]])
        >>> values = Tensor([1, 2], dtype=ms.float32)
        >>> out = Net((3, 4))(indices, values)
        >>> print(out[0])
        [1. 2.]
        >>> print(out[1])
        [[0 1]
         [1 2]]
        >>> print(out[2])
        (3, 4)
    """

    def __init__(self, indices, values, dense_shape):
        """Init SparseTensor."""
        self.__indices = indices
        self.__values = values
        self.__dense_shape = dense_shape

    @property
    def indices(self):
        return self.__indices

    @property
    def values(self):
        return self.__values

    @property
    def dense_shape(self):
        return self.__dense_shape
class CSRTensor(CSRTensor_):
    """
    Constructs a sparse tensor in CSR (Compressed Sparse Row) format, with specified
    values indicated by `values` and row and column positions indicated by `indptr`
    and `indices`.

    Alternatively, a CSRTensor can be initialized by passing another CSRTensor as input.
    Currently this constructor can only be supported in PyNative Mode.

    Note:
        This is an experimental feature and is subject to change.

    Args:
        indptr (Tensor): 1-D Tensor of size `shape[0] + 1`, which indicates the
            start and end point for `values` in each row. Default: None. If provided,
            must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
        indices (Tensor): 1-D Tensor, which has the same length as `values`. `indices`
            indicates which column each of `values` should be placed in. Default: None. If provided,
            must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
        values (Tensor): 1-D Tensor, which has the same length as `indices`. `values`
            stores the data for CSRTensor. Default: None.
        shape (Tuple): A tuple indicating the shape of the CSRTensor. Its length must
            be `2`, as only 2-D CSRTensor is currently supported, and `shape[0]` must
            equal ``indptr.shape[0] - 1``, which is the number of rows of the CSRTensor.
        csr_tensor (CSRTensor): A CSRTensor object.

    Outputs:
        CSRTensor, with shape defined by `shape`, and dtype inferred from `values`.

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, CSRTensor
        >>> # initialize a csr_tensor with indptr, indices, values and shape
        >>> indptr = Tensor([0, 1, 2])
        >>> indices = Tensor([0, 1])
        >>> values = Tensor([1, 2], dtype=ms.float32)
        >>> shape = (2, 4)
        >>> csr_tensor = CSRTensor(indptr, indices, values, shape)
        >>> # initialize a csr_tensor from another csr_tensor
        >>> csr_tensor_2 = CSRTensor(csr_tensor=csr_tensor)
        >>> # access a data member of CSRTensor
        >>> print(indptr == csr_tensor.indptr)
        [ True  True  True]
    """

    def __init__(self, indptr=None, indices=None, values=None, shape=None, csr_tensor=None):
        self.init_finished = False
        # Case 1: directly init a CSRTensor from another CSRTensor
        if indptr is None and indices is None and values is None and shape is None:
            if not isinstance(csr_tensor, (CSRTensor, CSRTensor_)):
                raise TypeError("If only one input provided, it must be a CSRTensor.")
            CSRTensor_.__init__(self, csr_tensor)
        # Case 2: init a CSRTensor from indptr, indices, values and shape
        else:
            if (indptr is None or indices is None or values is None or shape is None):
                raise TypeError("Inputs must follow: CSRTensor(indptr, indices, values, shape).")
            if not (isinstance(indptr, Tensor) and isinstance(indices, Tensor) \
                    and isinstance(values, Tensor) and isinstance(shape, tuple)):
                raise TypeError("Inputs must follow: CSRTensor(tensor, tensor, tensor, tuple).")
            if len(shape) != 2 or shape[0] + 1 != indptr.shape[0] or shape[1] <= 0:
                raise ValueError("Shape length should be 2, and shape[0] should equal indptr.shape[0] - 1.")
            if indptr.dtype not in (mstype.int16, mstype.int32, mstype.int64):
                raise TypeError("indptr must have integer data type.")
            if indices.dtype not in (mstype.int16, mstype.int32, mstype.int64):
                raise TypeError("indices must have integer data type.")
            CSRTensor_.__init__(self, indptr, indices, values, shape)
        self.init_finished = True

    def __repr__(self):
        """Avoid PyTest Segfault when CSRTensor is not initialized."""
        if self.init_finished:
            return CSRTensor_.__repr__(self)
        return ''

    @property
    def indptr(self):
        return Tensor(self._indptr)

    @property
    def indices(self):
        return Tensor(self._indices)

    @property
    def values(self):
        return Tensor(self._values)

    @property
    def shape(self):
        return self._shape

    def to_tuple(self):
        return self.indptr, self.indices, self.values, self.shape
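    # Reading the CSR layout (illustrative, using the docstring example): for
    # indptr = [0, 1, 2], indices = [0, 1], values = [1., 2.] and shape = (2, 4),
    # row r holds values[indptr[r]:indptr[r + 1]] at the column positions
    # indices[indptr[r]:indptr[r + 1]], i.e. the dense equivalent is
    #   [[1., 0., 0., 0.],
    #    [0., 2., 0., 0.]]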
def _vm_compare(*args):
    """Implement `vm_compare` for tensor."""
    obj_str = args[-1]
    if obj_str == "shape":
        fn = getattr(args[0].asnumpy(), obj_str)
        return fn
    if len(args) == 2:
        fn = getattr(args[0].asnumpy(), obj_str)
        return Tensor(fn())
    if isinstance(args[0], Tensor):
        fn = getattr(args[0].asnumpy(), obj_str)
        y = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
    else:
        obj_str = "__r" + obj_str[2:]
        fn = getattr(args[1].asnumpy(), obj_str)
        y = args[0]
    return Tensor(np.array(fn(y)))
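# Dispatch sketch for _vm_compare (illustrative): the last positional argument names
# the numpy method to emulate, so a registered call such as
#   _vm_compare(Tensor([1, 2]), Tensor([2, 2]), "__eq__")
# resolves to np.array([1, 2]).__eq__(np.array([2, 2])) and wraps the result in a
# Tensor; when the left operand is not a Tensor, "__xx__" becomes the reflected
# "__rxx__" on the right operand.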
def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
    """Check the tensor input."""
    if input_data is not None and shape is not None:
        raise ValueError("If input_data is available, shape doesn't need to be set.")

    if init is not None and (shape is None or dtype is None):
        raise ValueError("init, dtype and shape must have values at the same time.")

    if (int(input_data is None) + int(init is None)) != 1:
        raise TypeError("One and only one of input_data and init must be provided.")

    if input_data is not None:
        if isinstance(input_data, np.ndarray) and input_data.ndim > 1 and input_data.size == 0:
            raise ValueError("input_data can not contain zero dimension.")
        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim > 1 \
                and np.array(input_data).size == 0:
            raise ValueError("input_data can not contain zero dimension.")

    if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
        raise ValueError("Shape can not contain zero value.")


tensor_operator_registry.register('vm_compare', _vm_compare)