You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

math_ops.py 194 kB

4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
4 years ago
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
6 years ago
6 years ago
6 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
6 years ago
6 years ago
6 years ago
5 years ago
6 years ago
6 years ago
6 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
optimize the comment and log description 修改: ops/operations/_inner_ops.py 修改: ops/operations/_quant_ops.py 修改: ops/operations/array_ops.py 修改: ops/operations/comm_ops.py 修改: ops/operations/math_ops.py 修改: ops/operations/quantum_ops.py 修改: ops/operations/rl_ops.py 修改: ops/operations/sponge_ops.py 修改: ops/operations/sponge_update_ops.py 修改: train/__init__.py 修改: common/tensor.py 修改: train/serialization.py 修改: ccsrc/pipeline/jit/parse/parse.h 修改: explainer/benchmark/_attribution/metric.py 修改: ops/composite/multitype_ops/_constexpr_utils.py 修改: ops/operations/comm_ops.py 修改: RELEASE.md 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc 修改: mindspore/ccsrc/frontend/parallel/strategy.h 修改: mindspore/common/tensor.py 修改: mindspore/core/abstract/prim_arrays.cc 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/core/ops/conv2d.cc 修改: mindspore/core/ops/logical_and.h 修改: mindspore/core/ops/logical_not.h 修改: mindspore/core/ops/logical_or.h 修改: mindspore/core/ops/reduce_all.h 修改: mindspore/core/ops/reduce_any.h 修改: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc 修改: mindspore/nn/layer/quant.py 修改: mindspore/nn/optim/sgd.py 修改: mindspore/nn/sparse/sparse.py 修改: mindspore/numpy/array_creations.py 修改: mindspore/numpy/array_ops.py 修改: mindspore/numpy/logic_ops.py 修改: mindspore/numpy/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/train/_utils.py 修改: tests/ut/python/model/test_lenet_core_after_exception.py 修改: mindspore/_extends/parse/standard_method.py 修改: mindspore/ops/operations/rl_ops.py 修改: mindspore/core/abstract/prim_nn.cc 修改: 
mindspore/core/ops/conv2d.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h 修改: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h 修改: mindspore/ccsrc/fl/server/server.cc 修改: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc 修改: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h 修改: mindspore/ccsrc/frontend/optimizer/irpass/inline.h 修改: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc 修改: mindspore/ccsrc/minddata/dataset/core/tensor.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc 修改: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc 修改: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc 修改: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc 修改: mindspore/ccsrc/pipeline/jit/action.cc 修改: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc 
修改: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc 修改: mindspore/compression/quant/quant_utils.py 修改: mindspore/core/abstract/prim_nn.cc 修改: mindspore/dataset/engine/validators.py 修改: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc 修改: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc 修改: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc 修改: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/lite/tools/optimizer/fisson/fisson_util.cc 修改: mindspore/ops/composite/math_ops.py 修改: mindspore/ops/operations/_inner_ops.py 修改: mindspore/ops/operations/array_ops.py 修改: mindspore/ops/operations/math_ops.py 修改: mindspore/ops/operations/other_ops.py 修改: mindspore/boost/boost_cell_wrapper.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc 修改: mindspore/ccsrc/common/trans.cc 修改: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/lite/src/common/log_util.h 修改: mindspore/nn/wrap/loss_scale.py 修改: mindspore/parallel/nn/moe.py 修改: tests/mindspore_test_framework/mindspore_test.py 修改: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc 修改: mindspore/lite/tools/common/graph_util.h 修改: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc 修改: mindspore/core/ops/conv2d.cc 修改: tests/ut/python/model/test_lenet_core_after_exception.py
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Operators for math."""
  16. import numpy as np
  17. from ... import context
  18. from .. import signature as sig
  19. from ..._checkparam import Validator as validator
  20. from ..._checkparam import Rel
  21. from ...common import dtype as mstype
  22. from ...common.tensor import Tensor
  23. from ...common._decorator import deprecated
  24. from .._utils import get_broadcast_shape
  25. from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  26. def _infer_shape_reduce(x, axis, keep_dims, prim_name):
  27. """Common infer for reduce operator"""
  28. def reduce_one_axis(one_axis):
  29. validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
  30. if one_axis < 0:
  31. one_axis += dim
  32. axis_reduce.add(one_axis)
  33. validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
  34. dim = len(x)
  35. axis_reduce = set()
  36. if isinstance(axis, int):
  37. reduce_one_axis(axis)
  38. else:
  39. if not axis:
  40. if keep_dims:
  41. return [1] * dim
  42. return []
  43. for index, one_axis in enumerate(axis):
  44. validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
  45. reduce_one_axis(one_axis)
  46. out_shape = []
  47. for i in range(dim):
  48. if i in axis_reduce:
  49. if keep_dims:
  50. out_shape.append(1)
  51. else:
  52. out_shape.append(x[i])
  53. return out_shape
class _BinaryOp(PrimitiveWithInfer):
    """
    Define binary operators.

    Base class for two-input element-wise primitives; the output shape is the
    broadcast of the two input shapes.
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize _BinaryOp"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

    def infer_shape(self, x_shape, y_shape):
        """Return the broadcast of the two input shapes."""
        return get_broadcast_shape(x_shape, y_shape, self.name)

    def infer_min_shape(self, x_shape, y_shape):
        """Return the broadcast of the minimum shapes (dynamic-shape path)."""
        return get_broadcast_shape(x_shape, y_shape, self.name, "min_shape")

    def infer_max_shape(self, x_shape, y_shape):
        """Return the broadcast of the maximum shapes (dynamic-shape path)."""
        return get_broadcast_shape(x_shape, y_shape, self.name, "max_shape")
  69. class _MathBinaryOp(_BinaryOp):
  70. """
  71. Define math binary operators.
  72. """
  73. @staticmethod
  74. def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
  75. """Staticmethod of infer dtype for _MathBinaryOp."""
  76. args_type = {"x": x_dtype, "y": y_dtype}
  77. complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)]
  78. if x_dtype in complex_types or y_dtype in complex_types:
  79. type_infer_dict = {
  80. (mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64),
  81. (mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64),
  82. (mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64),
  83. (mstype.complex128, mstype.complex128): mstype.tensor_type(mstype.complex128),
  84. (mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128),
  85. (mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
  86. }
  87. if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys():
  88. raise TypeError('Complex math binary op expecting Tensor [complex64, complex64],'
  89. + '[complex64, float32], [float32, complex64], [complex128, complex128],'
  90. + '[complex128, float64], [float64, complex128],'
  91. + f'but got : [{format(x_dtype)},{format(y_dtype)}].')
  92. return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))
  93. validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
  94. return x_dtype
  95. def infer_dtype(self, x_dtype, y_dtype):
  96. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
  97. class _BitwiseBinaryOp(_MathBinaryOp):
  98. """
  99. Define bitwise binary operators.
  100. """
  101. @prim_attr_register
  102. def __init__(self):
  103. """Initialize _BitwiseBinaryOp"""
  104. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
  105. @staticmethod
  106. def _check_bitwise_op_input_type(x1_type, x2_type, prim):
  107. args = {'x1': x1_type, 'x2': x2_type}
  108. valid_dtypes = mstype.int_type + mstype.uint_type
  109. validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
  110. return x1_type
  111. def infer_dtype(self, x1_type, x2_type):
  112. return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)
class Ger(Primitive):
    r"""
    Ger product of `x1` and `x2`. Calculates the outer product of two one-dimensional arrays. If `x1` is a 1D
    Tensor of shape :math:`(m,)` and `x2` is a 1D Tensor of shape :math:`(n,)`, then `output` is a 2D Tensor of
    shape :math:`(m, n)`.

    Inputs:
        - **x1** - (Tensor) - 1-D input Tensor, with dtype of float16 or float32.
        - **x2** - (Tensor) - 1-D input Tensor, with dtype of float16 or float32.

    Outputs:
        Tensor, output matrix with the same dtype as inputs. With `x1` shape :math:`(m,)` and
        `x2` shape of :math:`(n,)`, `output` has shape :math:`(m, n)`.

    Raises:
        TypeError: If `x1` or `x2` is not a Tensor.
        TypeError: If the dtype of `x1` and `x2` is neither float16 nor float32.
        ValueError: If `x1` or `x2` is not a 1D Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x1 = Tensor([1., 2., 3., 4.], mindspore.float32)
        >>> x2 = Tensor([1., 2., 3.], mindspore.float32)
        >>> ger = ops.Ger()
        >>> output = ger(x1, x2)
        >>> print(output)
        [[ 1.  2.  3.]
         [ 2.  4.  6.]
         [ 3.  6.  9.]
         [ 4.  8. 12.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Ger"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
  145. class Add(_MathBinaryOp):
  146. r"""
  147. Adds two input tensors element-wise.
  148. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  149. The inputs must be two tensors or one tensor and one scalar.
  150. When the inputs are two tensors,
  151. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  152. When the inputs are one tensor and one scalar,
  153. the scalar could only be a constant.
  154. .. math::
  155. out_{i} = x_{i} + y_{i}
  156. Inputs:
  157. - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  158. or a tensor whose data type is number or bool.
  159. - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  160. is a tensor, or a tensor whose data type is number or bool.
  161. Outputs:
  162. Tensor, the shape is the same as the one after broadcasting,
  163. and the data type is the one with higher precision or higher digits among the two inputs.
  164. Raises:
  165. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  166. Supported Platforms:
  167. ``Ascend`` ``GPU`` ``CPU``
  168. Examples:
  169. >>> # case 1: x and y are both Tensor.
  170. >>> add = ops.Add()
  171. >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  172. >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
  173. >>> output = add(x, y)
  174. >>> print(output)
  175. [5. 7. 9.]
  176. >>> # case 2: x is a scalar and y is a Tensor
  177. >>> add = ops.Add()
  178. >>> x = Tensor(1, mindspore.int32)
  179. >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
  180. >>> output = add(x, y)
  181. >>> print(output)
  182. [5. 6. 7.]
  183. >>> # the data type of x is int32, the data type of y is float32,
  184. >>> # and the output is the data format of higher precision float32.
  185. >>> print(output.dtype)
  186. Float32
  187. """
  188. def infer_value(self, x, y):
  189. if x is not None and y is not None:
  190. x = x.asnumpy()
  191. y = y.asnumpy()
  192. out = x + y
  193. out = np.array(out, x.dtype)
  194. return Tensor(out)
  195. return None
  196. class TensorAdd(_MathBinaryOp):
  197. """
  198. Same as operator Add. TensorAdd will be deprecated in the future.
  199. Please use Add instead.
  200. """
  201. @deprecated("1.1", "Add", True)
  202. @prim_attr_register
  203. def __init__(self):
  204. """Initialize TensorAdd."""
  205. _MathBinaryOp.__init__(self)
  206. def infer_value(self, x, y):
  207. if x is not None and y is not None:
  208. x = x.asnumpy()
  209. y = y.asnumpy()
  210. out = x + y
  211. out = np.array(out, x.dtype)
  212. return Tensor(out)
  213. return None
class AssignAdd(Primitive):
    """
    Updates a `Parameter` by adding a value to it.

    Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    the relatively highest priority data type.
    If `value` is a number, the number is automatically converted to Tensor,
    and the data type is consistent with the Tensor data type involved in the operation.

    Note:
        Since `variable` is a data type Parameter, the data type cannot be changed,
        so only the type of `value` is allowed to be promoted to the type of `variable`.
        And the conversion type supported by different devices will be different,
        it is recommended to use the same data type when using this operator.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
          It must have the same shape as `variable` if it is a Tensor.
          It is recommended to use the same data type when using this operator.

    Outputs:
        Tensor, has the same data type and shape as original `variable`.

    Raises:
        TypeError: If `value` is neither Number nor Tensor.
        RuntimeError: If the data type of `variable` and `value` conversion of Parameter
                      is required when data type conversion of Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignAdd = ops.AssignAdd()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignAdd(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
        >>> output = net(value)
        >>> print(output)
        [101]
    """
    # 'ref' is writable (RW_WRITE): the op updates the Parameter in place.
    __mindspore_signature__ = (
        sig.make_sig('ref', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignAdd"""
        self.init_prim_io_names(inputs=['ref', 'value'], outputs=['ref'])
        # side_effect_mem: this op mutates its 'ref' input in place, so the
        # executor must preserve its ordering relative to other memory effects.
        self.add_prim_attr('side_effect_mem', True)
  267. class AssignSub(PrimitiveWithInfer):
  268. """
  269. Updates a `Parameter` by subtracting a value from it.
  270. Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
  271. If they have different data types, lower priority data type will be converted to
  272. the relatively highest priority data type.
  273. If `value` is a number, the number is automatically converted to Tensor,
  274. and the data type is consistent with the Tensor data type involved in the operation.
  275. Note:
  276. Since `variable` is a data type Parameter, the data type cannot be changed,
  277. so only the type of `value` is allowed to be promoted to the type of `variable`.
  278. And the conversion type supported by different devices will be different,
  279. it is recommended to use the same data type when using this operator.
  280. Inputs:
  281. - **variable** (Parameter) - The `Parameter`.
  282. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank be should be less than 8.
  283. - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
  284. It must have the same shape as `variable` if it is a Tensor.
  285. it is recommended to use the same data type when using this operator.
  286. Outputs:
  287. Tensor, has the same data type and shape as original `variable`.
  288. Raises:
  289. TypeError: If `value` is neither Number nor Tensor.
  290. RuntimeError: If the data type of `x`, `y` conversion of Parameter is required
  291. when data type conversion of Parameter is not supported.
  292. Supported Platforms:
  293. ``Ascend``
  294. Examples:
  295. >>> class Net(nn.Cell):
  296. ... def __init__(self):
  297. ... super(Net, self).__init__()
  298. ... self.AssignSub = ops.AssignSub()
  299. ... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
  300. ...
  301. ... def construct(self, x):
  302. ... self.AssignSub(self.variable, x)
  303. ... return self.variable
  304. ...
  305. >>> net = Net()
  306. >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
  307. >>> output = net(value)
  308. >>> print(output)
  309. [-99]
  310. """
  311. __mindspore_signature__ = (
  312. sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  313. sig.make_sig('value', dtype=sig.sig_dtype.T)
  314. )
  315. @prim_attr_register
  316. def __init__(self):
  317. """Initialize AssignSub"""
  318. self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
  319. self.add_prim_attr('side_effect_mem', True)
  320. def infer_shape(self, variable, value):
  321. return value
  322. def infer_dtype(self, variable, value):
  323. args = {"variable": variable, "value": value}
  324. validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
  325. return value
  326. class _Reduce(PrimitiveWithInfer):
  327. """
  328. Definition of base class of reduction class operators.
  329. Args:
  330. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  331. If false, don't keep these dimensions. Default: False.
  332. """
  333. __mindspore_signature__ = (
  334. sig.make_sig('input_x'),
  335. sig.make_sig('axis', default=())
  336. )
  337. @prim_attr_register
  338. def __init__(self, keep_dims=False):
  339. """Initialize Reduce"""
  340. validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
  341. self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
  342. def __call__(self, x, axis=()):
  343. args = [x, axis]
  344. output = _run_op(self, self.name, args)
  345. return output
  346. def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
  347. """ return meta infos of input parameters """
  348. axis_v = axis['value']
  349. input_shp = input_x['shape']
  350. args = {'input_x': input_x['dtype']}
  351. validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
  352. if not isinstance(axis['dtype'], mstype.tensor_type) and axis_v is None:
  353. raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
  354. if -1 in input_shp:
  355. if axis_v is None:
  356. max_v = max(input_shp)
  357. if 'max_shape' and 'min_shape' in input_x:
  358. input_max_shp = input_x['max_shape']
  359. max_v = max(input_max_shp)
  360. axis_shape_list = axis['shape']
  361. if len(axis_shape_list) != 1:
  362. raise ValueError(f"For '{self.name}', the shape of 'axis' must be 1-D, but "
  363. f"got {len(axis_shape_list)}.")
  364. axis_shape = axis_shape_list[0]
  365. if axis_shape == -1 and not self.keep_dims:
  366. out_shape = np.array([-2]).tolist()
  367. output_min_shape = input_x['min_shape']
  368. output_max_shape = input_x['max_shape']
  369. elif not self.keep_dims:
  370. out_shape = -1 * np.ones_like(input_shp[:-axis_shape])
  371. out_shape = out_shape.tolist()
  372. output_min_shape = np.ones_like(out_shape).tolist()
  373. output_max_shape = max_v * np.ones_like(out_shape)
  374. output_max_shape = output_max_shape.tolist()
  375. else:
  376. out_shape = -1 * np.ones_like(input_shp)
  377. out_shape = out_shape.tolist()
  378. output_min_shape = np.ones_like(input_shp).tolist()
  379. output_max_shape = max_v * np.ones_like(input_shp)
  380. output_max_shape = output_max_shape.tolist()
  381. else:
  382. output_max_shape = _infer_shape_reduce(input_x['max_shape'], axis_v, self.keep_dims, self.name)
  383. output_min_shape = _infer_shape_reduce(input_x['min_shape'], axis_v, self.keep_dims, self.name)
  384. out_shape = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
  385. else:
  386. if axis_v is None:
  387. raise ValueError(f"For {self.name}, axis must be const, its value cannot be None.")
  388. out_shape = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
  389. output_max_shape = out_shape
  390. output_min_shape = out_shape
  391. value = None
  392. if input_x['value'] is not None:
  393. prim_map = {
  394. 'ReduceSum': np.sum,
  395. 'ReduceMax': np.max,
  396. 'ReduceMin': np.min,
  397. }
  398. np_reduce_func = prim_map.get(self.name, None)
  399. if np_reduce_func is not None:
  400. value = input_x['value'].asnumpy()
  401. if isinstance(axis_v, int):
  402. pass
  403. elif axis_v:
  404. axis_v = tuple(set(axis_v))
  405. else:
  406. axis_v = tuple(range(len(input_x['shape'])))
  407. value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
  408. value = np.array(value)
  409. value = Tensor(value)
  410. return {'shape': out_shape,
  411. 'min_shape': output_min_shape,
  412. 'max_shape': output_max_shape,
  413. 'dtype': input_x['dtype'],
  414. 'value': value}
  415. def __infer__(self, input_x, axis):
  416. return self.do_infer(input_x, axis)
class ReduceMean(_Reduce):
    """
    Reduces a dimension of a tensor by averaging all elements in the dimension, by default. It can also reduce
    a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions. Default: False.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the mean of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMean(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[5.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along the axis 0
        >>> output = op(x, 0)
        >>> print(output)
        [[[4. 4. 4. 4. 4. 4.]
          [5. 5. 5. 5. 5. 5.]
          [6. 6. 6. 6. 6. 6.]]]
        >>> # case 3: Reduces a dimension along the axis 1
        >>> output = op(x, 1)
        >>> print(output)
        [[[2. 2. 2. 2. 2. 2.]]
         [[5. 5. 5. 5. 5. 5.]]
         [[8. 8. 8. 8. 8. 8.]]]
        >>> # case 4: Reduces a dimension along the axis 2
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.       ]
          [2.       ]
          [3.       ]]
         [[4.       ]
          [5.       ]
          [6.       ]]
         [[7.0000005]
          [8.       ]
          [9.       ]]]
    """
class ReduceSum(_Reduce):
    """
    Reduces a dimension of a tensor by summing all elements in the dimension, by default. It can also reduce
    a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions. Default: False.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the sum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is None.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceSum(keep_dims=True)
        >>> output = op(x, 1)
        >>> output.shape
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by summing all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[270.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[12. 12. 12. 12. 12. 12.]
          [15. 15. 15. 15. 15. 15.]
          [18. 18. 18. 18. 18. 18.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[ 6.  6.  6.  6.  6.  6.]]
         [[15. 15. 15. 15. 15. 15.]]
         [[24. 24. 24. 24. 24. 24.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[ 6.]
          [12.]
          [18.]]
         [[24.]
          [30.]
          [36.]]
         [[42.]
          [48.]
          [54.]]]
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceSum"""
        super(ReduceSum, self).__init__(keep_dims)
        self.__setattr_flag__ = True
class ReduceAll(_Reduce):
    """
    Reduces a dimension of a tensor by the "logical AND" of all elements in the dimension, by default. And also can
    reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = ops.ReduceAll(keep_dims=True)
        >>> # case 1: Reduces a dimension by the "logical AND" of all elements in the dimension.
        >>> output = op(x)
        >>> print(output)
        [[False]]
        >>> print(output.shape)
        (1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[ True False]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[False]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        # Logical reduction is only defined for bool tensors, hence the
        # single-element valid-dtype tuple.
        return self.do_infer(input_x, axis, (mstype.bool_,))
class ReduceAny(_Reduce):
    """
    Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
    reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = ops.ReduceAny(keep_dims=True)
        >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
        >>> output = op(x)
        >>> print(output)
        [[ True]]
        >>> print(output.shape)
        (1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[ True True]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[True]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        # Logical reduction is only defined for bool tensors, hence the
        # single-element valid-dtype tuple.
        return self.do_infer(input_x, axis, (mstype.bool_,))
class ReduceMax(_Reduce):
    """
    Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can
    reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the maximum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMax(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[9.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[7. 7. 7. 7. 7. 7.]
          [8. 8. 8. 8. 8. 8.]
          [9. 9. 9. 9. 9. 9.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[3. 3. 3. 3. 3. 3.]]
         [[6. 6. 6. 6. 6. 6.]]
         [[9. 9. 9. 9. 9. 9.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.]
          [2.]
          [3.]]
         [[4.]
          [5.]
          [6.]]
         [[7.]
          [8.]
          [9.]]]
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceMax."""
        super(ReduceMax, self).__init__(keep_dims)
        # Allow attributes to be set on this primitive instance after construction.
        self.__setattr_flag__ = True

    def __infer__(self, input_x, axis):
        # In addition to all number dtypes, bool input is explicitly accepted here.
        return self.do_infer(input_x, axis, mstype.number_type + (mstype.bool_,))
class ReduceMin(_Reduce):
    """
    Reduces a dimension of a tensor by the minimum value in the dimension, by default. And also can
    reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the minimum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMin(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[1.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[1. 1. 1. 1. 1. 1.]
          [2. 2. 2. 2. 2. 2.]
          [3. 3. 3. 3. 3. 3.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[1. 1. 1. 1. 1. 1.]]
         [[4. 4. 4. 4. 4. 4.]]
         [[7. 7. 7. 7. 7. 7.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.]
          [2.]
          [3.]]
         [[4.]
          [5.]
          [6.]]
         [[7.]
          [8.]
          [9.]]]
    """
class ReduceProd(_Reduce):
    """
    Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
    reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
    controlling `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the product of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceProd(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[2.2833798e+33]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[ 28. 28. 28. 28. 28. 28.]
          [ 80. 80. 80. 80. 80. 80.]
          [162. 162. 162. 162. 162. 162.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[ 6. 6. 6. 6. 6. 6.]]
         [[120. 120. 120. 120. 120. 120.]]
         [[504. 504. 504. 504. 504. 504.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.00000e+00]
          [6.40000e+01]
          [7.29000e+02]]
         [[4.09600e+03]
          [1.56250e+04]
          [4.66560e+04]]
         [[1.17649e+05]
          [2.62144e+05]
          [5.31441e+05]]]
    """
class CumProd(PrimitiveWithInfer):
    """
    Computes the cumulative product of the tensor x along axis.

    For example, if input is a vector of size N, the result will also be a vector of size N, with elements.

    .. math::

        y_i = x_1 * x_2 * x_3 * ... * x_i

    Args:
        exclusive (bool): If true, perform exclusive cumulative product. Default: False.
        reverse (bool): If true, reverse the result along axis. Default: False.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **axis** (int) - The dimensions to compute the cumulative product.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same shape and dtype as the `x`.

    Raises:
        TypeError: If `exclusive` or `reverse` is not a bool.
        ValueError: If `axis` is None.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> a, b, c, = 1, 2, 3
        >>> x = Tensor(np.array([a, b, c]).astype(np.float32))
        >>> op0 = ops.CumProd()
        >>> output0 = op0(x, 0) # output=[a, a * b, a * b * c]
        >>> op1 = ops.CumProd(exclusive=True)
        >>> output1 = op1(x, 0) # output=[1, a, a * b]
        >>> op2 = ops.CumProd(reverse=True)
        >>> output2 = op2(x, 0) # output=[a * b * c, b * c, c]
        >>> op3 = ops.CumProd(exclusive=True, reverse=True)
        >>> output3 = op3(x, 0) # output=[b * c, c, 1]
        >>> print(output0)
        [1. 2. 6.]
        >>> print(output1)
        [1. 1. 2.]
        >>> print(output2)
        [6. 6. 3.]
        >>> print(output3)
        [6. 3. 1.]
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [5, 3, 5]]).astype(np.float32))
        >>> output4 = op0(x, 0)
        >>> output5 = op0(x, 1)
        >>> print(output4)
        [[ 1. 2. 3.]
         [ 4. 10. 18.]
         [20. 30. 90.]]
        >>> print(output5)
        [[1. 2. 6.]
         [4. 20. 120.]
         [5. 15. 75.]]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """Initialize CumProd."""
        cls_name = self.name
        # check_value_type returns the validated value, so these assignments
        # both validate and store the flags.
        self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
        self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def infer_shape(self, x_shape, axis_shape):
        # Cumulative product preserves the input shape.
        return x_shape

    def infer_dtype(self, x_type, axis_type):
        """Validate dtypes: `x` must be a number tensor and `axis` an integer."""
        cls_name = self.name
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, cls_name)
        validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
        return x_type

    def infer_value(self, x, axis):
        # Only validates that `axis` is a known constant; no constant folding
        # is performed (implicitly returns None).
        if axis is None:
            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
class Cdist(Primitive):
    """
    Computes the batched p-norm distance between each pair of the two collections of row vectors.

    Args:
        p (float): P value for the p norm distance to calculate between each vector pair ∈[0,∞].

    Inputs:
        - **input_x** (Tensor) - Input tensor of shape :math:`(B, P, M)`.
          Letter :math:`B` represents 0 or positive int number.
          When :math:`B` is equal to 0, it means this dimension can be ignored,
          i.e. shape of the tensor is :math:`(P, M)`.
        - **input_y** (Tensor) - Input tensor of shape :math:`(B, R, M)`.

    Outputs:
        Tensor, has the same dtype as `input_x`, which shape is :math:`(B, P, R)`.

    Raises:
        TypeError: If `input_x` or `input_y` is not a Tensor.
        TypeError: If dtype of `input_x` or `input_y` is neither float16 nor float32.
        TypeError: If `p` is not a float.
        ValueError: If `p` is a negative float.
        ValueError: If dimension of `input_x` is not the same as `input_y`.
        ValueError: If dimension of `input_x` or `input_y` is neither 2 nor 3.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
        >>> input_y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
        >>> op = ops.Cdist(p=2.0)
        >>> output = op(input_x, input_y)
        >>> print(output)
        [[[2.8284273 2.8284273]
          [1.4142137 1.4142137]]]
    """

    @prim_attr_register
    def __init__(self, p=2.0):
        """Initialize Cdist."""
        # p must be a non-negative float; the kernel implements the p-norm.
        validator.check_value_type("p", p, [float], self.name)
        validator.check_non_negative_float(p, "p", self.name)
        self.init_prim_io_names(inputs=['input_x', 'input_y'], outputs=['output'])
class LpNorm(Primitive):
    """
    Returns the matrix norm or vector norm of a given tensor.

    .. math::

        output = sum(abs(input)**p)**(1/p)

    Args:
        axis(int,list,tuple): Specifies which dimension or dimensions of input to calculate the norm across.
        p(int): The order of norm. Default: 2.
        keep_dims(bool): Whether the output tensors have dim retained or not. Default: False.

    Inputs:
        - **input** (Tensor) - Input tensor.

    Outputs:
        Tensor, has the same dtype as `input`, whose shape depends on the arg `axis`. For example, if the size of
        input is (2, 3, 4), axis is [0, 1], the shape of Outputs will be (4,).

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is not one of: float16, float32.
        TypeError: If `p` is not an int.
        TypeError: If `axis` is not an int, a tuple or a list.
        TypeError: If `axis` is a tuple or a list, but the element of `axis` is not an int.
        TypeError: If `keep_dims` is not a bool.
        ValueError: If the element of `axis` is out of the range [-len(input.shape), len(input.shape)).
        ValueError: If the length of shape of `axis` is bigger than the length of shape of `input`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input = Tensor(np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32))
        >>> op = ops.LpNorm(axis=[0, 1], p=2, keep_dims=False)
        >>> output = op(input)
        >>> print(output)
        [ 9.165152 10.954452]
    """

    @prim_attr_register
    def __init__(self, axis, p=2, keep_dims=False, epsilon=1e-12):
        """Initialize LpNorm."""
        validator.check_value_type("p", p, [int], self.name)
        validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_non_negative_int(p, "p", self.name)
        validator.check_non_negative_float(epsilon, "epsilon", self.name)
        if isinstance(axis, int):
            # Normalize a scalar axis into a one-element list attribute.
            # NOTE(review): this reads `self.axis`, which is never assigned in
            # this method — presumably registered from the `axis` argument by
            # @prim_attr_register; confirm against the decorator's implementation.
            self.add_prim_attr('axis', [self.axis])
        else:
            for element_of_axis in axis:
                validator.check_value_type("element_of_axis", element_of_axis, [int], self.name)
        self.init_prim_io_names(inputs=['input'], outputs=['output'])
class MatMul(PrimitiveWithCheck):
    r"""
    Multiplies matrix `x` and matrix `y`.

    .. math::

        (Output)_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N

    where the :math:`i,j` indicates the output of the i-th row and j-th column element.

    Args:
        transpose_a (bool): If true, `x` is transposed before multiplication. Default: False.
        transpose_b (bool): If true, `y` is transposed before multiplication. Default: False.

    Inputs:
        - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
          `transpose_a` is True, its shape must be :math:`(N, C)` after transpose.
        - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
          `transpose_b` is True, its shape must be :math:`(C, M)` after transpose.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, M)`.

    Raises:
        TypeError: If `transpose_a` or `transpose_b` is not a bool.
        ValueError: If the column of matrix dimensions of `x` is not equal to
            the row of matrix dimensions of `y`.
        ValueError: If length of shape of `x` or `y` is not equal to 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
        >>> matmul = ops.MatMul()
        >>> output = matmul(x, y)
        >>> print(output)
        [[3. 3. 3. 3.]]
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        """Initialize MatMul."""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)

    def check_shape_size(self, x1, x2):
        """Reject anything but rank-2 inputs; batched cases belong to BatchMatMul."""
        if len(x1) != 2 or len(x2) != 2:
            raise ValueError(f"For '{self.name}', inputs 'x', 'y' should have the same dimension size and "
                             f"be equal to 2, but got the size of 'x': ({len(x1)}) and the size of 'y': ({len(x2)}).")

    def check_shape(self, x1, x2):
        """Check that the contracted dimensions of `x1` and `x2` match."""
        self.check_shape_size(x1, x2)
        cls_name = self.name
        # validate whether last two dims satisfying matrix multiply
        # NOTE(review): self.transpose_a / self.transpose_b are not assigned in
        # __init__ — presumably registered from the arguments by
        # @prim_attr_register; confirm against the decorator.
        x1_last = x1[-2:]
        x2_last = x2[-2:]
        # Select the contracted dim of each operand depending on the transpose flags.
        x1_col = x1_last[not self.transpose_a]
        x2_row = x2_last[self.transpose_b]
        # A dim of -1 marks a dynamic shape; skip the check unless all dims are known.
        if np.all(np.array(x1) != -1) and np.all(np.array(x2) != -1):
            if x1_col != x2_row:
                raise ValueError(f"For '{cls_name}', the input dimensions must be equal, but got 'x1_col': {x1_col} "
                                 f"and 'x2_row': {x2_row}. And 'x' shape {x1}(transpose_a={self.transpose_a}), "
                                 f"'y' shape {x2}(transpose_b={self.transpose_b}).")
        # set attribute
        self.add_prim_attr('transpose_x1', self.transpose_a)
        self.add_prim_attr('transpose_x2', self.transpose_b)

    def check_dtype(self, x1, x2):
        """Both inputs must share one dtype drawn from the float or int types."""
        args = {"x1": x1, "x2": x2}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type, self.name)
class BatchMatMul(MatMul):
    r"""
    Computes matrix multiplication between two tensors by batch.

    .. math::

        \text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])

    The rank of the first input tensor must be not less than `3` and the rank of the second input must be
    not less than `2`.

    Args:
        transpose_a (bool): If true, the last two dimensions of `x` is transposed before multiplication.
            Default: False.
        transpose_b (bool): If true, the last two dimensions of `y` is transposed before multiplication.
            Default: False.

    Inputs:
        - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
          where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
          size of the last two dimensions. If `transpose_a` is True, its shape must be :math:`(*B, C, N)`.
        - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
          `transpose_b` is True, its shape must be :math:`(*B, M, C)`.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(*B, N, M)`.

    Raises:
        TypeError: If `transpose_a` or `transpose_b` is not a bool.
        ValueError: If length of shape of `x` is not equal to length of shape of `y` or
            length of shape of `x` is less than 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = ops.BatchMatMul()
        >>> output = batmatmul(x, y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
        >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = ops.BatchMatMul(transpose_a=True)
        >>> output = batmatmul(x, y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        """Initialize BatchMatMul."""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)

    def check_shape_size(self, x, y):
        # Overrides MatMul's rank-2-only check: batched x needs rank >= 3,
        # y needs rank >= 2.
        if len(x) < 3 or len(y) < 2:
            raise ValueError(f"For '{self.name}', input 'x' should be greater than or equal to 3, input 'y' should "
                             f"be greater than or equal to 2, but got 'x' size: {len(x)}, 'y' size: {len(y)}.")
class CumSum(PrimitiveWithInfer):
    """
    Computes the cumulative sum of input tensor along axis.

    .. math::

        y_i = x_1 + x_2 + x_3 + ... + x_i

    Args:
        exclusive (bool): If true, perform exclusive mode. Default: False.
        reverse (bool): If true, perform inverse cumulative sum. Default: False.

    Inputs:
        - **input** (Tensor) - The input tensor to accumulate.
        - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
          Must be in the range [-rank(input), rank(input)).

    Outputs:
        Tensor, the shape of the output tensor is consistent with the input tensor's.

    Raises:
        TypeError: If `exclusive` or `reverse` is not a bool.
        TypeError: If `axis` is not an int.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
        >>> cumsum = ops.CumSum()
        >>> # case 1: along the axis 0
        >>> y = cumsum(x, 0)
        >>> print(y)
        [[ 3. 4. 6. 10.]
         [ 4. 10. 13. 19.]
         [ 8. 13. 21. 26.]
         [ 9. 16. 28. 35.]]
        >>> # case 2: along the axis 1
        >>> y = cumsum(x, 1)
        >>> print(y)
        [[ 3. 7. 13. 23.]
         [ 1. 7. 14. 23.]
         [ 4. 7. 15. 22.]
         [ 1. 4. 11. 20.]]
        >>> # Next demonstrate exclusive and reverse, along axis 1
        >>> # case 3: exclusive = True
        >>> cumsum = ops.CumSum(exclusive=True)
        >>> y = cumsum(x, 1)
        >>> print(y)
        [[ 0. 3. 7. 13.]
         [ 0. 1. 7. 14.]
         [ 0. 4. 7. 15.]
         [ 0. 1. 4. 11.]]
        >>> # case 4: reverse = True
        >>> cumsum = ops.CumSum(reverse=True)
        >>> y = cumsum(x, 1)
        >>> print(y)
        [[23. 20. 16. 10.]
         [23. 22. 16. 9.]
         [22. 18. 15. 7.]
         [20. 19. 16. 9.]]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """Initialize CumSum."""
        cls_name = self.name
        validator.check_value_type('exclusive', exclusive, [bool], cls_name)
        validator.check_value_type('reverse', reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def __infer__(self, x, axis):
        """Infer shape/dtype: output matches `x`; `axis` must be a constant int."""
        cls_name = self.name
        x_shp = x['shape']
        # `axis` must be known at compile time for the kernel to be emitted.
        if axis['value'] is None:
            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
        validator.check_value_type('axis', axis['value'], [int], cls_name)
        valid_dtypes = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32, mstype.float64]
        validator.check_tensor_dtype_valid('x', x['dtype'], valid_dtypes, cls_name)
        # Cumulative sum preserves shape and dtype; no constant folding here.
        return {'shape': x_shp,
                'dtype': x['dtype'],
                'value': None}
class AddN(Primitive):
    """
    Computes addition of all input tensors element-wise.

    All input tensors must have the same shape.

    Inputs:
        - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors whose dtype is number or bool to be added together.

    Outputs:
        Tensor, has the same shape and dtype as each entry of the `x`.

    Raises:
        TypeError: If `x` is neither tuple nor list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> class NetAddN(nn.Cell):
        ...     def __init__(self):
        ...         super(NetAddN, self).__init__()
        ...         self.addN = ops.AddN()
        ...
        ...     def construct(self, *z):
        ...         return self.addN(z)
        ...
        >>> net = NetAddN()
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = net(x, y, x, y)
        >>> print(output)
        [10. 14. 18.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize AddN."""
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        # NOTE(review): appears to be an elimination hook — an AddN over a
        # single tensor is the identity, so it reports (True, that_tensor) to
        # let the caller replace the op; confirm against the optimizer pass.
        if len(inputs) != 1:
            return False, None
        if isinstance(inputs[0], Tensor):
            return True, inputs[0]
        raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, but "
                        f"got {type(inputs[0]).__name__}, "
                        f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")
  1260. class AccumulateNV2(Primitive):
  1261. """
  1262. Computes accumulation of all input tensors element-wise.
  1263. AccumulateNV2 is similar to AddN, but there is a significant difference
  1264. among them: AccumulateNV2 will not wait for all of its inputs to be ready
  1265. before summing. That is to say, AccumulateNV2 is able to save
  1266. memory when inputs are ready at different time since the minimum temporary
  1267. storage is proportional to the output size rather than the input size.
  1268. Inputs:
  1269. - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
  1270. is made up of multiple tensors whose dtype is number to be added together.
  1271. Each element of tuple or list should have the same shape.
  1272. Outputs:
  1273. Tensor, has the same shape and dtype as each entry of the `x`.
  1274. Raises:
  1275. TypeError: If `x` is neither tuple nor list.
  1276. ValueError: If there is an input element with a different shape.
  1277. Supported Platforms:
  1278. ``Ascend``
  1279. Examples:
  1280. >>> class NetAccumulateNV2(nn.Cell):
  1281. ... def __init__(self):
  1282. ... super(NetAccumulateNV2, self).__init__()
  1283. ... self.accumulateNV2 = ops.AccumulateNV2()
  1284. ...
  1285. ... def construct(self, *z):
  1286. ... return self.accumulateNV2(z)
  1287. ...
  1288. >>> net = NetAccumulateNV2()
  1289. >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1290. >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
  1291. >>> output = net(x, y, x, y)
  1292. >>> print(output)
  1293. [10. 14. 18.]
  1294. """
  1295. @prim_attr_register
  1296. def __init__(self):
  1297. """Initialize AccumulateNV2."""
  1298. self.__setattr_flag__ = True
  1299. self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
  1300. def check_elim(self, inputs):
  1301. if len(inputs) != 1:
  1302. return False, None
  1303. if isinstance(inputs[0], Tensor):
  1304. return True, inputs[0]
  1305. raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, "
  1306. f"but got {type(inputs[0]).__name__}, "
  1307. f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")
  1308. class Neg(PrimitiveWithInfer):
  1309. """
  1310. Returns a tensor with negative values of the input tensor element-wise.
  1311. .. math::
  1312. out_{i} = - x_{i}
  1313. Inputs:
  1314. - **x** (Tensor) - The input tensor whose dtype is number.
  1315. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1316. Outputs:
  1317. Tensor, has the same shape and dtype as input.
  1318. Raises:
  1319. TypeError: If `x` is not a Tensor.
  1320. Supported Platforms:
  1321. ``Ascend`` ``GPU`` ``CPU``
  1322. Examples:
  1323. >>> neg = ops.Neg()
  1324. >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
  1325. >>> output = neg(x)
  1326. >>> print(output)
  1327. [-1. -2. 1. -2. 0. 3.5]
  1328. """
  1329. @prim_attr_register
  1330. def __init__(self):
  1331. """Initialize Neg"""
  1332. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1333. def infer_shape(self, x_shape):
  1334. return x_shape
  1335. def infer_dtype(self, x_dtype):
  1336. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  1337. return x_dtype
  1338. def infer_value(self, input_x):
  1339. if input_x is not None:
  1340. input_x = input_x.asnumpy()
  1341. out = np.array(-input_x, input_x.dtype)
  1342. return Tensor(out)
  1343. return None
  1344. class InplaceAdd(PrimitiveWithInfer):
  1345. """
  1346. Adds v into specified rows of x. Computes y = x; y[i,] += v.
  1347. Args:
  1348. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  1349. to add with v. It is an integer or a tuple, whose value is in [0, the first dimension size of x).
  1350. Inputs:
  1351. - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  1352. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1353. - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except
  1354. the first dimension, which must be the same as indices' size. It has the same data type with `x`.
  1355. Outputs:
  1356. Tensor, has the same shape and dtype as x.
  1357. Raises:
  1358. TypeError: If `indices` is neither int nor tuple.
  1359. TypeError: If `indices` is a tuple whose elements are not all int.
  1360. ValueError: If length of shape of `x` is not equal to length of shape of `input_v`.
  1361. Supported Platforms:
  1362. ``Ascend``
  1363. Examples:
  1364. >>> indices = (0, 1)
  1365. >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  1366. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  1367. >>> inplaceAdd = ops.InplaceAdd(indices)
  1368. >>> output = inplaceAdd(x, input_v)
  1369. >>> print(output)
  1370. [[1.5 3. ]
  1371. [4. 5.5]
  1372. [5. 6. ]]
  1373. """
  1374. @prim_attr_register
  1375. def __init__(self, indices):
  1376. """Initialize InplaceAdd"""
  1377. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  1378. self.indices = indices
  1379. validator.check_value_type('indices', indices, [tuple, int], self.name)
  1380. if isinstance(indices, int):
  1381. self.indices = (indices,)
  1382. for item in self.indices:
  1383. validator.check_value_type("item of indices", item, [int], self.name)
  1384. def infer_dtype(self, x_dtype, v_dtype):
  1385. args = {'x': x_dtype, 'v': v_dtype}
  1386. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  1387. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  1388. return x_dtype
  1389. def infer_shape(self, x_shape, v_shape):
  1390. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  1391. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  1392. Rel.EQ, self.name)
  1393. for i in self.indices:
  1394. if i < 0 or i >= x_shape[0]:
  1395. raise ValueError(f"For '{self.name}', the value of 'indices' must be "
  1396. f"in [0, {x_shape[0]}), but got {i}.")
  1397. x_rank = len(x_shape)
  1398. for idx in range(x_rank)[1:]:
  1399. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  1400. return x_shape
  1401. class InplaceSub(PrimitiveWithInfer):
  1402. """
  1403. Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v.
  1404. Args:
  1405. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  1406. to subtract with v. It is a int or tuple, whose value is in [0, the first dimension size of x).
  1407. Inputs:
  1408. - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  1409. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1410. - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
  1411. the first dimension, which must be the same as indices' size. It has the same data type with `x`.
  1412. Outputs:
  1413. Tensor, has the same shape and dtype as x.
  1414. Raises:
  1415. TypeError: If `indices` is neither int nor tuple.
  1416. TypeError: If `indices` is a tuple whose elements are not all int.
  1417. ValueError: If length of shape of `x` is not equal to length of shape of `input_v`.
  1418. Supported Platforms:
  1419. ``Ascend``
  1420. Examples:
  1421. >>> indices = (0, 1)
  1422. >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  1423. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  1424. >>> inplaceSub = ops.InplaceSub(indices)
  1425. >>> output = inplaceSub(x, input_v)
  1426. >>> print(output)
  1427. [[0.5 1. ]
  1428. [2. 2.5]
  1429. [5. 6. ]]
  1430. """
  1431. @prim_attr_register
  1432. def __init__(self, indices):
  1433. """Initialize InplaceSub"""
  1434. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  1435. self.indices = indices
  1436. validator.check_value_type('indices', indices, [tuple, int], self.name)
  1437. if isinstance(indices, int):
  1438. self.indices = (indices,)
  1439. for item in self.indices:
  1440. validator.check_value_type("item of indices", item, [int], self.name)
  1441. def infer_dtype(self, x_dtype, v_dtype):
  1442. args = {'x': x_dtype, 'v': v_dtype}
  1443. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  1444. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  1445. return x_dtype
  1446. def infer_shape(self, x_shape, v_shape):
  1447. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  1448. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  1449. Rel.EQ, self.name)
  1450. for i in self.indices:
  1451. if i < 0 or i >= x_shape[0]:
  1452. raise ValueError(f"For '{self.name}', the value of 'indices' must be "
  1453. f"in [0, {x_shape[0]}), but got {i}.")
  1454. x_rank = len(x_shape)
  1455. for idx in range(x_rank)[1:]:
  1456. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  1457. return x_shape
  1458. class Sub(_MathBinaryOp):
  1459. """
  1460. Subtracts the second input tensor from the first input tensor element-wise.
  1461. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  1462. The inputs must be two tensors or one tensor and one scalar.
  1463. When the inputs are two tensors,
  1464. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  1465. When the inputs are one tensor and one scalar,
  1466. the scalar could only be a constant.
  1467. .. math::
  1468. out_{i} = x_{i} - y_{i}
  1469. Inputs:
  1470. - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1471. or a tensor whose data type is number or bool.
  1472. - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  1473. is a tensor, or a tensor whose data type is number or bool.
  1474. Outputs:
  1475. Tensor, the shape is the same as the one after broadcasting,
  1476. and the data type is the one with higher precision or higher digits among the two inputs.
  1477. Raises:
  1478. TypeError: If `x` and `y` is not a Number or a bool or a Tensor.
  1479. Supported Platforms:
  1480. ``Ascend`` ``GPU`` ``CPU``
  1481. Examples:
  1482. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1483. >>> y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  1484. >>> sub = ops.Sub()
  1485. >>> output = sub(x, y)
  1486. >>> print(output)
  1487. [-3 -3 -3]
  1488. """
  1489. def infer_value(self, x, y):
  1490. if x is not None and y is not None:
  1491. x = x.asnumpy()
  1492. y = y.asnumpy()
  1493. out = x - y
  1494. out = np.array(out, x.dtype)
  1495. return Tensor(out)
  1496. return None
  1497. class Mul(_MathBinaryOp):
  1498. """
  1499. Multiplies two tensors element-wise.
  1500. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  1501. The inputs must be two tensors or one tensor and one scalar.
  1502. When the inputs are two tensors,
  1503. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  1504. When the inputs are one tensor and one scalar,
  1505. the scalar could only be a constant.
  1506. .. math::
  1507. out_{i} = x_{i} * y_{i}
  1508. Inputs:
  1509. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  1510. a bool or a tensor whose data type is number or bool.
  1511. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  1512. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1513. Outputs:
  1514. Tensor, the shape is the same as the one after broadcasting,
  1515. and the data type is the one with higher precision or higher digits among the two inputs.
  1516. Raises:
  1517. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  1518. ValueError: If `x` and `y` are not the same shape.
  1519. Supported Platforms:
  1520. ``Ascend`` ``GPU`` ``CPU``
  1521. Examples:
  1522. >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1523. >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  1524. >>> mul = ops.Mul()
  1525. >>> output = mul(x, y)
  1526. >>> print(output)
  1527. [ 4. 10. 18.]
  1528. """
  1529. def infer_value(self, x, y):
  1530. if x is not None and y is not None:
  1531. x = x.asnumpy()
  1532. y = y.asnumpy()
  1533. out = x * y
  1534. out = np.array(out, x.dtype)
  1535. return Tensor(out)
  1536. return None
class SquaredDifference(_MathBinaryOp):
    """
    Subtracts the second input tensor from the first input tensor element-wise and returns square of it.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. math::

        out_{i} = (x_{i} - y_{i}) * (x_{i} - y_{i}) = (x_{i} - y_{i})^2

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is float16, float32, float64, int32 or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor or a tensor whose data type is float16, float32, float64, int32 or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: if `x` and `y` is not a Number or a bool or a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
        >>> squared_difference = ops.SquaredDifference()
        >>> output = squared_difference(x, y)
        >>> print(output)
        [1. 4. 9.]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        # Narrower dtype set than the generic _MathBinaryOp default;
        # note float64 is accepted here even though older docs omitted it.
        valid_type = [mstype.float16, mstype.float32, mstype.float64, mstype.int32]
        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name)
class Square(Primitive):
    """
    Returns square of a tensor element-wise.

    .. math::

        out_{i} = (x_{i})^2

    Inputs:
        - **x** (Tensor) - The input tensor whose dtype is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape and dtype as the `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> square = ops.Square()
        >>> output = square(x)
        >>> print(output)
        [1. 4. 9.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Square"""
        # Plain Primitive: shape/dtype inference is handled by the backend, not here.
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
class Rsqrt(PrimitiveWithInfer):
    r"""
    Computes reciprocal of square root of input tensor element-wise.

    .. math::

        out_{i} =  \frac{1}{\sqrt{x_{i}}}

    Inputs:
        - **x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same type and shape as `x`.

    Raises:
        TypeError: If dtype of `x` is not a number type.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
        >>> rsqrt = ops.Rsqrt()
        >>> output = rsqrt(input_tensor)
        >>> print(output)
        [[0.5        0.5       ]
         [0.33333334 0.33333334]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Rsqrt"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape equals input shape.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Accepts any number dtype (validated here), not only float16/float32.
        validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
        return x_dtype

    def infer_value(self, x):
        # Constant-fold 1/sqrt(x) when the input value is known at compile time.
        if x is not None:
            x = x.asnumpy()
            out = 1.0 / np.sqrt(x)
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None
  1634. class Sqrt(PrimitiveWithCheck):
  1635. r"""
  1636. Returns square root of a tensor element-wise.
  1637. .. math::
  1638. out_{i} = \sqrt{x_{i}}
  1639. Inputs:
  1640. - **x** (Tensor) - The input tensor whose dtype is number.
  1641. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1642. Outputs:
  1643. Tensor, has the same shape and data type as the `x`.
  1644. Raises:
  1645. TypeError: If `x` is not a Tensor.
  1646. Supported Platforms:
  1647. ``Ascend`` ``GPU`` ``CPU``
  1648. Examples:
  1649. >>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  1650. >>> sqrt = ops.Sqrt()
  1651. >>> output = sqrt(x)
  1652. >>> print(output)
  1653. [1. 2. 3.]
  1654. """
  1655. @prim_attr_register
  1656. def __init__(self):
  1657. """Initialize Sqrt"""
  1658. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1659. def check_dtype(self, x_type):
  1660. validator.check_tensor_dtype_valid("x", x_type, mstype.number_type, self.name)
  1661. def infer_value(self, x):
  1662. """Infer the value of input for Sqrt."""
  1663. if x is not None:
  1664. x = x.asnumpy()
  1665. out = np.sqrt(x)
  1666. out = np.array(out, x.dtype)
  1667. return Tensor(out)
  1668. return None
  1669. class Reciprocal(PrimitiveWithInfer):
  1670. r"""
  1671. Returns reciprocal of a tensor element-wise.
  1672. .. math::
  1673. out_{i} = \frac{1}{x_{i}}
  1674. Inputs:
  1675. - **x** (Tensor) - The input tensor.
  1676. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1677. Outputs:
  1678. Tensor, has the same shape as the `x`.
  1679. Raises:
  1680. TypeError: If `x` is not a Tensor.
  1681. Supported Platforms:
  1682. ``Ascend`` ``GPU`` ``CPU``
  1683. Examples:
  1684. >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1685. >>> reciprocal = ops.Reciprocal()
  1686. >>> output = reciprocal(x)
  1687. >>> print(output)
  1688. [1. 0.5 0.25]
  1689. """
  1690. @prim_attr_register
  1691. def __init__(self):
  1692. """Initialize Reciprocal"""
  1693. if context.get_context("device_target") == "GPU":
  1694. self.target = "GPU"
  1695. else:
  1696. self.target = "OTHER"
  1697. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1698. def infer_shape(self, x):
  1699. return x
  1700. def infer_dtype(self, x):
  1701. validator.check_subclass("x", x, mstype.tensor, self.name)
  1702. return x
  1703. def infer_value(self, x):
  1704. if x is not None:
  1705. x = x.asnumpy()
  1706. out = 1.0 / x
  1707. out = np.array(out, x.dtype)
  1708. return Tensor(out)
  1709. return None
  1710. class Pow(_MathBinaryOp):
  1711. """
  1712. Computes a tensor to the power of the second input.
  1713. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  1714. The inputs must be two tensors or one tensor and one scalar.
  1715. When the inputs are two tensors,
  1716. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  1717. When the inputs are one tensor and one scalar,
  1718. the scalar could only be a constant.
  1719. .. math::
  1720. out_{i} = x_{i} ^{ y_{i}}
  1721. Inputs:
  1722. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  1723. a bool or a tensor whose data type is number or bool.
  1724. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  1725. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1726. Outputs:
  1727. Tensor, the shape is the same as the one after broadcasting,
  1728. and the data type is the one with higher precision or higher digits among the two inputs.
  1729. Raises:
  1730. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  1731. ValueError: If `x` and `y` are not the same shape.
  1732. Supported Platforms:
  1733. ``Ascend`` ``GPU`` ``CPU``
  1734. Examples:
  1735. >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1736. >>> y = 3.0
  1737. >>> pow = ops.Pow()
  1738. >>> output = pow(x, y)
  1739. >>> print(output)
  1740. [ 1. 8. 64.]
  1741. >>>
  1742. >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1743. >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
  1744. >>> pow = ops.Pow()
  1745. >>> output = pow(x, y)
  1746. >>> print(output)
  1747. [ 1. 16. 64.]
  1748. """
  1749. def infer_value(self, x, power):
  1750. if x is not None and power is not None:
  1751. x = x.asnumpy()
  1752. power = power.asnumpy()
  1753. out = np.power(x, power)
  1754. out = np.array(out, x.dtype)
  1755. return Tensor(out)
  1756. return None
  1757. class Exp(PrimitiveWithInfer):
  1758. r"""
  1759. Returns exponential of a tensor element-wise.
  1760. .. math::
  1761. out_i = e^{x_i}
  1762. Inputs:
  1763. - **x** (Tensor) - The input tensor.
  1764. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1765. Outputs:
  1766. Tensor, has the same shape and dtype as the `x`.
  1767. Raises:
  1768. TypeError: If `x` is not a Tensor.
  1769. Supported Platforms:
  1770. ``Ascend`` ``GPU`` ``CPU``
  1771. Examples:
  1772. >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1773. >>> exp = ops.Exp()
  1774. >>> output = exp(x)
  1775. >>> print(output)
  1776. [ 2.718282 7.389056 54.598152]
  1777. """
  1778. @prim_attr_register
  1779. def __init__(self):
  1780. """Initialize Exp"""
  1781. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1782. def infer_shape(self, x_shape):
  1783. return x_shape
  1784. def infer_dtype(self, x_type):
  1785. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  1786. return x_type
  1787. def infer_value(self, x):
  1788. if x is not None:
  1789. x = x.asnumpy()
  1790. out = np.exp(x)
  1791. out = np.array(out, x.dtype)
  1792. return Tensor(out)
  1793. return None
class Expm1(PrimitiveWithInfer):
    r"""
    Returns exponential then minus 1 of a tensor element-wise.

    .. math::

        out_i = e^{x_i} - 1

    Inputs:
        - **x** (Tensor) - The input tensor. With float16 or float32 data type.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
        >>> expm1 = ops.Expm1()
        >>> output = expm1(x)
        >>> print(output)
        [ 0.        1.718282  6.389056 53.598152]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Expm1."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape equals input shape.
        return x_shape

    def infer_dtype(self, x_type):
        # Only float16/float32 are supported, unlike Exp which accepts any tensor.
        validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
        return x_type
class HistogramFixedWidth(PrimitiveWithInfer):
    """
    Returns a rank 1 histogram counting the number of entries in values that fall into every bin. The bins are equal
    width and determined by the arguments range and nbins.

    Args:
        dtype (str): An optional attribute. The dtype must be "int32". Default: "int32".
        nbins (int): The number of histogram bins, the type is a positive integer.

    Inputs:
        - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
        - **range** (Tensor) - Must has the same data type as `x`, and the shape is [2].
          x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].

    Outputs:
        Tensor, the type is int32.

    Raises:
        TypeError: If `dtype` is not a str or `nbins` is not an int.
        ValueError: If `nbins` is less than 1.
        ValueError: If `dtype` is not 'int32'.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
        >>> range_op = Tensor([0.0, 5.0], mindspore.float16)
        >>> hist = ops.HistogramFixedWidth(5)
        >>> output = hist(x, range_op)
        >>> print(output)
        [2 1 1 0 2]
    """

    @prim_attr_register
    def __init__(self, nbins, dtype='int32'):
        """Initialize HistogramFixedWidth."""
        self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
        validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
        # Only 'int32' is accepted even though the backend attr below is numeric.
        valid_values = ['int32']
        self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
        self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
        # NOTE(review): 3 is presumably the backend type-id for int32 — confirm
        # against the framework's dtype enum table.
        self.add_prim_attr('dtype', 3)

    def infer_shape(self, x_shape, range_shape):
        # Output is a rank-1 histogram with one entry per bin.
        return (self.nbins,)

    def infer_dtype(self, x_dtype, range_dtype):
        valid_dtypes = (mstype.float16, mstype.float32, mstype.int32)
        validator.check_tensor_dtype_valid("x", x_dtype, valid_dtypes, self.name)
        validator.check_tensor_dtype_valid("range", range_dtype, valid_dtypes, self.name)
        # Counts are always returned as int32 regardless of input dtype.
        y_dtype = mstype.int32
        return y_dtype
  1868. class Log(PrimitiveWithInfer):
  1869. """
  1870. Returns the natural logarithm of a tensor element-wise.
  1871. .. math::
  1872. y_i = log_e(x_i)
  1873. .. warning::
  1874. If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
  1875. is subject to change.
  1876. Inputs:
  1877. - **x** (Tensor) - The input tensor. The value must be greater than 0.
  1878. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1879. Outputs:
  1880. Tensor, has the same shape as the `x`.
  1881. Raises:
  1882. TypeError: If `x` is not a Tensor.
  1883. Supported Platforms:
  1884. ``Ascend`` ``GPU`` ``CPU``
  1885. Examples:
  1886. >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1887. >>> log = ops.Log()
  1888. >>> output = log(x)
  1889. >>> print(output)
  1890. [0. 0.6931472 1.3862944]
  1891. """
  1892. @prim_attr_register
  1893. def __init__(self):
  1894. """Initialize Log."""
  1895. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1896. def infer_shape(self, x):
  1897. return x
  1898. def infer_dtype(self, x):
  1899. validator.check_subclass("x", x, mstype.tensor, self.name)
  1900. return x
  1901. def infer_value(self, x):
  1902. if x is not None:
  1903. x = x.asnumpy()
  1904. out = np.log(x)
  1905. out = np.array(out, x.dtype)
  1906. return Tensor(out)
  1907. return None
class Log1p(Primitive):
    """
    Returns the natural logarithm of one plus the input tensor element-wise.

    .. math::

        out_i = {log_e}(x_i + 1)

    Inputs:
        - **x** (Tensor) - The input tensor. With float16 or float32 data type.
          The value must be greater than -1.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
        >>> log1p = ops.Log1p()
        >>> output = log1p(x)
        >>> print(output)
        [0.6931472 1.0986123 1.609438 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Log1p."""
        # Plain Primitive: dtype validation is handled by the backend, not here.
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1934. class Erf(PrimitiveWithInfer):
  1935. r"""
  1936. Computes the Gauss error function of `x` element-wise.
  1937. .. math::
  1938. erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
  1939. Inputs:
  1940. - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1941. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  1942. Outputs:
  1943. Tensor, has the same shape and dtype as the `x`.
  1944. Raises:
  1945. TypeError: If dtype of `x` is neither float16 nor float32.
  1946. Supported Platforms:
  1947. ``Ascend`` ``GPU``
  1948. Examples:
  1949. >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1950. >>> erf = ops.Erf()
  1951. >>> output = erf(x)
  1952. >>> print(output)
  1953. [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
  1954. """
  1955. @prim_attr_register
  1956. def __init__(self):
  1957. """Initialize Erf"""
  1958. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1959. def infer_shape(self, x_shape):
  1960. return x_shape
  1961. def infer_dtype(self, x_dtype):
  1962. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1963. return x_dtype
class Erfc(PrimitiveWithInfer):
    r"""
    Computes the complementary error function of `x` element-wise.

    .. math::

        erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt

    Inputs:
        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape and dtype as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
        >>> erfc = ops.Erfc()
        >>> output = erfc(x)
        >>> print(output)
        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Erfc"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape equals input shape.
        return x_shape

    def infer_dtype(self, x_type):
        # Only floating-point inputs are accepted.
        validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
        return x_type
  1994. class Minimum(_MathBinaryOp):
  1995. r"""
  1996. Computes the minimum of input tensors element-wise.
  1997. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  1998. The inputs must be two tensors or one tensor and one scalar.
  1999. When the inputs are two tensors,
  2000. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2001. When the inputs are one tensor and one scalar,
  2002. the scalar could only be a constant.
  2003. If one of the elements being compared is a NaN, then that element is returned.
  2004. .. math::
  2005. output_i = min(x_i, y_i)
  2006. Inputs:
  2007. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2008. a bool or a tensor whose data type is number or bool.
  2009. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2010. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2011. Outputs:
  2012. Tensor, the shape is the same as the one after broadcasting,
  2013. and the data type is the one with higher precision or higher digits among the two inputs.
  2014. Raises:
  2015. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2016. ValueError: If `x` and `y` are not the same shape.
  2017. Supported Platforms:
  2018. ``Ascend`` ``GPU`` ``CPU``
  2019. Examples:
  2020. >>> # case 1 : same data type
  2021. >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  2022. >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  2023. >>> minimum = ops.Minimum()
  2024. >>> output = minimum(x, y)
  2025. >>> print(output)
  2026. [1. 2. 3.]
  2027. >>> # case 2 : different data type
  2028. >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
  2029. >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  2030. >>> output = minimum(x, y)
  2031. >>> print(output.dtype)
  2032. Float32
  2033. """
  2034. def infer_value(self, x, y):
  2035. if x is not None and y is not None:
  2036. x = x.asnumpy()
  2037. y = y.asnumpy()
  2038. out = np.minimum(x, y)
  2039. out = np.array(out, x.dtype)
  2040. return Tensor(out)
  2041. return None
  2042. class Maximum(_MathBinaryOp):
  2043. """
  2044. Computes the maximum of input tensors element-wise.
  2045. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2046. The inputs must be two tensors or one tensor and one scalar.
  2047. When the inputs are two tensors,
  2048. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2049. When the inputs are one tensor and one scalar,
  2050. the scalar could only be a constant.
  2051. If one of the elements being compared is a NaN, then that element is returned.
  2052. .. math::
  2053. output_i = max(x_i, y_i)
  2054. Inputs:
  2055. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2056. a bool or a tensor whose data type is number or bool.
  2057. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2058. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2059. Outputs:
  2060. Tensor, the shape is the same as the one after broadcasting,
  2061. and the data type is the one with higher precision or higher digits among the two inputs.
  2062. Raises:
  2063. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2064. ValueError: If `x` and `y` are not the same shape.
  2065. Supported Platforms:
  2066. ``Ascend`` ``GPU`` ``CPU``
  2067. Examples:
  2068. >>> # case 1 : same data type
  2069. >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  2070. >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  2071. >>> maximum = ops.Maximum()
  2072. >>> output = maximum(x, y)
  2073. >>> print(output)
  2074. [4. 5. 6.]
  2075. >>> # case 2 : different data type
  2076. >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
  2077. >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  2078. >>> output = maximum(x, y)
  2079. >>> print(output.dtype)
  2080. Float32
  2081. """
  2082. def infer_value(self, x, y):
  2083. if x is not None and y is not None:
  2084. x = x.asnumpy()
  2085. y = y.asnumpy()
  2086. out = np.maximum(x, y)
  2087. out = np.array(out, x.dtype)
  2088. return Tensor(out)
  2089. return None
  2090. class RealDiv(_MathBinaryOp):
  2091. """
  2092. Divides the first input tensor by the second input tensor in floating-point type element-wise.
  2093. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2094. The inputs must be two tensors or one tensor and one scalar.
  2095. When the inputs are two tensors,
  2096. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2097. When the inputs are one tensor and one scalar,
  2098. the scalar could only be a constant.
  2099. .. math::
  2100. out_{i} = x_{i} / y_{i}
  2101. Inputs:
  2102. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2103. a bool or a tensor whose data type is number or bool.
  2104. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2105. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2106. Outputs:
  2107. Tensor, the shape is the same as the one after broadcasting,
  2108. and the data type is the one with higher precision or higher digits among the two inputs.
  2109. Raises:
  2110. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2111. ValueError: If `x` and `y` are not the same shape.
  2112. Supported Platforms:
  2113. ``Ascend`` ``GPU`` ``CPU``
  2114. Examples:
  2115. >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  2116. >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  2117. >>> realdiv = ops.RealDiv()
  2118. >>> output = realdiv(x, y)
  2119. >>> print(output)
  2120. [0.25 0.4 0.5 ]
  2121. """
  2122. def infer_value(self, x, y):
  2123. if x is not None and y is not None:
  2124. x = x.asnumpy()
  2125. y = y.asnumpy()
  2126. out = x / y
  2127. out = np.array(out, x.dtype)
  2128. return Tensor(out)
  2129. return None
  2130. class Div(_MathBinaryOp):
  2131. r"""
  2132. Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
  2133. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2134. The inputs must be two tensors or one tensor and one scalar.
  2135. When the inputs are two tensors,
  2136. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2137. When the inputs are one tensor and one scalar,
  2138. the scalar could only be a constant.
  2139. .. math::
  2140. out_{i} = \frac{x_i}{y_i}
  2141. Inputs:
  2142. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2143. a bool or a tensor whose data type is number or bool.
  2144. - **y** (Union[Tensor, Number, bool]) - When the first input is a tensor, The second input
  2145. could be a number, a bool, or a tensor whose data type is number or bool. When the first input
  2146. is a number or a bool, the second input must be a tensor whose data type is number or bool.
  2147. Outputs:
  2148. Tensor, the shape is the same as the one after broadcasting,
  2149. and the data type is the one with higher precision or higher digits among the two inputs.
  2150. Raises:
  2151. TypeError: If neither `x` nor `y` is a Tensor.
  2152. Supported Platforms:
  2153. ``Ascend`` ``GPU`` ``CPU``
  2154. Examples:
  2155. >>> # case 1 :has same data type and shape of the two inputs
  2156. >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  2157. >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  2158. >>> div = ops.Div()
  2159. >>> output = div(x, y)
  2160. >>> print(output)
  2161. [-1.3333334 2.5 2. ]
  2162. >>> # case 2 : different data type and shape of the two inputs
  2163. >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.int32)
  2164. >>> y = Tensor(2, mindspore.float32)
  2165. >>> output = div(x, y)
  2166. >>> print(output)
  2167. [-2. 2.5 3.]
  2168. >>> print(output.dtype)
  2169. Float32
  2170. """
  2171. def infer_value(self, x, y):
  2172. if x is not None and y is not None:
  2173. x = x.asnumpy()
  2174. y = y.asnumpy()
  2175. out = np.array(x / y, x.dtype)
  2176. return Tensor(out)
  2177. return None
  2178. class DivNoNan(_MathBinaryOp):
  2179. r"""
  2180. Computes a safe divide and returns 0 if the y is zero.
  2181. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2182. The inputs must be two tensors or one tensor and one scalar.
  2183. When the inputs are two tensors,
  2184. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2185. When the inputs are one tensor and one scalar,
  2186. the scalar could only be a constant.
  2187. .. math::
  2188. output_{i} = \begin{cases}
  2189. 0, & \text{ if } y_{i} = 0\\
  2190. x_{i} / y_{i}, & \text{ if } y_{i} \ne 0
  2191. \end{cases}
  2192. Inputs:
  2193. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2194. a bool or a tensor whose data type is number or bool.
  2195. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2196. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2197. Outputs:
  2198. Tensor, the shape is the same as the one after broadcasting,
  2199. and the data type is the one with higher precision or higher digits among the two inputs.
  2200. Raises:
  2201. TypeError: If neither `x` nor `y` is a Tensor.
  2202. Supported Platforms:
  2203. ``Ascend`` ``GPU``
  2204. Examples:
  2205. >>> x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
  2206. >>> y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
  2207. >>> div_no_nan = ops.DivNoNan()
  2208. >>> output = div_no_nan(x, y)
  2209. >>> print(output)
  2210. [0. 0. 0. 2.5 2. ]
  2211. """
  2212. @prim_attr_register
  2213. def __init__(self):
  2214. """Initialize _BinaryOp"""
  2215. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  2216. def infer_value(self, x, y):
  2217. if x is not None and y is not None:
  2218. x = x.asnumpy()
  2219. y = y.asnumpy()
  2220. with np.errstate(divide='ignore', invalid='ignore'):
  2221. out = np.true_divide(x, y)
  2222. out[~np.isfinite(out)] = 0
  2223. return out
  2224. return None
  2225. class MulNoNan(_MathBinaryOp):
  2226. r"""
  2227. Computes `x` * `y` element-wise. If `y` is zero, no matter what `x` is, it will return 0, and also
  2228. If `x` is zero, no matter what `y` is, it will return 0.
  2229. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2230. The inputs must be two tensors or one tensor and one scalar.
  2231. When the inputs are two tensors, the shapes of them could be broadcasted.
  2232. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  2233. .. math::
  2234. output_{ij} = \begin{cases}
  2235. 0, & if\ x_{ij} = 0\ or\ y_{ij} = 0;\\
  2236. x_{ij} * y_{ij}, & otherwise.
  2237. \end{cases}
  2238. Note:
  2239. The shapes of `x` and `y` should be the same or can be broadcasted.
  2240. Inputs:
  2241. - **x** (Union[Tensor]) - The first input is a tensor whose data type is one of
  2242. float16, float32, int32, int64 currently or scalar.
  2243. - **y** (Union[Tensor]) - The second input is a tensor whose data type is one of
  2244. float16, float32, int32, int64 currently or scalar.
  2245. Outputs:
  2246. Tensor, the shape is the same as the shape after broadcasting,
  2247. and the data type is the one with higher precision among the two inputs.
  2248. Supported Platforms:
  2249. ``Ascend``
  2250. Raises:
  2251. TypeError: If neither `x` nor `y` is a bool Tensor.
  2252. Examples:
  2253. >>> # case 1 : same data type and shape of two inputs, there are some 0 in y.
  2254. >>> x = Tensor(np.array([[-1.0, 6.0, np.inf], [np.nan, -7.0, 4.0]]), mindspore.float32)
  2255. >>> y = Tensor(np.array([[-1.0, 4.0, 0], [0, -3.0, 1.0]]), mindspore.float32)
  2256. >>> mul_no_nan = ops.MulNoNan()
  2257. >>> output = mul_no_nan(x, y)
  2258. >>> print(output)
  2259. [[ 1. 24. 0.]
  2260. [ 0. 21. 4.]]
  2261. >>> # case 2 : the shape of two inputs is same, there are some 0 in x, y.
  2262. >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.int32)
  2263. >>> y = Tensor(np.array([[-1.0, 4.0, np.inf], [np.nan, 0, 1.0]]), mindspore.float32)
  2264. >>> output = mul_no_nan(x, y)
  2265. >>> print(output)
  2266. [[ 1. 24. 0.]
  2267. [ 0. 0. 4.]]
  2268. >>> print(output.dtype)
  2269. Float32
  2270. >>> # case 3 : the y is a scalar.
  2271. >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.float32)
  2272. >>> y = Tensor(0, mindspore.float32)
  2273. >>> output = mul_no_nan(x, y)
  2274. >>> print(output)
  2275. [[ 0. 0. 0.]
  2276. [ 0. 0. 0.]]
  2277. """
  2278. @prim_attr_register
  2279. def __init__(self):
  2280. """Initialize _BinaryOp"""
  2281. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  2282. def infer_value(self, x, y):
  2283. if x is not None and y is not None:
  2284. x = x.asnumpy()
  2285. y = y.asnumpy()
  2286. with np.errstate(divide='ignore', invalid='ignore'):
  2287. out = np.multiply(x, y)
  2288. out[y == 0] = 0
  2289. return out
  2290. return None
class FloorDiv(_MathBinaryOp):
    """
    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. math::

        out_{i} = \\text{floor}( \\frac{x_i}{y_i})

    where the :math:`floor` indicates the Floor operator, for more details, please refer to the Floor operator.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
          a bool or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
          a bool when the first input is a tensor or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> floor_div = ops.FloorDiv()
        >>> output = floor_div(x, y)
        >>> print(output)
        [ 0 1 -1]
    """
class TruncateDiv(_MathBinaryOp):
    """
    Divides the first input tensor by the second input tensor element-wise for integer types, negative numbers will
    round fractional quantities towards zero.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    Note:
        Broadcasting is supported.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_div = ops.TruncateDiv()
        >>> output = truncate_div(x, y)
        >>> print(output)
        [0 1 0]
    """
class TruncateMod(_MathBinaryOp):
    r"""
    Returns the remainder of division element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. warning::
        - The input data does not support 0.
        - When the elements of input exceed 2048, the accuracy of operator cannot guarantee the requirement of
          double thousandths in the mini form.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is one of the following: Tensor, Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_mod = ops.TruncateMod()
        >>> output = truncate_mod(x, y)
        >>> print(output)
        [ 2 1 -1]
    """
class Mod(_MathBinaryOp):
    r"""
    Computes the remainder of dividing the first input tensor by the second input tensor element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
    both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
    and one scalar, the scalar could only be a constant.

    .. math::

        out_{i} = x_{i} \text{ % } y_{i}

    The sign of the result follows the dividend `x` (truncated remainder),
    e.g. :math:`mod(-4, 3) = -1`.

    .. warning::
        - The input data does not support 0.
        - When the elements of input exceed 2048, the accuracy of operator cannot guarantee the requirement of
          double thousandths in the mini form.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.

    Inputs:
        - **x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
        - **y** (Union[Tensor, Number]) - When the first input is a tensor, The second input
          could be a number or a tensor whose data type is number. When the first input is a number,
          the second input must be a tensor whose data type is number.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        ValueError: When `x` and `y` are not the same dtype.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
        >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
        >>> mod = ops.Mod()
        >>> output = mod(x, y)
        >>> print(output)
        [-1. 1. 0.]
    """

    def infer_value(self, x, y):
        # Constant-fold via np.fmod, which implements the truncated
        # remainder documented above (sign follows the dividend).
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            return Tensor(np.fmod(x, y))
        return None
class Floor(Primitive):
    r"""
    Rounds a tensor down to the closest integer element-wise.

    .. math::

        out_i = \lfloor x_i \rfloor

    Inputs:
        - **x** (Tensor) - The input tensor. Its element data type must be float16 or float32.
          (NOTE(review): the Raises section below also lists float64 — confirm which platforms accept it.)
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If dtype of `x` is not in [float16, float32, float64].

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
        >>> floor = ops.Floor()
        >>> output = floor(x)
        >>> print(output)
        [ 1. 2. -2.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Floor."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
class FloorMod(_MathBinaryOp):
    r"""
    Computes the remainder of division element-wise. It's a flooring divide.
    E.g. :math:`floor(x / y) * y + mod(x, y) = x`.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. math::

        out_{i} = x_{i} - \text{floor}(x_{i} / y_{i}) * y_{i}

    where the :math:`floor` indicates the Floor operator, for more details, please refer to the Floor operator.
    The sign of the result follows the divisor `y`, e.g. :math:`floor\_mod(-1, 3) = 2` (see Examples).

    .. warning::
        - The input data does not support 0.
        - When the elements of input exceeds 2048, the accuracy of operator cannot guarantee the requirement of
          double thousandths in the mini form.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
          a bool or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
          a bool when the first input is a tensor or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> floor_mod = ops.FloorMod()
        >>> output = floor_mod(x, y)
        >>> print(output)
        [2 1 2]
    """
class Ceil(PrimitiveWithInfer):
    r"""
    Rounds a tensor up to the closest integer element-wise.

    .. math::

        out_i = \lceil x_i \rceil

    Inputs:
        - **x** (Tensor) - The input tensor. It's element data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
        >>> ceil_op = ops.Ceil()
        >>> output = ceil_op(x)
        >>> print(output)
        [ 2. 3. -1.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Ceil."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape equals input shape.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Only float16/float32 tensors are accepted (see Raises above).
        validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
        return x_dtype
  2525. class Xdivy(_MathBinaryOp):
  2526. """
  2527. Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
  2528. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2529. The inputs must be two tensors or one tensor and one scalar.
  2530. When the inputs are two tensors,
  2531. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2532. When the inputs are one tensor and one scalar,
  2533. the scalar could only be a constant.
  2534. Inputs:
  2535. - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  2536. or a tensor whose data type is float16, float32 or bool.
  2537. - **y** (Union[Tensor, Number, bool]) - The second input is a number,
  2538. or a bool when the first input is a tensor, or a tensor whose data type is float16, float32 or bool.
  2539. Outputs:
  2540. Tensor, the shape is the same as the one after broadcasting,
  2541. and the data type is the one with higher precision or higher digits among the two inputs.
  2542. Raises:
  2543. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2544. Supported Platforms:
  2545. ``Ascend``
  2546. Examples:
  2547. >>> x = Tensor(np.array([2, 4, -1]), mindspore.float32)
  2548. >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  2549. >>> xdivy = ops.Xdivy()
  2550. >>> output = xdivy(x, y)
  2551. >>> print(output)
  2552. [ 1. 2. -0.5]
  2553. """
  2554. def infer_dtype(self, x_dtype, y_dtype):
  2555. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  2556. class Xlogy(_MathBinaryOp):
  2557. r"""
  2558. Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
  2559. Returns zero when `x` is zero.
  2560. .. math::
  2561. out_i = x_{i}\ln{y_{i}}
  2562. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2563. The inputs must be two tensors or one tensor and one scalar.
  2564. When the inputs are two tensors,
  2565. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2566. When the inputs are one tensor and one scalar,
  2567. the scalar could only be a constant.
  2568. Inputs:
  2569. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2570. a bool or a tensor whose data type is float16, float32 or bool.
  2571. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2572. a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
  2573. The value must be positive.
  2574. Outputs:
  2575. Tensor, the shape is the same as the one after broadcasting,
  2576. and the data type is the one with higher precision or higher digits among the two inputs.
  2577. Raises:
  2578. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2579. Supported Platforms:
  2580. ``Ascend``
  2581. Examples:
  2582. >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
  2583. >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  2584. >>> xlogy = ops.Xlogy()
  2585. >>> output = xlogy(x, y)
  2586. >>> print(output)
  2587. [-3.465736 0. 2.7725887]
  2588. """
  2589. def infer_dtype(self, x_dtype, y_dtype):
  2590. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  2591. class Acosh(PrimitiveWithInfer):
  2592. r"""
  2593. Computes inverse hyperbolic cosine of the inputs element-wise.
  2594. .. math::
  2595. out_i = \cosh^{-1}(input_i)
  2596. .. warning::
  2597. Given an input tensor x, the function computes inverse hyperbolic cosine of every element.
  2598. Input range is [1, inf].
  2599. Inputs:
  2600. - **x** (Tensor) - The data type should be one of the following types: float16, float32.
  2601. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  2602. Outputs:
  2603. Tensor, has the same shape and type as `x`.
  2604. Raises:
  2605. TypeError: If `x` is not a Tensor.
  2606. Supported Platforms:
  2607. ``Ascend`` ``GPU`` ``CPU``
  2608. Examples:
  2609. >>> import numpy as np
  2610. >>> import mindspore.ops as ops
  2611. >>> from mindspore import Tensor, dtype
  2612. >>> acosh = ops.Acosh()
  2613. >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), dtype.float32)
  2614. >>> output = acosh(x)
  2615. >>> print(output)
  2616. [0. 0.9624236 1.7627472 5.298292]
  2617. """
  2618. @prim_attr_register
  2619. def __init__(self):
  2620. """Initialize Acosh"""
  2621. def infer_shape(self, x_shape):
  2622. return x_shape
  2623. def infer_dtype(self, x_dtype):
  2624. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2625. return x_dtype
  2626. class Cosh(PrimitiveWithInfer):
  2627. r"""
  2628. Computes hyperbolic cosine of input element-wise.
  2629. .. math::
  2630. out_i = \cosh(input_i)
  2631. Inputs:
  2632. - **x** (Tensor) - The shape of tensor is
  2633. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  2634. Outputs:
  2635. Tensor, has the same shape as `x`.
  2636. Raises:
  2637. TypeError: If `x` is not a Tensor.
  2638. Supported Platforms:
  2639. ``Ascend`` ``CPU``
  2640. Examples:
  2641. >>> cosh = ops.Cosh()
  2642. >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  2643. >>> output = cosh(x)
  2644. >>> print(output)
  2645. [1.0289385 1.364684 1.048436 1.0040528]
  2646. """
  2647. @prim_attr_register
  2648. def __init__(self):
  2649. """Initialize Cosh"""
  2650. def infer_shape(self, x_shape):
  2651. return x_shape
  2652. def infer_dtype(self, x_dtype):
  2653. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2654. return x_dtype
  2655. class Asinh(PrimitiveWithInfer):
  2656. r"""
  2657. Computes inverse hyperbolic sine of the input element-wise.
  2658. .. math::
  2659. out_i = \sinh^{-1}(input_i)
  2660. Inputs:
  2661. - **x** (Tensor) - The shape of tensor is
  2662. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  2663. The data type should be one of the following types: float16, float32.
  2664. Outputs:
  2665. Tensor, has the same shape and type as `x`.
  2666. Raises:
  2667. TypeError: If `x` is not a Tensor.
  2668. Supported Platforms:
  2669. ``Ascend`` ``GPU`` ``CPU``
  2670. Examples:
  2671. >>> asinh = ops.Asinh()
  2672. >>> x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
  2673. >>> output = asinh(x)
  2674. >>> print(output)
  2675. [-2.3124385 1.1947632 1.8184465 5.298342 ]
  2676. """
  2677. @prim_attr_register
  2678. def __init__(self):
  2679. """Initialize Asinh"""
  2680. def infer_shape(self, x_shape):
  2681. return x_shape
  2682. def infer_dtype(self, x_dtype):
  2683. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2684. return x_dtype
  2685. class Sinh(PrimitiveWithInfer):
  2686. r"""
  2687. Computes hyperbolic sine of the input element-wise.
  2688. .. math::
  2689. out_i = \sinh(input_i)
  2690. Inputs:
  2691. - **x** (Tensor) - The shape of tensor is
  2692. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  2693. Outputs:
  2694. Tensor, has the same shape as `x`.
  2695. Raises:
  2696. TypeError: If `x` is not a Tensor.
  2697. Supported Platforms:
  2698. ``Ascend`` ``CPU``
  2699. Examples:
  2700. >>> sinh = ops.Sinh()
  2701. >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  2702. >>> output = sinh(x)
  2703. >>> print(output)
  2704. [0.6604918 0.28367308 0.44337422 0.6604918 ]
  2705. """
  2706. @prim_attr_register
  2707. def __init__(self):
  2708. """Initialize Sinh"""
  2709. def infer_shape(self, x_shape):
  2710. return x_shape
  2711. def infer_dtype(self, x_dtype):
  2712. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2713. return x_dtype
  2714. class _LogicBinaryOp(_BinaryOp):
  2715. """
  2716. Define logic binary operators.
  2717. """
  2718. @staticmethod
  2719. def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
  2720. """Staticmethod of infer dtype for _LogicBinaryOp."""
  2721. args_dtype = {"x": x_dtype, "y": y_dtype}
  2722. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
  2723. return mstype.tensor_type(mstype.bool_)
  2724. def infer_dtype(self, x_dtype, y_dtype):
  2725. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
  2726. class Equal(_LogicBinaryOp):
  2727. r"""
  2728. Computes the equivalence between two tensors element-wise.
  2729. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2730. The inputs must be two tensors or one tensor and one scalar.
  2731. When the inputs are two tensors, the shapes of them could be broadcast.
  2732. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  2733. .. math::
  2734. out_{i} =\begin{cases}
  2735. & \text{True, if } x_{i} = y_{i} \\
  2736. & \text{False, if } x_{i} \ne y_{i}
  2737. \end{cases}
  2738. Inputs:
  2739. - **x** (Union[Tensor, Number]) - The first input is a number or
  2740. a tensor whose data type is number.
  2741. - **y** (Union[Tensor, Number]) - The second input is a number
  2742. when the first input is a tensor or a tensor whose data type is number.
  2743. The data type is the same as the first input.
  2744. Outputs:
  2745. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2746. Raises:
  2747. TypeError: If neither `x` nor `y` is a Tensor.
  2748. Supported Platforms:
  2749. ``Ascend`` ``GPU`` ``CPU``
  2750. Examples:
  2751. >>> # case 1: The shape of two inputs are different
  2752. >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2753. >>> equal = ops.Equal()
  2754. >>> output = equal(x, 2.0)
  2755. >>> print(output)
  2756. [False True False]
  2757. >>> # case 2: The shape of two inputs are the same
  2758. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2759. >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2760. >>> equal = ops.Equal()
  2761. >>> output = equal(x, y)
  2762. >>> print(output)
  2763. [ True True False]
  2764. """
  2765. def infer_dtype(self, x_dtype, y_dtype):
  2766. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  2767. def infer_value(self, x, y):
  2768. if x is None or y is None:
  2769. return None
  2770. if isinstance(x, Tensor) and x.has_init:
  2771. x = x.init_data()
  2772. if isinstance(y, Tensor) and y.has_init:
  2773. y = y.init_data()
  2774. return Tensor(x.asnumpy() == y.asnumpy())
  2775. class ApproximateEqual(_LogicBinaryOp):
  2776. r"""
  2777. Returns True if abs(x-y) is smaller than tolerance element-wise, otherwise False.
  2778. .. math::
  2779. out_i = \begin{cases}
  2780. & \text{ if } \left | x_{i} - y_{i} \right | < \text{tolerance},\ \ True \\
  2781. & \text{ if } \left | x_{i} - y_{i} \right | \ge \text{tolerance},\ \ False
  2782. \end{cases}
  2783. where :math:`\text{tolerance}` indicates Acceptable maximum tolerance.
  2784. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2785. If they have different data types, lower priority data type will be converted to
  2786. the relatively highest priority data type.
  2787. Args:
  2788. tolerance (float): The maximum deviation that two elements can be considered equal. Default: 1e-05.
  2789. Inputs:
  2790. - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
  2791. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  2792. - **y** (Tensor) - A tensor of the same type and shape as 'x'.
  2793. Outputs:
  2794. Tensor, the shape is the same as the shape of 'x', and the data type is bool.
  2795. Raises:
  2796. TypeError: If `tolerance` is not a float.
  2797. RuntimeError: If the data type of `x`, `y` conversion of Parameter is required
  2798. when data type conversion of Parameter is not supported.
  2799. Supported Platforms:
  2800. ``Ascend``
  2801. Examples:
  2802. >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2803. >>> y = Tensor(np.array([2, 4, 6]), mindspore.float32)
  2804. >>> approximate_equal = ops.ApproximateEqual(2.)
  2805. >>> output = approximate_equal(x, y)
  2806. >>> print(output)
  2807. [ True True False]
  2808. """
  2809. @prim_attr_register
  2810. def __init__(self, tolerance=1e-05):
  2811. """Initialize ApproximateEqual"""
  2812. validator.check_value_type("tolerance", tolerance, [float], self.name)
  2813. def infer_shape(self, x_shape, y_shape):
  2814. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  2815. return x_shape
  2816. def infer_dtype(self, x_dtype, y_dtype):
  2817. args_dtype = {"x": x_dtype, "y": y_dtype}
  2818. valid_type = [mstype.float32, mstype.float16]
  2819. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name=self.name)
  2820. return mstype.tensor_type(mstype.bool_)
  2821. class EqualCount(PrimitiveWithInfer):
  2822. """
  2823. Computes the number of the same elements of two tensors.
  2824. The two input tensors must have the same data type and shape.
  2825. Inputs:
  2826. - **x** (Tensor) - The first input tensor. If the data type and shape of `y` are determined, then `x`
  2827. must be the same as `y`, and vice versa.
  2828. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  2829. - **y** (Tensor) - The second input tensor. If the data type and shape of `x` are determined, then `y`
  2830. must be the same as `x`, and vice versa.
  2831. Outputs:
  2832. Tensor, with the type same as input tensor and size as (1,).
  2833. Raises:
  2834. TypeError: If `x` or `y` is not a Tensor.
  2835. ValueError: If shape of `x` is not equal to shape of `y`.
  2836. Supported Platforms:
  2837. ``GPU`` ``CPU``
  2838. Examples:
  2839. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2840. >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2841. >>> equal_count = ops.EqualCount()
  2842. >>> output = equal_count(x, y)
  2843. >>> print(output)
  2844. [2]
  2845. """
  2846. @prim_attr_register
  2847. def __init__(self):
  2848. """Initialize EqualCount"""
  2849. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  2850. def infer_shape(self, x_shape, y_shape):
  2851. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  2852. output_shape = (1,)
  2853. return output_shape
  2854. def infer_dtype(self, x_dtype, y_dtype):
  2855. args = {'x': x_dtype, 'y': y_dtype}
  2856. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
  2857. return x_dtype
  2858. class NotEqual(_LogicBinaryOp):
  2859. r"""
  2860. Computes the non-equivalence of two tensors element-wise.
  2861. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2862. The inputs must be two tensors or one tensor and one scalar.
  2863. When the inputs are two tensors, the shapes of them could be broadcast.
  2864. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  2865. .. math::
  2866. out_{i} =\begin{cases}
  2867. & \text{True, if } x_{i} \ne y_{i} \\
  2868. & \text{False, if } x_{i} = y_{i}
  2869. \end{cases}
  2870. Inputs:
  2871. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2872. a bool or a tensor whose data type is number or bool.
  2873. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2874. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2875. Outputs:
  2876. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2877. Raises:
  2878. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  2879. TypeError: If neither `x` nor `y` is a Tensor.
  2880. Supported Platforms:
  2881. ``Ascend`` ``GPU`` ``CPU``
  2882. Examples:
  2883. >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2884. >>> not_equal = ops.NotEqual()
  2885. >>> output = not_equal(x, 2.0)
  2886. >>> print(output)
  2887. [ True False True]
  2888. >>>
  2889. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2890. >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2891. >>> not_equal = ops.NotEqual()
  2892. >>> output = not_equal(x, y)
  2893. >>> print(output)
  2894. [False False True]
  2895. """
  2896. def infer_dtype(self, x_dtype, y_dtype):
  2897. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  2898. class Greater(_LogicBinaryOp):
  2899. r"""
  2900. Computes the boolean value of :math:`x > y` element-wise.
  2901. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2902. The inputs must be two tensors or one tensor and one scalar.
  2903. When the inputs are two tensors,
  2904. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2905. When the inputs are one tensor and one scalar,
  2906. the scalar could only be a constant.
  2907. .. math::
  2908. out_{i} =\begin{cases}
  2909. & \text{True, if } x_{i}>y_{i} \\
  2910. & \text{False, if } x_{i}<=y_{i}
  2911. \end{cases}
  2912. Note:
  2913. Broadcasting is supported.
  2914. Inputs:
  2915. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2916. a bool or a tensor whose data type is number or bool.
  2917. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2918. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2919. Outputs:
  2920. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2921. Raises:
  2922. TypeError: If neither `x` nor `y` is a Tensor.
  2923. Supported Platforms:
  2924. ``Ascend`` ``GPU`` ``CPU``
  2925. Examples:
  2926. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2927. >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2928. >>> greater = ops.Greater()
  2929. >>> output = greater(x, y)
  2930. >>> print(output)
  2931. [False True False]
  2932. """
  2933. def infer_value(self, x, y):
  2934. if x is not None and y is not None:
  2935. x = x.asnumpy()
  2936. y = y.asnumpy()
  2937. out = np.array(np.greater(x, y))
  2938. return Tensor(out)
  2939. return None
  2940. class GreaterEqual(_LogicBinaryOp):
  2941. r"""
  2942. Computes the boolean value of :math:`x >= y` element-wise.
  2943. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  2944. The inputs must be two tensors or one tensor and one scalar.
  2945. When the inputs are two tensors,
  2946. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  2947. When the inputs are one tensor and one scalar,
  2948. the scalar could only be a constant.
  2949. .. math::
  2950. out_{i} =\begin{cases}
  2951. & \text{True, if } x_{i}>=y_{i} \\
  2952. & \text{False, if } x_{i}<y_{i}
  2953. \end{cases}
  2954. Inputs:
  2955. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  2956. a bool or a tensor whose data type is number or bool.
  2957. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  2958. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2959. Outputs:
  2960. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2961. Raises:
  2962. TypeError: If neither `x` nor `y` is a Tensor.
  2963. Supported Platforms:
  2964. ``Ascend`` ``GPU`` ``CPU``
  2965. Examples:
  2966. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2967. >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2968. >>> greater_equal = ops.GreaterEqual()
  2969. >>> output = greater_equal(x, y)
  2970. >>> print(output)
  2971. [True True False]
  2972. """
  2973. def infer_value(self, x, y):
  2974. if x is not None and y is not None:
  2975. x = x.asnumpy()
  2976. y = y.asnumpy()
  2977. out = np.array(np.greater_equal(x, y))
  2978. return Tensor(out)
  2979. return None
  2980. class Lerp(Primitive):
  2981. """
  2982. Does a linear interpolation of two tensors start and end based on a float or tensor weight.
  2983. If `weight` is a tensor, the shapes of three inputs need to be broadcast;
  2984. If `weight` is a float, the shapes of `start` and `end` need to be broadcast.
  2985. .. math::
  2986. output_{i} = start_{i} + weight_{i} * (end_{i} - start_{i})
  2987. Inputs:
  2988. - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
  2989. - **end** (Tensor) - The tensor with the ending points. Data type must be float16 or float32.
  2990. - **weight** (Union[float, Tensor]) – The weight for the interpolation formula. Must be a float
  2991. or a scalar tensor with float16 or float32 data type.
  2992. Outputs:
  2993. Tensor, has the same type and shape as input `start`.
  2994. Raises:
  2995. TypeError: If `start` or `end` is not a tensor.
  2996. TypeError: If `weight` is neither float nor tensor.
  2997. TypeError: If dtype of `start` or `end` is neither float16 nor float32.
  2998. TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
  2999. TypeError: If `start` and `end` have different data types.
  3000. TypeError: If `start`, `end` and `weight` have different data types when `weight` is a tensor.
  3001. ValueError: If `end` could not be broadcast to a tensor with shape of `start`.
  3002. ValueError: If `weight` could not be broadcast to tensors with shapes of `start` and `end` when it is a tensor.
  3003. Supported Platforms:
  3004. ``Ascend``
  3005. Examples:
  3006. >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
  3007. >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
  3008. >>> lerp = ops.Lerp()
  3009. >>> output = lerp(start, end, 0.5)
  3010. >>> print(output)
  3011. [5.5 6. 6.5 7. ]
  3012. """
  3013. @prim_attr_register
  3014. def __init__(self):
  3015. self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
  3016. class Less(_LogicBinaryOp):
  3017. r"""
  3018. Computes the boolean value of :math:`x < y` element-wise.
  3019. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  3020. The inputs must be two tensors or one tensor and one scalar.
  3021. When the inputs are two tensors,
  3022. dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
  3023. When the inputs are one tensor and one scalar,
  3024. the scalar could only be a constant.
  3025. .. math::
  3026. out_{i} =\begin{cases}
  3027. & \text{True, if } x_{i}<y_{i} \\
  3028. & \text{False, if } x_{i}>=y_{i}
  3029. \end{cases}
  3030. Inputs:
  3031. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  3032. a bool or a tensor whose data type is number or bool.
  3033. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  3034. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  3035. Outputs:
  3036. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  3037. Raises:
  3038. TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
  3039. Supported Platforms:
  3040. ``Ascend`` ``GPU`` ``CPU``
  3041. Examples:
  3042. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  3043. >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  3044. >>> less = ops.Less()
  3045. >>> output = less(x, y)
  3046. >>> print(output)
  3047. [False False True]
  3048. """
  3049. class LessEqual(_LogicBinaryOp):
  3050. r"""
  3051. Computes the boolean value of :math:`x <= y` element-wise.
  3052. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  3053. The inputs must be two tensors or one tensor and one scalar.
  3054. When the inputs are two tensors,
  3055. dtypes of them cannot be both bool , and the shapes of them could be broadcast.
  3056. When the inputs are one tensor and one scalar,
  3057. the scalar could only be a constant.
  3058. .. math::
  3059. out_{i} =\begin{cases}
  3060. & \text{True, if } x_{i}<=y_{i} \\
  3061. & \text{False, if } x_{i}>y_{i}
  3062. \end{cases}
  3063. Inputs:
  3064. - **x** (Union[Tensor, Number, bool]) - The first input is a number or
  3065. a bool or a tensor whose data type is number or bool.
  3066. - **y** (Union[Tensor, Number, bool]) - The second input is a number or
  3067. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  3068. Outputs:
  3069. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  3070. Raises:
  3071. TypeError: If neither `x` nor `y` is a Tensor.
  3072. Supported Platforms:
  3073. ``Ascend`` ``GPU`` ``CPU``
  3074. Examples:
  3075. >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  3076. >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  3077. >>> less_equal = ops.LessEqual()
  3078. >>> output = less_equal(x, y)
  3079. >>> print(output)
  3080. [ True False True]
  3081. """
  3082. class LogicalNot(PrimitiveWithInfer):
  3083. """
  3084. Computes the "logical NOT" of a tensor element-wise.
  3085. .. math::
  3086. out_{i} = \\neg x_{i}
  3087. Inputs:
  3088. - **x** (Tensor) - The input tensor whose dtype is bool.
  3089. :math:`(N,*)` where :math:`*` means,any number of additional dimensions.
  3090. Outputs:
  3091. Tensor, the shape is the same as the `x`, and the dtype is bool.
  3092. Raises:
  3093. TypeError: If `x` is not a Tensor.
  3094. Supported Platforms:
  3095. ``Ascend`` ``GPU`` ``CPU``
  3096. Examples:
  3097. >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
  3098. >>> logical_not = ops.LogicalNot()
  3099. >>> output = logical_not(x)
  3100. >>> print(output)
  3101. [False True False]
  3102. """
  3103. @prim_attr_register
  3104. def __init__(self):
  3105. """Initialize LogicalNot"""
  3106. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  3107. def infer_shape(self, x_shape):
  3108. return x_shape
  3109. def infer_dtype(self, x_dtype):
  3110. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.bool_], self.name + " or '~' operator")
  3111. return mstype.tensor_type(mstype.bool_)
  3112. def infer_value(self, x):
  3113. if x is not None:
  3114. x = x.asnumpy()
  3115. return Tensor(np.logical_not(x))
  3116. return None
  3117. class LogicalAnd(_LogicBinaryOp):
  3118. r"""
  3119. Computes the "logical AND" of two tensors element-wise.
  3120. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  3121. The inputs must be two tensors or one tensor and one bool.
  3122. When the inputs are two tensors, the shapes of them could be broadcast,
  3123. and the data types of them must be bool.
  3124. When the inputs are one tensor and one bool, the bool object could only be a constant,
  3125. and the data type of the tensor must be bool.
  3126. .. math::
  3127. out_{i} = x_{i} \wedge y_{i}
  3128. Note:
  3129. LogicalAnd supports broadcasting.
  3130. Inputs:
  3131. - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  3132. - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  3133. a tensor whose data type is bool.
  3134. Outputs:
  3135. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  3136. Raises:
  3137. TypeError: If neither `x` nor `y` is a Tensor.
  3138. Supported Platforms:
  3139. ``Ascend`` ``GPU`` ``CPU``
  3140. Examples:
  3141. >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
  3142. >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
  3143. >>> logical_and = ops.LogicalAnd()
  3144. >>> output = logical_and(x, y)
  3145. >>> print(output)
  3146. [ True False False]
  3147. """
  3148. def infer_dtype(self, x_dtype, y_dtype):
  3149. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  3150. def infer_value(self, x, y):
  3151. if x is not None and y is not None:
  3152. x = x.asnumpy()
  3153. y = y.asnumpy()
  3154. out = np.array(np.logical_and(x, y))
  3155. return Tensor(out)
  3156. return None
  3157. class LogicalOr(_LogicBinaryOp):
  3158. """
  3159. Computes the "logical OR" of two tensors element-wise.
  3160. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  3161. The inputs must be two tensors or one tensor and one bool.
  3162. When the inputs are two tensors, the shapes of them could be broadcast,
  3163. and the data types of them must be bool.
  3164. When the inputs are one tensor and one bool, the bool object could only be a constant,
  3165. and the data type of the tensor must be bool.
  3166. .. math::
  3167. out_{i} = x_{i} \\vee y_{i}
  3168. Note:
  3169. LogicalOr supports broadcasting.
  3170. Inputs:
  3171. - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  3172. - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  3173. a tensor whose data type is bool.
  3174. Outputs:
  3175. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  3176. Raises:
  3177. TypeError: If neither `x` nor `y` is a Tensor.
  3178. Supported Platforms:
  3179. ``Ascend`` ``GPU`` ``CPU``
  3180. Examples:
  3181. >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
  3182. >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
  3183. >>> logical_or = ops.LogicalOr()
  3184. >>> output = logical_or(x, y)
  3185. >>> print(output)
  3186. [ True True True]
  3187. """
  3188. def infer_dtype(self, x_dtype, y_dtype):
  3189. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  3190. def infer_value(self, x, y):
  3191. if x is not None and y is not None:
  3192. x = x.asnumpy()
  3193. y = y.asnumpy()
  3194. out = np.array(np.logical_or(x, y))
  3195. return Tensor(out)
  3196. return None
  3197. class IsNan(Primitive):
  3198. r"""
  3199. Determines which elements are NaN for each position.
  3200. .. math::
  3201. out_i = \begin{cases}
  3202. & \text{ if } x_{i} = \text{Nan},\ \ True \\
  3203. & \text{ if } x_{i} \ne \text{Nan},\ \ False
  3204. \end{cases}
  3205. where :math:`Nan` means not a number.
  3206. Inputs:
  3207. - **x** (Tensor) - The input tensor.
  3208. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3209. Outputs:
  3210. Tensor, has the same shape of input, and the dtype is bool.
  3211. Raises:
  3212. TypeError: If `x` is not a Tensor.
  3213. Supported Platforms:
  3214. ``GPU`` ``CPU``
  3215. Examples:
  3216. >>> is_nan = ops.IsNan()
  3217. >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  3218. >>> output = is_nan(x)
  3219. >>> print(output)
  3220. [ True False False]
  3221. """
  3222. @prim_attr_register
  3223. def __init__(self):
  3224. """Initialize IsNan"""
  3225. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  3226. class IsInf(Primitive):
  3227. r"""
  3228. Determines which elements are inf or -inf for each position
  3229. .. math::
  3230. out_i = \begin{cases}
  3231. & \text{ if } x_{i} = \text{Inf},\ \ True \\
  3232. & \text{ if } x_{i} \ne \text{Inf},\ \ False
  3233. \end{cases}
  3234. where :math:`Inf` means not a number.
  3235. Inputs:
  3236. - **x** (Tensor) - The input tensor.
  3237. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3238. Outputs:
  3239. Tensor, has the same shape of input, and the dtype is bool.
  3240. Raises:
  3241. TypeError: If `x` is not a Tensor.
  3242. Supported Platforms:
  3243. ``GPU`` ``CPU``
  3244. Examples:
  3245. >>> is_inf = ops.IsInf()
  3246. >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  3247. >>> output = is_inf(x)
  3248. >>> print(output)
  3249. [False False True]
  3250. """
  3251. @prim_attr_register
  3252. def __init__(self):
  3253. """Initialize IsInf"""
  3254. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  3255. class IsFinite(PrimitiveWithInfer):
  3256. r"""
  3257. Determines which elements are finite for each position.
  3258. .. math::
  3259. out_i = \begin{cases}
  3260. & \text{ if } x_{i} = \text{Finite},\ \ True\ \\
  3261. & \text{ if } x_{i} \ne \text{Finite},\ \ False
  3262. \end{cases}
  3263. Inputs:
  3264. - **x** (Tensor) - The input tensor.
  3265. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3266. Outputs:
  3267. Tensor, has the same shape of input, and the dtype is bool.
  3268. Raises:
  3269. TypeError: If `x` is not a Tensor.
  3270. Supported Platforms:
  3271. ``Ascend`` ``GPU`` ``CPU``
  3272. Examples:
  3273. >>> is_finite = ops.IsFinite()
  3274. >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  3275. >>> output = is_finite(x)
  3276. >>> print(output)
  3277. [False True False]
  3278. """
  3279. @prim_attr_register
  3280. def __init__(self):
  3281. """Initialize IsFinite"""
  3282. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  3283. def infer_shape(self, x_shape):
  3284. return x_shape
  3285. def infer_dtype(self, x_dtype):
  3286. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
  3287. return mstype.tensor_type(mstype.bool_)
  3288. class FloatStatus(PrimitiveWithInfer):
  3289. """
  3290. Determines if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
  3291. Inputs:
  3292. - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
  3293. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3294. Outputs:
  3295. Tensor, has the shape of `(1,)`, and the dtype is `mindspore.dtype.float32`.
  3296. Raises:
  3297. TypeError: If dtype of `x` is not in [float16, float32, float64].
  3298. Supported Platforms:
  3299. ``GPU``
  3300. Examples:
  3301. >>> float_status = ops.FloatStatus()
  3302. >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  3303. >>> result = float_status(x)
  3304. >>> print(result)
  3305. [1.]
  3306. """
  3307. @prim_attr_register
  3308. def __init__(self):
  3309. """Initialize FloatStatus"""
  3310. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  3311. def infer_shape(self, x_shape):
  3312. return [1]
  3313. def infer_dtype(self, x_dtype):
  3314. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float32, mstype.float16, mstype.float64], self.name)
  3315. return mstype.float32
  3316. class NPUAllocFloatStatus(PrimitiveWithInfer):
  3317. """
  3318. Allocates a flag to store the overflow status.
  3319. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  3320. Note:
  3321. Please refer to the Examples of class: `mindspore.ops.NPUAllocFloatStatus`.
  3322. Outputs:
  3323. Tensor, has the shape of `(8,)`.
  3324. Supported Platforms:
  3325. ``Ascend``
  3326. Examples:
  3327. >>> alloc_status = ops.NPUAllocFloatStatus()
  3328. >>> output = alloc_status()
  3329. >>> print(output)
  3330. [0. 0. 0. 0. 0. 0. 0. 0.]
  3331. """
  3332. @prim_attr_register
  3333. def __init__(self):
  3334. """Initialize NPUAllocFloatStatus"""
  3335. def infer_shape(self):
  3336. return [8]
  3337. def infer_dtype(self):
  3338. return mstype.float32
  3339. class NPUGetFloatStatus(PrimitiveWithInfer):
  3340. """
  3341. Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.
  3342. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  3343. If the sum of the flag equals to 0, there is no overflow happened. If the sum of the flag is bigger than 0, there
  3344. is overflow happened.
  3345. In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus operator,
  3346. need to ensure that the NPUClearFlotStatus and your compute has been executed.
  3347. We use Depend on ensure the execution order.
  3348. Inputs:
  3349. - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  3350. The data type must be float16 or float32.
  3351. :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
  3352. Outputs:
  3353. Tensor, has the same shape as `x`. All the elements in the tensor will be zero.
  3354. Raises:
  3355. TypeError: If `x` is not a Tensor.
  3356. TypeError: If dtype of `x` is neither float16 nor float32.
  3357. Supported Platforms:
  3358. ``Ascend``
  3359. Examples:
  3360. >>> self.alloc_status = ops.NPUAllocFloatStatus()
  3361. >>> self.get_status = ops.NPUGetFloatStatus()
  3362. >>> self.clear_status = ops.NPUClearFloatStatus()
  3363. >>> init = self.alloc_status()
  3364. >>> init = F.Depend(init, input) # Ensure clear_status after input
  3365. >>> clear_status = self.clear_status(init)
  3366. >>> input = F.Depend(input, clear_status) # Ensure your compute after clear_status
  3367. >>> output = Compute(input)
  3368. >>> init = F.Depend(init, output)
  3369. >>> flag = self.get_status(init) # Ensure get_status after your compute
  3370. >>> self.clear_status(init)
  3371. >>> print(init)
  3372. [0. 0. 0. 0. 0. 0. 0. 0.]
  3373. """
  3374. @prim_attr_register
  3375. def __init__(self):
  3376. """Initialize NPUGetFloatStatus"""
  3377. def infer_shape(self, x_shape):
  3378. cls_name = self.name
  3379. validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
  3380. validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
  3381. return [8]
  3382. def infer_dtype(self, x_dtype):
  3383. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
  3384. return mstype.float32
  3385. class NPUClearFloatStatus(PrimitiveWithInfer):
  3386. """
  3387. Clears the flag which stores the overflow status.
  3388. Note:
  3389. The flag is in the register on the `Ascend` device. It will be reset and can not be reused again after the
  3390. `NPUClearFloatStatus` is called.
  3391. In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus
  3392. operator, need to ensure that the NPUClearFlotStatus and your compute has been executed.
  3393. We use depend on ensure the execution order.
  3394. Please refer to the Examples of class: `mindspore.ops.NPUGetFloatStatus`.
  3395. Inputs:
  3396. - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  3397. The data type must be float16 or float32.
  3398. Outputs:
  3399. Tensor, has the same shape as `x`. All the elements in the tensor will be zero.
  3400. Supported Platforms:
  3401. ``Ascend``
  3402. Examples:
  3403. >>> self.alloc_status = ops.NPUAllocFloatStatus()
  3404. >>> self.get_status = ops.NPUGetFloatStatus()
  3405. >>> self.clear_status = ops.NPUClearFloatStatus()
  3406. >>> init = self.alloc_status()
  3407. >>> init = F.Depend(init, input) # Ensure clear_status after input
  3408. >>> clear_status = self.clear_status(init)
  3409. >>> input = F.Depend(input, clear_status) # Ensure your compute after clear_status
  3410. >>> output = Compute(input)
  3411. >>> init = F.Depend(init, output)
  3412. >>> flag = self.get_status(init) # Ensure get_status after your compute
  3413. >>> self.clear_status(init)
  3414. >>> print(init)
  3415. [0. 0. 0. 0. 0. 0. 0. 0.]
  3416. """
  3417. @prim_attr_register
  3418. def __init__(self):
  3419. """Initialize NPUClearFloatStatus"""
  3420. def infer_shape(self, x_shape):
  3421. cls_name = self.name
  3422. validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
  3423. validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
  3424. return [8]
  3425. def infer_dtype(self, x_dtype):
  3426. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
  3427. return mstype.float32
  3428. class Cos(Primitive):
  3429. r"""
  3430. Computes cosine of input element-wise.
  3431. .. warning::
  3432. Currently support Float16, Float32 data type. If use Float64, there may
  3433. be a problem of missing precision.
  3434. .. math::
  3435. out_i = cos(x_i)
  3436. Inputs:
  3437. - **x** (Tensor) - The shape of tensor is
  3438. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3439. Outputs:
  3440. Tensor, has the same shape as `x`.
  3441. Raises:
  3442. TypeError: If `x` is not a Tensor.
  3443. Supported Platforms:
  3444. ``Ascend`` ``GPU`` ``CPU``
  3445. Examples:
  3446. >>> cos = ops.Cos()
  3447. >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  3448. >>> output = cos(x)
  3449. >>> print(output)
  3450. [0.971338 0.67487574 0.95233357 0.9959527 ]
  3451. """
  3452. @prim_attr_register
  3453. def __init__(self):
  3454. """Initialize Cos"""
  3455. class ACos(PrimitiveWithInfer):
  3456. r"""
  3457. Computes arccosine of input tensors element-wise.
  3458. .. math::
  3459. out_i = cos^{-1}(x_i)
  3460. Inputs:
  3461. - **x** (Tensor) - The shape of tensor is
  3462. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3463. Outputs:
  3464. Tensor, has the same shape as `x`.
  3465. Raises:
  3466. TypeError: If `x` is not a Tensor.
  3467. Supported Platforms:
  3468. ``Ascend`` ``GPU`` ``CPU``
  3469. Examples:
  3470. >>> acos = ops.ACos()
  3471. >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  3472. >>> output = acos(x)
  3473. >>> print(output)
  3474. [0.7377037 1.5307858 1.2661037 0.97641146]
  3475. """
  3476. @prim_attr_register
  3477. def __init__(self):
  3478. """Initialize ACos"""
  3479. def infer_shape(self, x_shape):
  3480. return x_shape
  3481. def infer_dtype(self, x_dtype):
  3482. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  3483. return x_dtype
  3484. class Sin(PrimitiveWithInfer):
  3485. r"""
  3486. Computes sine of the input element-wise.
  3487. .. math::
  3488. out_i = sin(x_i)
  3489. Inputs:
  3490. - **x** (Tensor) - The shape of tensor is
  3491. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3492. Outputs:
  3493. Tensor, has the same shape as `x`.
  3494. Raises:
  3495. TypeError: If `x` is not a Tensor.
  3496. Supported Platforms:
  3497. ``Ascend`` ``GPU`` ``CPU``
  3498. Examples:
  3499. >>> sin = ops.Sin()
  3500. >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  3501. >>> output = sin(x)
  3502. >>> print(output)
  3503. [0.5810352 0.27635565 0.41687083 0.5810352 ]
  3504. """
  3505. @prim_attr_register
  3506. def __init__(self):
  3507. """Initialize Sin."""
  3508. def infer_shape(self, x_shape):
  3509. return x_shape
  3510. def infer_dtype(self, x_dtype):
  3511. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  3512. return x_dtype
  3513. class Asin(PrimitiveWithInfer):
  3514. r"""
  3515. Computes arcsine of input tensors element-wise.
  3516. .. math::
  3517. out_i = sin^{-1}(x_i)
  3518. Inputs:
  3519. - **x** (Tensor) - The shape of tensor is
  3520. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3521. Outputs:
  3522. Tensor, has the same shape as `x`.
  3523. Raises:
  3524. TypeError: If `x` is not a Tensor.
  3525. Supported Platforms:
  3526. ``Ascend`` ``GPU`` ``CPU``
  3527. Examples:
  3528. >>> asin = ops.Asin()
  3529. >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  3530. >>> output = asin(x)
  3531. >>> print(output)
  3532. [0.8330927 0.04001068 0.30469266 0.59438497]
  3533. """
  3534. @prim_attr_register
  3535. def __init__(self):
  3536. """Initialize Asin"""
  3537. def infer_shape(self, x_shape):
  3538. return x_shape
  3539. def infer_dtype(self, x_dtype):
  3540. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  3541. return x_dtype
  3542. class NMSWithMask(PrimitiveWithInfer):
  3543. r"""
  3544. When object detection problem is performed in the computer vision field, object detection algorithm generates
  3545. a plurality of bounding boxes. Selects some bounding boxes in descending order of score(Descending order is not
  3546. supported in Ascend platform currently). Use the box with the highest score calculate the overlap between other
  3547. boxes and the current box, and delete the box based on a certain threshold(IOU). The IOU is as follows,
  3548. .. math::
  3549. \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}
  3550. .. warning::
  3551. Only supports up to 2864 input boxes at one time.
  3552. Args:
  3553. iou_threshold (float): Specifies the threshold of overlap boxes with respect to
  3554. IOU. Default: 0.5.
  3555. Inputs:
  3556. - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
  3557. `N` is the number of input bounding boxes. Every bounding box
  3558. contains 5 values, the first 4 values are the coordinates(x0, y0, x1, y1) of bounding box which
  3559. represents the point of top-left and bottom-right, and the last value is the score of this bounding box.
  3560. The data type must be float16 or float32.
  3561. Outputs:
  3562. tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.
  3563. - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. The list of bounding boxes
  3564. after non-max suppression calculation.
  3565. - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The indexes list of
  3566. valid input bounding boxes.
  3567. - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
  3568. valid output bounding boxes.
  3569. Raises:
  3570. ValueError: If the `iou_threshold` is not a float number, or if the first dimension
  3571. of input Tensor is less than or equal to 0, or if the data type of the input
  3572. Tensor is not float16 or float32.
  3573. Supported Platforms:
  3574. ``Ascend`` ``GPU`` ``CPU``
  3575. Examples:
  3576. >>> bbox = np.array([[100.0, 100.0, 50.0, 68.0, 0.63], [150.0, 75.0, 165.0, 115.0, 0.55],
  3577. ... [12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
  3578. >>> bbox[:, 2] += bbox[:, 0]
  3579. >>> bbox[:, 3] += bbox[:, 1]
  3580. >>> inputs = Tensor(bbox, mindspore.float32)
  3581. >>> nms = ops.NMSWithMask(0.1)
  3582. >>> output_boxes, indices, mask = nms(inputs)
  3583. >>> indices_np = indices.asnumpy()
  3584. >>> print(indices_np[mask.asnumpy()])
  3585. [0 1 2]
  3586. """
  3587. @prim_attr_register
  3588. def __init__(self, iou_threshold=0.5):
  3589. """Initialize NMSWithMask"""
  3590. validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
  3591. self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
  3592. self.is_ge = context.get_context("enable_ge")
  3593. def infer_shape(self, bboxes_shape):
  3594. cls_name = self.name
  3595. validator.check_equal_int(len(bboxes_shape), 2, "bboxes rank", cls_name)
  3596. validator.check_positive_int(bboxes_shape[0], "bboxes.shape[0]", cls_name)
  3597. validator.check_equal_int(bboxes_shape[1], 5, "bboxes.shape[1]", cls_name)
  3598. num = bboxes_shape[0]
  3599. return bboxes_shape, (num,), (num,)
  3600. def infer_dtype(self, bboxes_dtype):
  3601. validator.check_tensor_dtype_valid("bboxes", bboxes_dtype, [mstype.float16, mstype.float32], self.name)
  3602. return bboxes_dtype, mstype.int32, mstype.bool_
  3603. class Abs(Primitive):
  3604. r"""
  3605. Returns absolute value of a tensor element-wise.
  3606. .. math::
  3607. out_i = |x_i|
  3608. Inputs:
  3609. - **x** (Tensor) - The input tensor. The shape of tensor is
  3610. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3611. Outputs:
  3612. Tensor, has the same shape as the `x`.
  3613. Raises:
  3614. TypeError: If `x` is not a Tensor.
  3615. Supported Platforms:
  3616. ``Ascend`` ``GPU`` ``CPU``
  3617. Examples:
  3618. >>> x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
  3619. >>> abs = ops.Abs()
  3620. >>> output = abs(x)
  3621. >>> print(output)
  3622. [1. 1. 0.]
  3623. """
  3624. @prim_attr_register
  3625. def __init__(self):
  3626. """Initialize Abs"""
  3627. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  3628. class Sign(PrimitiveWithInfer):
  3629. r"""
  3630. Performs sign on the tensor element-wise.
  3631. .. math::
  3632. sign(x) = \begin{cases} -1, &if\ x < 0 \cr
  3633. 0, &if\ x = 0 \cr
  3634. 1, &if\ x > 0\end{cases}
  3635. Inputs:
  3636. - **x** (Tensor) - The input tensor.
  3637. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3638. Outputs:
  3639. Tensor, has the same shape and type as the `x`.
  3640. Raises:
  3641. TypeError: If `x` is not a Tensor.
  3642. Supported Platforms:
  3643. ``Ascend`` ``CPU`` ``GPU``
  3644. Examples:
  3645. >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
  3646. >>> sign = ops.Sign()
  3647. >>> output = sign(x)
  3648. >>> print(output)
  3649. [[ 1. 0. -1.]]
  3650. """
  3651. @prim_attr_register
  3652. def __init__(self):
  3653. pass
  3654. def infer_shape(self, x_shape):
  3655. return x_shape
  3656. def infer_dtype(self, x_dtype):
  3657. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  3658. return x_dtype
  3659. class Round(PrimitiveWithInfer):
  3660. r"""
  3661. Returns half to even of a tensor element-wise.
  3662. .. math::
  3663. out_i \approx x_i
  3664. Inputs:
  3665. - **x** (Tensor) - The input tensor.
  3666. Outputs:
  3667. Tensor, has the same shape and type as the `x`.
  3668. Raises:
  3669. TypeError: If `x` is not a Tensor.
  3670. Supported Platforms:
  3671. ``Ascend`` ``GPU`` ``CPU``
  3672. Examples:
  3673. >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
  3674. >>> round = ops.Round()
  3675. >>> output = round(x)
  3676. >>> print(output)
  3677. [ 1. 2. 2. 2. -4.]
  3678. """
  3679. @prim_attr_register
  3680. def __init__(self):
  3681. """Initialize Round"""
  3682. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  3683. def infer_shape(self, x_shape):
  3684. return x_shape
  3685. def infer_dtype(self, x_dtype):
  3686. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  3687. return x_dtype
  3688. class Tan(Primitive):
  3689. r"""
  3690. Computes tangent of `x` element-wise.
  3691. .. math::
  3692. out_i = tan(x_i)
  3693. Inputs:
  3694. - **x** (Tensor) - The shape of tensor is
  3695. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3696. Data type must be float16 or float32.
  3697. Outputs:
  3698. Tensor, has the same shape as `x`.
  3699. Raises:
  3700. TypeError: If dtype of `x` is neither float16 nor float32.
  3701. TypeError: If `x` is not a Tensor.
  3702. Supported Platforms:
  3703. ``Ascend`` ``CPU``
  3704. Examples:
  3705. >>> tan = ops.Tan()
  3706. >>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
  3707. >>> output = tan(x)
  3708. >>> print(output)
  3709. [-1.5574081 0. 1.5574081]
  3710. """
  3711. @prim_attr_register
  3712. def __init__(self):
  3713. """Initialize Tan"""
  3714. class Atan(PrimitiveWithInfer):
  3715. r"""
  3716. Computes the trigonometric inverse tangent of the input element-wise.
  3717. .. math::
  3718. out_i = tan^{-1}(x_i)
  3719. Inputs:
  3720. - **x** (Tensor): The shape of tensor is
  3721. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3722. The data type should be one of the following types: float16, float32.
  3723. Outputs:
  3724. A Tensor, has the same type as the input.
  3725. Raises:
  3726. TypeError: If `x` is not a Tensor.
  3727. Supported Platforms:
  3728. ``Ascend`` ``GPU`` ``CPU``
  3729. Examples:
  3730. >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
  3731. >>> atan = ops.Atan()
  3732. >>> output = atan(x)
  3733. >>> print(output)
  3734. [0.7853982 0. ]
  3735. """
  3736. @prim_attr_register
  3737. def __init__(self):
  3738. pass
  3739. def infer_shape(self, x_shape):
  3740. return x_shape
  3741. def infer_dtype(self, x_type):
  3742. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
  3743. return x_type
  3744. class Atanh(PrimitiveWithInfer):
  3745. r"""
  3746. Computes inverse hyperbolic tangent of the input element-wise.
  3747. .. math::
  3748. out_i = \tanh^{-1}(x_{i})
  3749. Inputs:
  3750. - **x** (Tensor): The shape of tensor is
  3751. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3752. Outputs:
  3753. A Tensor, has the same type as the input.
  3754. Raises:
  3755. TypeError: If `x` is not a Tensor.
  3756. Supported Platforms:
  3757. ``Ascend`` ``CPU``
  3758. Examples:
  3759. >>> x = Tensor(np.array([0, -0.5]), mindspore.float32)
  3760. >>> atanh = ops.Atanh()
  3761. >>> output = atanh(x)
  3762. >>> print(output)
  3763. [ 0. -0.54930615]
  3764. """
  3765. @prim_attr_register
  3766. def __init__(self):
  3767. pass
  3768. def infer_shape(self, x_shape):
  3769. return x_shape
  3770. def infer_dtype(self, x_type):
  3771. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
  3772. return x_type
  3773. class Atan2(_MathBinaryOp):
  3774. r"""
  3775. Returns arctangent of x/y element-wise.
  3776. It returns :math:`\theta\ \in\ [-\pi, \pi]`
  3777. such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.
  3778. Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
  3779. If they have different data types, lower priority data type will be converted to
  3780. the relatively highest priority data type.
  3781. Inputs:
  3782. - **x** (Tensor) - The input tensor.
  3783. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3784. The data type will give priority to the high-precision data type
  3785. - **y** (Tensor) - The input tensor.
  3786. It has the same shape with `x`. The data type will give priority to the high-precision data type.
  3787. Outputs:
  3788. Tensor, the shape is the same as the one after broadcasting,and the data type is same as `x`.
  3789. Raises:
  3790. TypeError: If `x` or `y` is not a Tensor.
  3791. RuntimeError: If the data type of `x` and `y` conversion of Parameter is required
  3792. when data type conversion of Parameter is not supported.
  3793. Supported Platforms:
  3794. ``Ascend`` ``CPU`` ``GPU``
  3795. Examples:
  3796. >>> x = Tensor(np.array([0, 1]), mindspore.float32)
  3797. >>> y = Tensor(np.array([1, 1]), mindspore.float32)
  3798. >>> atan2 = ops.Atan2()
  3799. >>> output = atan2(x, y)
  3800. >>> print(output)
  3801. [0. 0.7853982]
  3802. """
  3803. class SquareSumAll(PrimitiveWithInfer):
  3804. r"""
  3805. Returns the square sum of a tensor element-wise
  3806. .. math::
  3807. \left\{\begin{matrix}out_{x} = {\textstyle \sum_{0}^{N}} (x_{i})^2
  3808. \\out_{y} = {\textstyle \sum_{0}^{N}} (y_{i})^2
  3809. \end{matrix}\right.
  3810. Inputs:
  3811. - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
  3812. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3813. - **y** (Tensor) - The input tensor has the same type and shape as the `x`.
  3814. Note:
  3815. SquareSumAll only supports float16 and float32 data type.
  3816. Outputs:
  3817. - **output_y1** (Tensor) - The same type as the `x`.
  3818. - **output_y2** (Tensor) - The same type as the `x`.
  3819. Raises:
  3820. TypeError: If neither `x` nor `y` is a Tensor.
  3821. ValueError: If `x` and `y` are not the same shape.
  3822. Supported Platforms:
  3823. ``Ascend`` ``GPU``
  3824. Examples:
  3825. >>> x = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
  3826. >>> y = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
  3827. >>> square_sum_all = ops.SquareSumAll()
  3828. >>> output = square_sum_all(x, y)
  3829. >>> print(output)
  3830. (Tensor(shape=[], dtype=Float32, value= 4),
  3831. Tensor(shape=[], dtype=Float32, value= 20))
  3832. """
  3833. @prim_attr_register
  3834. def __init__(self):
  3835. """Initialize SquareSumAll"""
  3836. def infer_shape(self, x_shape, y_shape):
  3837. validator.check("x1_shape", x_shape, "x2_shape", y_shape, Rel.EQ, self.name)
  3838. return [], []
  3839. def infer_dtype(self, x_type, y_type):
  3840. valid_types = (mstype.float16, mstype.float32)
  3841. args = {"x1_type": x_type, "x2_type": y_type}
  3842. validator.check_tensors_dtypes_same_and_valid(args, valid_types, self.name)
  3843. return x_type, y_type
  3844. class BitwiseAnd(_BitwiseBinaryOp):
  3845. r"""
  3846. Returns bitwise `and` of two tensors element-wise.
  3847. .. math::
  3848. out_i = x_{i} \wedge y_{i}
  3849. Inputs of `x` and `y` comply with the implicit type conversion rules to
  3850. make the data types consistent.
  3851. If they have different data types, lower priority data type will be converted to
  3852. the relatively highest priority data type.
  3853. Inputs:
  3854. - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  3855. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3856. - **y** (Tensor) - The input tensor with same type as the `x`.
  3857. Outputs:
  3858. Tensor, has the same type as the `x`.
  3859. Raises:
  3860. TypeError: If `x` or `y` is not a Tensor.
  3861. RuntimeError: If the data type of `x` and `y` conversion of Parameter is required
  3862. when data type conversion of Parameter is not supported.
  3863. Supported Platforms:
  3864. ``Ascend``
  3865. Examples:
  3866. >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3867. >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3868. >>> bitwise_and = ops.BitwiseAnd()
  3869. >>> output = bitwise_and(x, y)
  3870. >>> print(output)
  3871. [ 0 0 1 -1 1 0 1]
  3872. """
  3873. class BitwiseOr(_BitwiseBinaryOp):
  3874. r"""
  3875. Returns bitwise `or` of two tensors element-wise.
  3876. .. math::
  3877. out_i = x_{i} \mid y_{i}
  3878. Inputs of `x` and `y` comply with the implicit type conversion rules to
  3879. make the data types consistent.
  3880. If they have different data types, lower priority data type will be converted to
  3881. the relatively highest priority data type.
  3882. Inputs:
  3883. - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  3884. - **y** (Tensor) - The input tensor with same type as the `x`.
  3885. Outputs:
  3886. Tensor, has the same type as the `x`.
  3887. Raises:
  3888. TypeError: If `x` or `y` is not a Tensor.
  3889. RuntimeError: If the data type of `x`, `y` conversion of Parameter is required
  3890. when data type conversion of Parameter is not supported.
  3891. Supported Platforms:
  3892. ``Ascend``
  3893. Examples:
  3894. >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3895. >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3896. >>> bitwise_or = ops.BitwiseOr()
  3897. >>> output = bitwise_or(x, y)
  3898. >>> print(output)
  3899. [ 0 1 1 -1 -1 3 3]
  3900. """
  3901. class BitwiseXor(_BitwiseBinaryOp):
  3902. r"""
  3903. Returns bitwise `xor` of two tensors element-wise.
  3904. .. math::
  3905. out_i = x_{i} \oplus y_{i}
  3906. Inputs of `x` and `y` comply with the implicit type conversion rules to
  3907. make the data types consistent.
  3908. If they have different data types, lower priority data type will be converted to
  3909. the relatively highest priority data type.
  3910. Inputs:
  3911. - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  3912. - **y** (Tensor) - The input tensor with same type as the `x`.
  3913. Outputs:
  3914. Tensor, has the same type as the `x`.
  3915. Raises:
  3916. TypeError: If `x` or `y` is not a Tensor.
  3917. RuntimeError: If the data type of `x`, `y` conversion of Parameter is required
  3918. when data type conversion of Parameter is not supported.
  3919. Supported Platforms:
  3920. ``Ascend``
  3921. Examples:
  3922. >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3923. >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3924. >>> bitwise_xor = ops.BitwiseXor()
  3925. >>> output = bitwise_xor(x, y)
  3926. >>> print(output)
  3927. [ 0 1 0 0 -2 3 2]
  3928. """
  3929. class BesselI0e(PrimitiveWithInfer):
  3930. r"""
  3931. Computes BesselI0e of input element-wise.
  3932. The formula is defined as:
  3933. .. math::
  3934. BesselI0e(x) = \exp(|x|) * bessel\_i0(x)
  3935. where bessel_i0 is Bessel function of the first kind with 0 order.
  3936. Inputs:
  3937. - **x** (Tensor) - The shape of tensor is
  3938. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3939. Data type must be float16 or float32.
  3940. Outputs:
  3941. Tensor, has the same shape as `x`.
  3942. Raises:
  3943. TypeError: If `x` is not a Tensor.
  3944. Supported Platforms:
  3945. ``Ascend``
  3946. Examples:
  3947. >>> bessel_i0e = ops.BesselI0e()
  3948. >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  3949. >>> output = bessel_i0e(x)
  3950. >>> print(output)
  3951. [0.7979961 0.5144438 0.75117415 0.9157829 ]
  3952. """
  3953. @prim_attr_register
  3954. def __init__(self):
  3955. """Initialize BesselI0e"""
  3956. def infer_shape(self, x):
  3957. return x
  3958. def infer_dtype(self, x):
  3959. validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
  3960. return x
  3961. class BesselI1e(Primitive):
  3962. r"""
  3963. Computes BesselI1e of input element-wise.
  3964. The formula is defined as:
  3965. .. math::
  3966. BesselI1e(x) = \exp(|x|) * bessel\_i1(x)
  3967. where bessel_i1 is Bessel function of the first kind with 1 order.
  3968. Inputs:
  3969. - **x** (Tensor) - The shape of tensor is
  3970. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3971. Data type must be float16 or float32.
  3972. Outputs:
  3973. Tensor, has the same shape as `x`.
  3974. Raises:
  3975. TypeError: If `x` is not a Tensor.
  3976. TypeError: If dtype of `x` is not float16 or float32.
  3977. Supported Platforms:
  3978. ``Ascend``
  3979. Examples:
  3980. >>> bessel_i1e = ops.BesselI1e()
  3981. >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  3982. >>> output = bessel_i1e(x)
  3983. >>> print(output)
  3984. [0.09507662 0.19699717 0.11505538 0.04116856]
  3985. """
  3986. @prim_attr_register
  3987. def __init__(self):
  3988. """Initialize BesselI1e"""
  3989. self.init_prim_io_names(inputs=['x'], outputs='output')
  3990. class Inv(PrimitiveWithInfer):
  3991. r"""
  3992. Computes Inv(Reciprocal) of input tensor element-wise.
  3993. .. math::
  3994. out_i = out_i = \frac{1}{x_{i} }
  3995. Inputs:
  3996. - **x** (Tensor) - The shape of tensor is
  3997. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  3998. Must be one of the following types: float16, float32, int32.
  3999. Outputs:
  4000. Tensor, has the same shape and data type as `x`.
  4001. Raises:
  4002. TypeError: If dtype of `x` is not one of float16, float32, int32.
  4003. Supported Platforms:
  4004. ``Ascend``
  4005. Examples:
  4006. >>> inv = ops.Inv()
  4007. >>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
  4008. >>> output = inv(x)
  4009. >>> print(output)
  4010. [4. 2.5 3.2258065 1.923077 ]
  4011. """
  4012. @prim_attr_register
  4013. def __init__(self):
  4014. pass
  4015. def infer_shape(self, x_shape):
  4016. return x_shape
  4017. def infer_dtype(self, x_dtype):
  4018. validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.float16, mstype.float32,
  4019. mstype.int32], self.name)
  4020. return x_dtype
  4021. class Invert(Primitive):
  4022. r"""
  4023. Flips all bits of input tensor element-wise.
  4024. .. math::
  4025. out_i = -x_{i}
  4026. Inputs:
  4027. - **x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  4028. Outputs:
  4029. Tensor, has the same shape as `x`.
  4030. Raises:
  4031. TypeError: If dtype of `x` is neither int16 nor uint16.
  4032. Supported Platforms:
  4033. ``Ascend``
  4034. Examples:
  4035. >>> invert = ops.Invert()
  4036. >>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
  4037. >>> output = invert(x)
  4038. >>> print(output)
  4039. [-26 -5 -14 -10]
  4040. """
  4041. @prim_attr_register
  4042. def __init__(self):
  4043. """Initialize Invert"""
  4044. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  4045. class Eps(PrimitiveWithInfer):
  4046. """
  4047. Creates a tensor filled with `x` dtype minimum value.
  4048. Inputs:
  4049. - **x** (Tensor) - Input tensor. The data type must be float16, float32 or float64.
  4050. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
  4051. Outputs:
  4052. Tensor, has the same type and shape as `x`, but filled with `x` dtype minimum val.
  4053. Supported Platforms:
  4054. ``Ascend`` ``GPU`` ``CPU``
  4055. Examples:
  4056. >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
  4057. >>> output = ops.Eps()(x)
  4058. >>> print(output)
  4059. [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
  4060. """
  4061. @prim_attr_register
  4062. def __init__(self):
  4063. """Initialize Eps"""
  4064. self.init_prim_io_names(inputs=['input_x'], outputs=['y'])
  4065. def __infer__(self, input_x):
  4066. valid_dtypes = [mstype.float16, mstype.float32, mstype.float64]
  4067. validator.check_tensor_dtype_valid('input_x', input_x['dtype'], valid_dtypes, self.name)
  4068. x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
  4069. if x_nptype == np.float16:
  4070. min_val = 2 ** (-14)
  4071. elif x_nptype == np.float32:
  4072. min_val = 2 ** (-16)
  4073. else:
  4074. min_val = 2 ** (-52)
  4075. res = np.full(input_x['shape'], min_val, x_nptype)
  4076. out = {
  4077. 'value': Tensor(res),
  4078. 'shape': input_x['shape'],
  4079. 'dtype': input_x['dtype'],
  4080. }
  4081. return out
  4082. class LinSpace(PrimitiveWithInfer):
  4083. r"""
  4084. The OP returns a Tensor whose value is num evenly spaced in the interval start and stop (including start and stop),
  4085. and the length of the output Tensor is num.
  4086. .. math::
  4087. \begin{aligned}
  4088. &step = (stop - start)/(num - 1)\\
  4089. &output = [start, start+step, start+2*step, ... , stop]
  4090. \end{aligned}
  4091. Inputs:
  4092. - **start** (Tensor[float32]) - Start value of interval, With shape of 0-D.
  4093. - **stop** (Tensor[float32]) - Last value of interval, With shape of 0-D.
  4094. - **num** (int) - Number of ticks in the interval, inclusive of start and stop.
  4095. Outputs:
  4096. Tensor, has the same shape as `start`.
  4097. Supported Platforms:
  4098. ``Ascend`` ``GPU``
  4099. Examples:
  4100. >>> linspace = ops.LinSpace()
  4101. >>> start = Tensor(1, mindspore.float32)
  4102. >>> stop = Tensor(10, mindspore.float32)
  4103. >>> num = 5
  4104. >>> output = linspace(start, stop, num)
  4105. >>> print(output)
  4106. [ 1. 3.25 5.5 7.75 10. ]
  4107. """
  4108. @prim_attr_register
  4109. def __init__(self):
  4110. """Initialize LinSpace"""
  4111. def __infer__(self, start, stop, num):
  4112. args = {"start": start['dtype'], "stop": start['dtype']}
  4113. validator.check_tensors_dtypes_same_and_valid(args, (mstype.float32,), self.name)
  4114. start_shape = start['shape']
  4115. stop_shape = stop['shape']
  4116. validator.check_equal_int(len(start_shape), 0, "rank of start_shape", self.name)
  4117. validator.check_equal_int(len(stop_shape), 0, "rank of stop_shape", self.name)
  4118. num_v = num['value']
  4119. validator.check_value_type('num', num_v, [int], self.name)
  4120. validator.check_positive_int(num_v, "num", self.name)
  4121. out_shape = [num_v]
  4122. out = {'shape': out_shape,
  4123. 'dtype': start['dtype'],
  4124. 'value': None}
  4125. return out
class MatrixInverse(PrimitiveWithInfer):
    """
    Returns the inverse of the input matrix. If the matrix is not invertible, an error
    may be reported or an unknown result may be returned.

    Note:
        The parameter `adjoint` currently only supports False, because complex numbers
        are not supported at present.

    Args:
        adjoint (bool) : An optional bool. Default: False.

    Inputs:
        - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
          dimensions must be the same size. types: float32, float64.

    Outputs:
        Tensor, has the same type and shape as input `x`.

    Raises:
        TypeError: If `adjoint` is not a bool.
        TypeError: If dtype of `x` is neither float32 nor float64.
        ValueError: If the last two dimensions of `x` are not the same size.
        ValueError: If the dimension of `x` is less than 2.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[[-0.710504 , -1.1207525],
        ...                       [-1.7651395 , -1.7576632]],
        ...                      [[ 0.52412605, 1.9070215],
        ...                       [ 1.3384849 , 1.4274558]]]), mindspore.float32)
        >>> matrix_inverse = ops.MatrixInverse(adjoint=False)
        >>> output = matrix_inverse(x)
        >>> print(output)
        [[[ 2.4095483 -1.536419 ]
          [-2.4197974 0.97401696]]
         [[-0.79111797 1.0569006 ]
          [ 0.74180895 -0.2904787 ]]]
    """

    @prim_attr_register
    def __init__(self, adjoint=False):
        """Initialize MatrixInverse"""
        # NOTE(review): `check_type_name` with the literal `False` as the expected
        # value looks unusual — confirm it enforces the "adjoint must be a bool
        # (and only False is supported)" contract promised by the docstring.
        validator.check_type_name("adjoint", adjoint, False, self.name)
        self.adjoint = adjoint

    def infer_dtype(self, x_dtype):
        # Only float32/float64 matrices are supported (no complex support yet).
        valid_type = [mstype.float32, mstype.double]
        validator.check_tensor_dtype_valid("x_dtype", x_dtype, valid_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        # Input must be at least rank 2 with a square trailing 2-D sub-matrix;
        # the inverse keeps the input shape.
        validator.check_int(len(x_shape), 2, Rel.GE, self.name, None)
        validator.check_equal_int(x_shape[-1], x_shape[-2], self.name, None)
        return x_shape
class IndexAdd(Primitive):
    """
    Adds tensor y to specified axis and indices of tensor x. The axis should be in the range from 0 to len(x.dim) - 1,
    and indices should be in the range from 0 to the size of x at the axis dimension.

    Args:
        axis (int): The dimension along which to index.
        use_lock (bool): If true, use lock mode. If false, don't use lock mode. Default: True.
        check_index_bound (bool): If true, check index boundary. If false, don't check index boundary. Default: True.

    Inputs:
        - **x** (Parameter) - The input tensor to add to.
        - **indices** (Tensor) - Add the value of `x` and `y` along the dimension of the `axis` according to the
          specified index value, with data type int32.
          The `indices` must be 1D with the same size as the size of `y` in the `axis` dimension. The values
          of `indices` should be in [0, b), where the b is the size of `x` in the `axis` dimension.
        - **y** (Tensor) - The input tensor with the value to add. Must have same data type as `x`.
          The shape must be the same as `x` except the `axis` th dimension.

    Outputs:
        Tensor, has the same shape and dtype as x.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If neither `indices` nor `y` is a Tensor.
        ValueError: If axis is out of `x` rank's range.
        ValueError: If `x` rank is not the same as `y` rank.
        ValueError: If size of `indices` is not equal to dimension of y[axis].
        ValueError: If `y`'s shape is not the same as `x` except the `axis` th dimension.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.index_add = ops.IndexAdd(axis=1)
        ...         self.x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32))
        ...         self.indices = Tensor(np.array([0, 2]), mindspore.int32)
        ...
        ...     def construct(self, y):
        ...         return self.index_add(self.x, self.indices, y)
        ...
        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
        >>> net = Net()
        >>> output = net(y)
        >>> print(output)
        [[ 1.5 2. 4. ]
         [ 5. 5. 7.5]
         [ 9. 8. 11.5]]
    """
    # `input_x` is updated in place (RW_WRITE); `indices` may have a different
    # dtype (T1) from the value tensors (T).
    __mindspore_signature__ = (
        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('input_y', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, axis, use_lock=True, check_index_bound=True):
        """Initialize IndexAdd"""
        self.init_prim_io_names(inputs=['input_x', 'indices', 'input_y'], outputs=['output'])
        self.axis = axis
        validator.check_value_type('axis', axis, [int], self.name)
class Erfinv(Primitive):
    r"""
    Computes the inverse error function of input. The inverse error function is defined in the range (-1, 1) as:

    .. math::
        erfinv(erf(x)) = x

    Inputs:
        - **input_x** (Tensor) - The input tensor to compute to, with data type float32, float16.

    Outputs:
        Tensor, has the same shape and dtype as `input_x`.

    Raises:
        TypeError: If dtype of `input_x` is not one of: float32, float16.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
        >>> erfinv = ops.Erfinv()
        >>> output = erfinv(x)
        >>> print(output)
        [ 0. 0.47695306 -1.1630805 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Erfinv"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
class Conj(PrimitiveWithInfer):
    """
    Returns a Tensor that is the element-wise complex conjugate of the input.

    Inputs:
        - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.

    Outputs:
        Tensor, has the same dtype and shape as the input.

    Raises:
        TypeError: If the dtype of input is not one of: complex64, complex128.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
        >>> conj = ops.Conj()
        >>> output = conj(x)
        >>> print(output)
        1.3-0.4j
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(
            inputs=['input_tensor'],
            outputs=['output_tensor'])

    def infer_shape(self, input_shape):
        # Conjugation is element-wise: shape is unchanged.
        return input_shape

    def infer_dtype(self, input_dtype):
        # The conjugate keeps the complex dtype of the input.
        validator.check_tensor_dtype_valid('input_tensor', input_dtype,
                                           [mstype.complex64, mstype.complex128], self.name)
        return input_dtype
  4282. class Real(PrimitiveWithInfer):
  4283. """
  4284. Returns a Tensor that is the real part of the input.
  4285. Inputs:
  4286. - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.
  4287. Outputs:
  4288. Tensor, has the float type.
  4289. Raises:
  4290. TypeError: If the dtype of input is not one of: complex64, complex128.
  4291. Supported Platforms:
  4292. ``GPU``
  4293. Examples:
  4294. >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
  4295. >>> conj = ops.Real()
  4296. >>> output = conj(x)
  4297. >>> print(output)
  4298. 1.3
  4299. """
  4300. @prim_attr_register
  4301. def __init__(self):
  4302. self.init_prim_io_names(
  4303. inputs=['input_tensor'],
  4304. outputs=['output_tensor'])
  4305. def infer_shape(self, input_shape):
  4306. return input_shape
  4307. def infer_dtype(self, input_dtype):
  4308. validator.check_tensor_dtype_valid('input_tensor', input_dtype,
  4309. [mstype.complex64, mstype.complex128], self.name)
  4310. if input_dtype == mstype.tensor_type(mstype.complex64):
  4311. output_dtype = mstype.float32
  4312. elif input_dtype == mstype.tensor_type(mstype.complex128):
  4313. output_dtype = mstype.float64
  4314. return output_dtype
class Complex(Primitive):
    """
    Returns a complex Tensor from the real part and the imag part.

    Inputs:
        - **real** (Tensor) - The real input tensor. types: float32, float64.
        - **imag** (Tensor) - The imag input tensor. types: float32, float64.

    Outputs:
        Tensor, has the complex type.

    Raises:
        TypeError: If the dtype of input is not one of: float32, float64.
        TypeError: If the dtypes of the two inputs are not the same.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> real = Tensor(np.asarray(1), mindspore.float32)
        >>> imag = Tensor(np.asarray(2), mindspore.float32)
        >>> complex_op = ops.Complex()
        >>> output = complex_op(real, imag)
        >>> print(output)
        (1 + 2j)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Complex"""
        self.init_prim_io_names(inputs=['input_real', 'input_imag'], outputs=['output'])
  4340. class Imag(PrimitiveWithInfer):
  4341. """
  4342. Returns a new tensor containing imaginary value of the input.
  4343. Inputs:
  4344. - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.
  4345. Outputs:
  4346. Tensor, has the float type.
  4347. Raises:
  4348. TypeError: If the dtype of input is not one of: complex64, complex128.
  4349. Supported Platforms:
  4350. ``GPU``
  4351. Examples:
  4352. >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
  4353. >>> conj = ops.Imag()
  4354. >>> output = conj(x)
  4355. >>> print(output)
  4356. 0.4
  4357. """
  4358. @prim_attr_register
  4359. def __init__(self):
  4360. self.init_prim_io_names(
  4361. inputs=['input_tensor'],
  4362. outputs=['output_tensor'])
  4363. def infer_shape(self, input_shape):
  4364. return input_shape
  4365. def infer_dtype(self, input_dtype):
  4366. validator.check_tensor_dtype_valid('input_tensor', input_dtype,
  4367. [mstype.complex64, mstype.complex128], self.name)
  4368. if input_dtype == mstype.tensor_type(mstype.complex64):
  4369. output_dtype = mstype.float32
  4370. elif input_dtype == mstype.tensor_type(mstype.complex128):
  4371. output_dtype = mstype.float64
  4372. return output_dtype
class Trunc(Primitive):
    """
    Returns a new tensor with the truncated integer values of the elements of input
    (each element rounded toward zero).

    Inputs:
        - **input_x** (Tensor) - Input_x is a tensor.

    Outputs:
        Tensor, the same shape and data type as the input.

    Raises:
        TypeError: If `input_x` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> trunc = ops.Trunc()
        >>> output = trunc(Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]),mindspore.float32))
        >>> print(output)
        [ 3. 0. 0. -3.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Trunc"""