diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc
index f686e13f2e..1562df80f8 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.cc
+++ b/mindspore/ccsrc/transform/graph_ir/convert.cc
@@ -213,6 +213,7 @@ const char kNameRange[] = "Range";
 const char kNameSquareSumAll[] = "SquareSumAll";
 const char kNameAscendQuant[] = "Quant";
 const char kNameAscendDequant[] = "Dequant";
+const char kNameReverseSequence[] = "ReverseSequence";
 const char kNameCase[] = "Case";
 
 // -----------------OpAdapter initialization--------------
@@ -429,6 +430,7 @@ std::unordered_map<std::string, OpAdapterDescPtr> &DfGraphConvertor::get_adpt_ma
     {string(kNameSquareSumAll), ADPT_DESC(SquareSumAll)},
     {string(kNameAscendQuant), ADPT_DESC(AscendQuant)},
     {string(kNameAscendDequant), ADPT_DESC(AscendDequant)},
+    {string(kNameReverseSequence), ADPT_DESC(ReverseSequence)},
     {string(kNameCase), ADPT_DESC(Case)}};
 #ifdef ENABLE_GE
   adpt_map[string(kNamePrint)] = ADPT_DESC(Print);
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare.cc
index bce309f3b3..3632d62651 100644
--- a/mindspore/ccsrc/transform/graph_ir/op_declare.cc
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare.cc
@@ -1340,6 +1340,12 @@ ATTR_MAP(CTCLoss) = {
   {"ignore_longer_outputs_than_inputs", ATTR_DESC(ignore_longer_outputs_than_inputs, AnyTraits<bool>())}};
 OUTPUT_MAP(CTCLoss) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(gradient)}};
 
+// ReverseSequence
+INPUT_MAP(ReverseSequence) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(seq_lengths)}};
+ATTR_MAP(ReverseSequence) = {{"seq_dim", ATTR_DESC(seq_dim, AnyTraits<int>())},
+                             {"batch_dim", ATTR_DESC(batch_dim, AnyTraits<int>())}};
+OUTPUT_MAP(ReverseSequence) = {{0, OUTPUT_DESC(y)}};
+
 // AscendQuant
 INPUT_MAP(AscendQuant) = {{1, INPUT_DESC(x)}};
 ATTR_MAP(AscendQuant) = {{"scale", ATTR_DESC(scale, AnyTraits<float>())},
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare.h
index 186a6f43c3..9da23b49da 100755
--- a/mindspore/ccsrc/transform/graph_ir/op_declare.h
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare.h
@@ -501,6 +501,8 @@ DECLARE_OP_ADAPTER(L2Loss)
 DECLARE_OP_USE_OUTPUT(L2Loss)
 DECLARE_OP_ADAPTER(CTCLoss)
 DECLARE_OP_USE_OUTPUT(CTCLoss)
+DECLARE_OP_ADAPTER(ReverseSequence)
+DECLARE_OP_USE_OUTPUT(ReverseSequence)
 DECLARE_OP_ADAPTER(AscendQuant)
 DECLARE_OP_USE_OUTPUT(AscendQuant)
 DECLARE_OP_ADAPTER(AscendDequant)
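With the adapter above registered, the ReverseSequence primitive can be dispatched to GE. For reference, a minimal usage sketch from the Python side (values are illustrative; it assumes the existing mindspore.ops.operations.ReverseSequence front end, which this patch only maps, not adds):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# Reverse the first seq_lengths[i] elements of row i along seq_dim.
x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
seq_lengths = Tensor(np.array([2, 3], dtype=np.int32))
reverse_sequence = P.ReverseSequence(seq_dim=1, batch_dim=0)
y = reverse_sequence(x, seq_lengths)  # [[2, 1, 3], [6, 5, 4]]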
diff --git a/mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py b/mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py
index fbaf51e643..aa66b8f156 100644
--- a/mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py
+++ b/mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py
@@ -31,14 +31,6 @@ apply_adagrad_v2_d_op_info = TBERegOp("ApplyAdagradV2") \
     .input(3, "grad", False, "required", "all") \
     .output(0, "var", False, "required", "all") \
     .output(1, "accum", False, "required", "all") \
-    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD,
-                  DataType.F16_5HD, DataType.F16_5HD) \
-    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ,
-                  DataType.F16_FracZ, DataType.F16_FracZ) \
-    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0,
-                  DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
-    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
-                  DataType.F16_Default, DataType.F16_Default) \
     .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD,
                   DataType.F32_5HD, DataType.F32_5HD) \
     .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ,
diff --git a/mindspore/ops/_op_impl/tbe/bias_add.py b/mindspore/ops/_op_impl/tbe/bias_add.py
index 8fe792d2ff..ef17fa6f88 100644
--- a/mindspore/ops/_op_impl/tbe/bias_add.py
+++ b/mindspore/ops/_op_impl/tbe/bias_add.py
@@ -28,9 +28,7 @@ bias_add_grad_op_info = TBERegOp("BiasAdd") \
     .input(1, "bias", False, "required", "all") \
     .output(0, "y", False, "required", "all") \
     .op_pattern("dynamicFormat") \
-    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
-    .dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None) \
-    .dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None) \
     .get_op_info()
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_and.py b/mindspore/ops/_op_impl/tbe/bitwise_and.py
index 332cf32ee1..a4d7757771 100644
--- a/mindspore/ops/_op_impl/tbe/bitwise_and.py
+++ b/mindspore/ops/_op_impl/tbe/bitwise_and.py
@@ -29,6 +29,7 @@ bitwise_and_op_info = TBERegOp("BitwiseAnd") \
     .op_pattern("broadcast") \
     .dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
     .dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
+    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
     .get_op_info()
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_or.py b/mindspore/ops/_op_impl/tbe/bitwise_or.py
index 6af6a03eb1..54dd6fb584 100644
--- a/mindspore/ops/_op_impl/tbe/bitwise_or.py
+++ b/mindspore/ops/_op_impl/tbe/bitwise_or.py
@@ -29,6 +29,7 @@ bitwise_or_op_info = TBERegOp("BitwiseOr") \
     .op_pattern("broadcast") \
     .dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
     .dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
+    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
     .get_op_info()
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_xor.py b/mindspore/ops/_op_impl/tbe/bitwise_xor.py
index 8073d13e62..fc8b609a5f 100644
--- a/mindspore/ops/_op_impl/tbe/bitwise_xor.py
+++ b/mindspore/ops/_op_impl/tbe/bitwise_xor.py
@@ -29,6 +29,7 @@ bitwise_xor_op_info = TBERegOp("BitwiseXor") \
     .op_pattern("broadcast") \
     .dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
     .dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
+    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
     .get_op_info()
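The three bitwise registrations above each gain an int32 dtype_format entry. A minimal sketch exercising the new dtype (illustrative values):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# int32 inputs become valid once the I32_None entry is registered.
x1 = Tensor(np.array([0, 1, 3], dtype=np.int32))
x2 = Tensor(np.array([1, 1, 2], dtype=np.int32))
print(P.BitwiseAnd()(x1, x2))  # [0 1 2]
print(P.BitwiseOr()(x1, x2))   # [1 1 3]
print(P.BitwiseXor()(x1, x2))  # [1 0 1]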
diff --git a/mindspore/ops/_op_impl/tbe/space_to_depth.py b/mindspore/ops/_op_impl/tbe/space_to_depth.py
index 5cf6aa2b38..2b22a65e4c 100644
--- a/mindspore/ops/_op_impl/tbe/space_to_depth.py
+++ b/mindspore/ops/_op_impl/tbe/space_to_depth.py
@@ -26,17 +26,18 @@ space_to_depth_op_info = TBERegOp("SpaceToDepth") \
     .attr("block_size", "required", "int", "all") \
     .attr("data_format", "optional", "str", "all") \
     .input(0, "x", False, "required", "all") \
+    .input(1, "filter", False, "optional", "all") \
     .output(0, "y", False, "required", "all") \
-    .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \
-    .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \
-    .dtype_format(DataType.I8_NHWC, DataType.I8_NHWC) \
-    .dtype_format(DataType.I16_NHWC, DataType.I16_NHWC) \
-    .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \
-    .dtype_format(DataType.I64_NHWC, DataType.I64_NHWC) \
-    .dtype_format(DataType.U8_NHWC, DataType.U8_NHWC) \
-    .dtype_format(DataType.U16_NHWC, DataType.U16_NHWC) \
-    .dtype_format(DataType.U32_NHWC, DataType.U32_NHWC) \
-    .dtype_format(DataType.U64_NHWC, DataType.U64_NHWC) \
+    .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_5HD) \
+    .dtype_format(DataType.F32_NHWC, DataType.F16_FracZ, DataType.F32_NHWC) \
+    .dtype_format(DataType.I8_NHWC, DataType.F16_FracZ, DataType.I8_NHWC) \
+    .dtype_format(DataType.I16_NHWC, DataType.F16_FracZ, DataType.I16_NHWC) \
+    .dtype_format(DataType.I32_NHWC, DataType.F16_FracZ, DataType.I32_NHWC) \
+    .dtype_format(DataType.I64_NHWC, DataType.F16_FracZ, DataType.I64_NHWC) \
+    .dtype_format(DataType.U8_NHWC, DataType.F16_FracZ, DataType.U8_NHWC) \
+    .dtype_format(DataType.U16_NHWC, DataType.F16_FracZ, DataType.U16_NHWC) \
+    .dtype_format(DataType.U32_NHWC, DataType.F16_FracZ, DataType.U32_NHWC) \
+    .dtype_format(DataType.U64_NHWC, DataType.F16_FracZ, DataType.U64_NHWC) \
     .get_op_info()
diff --git a/mindspore/ops/_op_impl/tbe/unpack.py b/mindspore/ops/_op_impl/tbe/unpack.py
index 314f81afa5..d641fab4f2 100644
--- a/mindspore/ops/_op_impl/tbe/unpack.py
+++ b/mindspore/ops/_op_impl/tbe/unpack.py
@@ -27,26 +27,8 @@ unpack_op_info = TBERegOp("Unpack") \
     .attr("axis", "required", "int", "all") \
     .input(0, "x", False, "required", "all") \
     .output(0, "y", False, "dynamic", "all") \
-    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
-    .dtype_format(DataType.I16_Default, DataType.I16_Default) \
-    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
-    .dtype_format(DataType.I64_Default, DataType.I64_Default) \
-    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
-    .dtype_format(DataType.U16_Default, DataType.U16_Default) \
-    .dtype_format(DataType.U32_Default, DataType.U32_Default) \
-    .dtype_format(DataType.U64_Default, DataType.U64_Default) \
-    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
-    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
-    .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \
-    .dtype_format(DataType.I16_5HD, DataType.I16_5HD) \
-    .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
-    .dtype_format(DataType.I64_5HD, DataType.I64_5HD) \
-    .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \
-    .dtype_format(DataType.U16_5HD, DataType.U16_5HD) \
-    .dtype_format(DataType.U32_5HD, DataType.U32_5HD) \
-    .dtype_format(DataType.U64_5HD, DataType.U64_5HD) \
-    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
-    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .op_pattern("dynamicFormat") \
+    .dtype_format(DataType.None_None, DataType.None_None) \
     .get_op_info()
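Unpack now declares the dynamicFormat pattern with a single None_None entry instead of enumerating every dtype/format pair, leaving the selection to TBE. A minimal sketch of the op from the Python side (illustrative shapes):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# Unpack splits the input along `axis` into a tuple of tensors.
x = Tensor(np.array([[1, 1, 1], [2, 2, 2]], dtype=np.float32))
unpack = P.Unpack(axis=0)
out = unpack(x)  # tuple of two tensors, each of shape (3,)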
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 6666c8bb5c..572556567d 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1132,7 +1132,7 @@ class SquaredDifference(_MathBinaryOp):
 
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
-    both dtypes cannot be bool, and the shapes of them could be broadcast.
+    the dtypes of them cannot both be bool, and their shapes can be broadcast.
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.
 
@@ -1833,7 +1833,7 @@ class TruncateDiv(_MathBinaryOp):
 
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
-    both dtypes cannot be bool, and the shapes of them could be broadcast.
+    the dtypes of them cannot both be bool, and their shapes can be broadcast.
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.
 
@@ -1862,7 +1862,7 @@ class TruncateMod(_MathBinaryOp):
 
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
-    both dtypes cannot be bool, and the shapes of them could be broadcast.
+    the dtypes of them cannot both be bool, and their shapes can be broadcast.
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.
 
@@ -2014,7 +2014,7 @@ class Xdivy(_MathBinaryOp):
 
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
-    both dtypes cannot be bool, and the shapes of them could be broadcast.
+    the dtypes of them cannot both be bool, and their shapes can be broadcast.
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.
 
@@ -2047,7 +2047,7 @@ class Xlogy(_MathBinaryOp):
 
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors,
-    both dtypes cannot be bool, and the shapes of them could be broadcast.
+    the dtypes of them cannot both be bool, and their shapes can be broadcast.
     When the inputs are one tensor and one scalar,
     the scalar only could be a constant.
 
@@ -3234,7 +3234,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
     Returns bitwise `and` of two tensors element-wise.
 
     Inputs:
-        - **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
+        - **input_x1** (Tensor) - The input tensor with int16, int32, or uint16 data type.
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
 
     Outputs:
@@ -3254,7 +3254,7 @@ class BitwiseOr(_BitwiseBinaryOp):
     Returns bitwise `or` of two tensors element-wise.
 
     Inputs:
-        - **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
+        - **input_x1** (Tensor) - The input tensor with int16, int32, or uint16 data type.
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
 
     Outputs:
@@ -3274,7 +3274,7 @@ class BitwiseXor(_BitwiseBinaryOp):
     Returns bitwise `xor` of two tensors element-wise.
 
     Inputs:
-        - **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
+        - **input_x1** (Tensor) - The input tensor with int16, int32, or uint16 data type.
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
 
     Outputs:
@@ -3297,7 +3297,7 @@ class BesselI0e(PrimitiveWithInfer):
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
 
     Outputs:
-        Tensor, has the same shape as `input_x`.
+        Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
 
     Examples:
         >>> bessel_i0e = P.BesselI0e()
@@ -3326,7 +3326,7 @@ class BesselI1e(PrimitiveWithInfer):
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
 
     Outputs:
-        Tensor, has the same shape as `input_x`.
+        Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
 
     Examples:
         >>> bessel_i1e = P.BesselI1e()
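As the reworded docstrings above state, these binary math ops broadcast two tensor inputs and reject the case where both dtypes are bool. A minimal sketch with TruncateDiv (illustrative values; the division truncates toward zero):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# Shapes (2, 3) and (1, 3) broadcast; the dtypes here are int32, not bool.
x = Tensor(np.array([[2, 4, -1], [6, 8, -5]], dtype=np.int32))
y = Tensor(np.array([[3, 3, 3]], dtype=np.int32))
print(P.TruncateDiv()(x, y))  # [[0 1 0] [2 2 -1]]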
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 9818792513..aaa24c1572 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -1412,6 +1412,7 @@ class BiasAdd(PrimitiveWithInfer):
     Inputs:
         - **input_x** (Tensor) - Input value. The input shape can be 2-4 dimensions.
         - **bias** (Tensor) - Bias value, with shape :math:`(C)`.
+          The shape of `bias` must be the same as the second dimension of `input_x`.
 
     Outputs:
         Tensor, with the same shape and type as `input_x`.
@@ -2341,7 +2342,7 @@ class OneHot(PrimitiveWithInfer):
     Inputs:
         - **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
-        Data type must be int32.
+          Data type must be int32.
         - **depth** (int) - A scalar defining the depth of the one hot dimension.
         - **on_value** (Tensor) - A value to fill in output when `indices[j] = i`.
           With data type of float16 or float32.
        - **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
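A minimal sketch of the BiasAdd constraint documented above: the length of `bias` must equal the second (channel) dimension of `input_x` (illustrative values):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# input_x has shape (2, 3), so bias must have shape (3,).
input_x = Tensor(np.ones((2, 3), dtype=np.float32))
bias = Tensor(np.array([0.1, 0.2, 0.3], dtype=np.float32))
output = P.BiasAdd()(input_x, bias)  # shape (2, 3); bias added per channel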