@@ -26,6 +26,7 @@ reverse_sequence_op_info = AiCPURegOp("ReverseSequence") \
     .dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \
     .dtype_format(DataType.I16_Default, DataType.I32_Default, DataType.I16_Default) \
     .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
     .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I64_Default) \
     .dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \
     .dtype_format(DataType.U16_Default, DataType.I32_Default, DataType.U16_Default) \
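The added registration pairs int32 data with int64 `seq_lengths`. A minimal usage sketch of that combination; the shapes and values below are illustrative, not taken from this patch:

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# int32 input with int64 seq_lengths: the dtype pairing registered above.
x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32))
seq_lengths = Tensor(np.array([1, 2, 3], dtype=np.int64))

# Reverse the first seq_lengths[i] elements of batch entry i along seq_dim.
reverse_sequence = P.ReverseSequence(seq_dim=1, batch_dim=0)
out = reverse_sequence(x, seq_lengths)
# Expected result: [[1, 2, 3], [5, 4, 6], [9, 8, 7]]
```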
@@ -1892,7 +1892,7 @@ class RNNTLoss(PrimitiveWithInfer):
         - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.
         - **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.
         - **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
-        - **label_lebgths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
+        - **label_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.

     Outputs:
         - **costs** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
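A hedged call sketch matching the documented input shapes above; the `blank_label` argument and the concrete sizes are assumptions for illustration:

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

B, T, U, V = 1, 2, 3, 5
acts = np.random.random((B, T, U, V)).astype(np.float32)   # (B, T, U, V)
labels = np.array([[1, 2]]).astype(np.int32)                # (B, U-1)
input_lengths = np.array([T]).astype(np.int32)              # (B,)
label_lengths = np.array([U - 1]).astype(np.int32)          # (B,)

rnnt_loss = P.RNNTLoss(blank_label=0)
output = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_lengths), Tensor(label_lengths))
# Per the docstring above, the outputs include costs of shape (B,).
```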
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -28,16 +27,15 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.ctc_loss = P.CTCLoss()

-    @ms_function
     def construct(self, inputs, labels_indices, labels_values, sequence_length):
         return self.ctc_loss(inputs, labels_indices, labels_values, sequence_length)


 def test_net_float32():
-    x = np.rand.randn(2, 2, 3).astype(np.float32)
-    labels_indices = np.array([[0, 0], [1, 0]]).astype(np.int64)
-    labels_values = np.array([2, 2]).astype(np.int32)
+    x = np.random.randn(2, 2, 3).astype(np.float32)
+    labels_indices = np.array([[0, 1], [1, 0]]).astype(np.int64)
+    labels_values = np.array([1, 2]).astype(np.int32)
     sequence_length = np.array([2, 2]).astype(np.int32)
     net = Net()
     output = net(Tensor(x), Tensor(labels_indices), Tensor(labels_values), Tensor(sequence_length))
-    print(output.asnumpy())
+    print(output)
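For readability of the corrected test data: `labels_indices`/`labels_values` form a sparse (COO-style) encoding of the label sequences, where each `[batch, position]` row in `labels_indices` points at the class id stored at the same row of `labels_values`. A small densification helper (hypothetical, not part of the patch) makes that explicit:

```python
import numpy as np

def densify_labels(labels_indices, labels_values, batch_size, max_label_len):
    """Turn the sparse (indices, values) label encoding into a dense array; -1 marks unused slots."""
    dense = np.full((batch_size, max_label_len), -1, dtype=np.int32)
    for (b, pos), v in zip(labels_indices, labels_values):
        dense[b, pos] = v
    return dense

labels_indices = np.array([[0, 1], [1, 0]]).astype(np.int64)
labels_values = np.array([1, 2]).astype(np.int32)
print(densify_labels(labels_indices, labels_values, batch_size=2, max_label_len=2))
# [[-1  1]
#  [ 2 -1]]
```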
@@ -40,7 +40,7 @@ def test_net_int8():
     batch_dim = 1
     net = Net(seq_dim, batch_dim)
     output = net(Tensor(x), Tensor(seq_lengths))
-    expected = np.array([1, 5, 9], [4, 2, 6], [7, 8, 3]).astype(np.int8)
+    expected = np.array([[1, 5, 9], [4, 2, 6], [7, 8, 3]]).astype(np.int8)
     assert np.array_equal(output.asnumpy(), expected)
@@ -51,5 +51,5 @@ def test_net_int32():
     batch_dim = 0
     net = Net(seq_dim, batch_dim)
     output = net(Tensor(x), Tensor(seq_lengths))
-    expected = np.array([1, 2, 3], [5, 4, 6], [9, 8, 7]).astype(np.int32)
+    expected = np.array([[1, 2, 3], [5, 4, 6], [9, 8, 7]]).astype(np.int32)
     assert np.array_equal(output.asnumpy(), expected)
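The corrected `expected` arrays in both tests follow the ReverseSequence semantics: entry `i` along `batch_dim` has its first `seq_lengths[i]` elements reversed along `seq_dim`. A NumPy reference sketch; `x` and `seq_lengths` are not visible in these hunks, so the values below are assumptions chosen to be consistent with both expected arrays:

```python
import numpy as np

def reverse_sequence_ref(x, seq_lengths, seq_dim, batch_dim):
    """NumPy reference: reverse the first seq_lengths[b] elements along seq_dim for each batch index b."""
    out = np.array(x, copy=True)
    moved = np.moveaxis(out, (batch_dim, seq_dim), (0, 1))  # view of out: batch axis first, sequence axis second
    for b, length in enumerate(seq_lengths):
        moved[b, :length] = moved[b, :length][::-1].copy()
    return out

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
seq_lengths = np.array([1, 2, 3])

# seq_dim=1, batch_dim=0: reverse within rows -> [[1, 2, 3], [5, 4, 6], [9, 8, 7]]
print(reverse_sequence_ref(x, seq_lengths, seq_dim=1, batch_dim=0))
# seq_dim=0, batch_dim=1: reverse within columns -> [[1, 5, 9], [4, 2, 6], [7, 8, 3]]
print(reverse_sequence_ref(x, seq_lengths, seq_dim=0, batch_dim=1))
```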