From 0fa0fd39bbd69edcf919dd0f8770a57dca2148b9 Mon Sep 17 00:00:00 2001
From: lihongkang <[lihongkang1@huawei.com]>
Date: Fri, 27 Nov 2020 17:41:05 +0800
Subject: [PATCH] fix bugs

---
 mindspore/nn/layer/activation.py       |  3 +++
 mindspore/nn/loss/loss.py              |  3 +++
 mindspore/ops/operations/array_ops.py  | 10 ++++++----
 mindspore/ops/operations/math_ops.py   |  2 +-
 mindspore/ops/operations/nn_ops.py     |  3 +++
 mindspore/ops/operations/random_ops.py |  3 +++
 6 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index f72a44d95b..3d9c4bcb24 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -374,6 +374,9 @@ class FastGelu(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> fast_gelu = nn.FastGelu()
diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py
index 25d61e8454..fdcbcf5164 100644
--- a/mindspore/nn/loss/loss.py
+++ b/mindspore/nn/loss/loss.py
@@ -309,6 +309,9 @@ class SampledSoftmaxLoss(_Loss):
     Outputs:
         Tensor, a tensor of shape (N) with the per-example sampled softmax losses.
 
+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> loss = nn.SampledSoftmaxLoss(num_sampled=4, num_classes=7, num_true=1)
         >>> weights = Tensor(np.random.randint(0, 9, [7, 10]), mindspore.float32)
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index c0d260053b..1d63452595 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -554,6 +554,8 @@ class DynamicShape(Primitive):
         >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
         >>> shape = ops.DynamicShape()
         >>> output = shape(input_tensor)
+        >>> print(output)
+        [3 2 1]
     """
 
     @prim_attr_register
@@ -709,7 +711,7 @@ class Unique(Primitive):
         containing indices of elements in the input coressponding to the output tensor.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
@@ -779,7 +781,7 @@ class SparseGatherV2(GatherV2):
         Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
 
     Supported Platforms:
-        ``GPU``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
@@ -2240,7 +2242,7 @@ class Pack(PrimitiveWithInfer):
             or if the shapes of elements in input_x are not the same.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
@@ -2295,7 +2297,7 @@ class Unpack(PrimitiveWithInfer):
         ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> unpack = ops.Unpack()
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index fe13e1ba1e..c6258639ee 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1996,7 +1996,7 @@ class DivNoNan(_MathBinaryOp):
         and the data type is the one with higher precision or higher digits among the two inputs.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index d5cfcb8904..587499e9fb 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -2971,6 +2971,9 @@ class FastGelu(PrimitiveWithInfer):
     Outputs:
         Tensor, with the same type and shape as input.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> tensor = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> fast_gelu = P.FastGelu()
diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py
index 9ab3ef39a8..32a5fa61ba 100644
--- a/mindspore/ops/operations/random_ops.py
+++ b/mindspore/ops/operations/random_ops.py
@@ -560,6 +560,9 @@ class UniformCandidateSampler(PrimitiveWithInfer):
         - **sampled_expected_count** (Tensor) - The expected counts under the sampling distribution of each
           of sampled_candidates. Shape: (num_sampled, ).
 
+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> sampler = ops.UniformCandidateSampler(1, 3, False, 4)
        >>> output1, output2, output3 = sampler(Tensor(np.array([[1],[3],[4],[6],[3]], dtype=np.int32)))
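
A minimal sketch, not part of the patch itself, of how the documented behaviour could be checked from an interactive session. It assumes a MindSpore build that exposes `ops.DynamicShape` and `ops.DivNoNan` (the operators whose docstrings are touched above); the printed shape mirrors the example the patch adds to `DynamicShape`, while the `DivNoNan` call exercises the operator whose GPU support the patch documents.

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> # DynamicShape returns the runtime shape of its input as a 1-D tensor.
>>> shape_op = ops.DynamicShape()
>>> print(shape_op(Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)))
[3 2 1]
>>> # DivNoNan divides element-wise but returns 0 wherever the divisor is 0,
>>> # instead of producing inf/nan.
>>> div_no_nan = ops.DivNoNan()
>>> output = div_no_nan(Tensor(np.array([2.0, 0.0]), mindspore.float32),
...                     Tensor(np.array([0.0, 0.0]), mindspore.float32))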