Browse Source

fix bugs

tags/v1.1.0
lihongkang 5 years ago
parent
commit
0fa0fd39bb
6 changed files with 19 additions and 5 deletions
  1. +3
    -0
      mindspore/nn/layer/activation.py
  2. +3
    -0
      mindspore/nn/loss/loss.py
  3. +6
    -4
      mindspore/ops/operations/array_ops.py
  4. +1
    -1
      mindspore/ops/operations/math_ops.py
  5. +3
    -0
      mindspore/ops/operations/nn_ops.py
  6. +3
    -0
      mindspore/ops/operations/random_ops.py

+ 3
- 0
mindspore/nn/layer/activation.py View File

@@ -374,6 +374,9 @@ class FastGelu(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> fast_gelu = nn.FastGelu()


+ 3
- 0
mindspore/nn/loss/loss.py View File

@@ -309,6 +309,9 @@ class SampledSoftmaxLoss(_Loss):
     Outputs:
         Tensor, a tensor of shape (N) with the per-example sampled softmax losses.
 
+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> loss = nn.SampledSoftmaxLoss(num_sampled=4, num_classes=7, num_true=1)
         >>> weights = Tensor(np.random.randint(0, 9, [7, 10]), mindspore.float32)


+ 6
- 4
mindspore/ops/operations/array_ops.py View File

@@ -554,6 +554,8 @@ class DynamicShape(Primitive):
         >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
         >>> shape = ops.DynamicShape()
         >>> output = shape(input_tensor)
+        >>> print(output)
+        [3 2 1]
     """
 
 
 @prim_attr_register
@@ -709,7 +711,7 @@ class Unique(Primitive):
         containing indices of elements in the input corresponding to the output tensor.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
@@ -779,7 +781,7 @@ class SparseGatherV2(GatherV2):
         Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
 
     Supported Platforms:
-        ``GPU``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
@@ -2240,7 +2242,7 @@ class Pack(PrimitiveWithInfer):
         or if the shapes of elements in input_x are not the same.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
@@ -2295,7 +2297,7 @@ class Unpack(PrimitiveWithInfer):
         ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> unpack = ops.Unpack()


+ 1
- 1
mindspore/ops/operations/math_ops.py View File

@@ -1996,7 +1996,7 @@ class DivNoNan(_MathBinaryOp):
         and the data type is the one with higher precision or higher digits among the two inputs.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``
 
     Examples:
         >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)


+ 3
- 0
mindspore/ops/operations/nn_ops.py View File

@@ -2971,6 +2971,9 @@ class FastGelu(PrimitiveWithInfer):
     Outputs:
         Tensor, with the same type and shape as input.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> tensor = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> fast_gelu = P.FastGelu()


+ 3
- 0
mindspore/ops/operations/random_ops.py View File

@@ -560,6 +560,9 @@ class UniformCandidateSampler(PrimitiveWithInfer):
         - **sampled_expected_count** (Tensor) - The expected counts under the sampling distribution of
           each of sampled_candidates. Shape: (num_sampled, ).
 
+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> sampler = ops.UniformCandidateSampler(1, 3, False, 4)
         >>> output1, output2, output3 = sampler(Tensor(np.array([[1],[3],[4],[6],[3]], dtype=np.int32)))


Loading…
Cancel
Save