!15358 Some API comments of mindspore should be fixed

From: @zhang_yi2020
Reviewed-by: @gemini524,@liangchenghui,@wuxuejian
Signed-off-by: @liangchenghui,@wuxuejian
pull/15358/MERGE
mindspore-ci-bot (Gitee) committed 4 years ago
commit 1713717f5e
16 changed files with 29 additions and 24 deletions
  1. +1 -1  mindspore/compression/common/constant.py
  2. +1 -0  mindspore/dataset/transforms/py_transforms.py
  3. +1 -1  mindspore/explainer/_image_classification_runner.py
  4. +1 -1  mindspore/nn/layer/quant.py
  5. +1 -1  mindspore/nn/metrics/hausdorff_distance.py
  6. +1 -1  mindspore/nn/metrics/mean_surface_distance.py
  7. +1 -1  mindspore/nn/metrics/root_mean_square_surface_distance.py
  8. +2 -2  mindspore/nn/metrics/topk.py
  9. +1 -1  mindspore/nn/optim/sgd.py
  10. +1 -1  mindspore/nn/probability/bijector/bijector.py
  11. +2 -2  mindspore/nn/wrap/cell_wrapper.py
  12. +1 -1  mindspore/numpy/array_creations.py
  13. +1 -0  mindspore/numpy/math_ops.py
  14. +3 -0  mindspore/ops/composite/base.py
  15. +7 -7  mindspore/ops/operations/nn_ops.py
  16. +4 -4  mindspore/ops/operations/sponge_ops.py

+1 -1  mindspore/compression/common/constant.py

@@ -24,7 +24,7 @@ __all__ = ["QuantDtype"]
 @enum.unique
 class QuantDtype(enum.Enum):
     """
-    An enum for quant datatype, contains `INT2`~`INT8`, `UINT2`~`UINT8`.
+    An enum for quant datatype, contains `INT2` ~ `INT8`, `UINT2` ~ `UINT8`.
     """
     INT2 = "INT2"
     INT3 = "INT3"

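As a quick illustration of the enum this docstring describes, a minimal sketch, assuming `QuantDtype` is re-exported from `mindspore.compression.common` (as its `__all__` suggests):

```python
# Minimal sketch; assumes QuantDtype is re-exported from
# mindspore.compression.common, as the module's __all__ suggests.
from mindspore.compression.common import QuantDtype

print(QuantDtype.INT8)        # QuantDtype.INT8
print(QuantDtype.INT8.value)  # 'INT8'
```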

+1 -0  mindspore/dataset/transforms/py_transforms.py

@@ -137,6 +137,7 @@ class Compose:
     def reduce(operations):
         """
         Wraps adjacent Python operations in a Compose to allow mixing of Python and C++ operations
+
         Args:
             operations (list): list of tensor operations

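To make the wording above concrete, here is a hedged, framework-free sketch of the grouping idea: runs of adjacent Python callables are folded into one composed callable so they can interleave with C++ operations. The names `compose`, `group_adjacent_python_ops`, and `is_python` are illustrative, not MindSpore's implementation.

```python
from functools import reduce as fold

def compose(funcs):
    """Fold a list of callables into one callable applied left to right."""
    return lambda x: fold(lambda acc, f: f(acc), funcs, x)

def group_adjacent_python_ops(operations, is_python):
    """Replace each run of adjacent Python callables with one composed callable."""
    grouped, run = [], []
    for op in operations:
        if is_python(op):
            run.append(op)
            continue
        if run:                          # a non-Python op ends the current run
            grouped.append(compose(run))
            run = []
        grouped.append(op)
    if run:
        grouped.append(compose(run))
    return grouped
```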


+1 -1  mindspore/explainer/_image_classification_runner.py

@@ -236,7 +236,7 @@ class ImageClassificationRunner:
         """
         Register uncertainty instance to compute the epistemic uncertainty base on the Bayes' theorem.
 
-        Notes:
+        Note:
             Please refer to the documentation of mindspore.nn.probability.toolbox.uncertainty_evaluation for the
             details. The actual output is standard deviation of the classification predictions and the corresponding
             95% confidence intervals. Users have to invoke register_saliency() as well for the uncertainty results are


+1 -1  mindspore/nn/layer/quant.py

@@ -1288,7 +1288,7 @@ class MulQuant(Cell):
         ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
         quant_config (QuantConfig): Configures the oberser types and quant settings of weight and activation. Can be
             generated by compression.quant.create_quant_config method.
-            Default: both set to default FakeQuantWithMinMaxObserver.
+            Default: both set to default :class:`FakeQuantWithMinMaxObserver`.
         quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.
 
     Inputs:


+1 -1  mindspore/nn/metrics/hausdorff_distance.py

@@ -258,7 +258,7 @@ class HausdorffDistance(Metric):
             predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of `label_idx`
             is int.
 
-            Raises:
+        Raises:
             ValueError: If the number of the inputs is not 3.
         """
         self._is_update = True

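A hedged usage sketch of the three-input contract documented in this hunk (the tensor values are placeholders; `0` is the `label_idx`):

```python
# Sketch only: y_pred and y are placeholder binary images.
import numpy as np
from mindspore import Tensor
from mindspore.nn.metrics import HausdorffDistance

metric = HausdorffDistance()
metric.clear()
y_pred = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))
metric.update(y_pred, y, 0)   # exactly three inputs, else ValueError
print(metric.eval())
```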

+1 -1  mindspore/nn/metrics/mean_surface_distance.py

@@ -93,7 +93,7 @@ class MeanSurfaceDistance(Metric):
             predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of `label_idx`
             is int.
 
-            Raises:
+        Raises:
            ValueError: If the number of the inputs is not 3.
         """
         if len(inputs) != 3:


+1 -1  mindspore/nn/metrics/root_mean_square_surface_distance.py

@@ -95,7 +95,7 @@ class RootMeanSquareDistance(Metric):
             predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of `label_idx`
             is int.
 
-            Raises:
+        Raises:
            ValueError: If the number of the inputs is not 3.
         """
         if len(inputs) != 3:


+2 -2  mindspore/nn/metrics/topk.py

@@ -96,7 +96,7 @@ class TopKCategoricalAccuracy(Metric):
 class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
     """
     Calculates the top-1 categorical accuracy. This class is a specialized class for TopKCategoricalAccuracy.
-    Refer to class 'TopKCategoricalAccuracy' for more details.
+    Refer to :class:`TopKCategoricalAccuracy` for more details.
 
     Examples:
         >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
@@ -116,7 +116,7 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
 class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
     """
     Calculates the top-5 categorical accuracy. This class is a specialized class for TopKCategoricalAccuracy.
-    Refer to class 'TopKCategoricalAccuracy' for more details.
+    Refer to :class:`TopKCategoricalAccuracy` for more details.
 
     Examples:
         >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],


+1 -1  mindspore/nn/optim/sgd.py

@@ -52,7 +52,7 @@ class SGD(Optimizer):
     .. math::
             p_{t+1} = p_{t} - lr \ast v_{t+1}
 
-    To be noticed, for the first step, v_{t+1} = gradient
+    To be noticed, for the first step, :math:`v_{t+1} = gradient`
 
     Here : where p, v and u denote the parameters, accum, and momentum respectively.

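A NumPy sketch of the update rule above, as a reading aid rather than the MindSpore kernel; the momentum form `v_{t+1} = u * v_t + gradient * (1 - dampening)` is assumed from the rest of this docstring:

```python
import numpy as np

def sgd_step(p, v, gradient, lr=0.1, u=0.9, dampening=0.0, first_step=False):
    # For the first step v_{t+1} = gradient, as the docstring notes.
    v_next = gradient if first_step else u * v + gradient * (1.0 - dampening)
    return p - lr * v_next, v_next   # p_{t+1} = p_t - lr * v_{t+1}

p, v = np.array([1.0, 2.0]), np.zeros(2)
g = np.array([0.5, -0.5])
p, v = sgd_step(p, v, g, first_step=True)   # v == g after the first step
p, v = sgd_step(p, v, g)
```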


+1 -1  mindspore/nn/probability/bijector/bijector.py

@@ -44,7 +44,7 @@ class Bijector(Cell):
     has to be float type. During initialization, when `dtype` is None, there is no enforcement on the dtype
     of the parameters. All parameters should have the same float type, otherwise a TypeError will be raised.
     Specifically, the parameter type will follow the dtype of the input value, i.e. parameters of the bijector
-    will be casted into the same type as input value when `dtype`is None.
+    will be casted into the same type as input value when `dtype` is None.
     When `dtype` is specified, it is forcing the parameters and input value to be the same dtype as `dtype`.
     When the type of parameters or the type of the input value is not the same as `dtype`, a TypeError will be
     raised. Only subtype of mindspore.float_type can be used to specify bijector's `dtype`.


+2 -2  mindspore/nn/wrap/cell_wrapper.py

@@ -222,8 +222,8 @@ class ForwardValueAndGrad(Cell):
     Inputs:
         - **(\*inputs)** (Tuple(Tensor...)) - Tuple of inputs with shape :math:`(N, \ldots)`.
         - **(sens)** - A sensitivity (gradient with respect to output) as the input of backpropagation.
-            If network has single output, the sens is a tensor.
-            If network has multiple outputs, the sens is the tuple(tensor).
+          If network has single output, the sens is a tensor.
+          If network has multiple outputs, the sens is the tuple(tensor).
 
     Outputs:
         - **forward value** - The result of network forward running.

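A hedged sketch of feeding `sens` to a single-output network; the `nn.Dense` network and the shapes are placeholders, not from this diff:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

net = nn.Dense(3, 1)                        # placeholder single-output network
wrapper = nn.ForwardValueAndGrad(net, get_all=True, sens_param=True)
x = Tensor(np.ones((2, 3), np.float32))
sens = Tensor(np.ones((2, 1), np.float32))  # single output: sens is a tensor
value, grads = wrapper(x, sens)             # forward value and input gradients
```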

+1 -1  mindspore/numpy/array_creations.py

@@ -181,7 +181,7 @@ def asfarray(a, dtype=mstype.float32):
     If non-float dtype is defined, this function will return a float32 tensor instead.
 
     Args:
-    a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
+        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
            be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type

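For instance, a small hedged example of the float32 fallback described above:

```python
import mindspore.numpy as mnp

out = mnp.asfarray([1, 2, 3])   # non-float input falls back to float32
print(out.dtype)                # Float32
```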

+1 -0  mindspore/numpy/math_ops.py

@@ -726,6 +726,7 @@ def outer(a, b):
 
     Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
     the outer product is:
+
     ``[[a0*b0 a0*b1 ... a0*bN ]``
 
     ``[a1*b0 . ]``

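A small hedged example of the product described above, with placeholder vectors:

```python
import mindspore.numpy as mnp

a = mnp.asarray([1.0, 2.0, 3.0])
b = mnp.asarray([4.0, 5.0])
print(mnp.outer(a, b))   # out[i, j] = a[i] * b[j]
# [[ 4.  5.]
#  [ 8. 10.]
#  [12. 15.]]
```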

+3 -0  mindspore/ops/composite/base.py

@@ -111,6 +111,7 @@ class GradOperation(GradOperation_):
     Given an input function `net = Net()` that takes `x` and `y` as inputs, and has a parameter `z`,
     see `Net` in Examples.
 
+
     To generate a gradient function that returns gradients with respect to the first input
     (see `GradNetWrtX` in Examples).
 
@@ -122,6 +123,7 @@ class GradOperation(GradOperation_):
     3. Call the gradient function with input function's inputs to get the gradients with respect to the first input:
        `grad_op(net)(x, y)`.
 
+
     To generate a gradient function that returns gradients with respect to all inputs (see `GradNetWrtXY` in Examples).
 
     1. Construct a `GradOperation` higher-order function with `get_all=True` which
@@ -164,6 +166,7 @@ class GradOperation(GradOperation_):
     4. Call the gradient function with input function's inputs
        to get the gradients with respect to all inputs and given parameters: `gradient_function(x, y)`.
 
+
     We can configure the sensitivity(gradient with respect to output) by setting `sens_param` as True and
     passing an extra sensitivity input to the gradient function, the sensitivity input should has the
     same shape and type with input function's output(see `GradNetWrtXYWithSensParam` in Examples).

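A hedged, self-contained sketch of the construct-then-call steps this docstring walks through; this `Net` is a placeholder two-input network, not the `Net` from the Examples the text refers to:

```python
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor

class Net(nn.Cell):
    def construct(self, x, y):
        return x * y

class GradNetWrtX(nn.Cell):
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.grad_op = ops.GradOperation()   # default: gradient w.r.t. first input

    def construct(self, x, y):
        gradient_function = self.grad_op(self.net)   # step: grad_op(net)
        return gradient_function(x, y)               # step: grad_op(net)(x, y)

x = Tensor(np.array([2.0], np.float32))
y = Tensor(np.array([3.0], np.float32))
print(GradNetWrtX(Net())(x, y))   # d(x*y)/dx = y -> [3.]
```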

+7 -7  mindspore/ops/operations/nn_ops.py

@@ -2746,9 +2746,9 @@ class ApplyRMSProp(PrimitiveWithInfer):
     where :math:`w` represents `var`, which will be updated.
     :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
     :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
-    :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
-    :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
-    :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
+    :math:`\rho` represents `decay`. :math:`\beta` is the momentum term, represents `momentum`.
+    :math:`\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
+    :math:`\eta` represents `learning_rate`. :math:`\nabla Q_{i}(w)` represents `grad`.
 
     Args:
         use_locking (bool): Whether to enable a lock to protect the variable and accumlation tensors
@@ -2844,9 +2844,9 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer):
     :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`.
     :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
     :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
-    :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
-    :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
-    :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
+    :math:`\rho` represents `decay`. :math:`\beta` is the momentum term, represents `momentum`.
+    :math:`\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
+    :math:`\eta` represents `learning_rate`. :math:`\nabla Q_{i}(w)` represents `grad`.
 
     Args:
         use_locking (bool): Whether to enable a lock to protect the variable and accumlation tensors
@@ -7101,7 +7101,7 @@ class CTCGreedyDecoder(PrimitiveWithCheck):
 
 class BasicLSTMCell(PrimitiveWithInfer):
     """
-    It's similar to operator DynamicRNN. BasicLSTMCell will be deprecated in the future.
+    It's similar to operator :class:`DynamicRNN`. BasicLSTMCell will be deprecated in the future.
     Please use DynamicRNN instead.
     """

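A NumPy sketch of the plain (non-centered) RMSProp update these symbols describe, as a reading aid only; here `rho` is `decay`, `beta` is `momentum`, and `eta` is `learning_rate`:

```python
import numpy as np

def rmsprop_step(w, s, m, grad, rho=0.9, beta=0.0, eta=0.01, eps=1e-10):
    s = rho * s + (1.0 - rho) * grad ** 2          # mean_square s_t
    m = beta * m + eta * grad / np.sqrt(s + eps)   # moment m_t
    return w - m, s, m                             # updated var w
```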


+4 -4  mindspore/ops/operations/sponge_ops.py

@@ -1376,8 +1376,8 @@ class Dihedral14LJCFForceWithAtomEnergy(PrimitiveWithInfer):
     and potential energy for each atom.
 
     The calculation formula of force correction is the same as operator
-    Dihedral14LJForceWithDirectCF(), and the energy correction part is the same
-    as operator Dihedral14LJEnergy() and Dihedral14CFEnergy().
+    :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same
+    as operator :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`.
 
     Args:
         nb14_numbers (int32): the number of necessary dihedral 1,4 terms M.
@@ -1642,7 +1642,7 @@ class Dihedral14CFAtomEnergy(PrimitiveWithInfer):
     Add the potential energy caused by Coulumb energy correction for each
     necessary dihedral 1,4 terms to the total potential energy of each atom.
 
-    The calculation formula is the same as operator Dihedral14CFEnergy().
+    The calculation formula is the same as operator :class:`Dihedral14CFEnergy`.
 
     Args:
         nb14_numbers (int32): the number of necessary dihedral 1,4 terms M.
@@ -2253,7 +2253,7 @@ class LJForceWithPMEDirectForce(PrimitiveWithInfer):
     The calculation formula of Lennard-Jones part is the same as operator
     LJForce(), and the PME direct part is within PME method.
 
-    Agrs:
+    Args:
         atom_numbers(int32): the number of atoms, N.
         cutoff_square(float32): the square value of cutoff.
         pme_beta(float32): PME beta parameter, same as operator PMEReciprocalForce().

