diff --git a/mindspore/compression/common/constant.py b/mindspore/compression/common/constant.py
index e7fb0f85ea..1795c799c3 100644
--- a/mindspore/compression/common/constant.py
+++ b/mindspore/compression/common/constant.py
@@ -24,7 +24,7 @@ __all__ = ["QuantDtype"]
 @enum.unique
 class QuantDtype(enum.Enum):
     """
-    An enum for quant datatype, contains `INT2`~`INT8`, `UINT2`~`UINT8`.
+    An enum for quant datatype, contains `INT2` ~ `INT8`, `UINT2` ~ `UINT8`.
     """
     INT2 = "INT2"
     INT3 = "INT3"
diff --git a/mindspore/dataset/transforms/py_transforms.py b/mindspore/dataset/transforms/py_transforms.py
index b60ba57329..b63ff010ec 100644
--- a/mindspore/dataset/transforms/py_transforms.py
+++ b/mindspore/dataset/transforms/py_transforms.py
@@ -137,6 +137,7 @@ class Compose:
     def reduce(operations):
         """
         Wraps adjacent Python operations in a Compose to allow mixing of Python and C++ operations
+
         Args:
             operations (list): list of tensor operations
 
diff --git a/mindspore/explainer/_image_classification_runner.py b/mindspore/explainer/_image_classification_runner.py
index 713fc36ca6..63fb7a1b00 100644
--- a/mindspore/explainer/_image_classification_runner.py
+++ b/mindspore/explainer/_image_classification_runner.py
@@ -236,7 +236,7 @@ class ImageClassificationRunner:
         """
        Register uncertainty instance to compute the epistemic uncertainty base on the Bayes' theorem.
-        Notes:
+        Note:
            Please refer to the documentation of mindspore.nn.probability.toolbox.uncertainty_evaluation
            for the details. The actual output is standard deviation of the classification predictions
            and the corresponding 95% confidence intervals. Users have to invoke register_saliency() as well
            for the uncertainty results are
diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py
index 2d7ba786fb..fd0f9bc4a4 100644
--- a/mindspore/nn/layer/quant.py
+++ b/mindspore/nn/layer/quant.py
@@ -1288,7 +1288,7 @@ class MulQuant(Cell):
         ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
         quant_config (QuantConfig): Configures the oberser types and quant settings of weight and activation. Can be
             generated by compression.quant.create_quant_config method.
-            Default: both set to default FakeQuantWithMinMaxObserver.
+            Default: both set to default :class:`FakeQuantWithMinMaxObserver`.
         quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.
 
     Inputs:
diff --git a/mindspore/nn/metrics/hausdorff_distance.py b/mindspore/nn/metrics/hausdorff_distance.py
index 22915eab1e..8f0074dabc 100644
--- a/mindspore/nn/metrics/hausdorff_distance.py
+++ b/mindspore/nn/metrics/hausdorff_distance.py
@@ -258,7 +258,7 @@ class HausdorffDistance(Metric):
                 predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of
                 `label_idx` is int.
 
-        Raises:
+        Raises:
            ValueError: If the number of the inputs is not 3.
        """
        self._is_update = True
diff --git a/mindspore/nn/metrics/mean_surface_distance.py b/mindspore/nn/metrics/mean_surface_distance.py
index 0e7b446f70..fdd354c1f1 100644
--- a/mindspore/nn/metrics/mean_surface_distance.py
+++ b/mindspore/nn/metrics/mean_surface_distance.py
@@ -93,7 +93,7 @@ class MeanSurfaceDistance(Metric):
                 predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of
                 `label_idx` is int.
 
-        Raises:
+        Raises:
            ValueError: If the number of the inputs is not 3.
        """
        if len(inputs) != 3:
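The surface-distance metrics touched above share the `update(y_pred, y, label_idx)` contract that the corrected `Raises:` sections document, and the same applies to `RootMeanSquareDistance` below. A minimal sketch of the expected call pattern, adapted from the `HausdorffDistance` docstring example (the tensor values are illustrative):

```python
import numpy as np
from mindspore import nn, Tensor

# Two small label images; label_idx selects which label is treated as foreground.
x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))

metric = nn.HausdorffDistance()
metric.clear()
metric.update(x, y, 0)   # exactly three inputs: y_pred, y, label_idx
print(metric.eval())

# metric.update(x, y)    # ValueError: the number of the inputs is not 3
```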
""" if len(inputs) != 3: diff --git a/mindspore/nn/metrics/root_mean_square_surface_distance.py b/mindspore/nn/metrics/root_mean_square_surface_distance.py index c593fabfde..564d74893f 100644 --- a/mindspore/nn/metrics/root_mean_square_surface_distance.py +++ b/mindspore/nn/metrics/root_mean_square_surface_distance.py @@ -95,7 +95,7 @@ class RootMeanSquareDistance(Metric): predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of `label_idx` is int. - Raises: + Raises: ValueError: If the number of the inputs is not 3. """ if len(inputs) != 3: diff --git a/mindspore/nn/metrics/topk.py b/mindspore/nn/metrics/topk.py index 9767f87a8b..56d54f7cd6 100644 --- a/mindspore/nn/metrics/topk.py +++ b/mindspore/nn/metrics/topk.py @@ -96,7 +96,7 @@ class TopKCategoricalAccuracy(Metric): class Top1CategoricalAccuracy(TopKCategoricalAccuracy): """ Calculates the top-1 categorical accuracy. This class is a specialized class for TopKCategoricalAccuracy. - Refer to class 'TopKCategoricalAccuracy' for more details. + Refer to :class:`TopKCategoricalAccuracy` for more details. Examples: >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], @@ -116,7 +116,7 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy): class Top5CategoricalAccuracy(TopKCategoricalAccuracy): """ Calculates the top-5 categorical accuracy. This class is a specialized class for TopKCategoricalAccuracy. - Refer to class 'TopKCategoricalAccuracy' for more details. + Refer to :class:`TopKCategoricalAccuracy` for more details. Examples: >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py index 9aff96c144..578e999e8a 100755 --- a/mindspore/nn/optim/sgd.py +++ b/mindspore/nn/optim/sgd.py @@ -52,7 +52,7 @@ class SGD(Optimizer): .. math:: p_{t+1} = p_{t} - lr \ast v_{t+1} - To be noticed, for the first step, v_{t+1} = gradient + To be noticed, for the first step, :math:`v_{t+1} = gradient` Here : where p, v and u denote the parameters, accum, and momentum respectively. diff --git a/mindspore/nn/probability/bijector/bijector.py b/mindspore/nn/probability/bijector/bijector.py index aee8b10b99..0753df3876 100644 --- a/mindspore/nn/probability/bijector/bijector.py +++ b/mindspore/nn/probability/bijector/bijector.py @@ -44,7 +44,7 @@ class Bijector(Cell): has to be float type. During initialization, when `dtype` is None, there is no enforcement on the dtype of the parameters. All parameters should have the same float type, otherwise a TypeError will be raised. Specifically, the parameter type will follow the dtype of the input value, i.e. parameters of the bijector - will be casted into the same type as input value when `dtype`is None. + will be casted into the same type as input value when `dtype` is None. When `dtype` is specified, it is forcing the parameters and input value to be the same dtype as `dtype`. When the type of parameters or the type of the input value is not the same as `dtype`, a TypeError will be raised. Only subtype of mindspore.float_type can be used to specify bijector's `dtype`. diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index bf2cccf3fa..aafa44320c 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -222,8 +222,8 @@ class ForwardValueAndGrad(Cell): Inputs: - **(\*inputs)** (Tuple(Tensor...)) - Tuple of inputs with shape :math:`(N, \ldots)`. 
diff --git a/mindspore/numpy/array_creations.py b/mindspore/numpy/array_creations.py
index 24cc11f5ff..1e1aec727d 100644
--- a/mindspore/numpy/array_creations.py
+++ b/mindspore/numpy/array_creations.py
@@ -181,7 +181,7 @@ def asfarray(a, dtype=mstype.float32):
     If non-float dtype is defined, this function will return a float32 tensor instead.
 
     Args:
-        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
+        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
            be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
diff --git a/mindspore/numpy/math_ops.py b/mindspore/numpy/math_ops.py
index 6cc8d8d176..b15dad7339 100644
--- a/mindspore/numpy/math_ops.py
+++ b/mindspore/numpy/math_ops.py
@@ -726,6 +726,7 @@ def outer(a, b):
     Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
     the outer product is:
+
     ``[[a0*b0 a0*b1 ... a0*bN ]``
 
     ``[a1*b0 . ]``
 
diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py
index d44dbd59eb..24a50ddfb2 100644
--- a/mindspore/ops/composite/base.py
+++ b/mindspore/ops/composite/base.py
@@ -111,6 +111,7 @@ class GradOperation(GradOperation_):
 
     Given an input function `net = Net()` that takes `x` and `y` as inputs, and has a parameter `z`,
     see `Net` in Examples.
+
     To generate a gradient function that returns gradients with respect to the first input
     (see `GradNetWrtX` in Examples).
@@ -122,6 +123,7 @@ class GradOperation(GradOperation_):
 
     3. Call the gradient function with input function's inputs to get the gradients with respect to the first input:
        `grad_op(net)(x, y)`.
+
     To generate a gradient function that returns gradients with respect to all inputs (see `GradNetWrtXY` in
     Examples).
     1. Construct a `GradOperation` higher-order function with `get_all=True` which
@@ -164,6 +166,7 @@ class GradOperation(GradOperation_):
 
     4. Call the gradient function with input function's inputs to get the gradients with respect to all inputs and
        given parameters: `gradient_function(x, y)`.
+
     We can configure the sensitivity(gradient with respect to output) by setting `sens_param` as True and
     passing an extra sensitivity input to the gradient function, the sensitivity input should has the same
     shape and type with input function's output(see `GradNetWrtXYWithSensParam` in Examples).
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index a1b1475c48..af33b40f88 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -2746,9 +2746,9 @@ class ApplyRMSProp(PrimitiveWithInfer):
     where :math:`w` represents `var`, which will be updated.
     :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
     :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
-    :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
-    :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
-    :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
+    :math:`\rho` represents `decay`. :math:`\beta` is the momentum term, represents `momentum`.
+    :math:`\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
+    :math:`\eta` represents `learning_rate`. :math:`\nabla Q_{i}(w)` represents `grad`.
 
     Args:
         use_locking (bool): Whether to enable a lock to protect the variable and accumlation tensors
@@ -2844,9 +2844,9 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer):
     :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`.
     :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
     :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
-    :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
-    :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
-    :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
+    :math:`\rho` represents `decay`. :math:`\beta` is the momentum term, represents `momentum`.
+    :math:`\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
+    :math:`\eta` represents `learning_rate`. :math:`\nabla Q_{i}(w)` represents `grad`.
 
     Args:
         use_locking (bool): Whether to enable a lock to protect the variable and accumlation tensors
@@ -7101,7 +7101,7 @@ class CTCGreedyDecoder(PrimitiveWithCheck):
 
 class BasicLSTMCell(PrimitiveWithInfer):
     """
-    It's similar to operator DynamicRNN. BasicLSTMCell will be deprecated in the future.
+    It's similar to operator :class:`DynamicRNN`. BasicLSTMCell will be deprecated in the future.
     Please use DynamicRNN instead.
     """
 
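The `ApplyRMSProp` and `ApplyCenteredRMSProp` hunks above only fix docstring escaping; the symbols they gloss belong to the standard RMSProp-with-momentum step described earlier in those docstrings (not shown in the hunks). A NumPy sketch of one such step, under that reading and with illustrative values:

```python
import numpy as np

rho, beta, eta, eps = 0.9, 0.9, 0.01, 1e-10   # decay, momentum, learning_rate, epsilon
w = np.array([1.0, 2.0])                      # var
s = np.zeros_like(w)                          # mean_square, s_{t-1}
m = np.zeros_like(w)                          # moment, m_{t-1}
grad = np.array([0.5, -0.5])                  # \nabla Q_i(w)

s = rho * s + (1.0 - rho) * grad ** 2         # s_t
m = beta * m + eta / np.sqrt(s + eps) * grad  # m_t
w = w - m                                     # updated var
print(w)
```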
diff --git a/mindspore/ops/operations/sponge_ops.py b/mindspore/ops/operations/sponge_ops.py
index c9d344b97d..92e6d25fc7 100644
--- a/mindspore/ops/operations/sponge_ops.py
+++ b/mindspore/ops/operations/sponge_ops.py
@@ -1376,8 +1376,8 @@ class Dihedral14LJCFForceWithAtomEnergy(PrimitiveWithInfer):
     and potential energy for each atom.
 
     The calculation formula of force correction is the same as operator
-    Dihedral14LJForceWithDirectCF(), and the energy correction part is the same
-    as operator Dihedral14LJEnergy() and Dihedral14CFEnergy().
+    :class:`Dihedral14LJForceWithDirectCF`, and the energy correction part is the same
+    as operator :class:`Dihedral14LJEnergy` and :class:`Dihedral14CFEnergy`.
 
     Args:
         nb14_numbers (int32): the number of necessary dihedral 1,4 terms M.
@@ -1642,7 +1642,7 @@ class Dihedral14CFAtomEnergy(PrimitiveWithInfer):
     Add the potential energy caused by Coulumb energy correction for each necessary dihedral 1,4
     terms to the total potential energy of each atom.
 
-    The calculation formula is the same as operator Dihedral14CFEnergy().
+    The calculation formula is the same as operator :class:`Dihedral14CFEnergy`.
 
     Args:
         nb14_numbers (int32): the number of necessary dihedral 1,4 terms M.
@@ -2253,7 +2253,7 @@ class LJForceWithPMEDirectForce(PrimitiveWithInfer):
     The calculation formula of Lennard-Jones part is the same as operator
     LJForce(), and the PME direct part is within PME method.
 
-    Agrs:
+    Args:
        atom_numbers(int32): the number of atoms, N.
        cutoff_square(float32): the square value of cutoff.
       pme_beta(float32): PME beta parameter, same as operator PMEReciprocalForce().
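For reference, a minimal sketch of the `GradNetWrtX` pattern that the `GradOperation` hunks earlier walk through; the inner `Net` is a stand-in for the docstring's example network, not the one it defines:

```python
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor

class Net(nn.Cell):
    # Stand-in network taking x and y, as in the docstring walkthrough.
    def construct(self, x, y):
        return x * y

class GradNetWrtX(nn.Cell):
    def __init__(self, net):
        super().__init__()
        self.net = net
        # Step 1: construct the GradOperation higher-order function.
        self.grad_op = ops.GradOperation()

    def construct(self, x, y):
        # Step 2: call it with the network to get the gradient function.
        gradient_function = self.grad_op(self.net)
        # Step 3: call the gradient function with the network's inputs.
        return gradient_function(x, y)

x = Tensor(np.array([[0.5, 0.6], [1.2, 1.3]], dtype=np.float32))
y = Tensor(np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32))
print(GradNetWrtX(Net())(x, y))   # d(x*y)/dx == y for elementwise multiply
```

By default `GradOperation` differentiates with respect to the first input only; `get_all=True` and `get_by_list=True` extend this to all inputs and to listed parameters, as the docstring's numbered steps describe.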