diff --git a/mindspore/nn/layer/math.py b/mindspore/nn/layer/math.py index 9dd62f173d..bbdd3f180a 100644 --- a/mindspore/nn/layer/math.py +++ b/mindspore/nn/layer/math.py @@ -190,15 +190,18 @@ class LGamma(Cell): when x is an integer less or equal to 0, return +inf when x = +/- inf, return +inf - Supported Platforms: - ``Ascend`` ``GPU`` - Inputs: - **x** (Tensor) - The input tensor. Only float16, float32 are supported. Outputs: Tensor, has the same shape and dtype as the `x`. + Raises: + TypeError: If dtype of input x is neither float16 nor float32. + + Supported Platforms: + ``Ascend`` ``GPU`` + Examples: >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> op = nn.LGamma() @@ -306,15 +309,18 @@ class DiGamma(Cell): digamma(x) = digamma(1 - x) - pi * cot(pi * x) - Supported Platforms: - ``Ascend`` ``GPU`` - Inputs: - **x** (Tensor[Number]) - The input tensor. Only float16, float32 are supported. Outputs: Tensor, has the same shape and dtype as the `x`. + Raises: + TypeError: If dtype of input x is neither float16 nor float32. + + Supported Platforms: + ``Ascend`` ``GPU`` + Examples: >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> op = nn.DiGamma() @@ -568,9 +574,6 @@ class IGamma(Cell): Above :math:`Q(a, x)` is the upper regularized complete Gamma function. - Supported Platforms: - ``Ascend`` ``GPU`` - Inputs: - **a** (Tensor) - The input tensor. With float32 data type. `a` should have the same dtype with `x`. @@ -580,6 +583,13 @@ class IGamma(Cell): Outputs: Tensor, has the same dtype as `a` and `x`. + Raises: + TypeError: If dtype of input a or x is not float32, + or if x has a different dtype than a. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + Examples: >>> input_a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32)) >>> input_x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)) @@ -649,9 +659,6 @@ class LBeta(Cell): decomposing lgamma into the Stirling approximation and an explicit log_gamma_correction, and cancelling the large terms from the Striling analytically. - Supported Platforms: - ``Ascend`` ``GPU`` - Inputs: - **x** (Tensor) - The input tensor. With float16 or float32 data type. `x` should have the same dtype with `y`. @@ -661,6 +668,13 @@ class LBeta(Cell): Outputs: Tensor, has the same dtype as `x` and `y`. + Raises: + TypeError: If dtype of input x or y is neither float16 nor float32, + or if x has a different dtype than y. + + Supported Platforms: + ``Ascend`` ``GPU`` + Examples: >>> input_x = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32)) >>> input_y = Tensor(np.array([2.0, 3.0, 14.0, 15.0]).astype(np.float32)) @@ -956,9 +970,6 @@ class MatInverse(Cell): """ Calculates the inverse of Positive-Definite Hermitian matrix using Cholesky decomposition. - Supported Platforms: - ``GPU`` - Inputs: - **a** (Tensor[Number]) - The input tensor. It must be a positive-definite matrix. With float16 or float32 data type. Outputs: Tensor, has the same dtype as the `a`. + Raises: + TypeError: If dtype of input a is neither float16 nor float32. + + Supported Platforms: + ``GPU`` + Examples: >>> input_a = Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32)) >>> op = nn.MatInverse() @@ -993,9 +1010,6 @@ class MatDet(Cell): """ Calculates the determinant of Positive-Definite Hermitian matrix using Cholesky decomposition. - Supported Platforms: - ``GPU`` - Inputs: - **a** (Tensor[Number]) - The input tensor. It must be a positive-definite matrix. With float16 or float32 data type. @@ -1003,6 +1017,12 @@ class MatDet(Cell): Outputs: Tensor, has the same dtype as the `a`. 
+ Raises: + TypeError: If dtype of input a is neither float16 nor float32. + + Supported Platforms: + ``GPU`` + Examples: >>> input_a = Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32)) >>> op = nn.MatDet()