@@ -72,7 +72,7 @@ class Softmax(Cell):
>>> softmax = nn.Softmax()
>>> output = softmax(input_x)
>>> print(output)
[0.03168 0.01166 0.0861 0.636 0.2341]
[0.03168 0.01166 0.0861 0.636 0.2341 ]
"""
def __init__(self, axis=-1):
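# Why the expected output gains a trailing space before the closing bracket:
# MindSpore tensors print with NumPy's formatting, and NumPy pads every
# element of a float array to a common width, so the last element can end in
# padding. A minimal sketch (float16 is an assumption chosen to match the
# short reprs above):
import numpy as np
x = np.array([0.03168, 0.01166, 0.0861, 0.636, 0.2341], dtype=np.float16)
print(x)  # e.g. [0.03168 0.01166 0.0861  0.636   0.2341 ]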
@@ -179,7 +179,7 @@ class ReLU(Cell):
>>> relu = nn.ReLU()
>>> output = relu(input_x)
>>> print(output)
[0. 2. 0. 2. 0.]
[0. 2. 0. 2. 0.]
"""
def __init__(self):
@@ -209,7 +209,7 @@ class ReLU6(Cell):
>>> relu6 = nn.ReLU6()
>>> output = relu6(input_x)
>>> print(output)
[0. 0. 0. 2. 1.]
[0. 0. 0. 2. 1.]
"""
def __init__(self):
@@ -248,7 +248,7 @@ class LeakyReLU(Cell):
>>> output = leaky_relu(input_x)
>>> print(output)
[[-0.2 4. -1.6]
[ 2 -1. 9.]]
[ 2 -1. 9. ]]
"""
def __init__(self, alpha=0.2):
@@ -292,7 +292,7 @@ class Tanh(Cell):
>>> tanh = nn.Tanh()
>>> output = tanh(input_x)
>>> print(output)
[0.7617 0.964 0.995 0.964 0.7617]
[0.7617 0.964 0.995 0.964 0.7617]
"""
def __init__(self):
@@ -356,7 +356,7 @@ class Sigmoid(Cell):
>>> sigmoid = nn.Sigmoid()
>>> output = sigmoid(input_x)
>>> print(output)
[0.2688 0.11914 0.5 0.881 0.7305]
[0.2688 0.11914 0.5 0.881 0.7305 ]
"""
def __init__(self):
@@ -517,10 +517,9 @@ class LogSigmoid(Cell):
Examples:
>>> net = nn.LogSigmoid()
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> logsigmoid = net(input_x)
>>> print(logsigmoid)
[-3.1326166e-01, -1.2692806e-01, -4.8587345e-02]
>>> output = net(input_x)
>>> print(output)
[-0.31326166 -0.12692806 -0.04858734]
"""
def __init__(self):
@@ -78,10 +78,10 @@ class Dropout(Cell):
>>> net.set_train()
>>> output = net(x)
>>> print(output)
[[[0., 1.25, 0.],
[1.25, 1.25, 1.25]],
[[1.25, 1.25, 1.25],
[1.25, 1.25, 1.25]]]
[[[0. 1.25 0. ]
[1.25 1.25 1.25]]
[[1.25 1.25 1.25]
[1.25 1.25 1.25]]]
"""
def __init__(self, keep_prob=0.5, dtype=mstype.float32):
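# The 1.25 entries above are inverted-dropout scaling: kept activations are
# divided by keep_prob during training. A sketch assuming an all-ones input
# and keep_prob=0.8 (the constructor call is elided in this hunk), since
# 1 / 0.8 = 1.25:
import numpy as np
keep_prob = 0.8
x = np.ones((2, 2, 3), dtype=np.float32)
mask = (np.random.rand(*x.shape) < keep_prob).astype(np.float32)
print(x * mask / keep_prob)  # kept entries print as 1.25, dropped ones as 0.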
@@ -320,8 +320,8 @@ class ClipByNorm(Cell):
>>> net = nn.ClipByNorm()
>>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
>>> clip_norm = Tensor(np.array([100]).astype(np.float32))
>>> result = net(input, clip_norm).shape
>>> print(result)
>>> output = net(input, clip_norm)
>>> print(output.shape)
(4, 16)
"""
@@ -392,7 +392,7 @@ class Norm(Cell):
>>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
>>> output = net(input)
>>> print(output)
[2.236068 9.848858 4. 5.656854]
[7.81025 6.708204 0. 8.602325]
"""
def __init__(self, axis=(), keep_dims=False):
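# Both printed vectors are "correct": the input is unseeded np.random data,
# so the values differ run to run. The reduction itself is an L2 norm; a
# sketch assuming axis=0 (inferred from a [2, 4] input yielding 4 values):
import numpy as np
x = np.random.randint(0, 10, (2, 4)).astype(np.float32)
print(np.sqrt((x ** 2).sum(axis=0)))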
@@ -514,7 +514,12 @@ class Pad(Cell):
... return self.pad(x)
>>> x = np.random.random(size=(2, 3)).astype(np.float32)
>>> pad = Net()
>>> ms_output = pad(Tensor(x))
>>> output = pad(Tensor(x))
>>> print(output)
[[0. 0. 0. 0. 0. 0. ]
[0. 0. 0.82691735 0.36147234 0.70918983 0. ]
[0. 0. 0.7842975 0.44726616 0.4353459 0. ]
[0. 0. 0. 0. 0. 0. ]]
"""
def __init__(self, paddings, mode="CONSTANT"):
@@ -574,9 +579,8 @@ class Unfold(Cell):
>>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
>>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
>>> output = net(image)
>>> print(output)
[[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]],
[[1, 1], [1, 1]]]]
>>> print(output.shape)
(2, 12, 2, 2)
"""
def __init__(self, ksizes, strides, rates, padding="valid"):
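# Worked check of the (2, 12, 2, 2) shape above, using the usual "valid"
# patch-extraction arithmetic (an assumption; the formula is not stated in
# the hunk): the channel dim becomes C * k * k, and each spatial dim shrinks
# by the dilated kernel extent (k - 1) * rate + 1.
k, stride, rate, size, channels = 2, 2, 2, 6, 3
eff_k = (k - 1) * rate + 1              # 3
spatial = (size - eff_k) // stride + 1  # 2
print((2, channels * k * k, spatial, spatial))  # (2, 12, 2, 2)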
@@ -627,8 +631,8 @@ class MatrixDiag(Cell):
Examples:
>>> x = Tensor(np.array([1, -1]), mstype.float32)
>>> matrix_diag = nn.MatrixDiag()
>>> result = matrix_diag(x)
>>> print(result)
>>> output = matrix_diag(x)
>>> print(output)
[[1. 0.]
[0. -1.]]
"""
@@ -659,9 +663,11 @@ class MatrixDiagPart(Cell):
Examples:
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> matrix_diag_part = nn.MatrixDiagPart()
>>> result = matrix_diag_part(x)
>>> print(result)
[[-1., 1.], [-1., 1.], [-1., 1.]]
>>> output = matrix_diag_part(x)
>>> print(output)
[[-1. 1.]
[-1. 1.]
[-1. 1.]]
"""
def __init__(self):
super(MatrixDiagPart, self).__init__()
@@ -692,9 +698,14 @@ class MatrixSetDiag(Cell):
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
>>> matrix_set_diag = nn.MatrixSetDiag()
>>> result = matrix_set_diag(x, diagonal)
>>> print(result)
[[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
>>> output = matrix_set_diag(x, diagonal)
>>> print(output)
[[[-1. 0.]
[ 0. 2.]]
[[-1. 0.]
[ 0. 1.]]
[[-1. 0.]
[ 0. 1.]]]
"""
def __init__(self):
super(MatrixSetDiag, self).__init__()
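# NumPy equivalents of the three Matrix* doctests above (the equivalence is
# assumed for illustration; the nn cells work batch-wise on the trailing two
# dimensions):
import numpy as np
print(np.diag([1., -1.]))                       # MatrixDiag
x = np.array([[[-1., 0.], [0., 1.]]] * 3, dtype=np.float32)
print(x[:, [0, 1], [0, 1]])                     # MatrixDiagPart, per batch
x[:, [0, 1], [0, 1]] = [[-1., 2.], [-1., 1.], [-1., 1.]]
print(x)                                        # MatrixSetDiag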
@@ -85,7 +85,6 @@ class SequentialCell(Cell):
>>> bn = nn.BatchNorm2d(2)
>>> relu = nn.ReLU()
>>> seq = nn.SequentialCell([conv, bn, relu])
>>>
>>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
>>> output = seq(x)
>>> print(output)
@@ -158,10 +157,10 @@ class SequentialCell(Cell):
>>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
>>> output = seq(x)
>>> print(output)
[[[[0.12445523 0.12445523]
[0.12445523 0.12445523]]
[[0. 0. ]
[0. 0. ]]]]
[[[[0.08789019 0.08789019]
[0.08789019 0.08789019]]
[[0.07690391 0.07690391]
[0.07690391 0.07690391]]]]
"""
if _valid_cell(cell):
self._cells[str(len(self))] = cell
@@ -195,9 +194,11 @@ class CellList(_CellListBase, Cell):
>>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
>>> # not same as nn.SequentialCell, `cell_ls(x)` is not correct
>>> cell_ls
CellList< (0): Conv2d<input_channels=100, ..., bias_init=None>
(1): BatchNorm2d<num_features=20, ..., moving_variance=Parameter (name=variance)>
(2): ReLU<> >
CellList<
(0): Conv2d<input_channels=100, ..., bias_init=None>
(1): BatchNorm2d<num_features=20, ..., moving_variance=Parameter (name=variance)>
(2): ReLU<>
>
"""
def __init__(self, *args):
_CellListBase.__init__(self)
@@ -52,13 +52,14 @@ class ImageGradients(Cell):
Examples:
>>> net = nn.ImageGradients()
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32)
>>> output = net(image)
>>> print(output)
[[[[2,2]
[0,0]]]]
[[[[1,0]
[1,0]]]]
(Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
[[[[2, 2],
[0, 0]]]]), Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
[[[[1, 0],
[1, 0]]]]))
"""
def __init__(self):
super(ImageGradients, self).__init__()
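# Worked check of the (dy, dx) tuple above, assuming forward differences
# with the last row/column zero-padded:
import numpy as np
img = np.array([[1, 2], [3, 4]], dtype=np.int32)
dy = np.zeros_like(img); dy[:-1, :] = img[1:, :] - img[:-1, :]
dx = np.zeros_like(img); dx[:, :-1] = img[:, 1:] - img[:, :-1]
print(dy)  # [[2 2] [0 0]]
print(dx)  # [[1 0] [1 0]]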
@@ -214,8 +215,8 @@ class SSIM(Cell):
>>> net = nn.SSIM()
>>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> ssim = net(img1, img2)
>>> print(ssim)
>>> output = net(img1, img2)
>>> print(output)
[0.12174469]
"""
def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -290,11 +291,11 @@ class MSSSIM(Cell):
Examples:
>>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
>>> img1 = Tensor(np.random.random((1, 3, 128, 128)))
>>> img2 = Tensor(np.random.random((1, 3, 128, 128)))
>>> result = net(img1, img2)
>>> print(result)
[0.20930639]
>>> img1 = Tensor(np.random.random((1,3,128,128)))
>>> img2 = Tensor(np.random.random((1,3,128,128)))
>>> output = net(img1, img2)
>>> print(output)
[0.22965115]
"""
def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -382,9 +383,9 @@ class PSNR(Cell):
>>> net = nn.PSNR()
>>> img1 = Tensor(np.random.random((1,3,16,16)))
>>> img2 = Tensor(np.random.random((1,3,16,16)))
>>> psnr = net(img1, img2)
>>> print(psnr)
[7.8297315]
>>> output = net(img1, img2)
>>> print(output)
[7.7229595]
"""
def __init__(self, max_val=1.0):
super(PSNR, self).__init__()
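# The SSIM/MS-SSIM/PSNR values above vary between runs because the images
# are unseeded random noise. PSNR in its standard form (assumed to match
# nn.PSNR): two independent U(0,1) images give MSE near 1/6, i.e. ~7.8 dB.
import numpy as np
def psnr(a, b, max_val=1.0):
    mse = np.mean((a - b) ** 2)
    return 10.0 * np.log10(max_val ** 2 / mse)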
@@ -452,8 +453,7 @@ class CentralCrop(Cell):
>>> net = nn.CentralCrop(central_fraction=0.5)
>>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
>>> output = net(image)
>>> result = output.shape
>>> print(result)
>>> print(output.shape)
(4, 3, 2, 2)
"""
@@ -64,8 +64,7 @@ class ReduceLogSumExp(Cell):
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = nn.ReduceLogSumExp(1, keep_dims=True)
>>> output = op(input_x)
>>> result = output.shape
>>> print(reuslt)
>>> print(output.shape)
(3, 1, 5, 6)
"""
@@ -101,9 +100,9 @@ class Range(Cell):
Examples:
>>> net = nn.Range(1, 8, 2)
>>> out = net()
>>> print(out)
[1, 3, 5, 7]
>>> output = net()
>>> print(output)
[1 3 5 7]
"""
def __init__(self, start, limit=None, delta=1):
@@ -157,7 +156,7 @@ class LinSpace(Cell):
>>> linspace = nn.LinSpace(1, 10, 5)
>>> output = linspace()
>>> print(output)
[1, 3.25, 5.5, 7.75, 10]
[ 1. 3.25 5.5 7.75 10. ]
"""
def __init__(self, start, stop, num):
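# The corrected LinSpace output follows from step = (10 - 1) / (5 - 1) = 2.25,
# and NumPy prints the equivalent array the same way:
import numpy as np
print(np.linspace(1, 10, 5, dtype=np.float32))  # [ 1.    3.25  5.5   7.75 10.  ]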
@@ -230,6 +229,7 @@ class LGamma(Cell):
>>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> op = nn.LGamma()
>>> output = op(input_x)
>>> print(output)
[3.5762787e-07 6.9314754e-01 1.7917603e+00]
"""
@@ -830,9 +830,13 @@ class Moments(Cell):
Examples:
>>> net = nn.Moments(axis=3, keep_dims=True)
>>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
>>> mean, var = net(input_x)
mean: [[[[2.5], [4.5]]]]
var: [[[[1.25], [1.25]]]]
>>> output = net(input_x)
>>> print(output)
(Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
[[[[ 2.50000000e+00],
[ 4.50000000e+00]]]]), Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
[[[[ 1.25000000e+00],
[ 1.25000000e+00]]]]))
"""
def __init__(self, axis=None, keep_dims=None):
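# Worked check of the Moments doctest along axis 3 (population variance):
import numpy as np
x = np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]], dtype=np.float32)
print(x.mean(axis=3, keepdims=True).ravel())  # [2.5 4.5]
print(x.var(axis=3, keepdims=True).ravel())   # [1.25 1.25]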
@@ -285,12 +285,11 @@ class BatchNorm1d(_BatchNorm):
Examples:
>>> net = nn.BatchNorm1d(num_features=4)
>>> input = Tensor(np.random.randint(0, 255, [3, 4]), mindspore.float32)
>>> result = net(input)
>>> print(result)
[[ 57.99971 50.99974 220.99889 222.99889 ]
[106.99947 193.99902 77.99961 101.99949 ]
[ 85.99957 188.99905 46.99976 226.99887 ]]
>>> input = Tensor(np.random.randint(0, 255, [2, 4]), mindspore.float32)
>>> output = net(input)
>>> print(output)
[[210.99895 136.99931 89.99955 240.9988 ]
[ 87.99956 157.9992 89.99955 42.999786]]
"""
def __init__(self,
@@ -371,23 +370,15 @@ class BatchNorm2d(_BatchNorm):
Examples:
>>> net = nn.BatchNorm2d(num_features=3)
>>> input = Tensor(np.random.randint(0, 255, [1, 3, 4, 4]), mindspore.float32)
>>> result = net(input)
>>> print(result)
[[[[148.99925 148.99925 178.9991 77.99961 ]
[ 41.99979 97.99951 157.9992 94.99953 ]
[ 87.99956 158.9992 50.99974 179.9991 ]
[146.99927 27.99986 119.9994 253.99873 ]]
[[178.9991 187.99905 190.99904 88.99956 ]
[213.99893 158.9992 13.99993 200.999 ]
[224.99887 56.99971 246.99876 239.9988 ]
[ 97.99951 34.99983 28.99986 57.99971 ]]
[[ 14.99993 31.99984 136.99931 207.99896 ]
[180.9991 28.99986 23.99988 71.99964 ]
[112.99944 36.99981 213.99893 71.99964 ]
[ 8.99996 162.99919 157.9992 41.99979 ]]]]
>>> input = Tensor(np.random.randint(0, 255, [1, 3, 2, 2]), mindspore.float32)
>>> output = net(input)
>>> print(output)
[[[[128.99936 53.99973]
[191.99904 183.99908]]
[[146.99927 182.99908]
[184.99907 120.9994 ]]
[[ 33.99983 234.99883]
[188.99905 11.99994]]]]
"""
def __init__(self,
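# Why the BatchNorm outputs above sit just below their integer inputs: with
# freshly initialized running statistics (mean 0, variance 1) and eps=1e-5
# (an assumption about the evaluation mode used for the doctest),
# normalization reduces to x / sqrt(1 + eps):
import numpy as np
x = np.array([211., 137., 90., 241.], dtype=np.float32)
print(x / np.sqrt(np.float32(1.0 + 1e-5)))
# [210.99895 136.99931  89.99955 240.9988 ]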
@@ -618,7 +609,7 @@ class GroupNorm(Cell):
[[[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]],
[0. 0. 0. 0.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
@@ -107,19 +107,7 @@ class MaxPool2d(_PoolNd):
Examples:
>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> print(x)
[[[[1. 5. 5. 1.]
[0. 3. 4. 8.]
[4. 2. 7. 6.]
[4. 9. 0. 1.]]
[[3. 6. 2. 6.]
[4. 4. 7. 8.]
[0. 0. 4. 0.]
[1. 8. 7. 0.]]]]
>>> output = pool(x)
>>> reuslt = output.shape
>>> print(result)
(1, 2, 2, 2)
>>> print(output)
[[[[7. 8.]
[9. 9.]]
@@ -272,19 +260,7 @@ class AvgPool2d(_PoolNd):
Examples:
>>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> print(x)
[[[[5. 5. 9. 9.]
[8. 4. 3. 0.]
[2. 7. 1. 2.]
[1. 8. 3. 3.]]
[[6. 8. 2. 4.]
[3. 0. 2. 1.]
[0. 8. 9. 7.]
[2. 1. 4. 9.]]]]
>>> output = pool(x)
>>> result = output.shape
>>> print(result)
(1, 2, 2, 2)
>>> print(output)
[[[[4.888889 4.4444447]
[4.111111 3.4444444]]
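# Worked check of the first AvgPool2d entry: the removed print(x) shows the
# run's input, and a 3x3 window at stride 1 averages its top-left block.
import numpy as np
block = np.array([[5, 5, 9], [8, 4, 3], [2, 7, 1]], dtype=np.float32)
print(block.mean())  # 4.888889 (44 / 9)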
@@ -234,9 +234,10 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
Examples:
>>> fake_quant = nn.FakeQuantWithMinMaxObserver()
>>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> result = fake_quant(input)
>>> print(result)
[[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0. , -0.9882355]]
>>> output = fake_quant(input)
>>> print(output)
[[ 0.9882355 1.9764705 0.9882355]
[-1.9764705 0. -0.9882355]]
"""
def __init__(self,
@@ -589,11 +590,10 @@ class Conv2dBnFoldQuant(Cell):
Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
>>> quant_config=qconfig)
... quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
>>> result = conv2d_bnfold(input)
>>> output = result.shape
>>> print(output)
>>> output = conv2d_bnfold(input)
>>> print(output.shape)
(2, 6, 2, 2)
"""
@@ -775,11 +775,10 @@ class Conv2dBnWithoutFoldQuant(Cell):
Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
>>> quant_config=qconfig)
... quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mstype.float32)
>>> result = conv2d_no_bnfold(input)
>>> output = result.shape
>>> print(output)
>>> output = conv2d_no_bnfold(input)
>>> print(output.shape)
(2, 6, 2, 2)
"""
@@ -897,11 +896,10 @@ class Conv2dQuant(Cell):
Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
>>> quant_config=qconfig)
... quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
>>> result = conv2d_quant(input)
>>> output = result.shape
>>> print(output)
>>> output = conv2d_quant(input)
>>> print(output.shape)
(2, 6, 2, 2)
"""
@@ -1106,9 +1104,10 @@ class ActQuant(_QuantActivation):
>>> qconfig = compression.quant.create_quant_config()
>>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
>>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)
>>> result = act_quant(input)
>>> print(result)
[[0.9882355, 1.9764705, 0.], [0., 0., 0.]]
>>> output = act_quant(input)
>>> print(output)
[[0.9882355 1.9764705 0. ]
[0. 0. 0. ]]
"""
def __init__(self,
@@ -1168,9 +1167,10 @@ class TensorAddQuant(Cell):
>>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
>>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32)
>>> result = add_quant(input_x1, input_x2)
>>> print(result)
[[1.9764705, 3.011765, 1.9764705], [-0.9882355, 0.9882355, 0.]]
>>> output = add_quant(input_x1, input_x2)
>>> print(output)
[[ 1.9764705 3.011765 1.9764705]
[-0.9882355 0.9882355 0. ]]
"""
def __init__(self,
@@ -1215,9 +1215,10 @@ class MulQuant(Cell):
>>> mul_quant = nn.MulQuant(quant_config=qconfig)
>>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)
>>> result = mul_quant(input_x1, input_x2)
>>> print(result)
[[1.9764705, 4.0000005, 1.9764705], [-4., 0., -1.9764705]]
>>> output = mul_quant(input_x1, input_x2)
>>> print(output)
[[ 1.9764705 4.0000005 1.9764705]
[-4. 0. -1.9764705]]
"""
def __init__(self,
@@ -95,7 +95,8 @@ class L1Loss(_Loss):
>>> loss = nn.L1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> loss(input_data, target_data)
>>> output = loss(input_data, target_data)
>>> print(output)
0.33333334
"""
def __init__(self, reduction='mean'):
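# Worked check of the L1Loss doctest: mean(|[1, 2, 3] - [1, 2, 2]|) = 1/3.
import numpy as np
print(np.abs(np.array([1., 2., 3.]) - np.array([1., 2., 2.])).mean())
# 0.3333333333333333 (0.33333334 in float32)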
@@ -183,7 +184,9 @@ class SmoothL1Loss(_Loss):
>>> loss = nn.SmoothL1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> loss(input_data, target_data)
>>> output = loss(input_data, target_data)
>>> print(output)
[0. 0. 0.5]
"""
def __init__(self, beta=1.0):
super(SmoothL1Loss, self).__init__()
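# The [0. 0. 0.5] output matches the usual smooth-L1 definition with beta=1:
# 0.5 * d**2 / beta where |d| < beta, else |d| - 0.5 * beta.
import numpy as np
d = np.array([0., 0., 1.], dtype=np.float32)
print(np.where(np.abs(d) < 1.0, 0.5 * d ** 2, np.abs(d) - 0.5))  # [0.  0.  0.5]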
@@ -236,7 +239,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
>>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
>>> labels_np = np.ones([1,]).astype(np.int32)
>>> labels = Tensor(labels_np)
>>> loss(logits, labels)
>>> output = loss(logits, labels)
>>> print(output)
[5.6924148]
"""
def __init__(self,
sparse=False,
@@ -299,7 +304,7 @@ class SampledSoftmaxLoss(_Loss):
>>> labels = Tensor([0, 1, 2])
>>> inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)
>>> output = loss(weights, biases, labels, inputs)
>>> print(output) # output is ranndom
>>> print(output)
[ 4.0181947 46.050743 7.0009117]
"""
@@ -557,7 +562,7 @@ class CosineEmbeddingLoss(_Loss):
>>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
>>> output = cosine_embedding_loss(x1, x2, y)
>>> print(output)
[0.0003426671]
[0.0003426075]
"""
def __init__(self, margin=0.0, reduction="mean"):
super(CosineEmbeddingLoss, self).__init__(reduction)
@@ -39,7 +39,9 @@ class TopKCategoricalAccuracy(Metric):
>>> topk = nn.TopKCategoricalAccuracy(3)
>>> topk.clear()
>>> topk.update(x, y)
>>> result = topk.eval()
>>> output = topk.eval()
>>> print(output)
0.6666666666666666
"""
def __init__(self, k):
super(TopKCategoricalAccuracy, self).__init__()
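# Top-k accuracy counts a sample as correct when its label appears among the
# k highest scores. A minimal sketch (the doctest's x and y are defined in
# lines elided from this hunk):
import numpy as np
def topk_acc(scores, labels, k):
    topk = np.argsort(scores, axis=1)[:, -k:]
    return float(np.mean([y in row for y, row in zip(labels, topk)]))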
@@ -103,7 +105,9 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
>>> topk = nn.Top1CategoricalAccuracy()
>>> topk.clear()
>>> topk.update(x, y)
>>> result = topk.eval()
>>> output = topk.eval()
>>> print(output)
0.0
"""
def __init__(self):
super(Top1CategoricalAccuracy, self).__init__(1)
@@ -121,7 +125,9 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
>>> topk = nn.Top5CategoricalAccuracy()
>>> topk.clear()
>>> topk.update(x, y)
>>> result = topk.eval()
>>> output = topk.eval()
>>> print(output)
1.0
"""
def __init__(self):
super(Top5CategoricalAccuracy, self).__init__(5)
@@ -45,6 +45,7 @@ class Exp(PowerTransform):
... ans2 = self.s1.inverse(value)
... ans3 = self.s1.forward_log_jacobian(value)
... ans4 = self.s1.inverse_log_jacobian(value)
...
"""
def __init__(self,
@@ -53,6 +53,7 @@ class GumbelCDF(Bijector):
... ans2 = self.gum.inverse(value)
... ans3 = self.gum.forward_log_jacobian(value)
... ans4 = self.gum.inverse_log_jacobian(value)
...
"""
def __init__(self,
@@ -57,6 +57,7 @@ class PowerTransform(Bijector):
... ans2 = self.s1.inverse(value)
... ans3 = self.s1.forward_log_jacobian(value)
... ans4 = self.s1.inverse_log_jacobian(value)
...
"""
def __init__(self,
@@ -53,6 +53,7 @@ class ScalarAffine(Bijector):
... ans2 = self.s1.inverse(value)
... ans3 = self.s1.forward_log_jacobian(value)
... ans4 = self.s1.inverse_log_jacobian(value)
...
"""
def __init__(self,
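# The bijector hunks above and the distribution hunks below make one
# mechanical doctest fix: only the first physical line of a multi-line
# statement carries the `>>>` prompt; continuation lines must use `...`,
# and a bare `...` closes the block. Illustration:
>>> class Net(Cell):
...     def __init__(self):
...         super(Net, self).__init__()
...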
@@ -50,62 +50,63 @@ class Bernoulli(Distribution):
>>>
>>> # To use the Bernoulli distribution in a network.
>>> class net(Cell):
>>> def __init__(self):
>>> super(net, self).__init__():
>>> self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
>>> self.b2 = msd.Bernoulli(dtype=mstype.int32)
>>>
>>> # All the following calls in construct are valid.
>>> def construct(self, value, probs_b, probs_a):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = self.b1.prob(value)
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = self.b1.prob(value, probs_b)
>>> # `probs` must be passed in during function calls.
>>> ans = self.b2.prob(value, probs_a)
>>>
>>>
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>>
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.b1.mean() # return 0.5
>>> ans = self.b1.mean(probs_b) # return probs_b
>>> # `probs` must be passed in during function calls.
>>> ans = self.b2.mean(probs_a)
>>>
>>>
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
>>> # probs1_b (Tensor): the probability of success of distribution b.
>>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
>>>
>>> # Examples of kl_loss. `cross_entropy` is similar.
>>> ans = self.b1.kl_loss('Bernoulli', probs_b)
>>> ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
>>> # An additional `probs_a` must be passed in.
>>> ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
>>>
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ().
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>> ans = self.b1.sample()
>>> ans = self.b1.sample((2,3))
>>> ans = self.b1.sample((2,3), probs_b)
>>> ans = self.b2.sample((2,3), probs_a)
... def __init__(self):
... super(net, self).__init__():
... self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
... self.b2 = msd.Bernoulli(dtype=mstype.int32)
...
... # All the following calls in construct are valid.
... def construct(self, value, probs_b, probs_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # probs1 (Tensor): the probability of success. Default: self.probs.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.b1.prob(value)
... # Evaluate `prob` with respect to distribution b.
... ans = self.b1.prob(value, probs_b)
... # `probs` must be passed in during function calls.
... ans = self.b2.prob(value, probs_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # probs1 (Tensor): the probability of success. Default: self.probs.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.b1.mean() # return 0.5
... ans = self.b1.mean(probs_b) # return probs_b
... # `probs` must be passed in during function calls.
... ans = self.b2.mean(probs_a)
...
...
... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
... # Args:
... # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
... # probs1_b (Tensor): the probability of success of distribution b.
... # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
...
... # Examples of kl_loss. `cross_entropy` is similar.
... ans = self.b1.kl_loss('Bernoulli', probs_b)
... ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
... # An additional `probs_a` must be passed in.
... ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ().
... # probs1 (Tensor): the probability of success. Default: self.probs.
... ans = self.b1.sample()
... ans = self.b1.sample((2,3))
... ans = self.b1.sample((2,3), probs_b)
... ans = self.b2.sample((2,3), probs_a)
...
"""
def __init__(self,
@@ -46,59 +46,60 @@ class Categorical(Distribution):
>>>
>>> # To use a Categorical distribution in a network
>>> class net(Cell):
>>> def __init__(self, probs):
>>> super(net, self).__init__():
>>> self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
>>> self.ca1 = msd.Categorical(dtype=mstype.int32)
>>>
>>> # All the following calls in construct are valid
>>> def construct(self, value):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs (Tensor): event probabilities. Default: self.probs.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = self.ca.prob(value)
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = self.ca.prob(value, probs_b)
>>> # `probs` must be passed in during function calls.
>>> ans = self.ca1.prob(value, probs_a)
>>>
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs (Tensor): event probabilities. Default: self.probs.
>>>
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.ca.mean() # return 0.8
>>> ans = self.ca.mean(probs_b)
>>> # `probs` must be passed in during function calls.
>>> ans = self.ca1.mean(probs_a)
>>>
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Categorical' is supported.
>>> # probs_b (Tensor): event probabilities of distribution b.
>>> # probs (Tensor): event probabilities of distribution a. Default: self.probs.
>>>
>>> # Examples of kl_loss. `cross_entropy` is similar.
>>> ans = self.ca.kl_loss('Categorical', probs_b)
>>> ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
>>> # An additional `probs` must be passed in.
>>> ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ().
>>> # probs (Tensor): event probabilities. Default: self.probs.
>>> ans = self.ca.sample()
>>> ans = self.ca.sample((2,3))
>>> ans = self.ca.sample((2,3), probs_b)
>>> ans = self.ca1.sample((2,3), probs_a)
... def __init__(self, probs):
... super(net, self).__init__():
... self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
... self.ca1 = msd.Categorical(dtype=mstype.int32)
...
... # All the following calls in construct are valid
... def construct(self, value):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # probs (Tensor): event probabilities. Default: self.probs.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.ca.prob(value)
... # Evaluate `prob` with respect to distribution b.
... ans = self.ca.prob(value, probs_b)
... # `probs` must be passed in during function calls.
... ans = self.ca1.prob(value, probs_a)
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # probs (Tensor): event probabilities. Default: self.probs.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.ca.mean() # return 0.8
... ans = self.ca.mean(probs_b)
... # `probs` must be passed in during function calls.
... ans = self.ca1.mean(probs_a)
...
... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
... # Args:
... # dist (str): the name of the distribution. Only 'Categorical' is supported.
... # probs_b (Tensor): event probabilities of distribution b.
... # probs (Tensor): event probabilities of distribution a. Default: self.probs.
...
... # Examples of kl_loss. `cross_entropy` is similar.
... ans = self.ca.kl_loss('Categorical', probs_b)
... ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
... # An additional `probs` must be passed in.
... ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ().
... # probs (Tensor): event probabilities. Default: self.probs.
... ans = self.ca.sample()
... ans = self.ca.sample((2,3))
... ans = self.ca.sample((2,3), probs_b)
... ans = self.ca1.sample((2,3), probs_a)
...
"""
def __init__(self,
@@ -52,62 +52,63 @@ class Exponential(Distribution):
>>>
>>> # To use an Exponential distribution in a network.
>>> class net(Cell):
>>> def __init__(self):
>>> super(net, self).__init__():
>>> self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
>>> self.e2 = msd.Exponential(dtype=mstype.float32)
>>>
>>> # All the following calls in construct are valid.
>>> def construct(self, value, rate_b, rate_a):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = self.e1.prob(value)
>>> # Evaluate with respect to distribution b.
>>> ans = self.e1.prob(value, rate_b)
>>> # `rate` must be passed in during function calls.
>>> ans = self.e2.prob(value, rate_a)
>>>
>>>
>>> # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
>>> # Args:
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>>
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.e1.mean() # return 2
>>> ans = self.e1.mean(rate_b) # return 1 / rate_b
>>> # `rate` must be passed in during function calls.
>>> ans = self.e2.mean(rate_a)
>>>
>>>
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same.
>>> # Args:
>>> # dist (str): The name of the distribution. Only 'Exponential' is supported.
>>> # rate_b (Tensor): the rate of distribution b.
>>> # rate_a (Tensor): the rate of distribution a. Default: self.rate.
>>>
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.e1.kl_loss('Exponential', rate_b)
>>> ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
>>> # An additional `rate` must be passed in.
>>> ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
>>>
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # probs1 (Tensor): the rate of the distribution. Default: self.rate.
>>> ans = self.e1.sample()
>>> ans = self.e1.sample((2,3))
>>> ans = self.e1.sample((2,3), rate_b)
>>> ans = self.e2.sample((2,3), rate_a)
... def __init__(self):
... super(net, self).__init__():
... self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
... self.e2 = msd.Exponential(dtype=mstype.float32)
...
... # All the following calls in construct are valid.
... def construct(self, value, rate_b, rate_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # rate (Tensor): the rate of the distribution. Default: self.rate.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.e1.prob(value)
... # Evaluate with respect to distribution b.
... ans = self.e1.prob(value, rate_b)
... # `rate` must be passed in during function calls.
... ans = self.e2.prob(value, rate_a)
...
...
... # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
... # Args:
... # rate (Tensor): the rate of the distribution. Default: self.rate.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.e1.mean() # return 2
... ans = self.e1.mean(rate_b) # return 1 / rate_b
... # `rate` must be passed in during function calls.
... ans = self.e2.mean(rate_a)
...
...
... # Interfaces of `kl_loss` and `cross_entropy` are the same.
... # Args:
... # dist (str): The name of the distribution. Only 'Exponential' is supported.
... # rate_b (Tensor): the rate of distribution b.
... # rate_a (Tensor): the rate of distribution a. Default: self.rate.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.e1.kl_loss('Exponential', rate_b)
... ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
... # An additional `rate` must be passed in.
... ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # probs1 (Tensor): the rate of the distribution. Default: self.rate.
... ans = self.e1.sample()
... ans = self.e1.sample((2,3))
... ans = self.e1.sample((2,3), rate_b)
... ans = self.e2.sample((2,3), rate_a)
...
"""
def __init__(self,
@@ -53,62 +53,63 @@ class Geometric(Distribution):
>>>
>>> # To use a Geometric distribution in a network.
>>> class net(Cell):
>>> def __init__(self):
>>> super(net, self).__init__():
>>> self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
>>> self.g2 = msd.Geometric(dtype=mstype.int32)
>>>
>>> # The following calls are valid in construct.
>>> def construct(self, value, probs_b, probs_a):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = self.g1.prob(value)
>>> # Evaluate with respect to distribution b.
>>> ans = self.g1.prob(value, probs_b)
>>> # `probs` must be passed in during function calls.
>>> ans = self.g2.prob(value, probs_a)
>>>
>>>
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
>>>
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.g1.mean() # return 1.0
>>> ans = self.g1.mean(probs_b)
>>> # Probs must be passed in during function calls
>>> ans = self.g2.mean(probs_a)
>>>
>>>
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
>>> # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
>>> # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
>>>
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.g1.kl_loss('Geometric', probs_b)
>>> ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
>>> # An additional `probs` must be passed in.
>>> ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
>>>
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
>>> ans = self.g1.sample()
>>> ans = self.g1.sample((2,3))
>>> ans = self.g1.sample((2,3), probs_b)
>>> ans = self.g2.sample((2,3), probs_a)
... def __init__(self):
... super(net, self).__init__():
... self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
... self.g2 = msd.Geometric(dtype=mstype.int32)
...
... # The following calls are valid in construct.
... def construct(self, value, probs_b, probs_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.g1.prob(value)
... # Evaluate with respect to distribution b.
... ans = self.g1.prob(value, probs_b)
... # `probs` must be passed in during function calls.
... ans = self.g2.prob(value, probs_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.g1.mean() # return 1.0
... ans = self.g1.mean(probs_b)
... # Probs must be passed in during function calls
... ans = self.g2.mean(probs_a)
...
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
... # Args:
... # dist (str): the name of the distribution. Only 'Geometric' is supported.
... # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
... # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.g1.kl_loss('Geometric', probs_b)
... ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
... # An additional `probs` must be passed in.
... ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
... ans = self.g1.sample()
... ans = self.g1.sample((2,3))
... ans = self.g1.sample((2,3), probs_b)
... ans = self.g2.sample((2,3), probs_a)
...
"""
def __init__(self,
@@ -50,47 +50,48 @@ class Gumbel(TransformedDistribution):
>>>
>>> # To use a Gumbel distribution in a network.
>>> class net(Cell):
>>> def __init__(self):
>>> super(net, self).__init__():
>>> self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
>>>
>>> # The following calls are valid in construct.
>>> def construct(self, value, loc_b, scale_b):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
>>> # arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function.
>>> ans = self.g1.prob(value)
>>>
>>> # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
>>> ans = self.g1.mean()
>>> ans = self.g1.mode()
>>> ans = self.g1.sd()
>>> ans = self.g1.entropy()
>>> ans = self.g1.var()
>>>
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Gumbel" is supported.
>>> # loc_b (Tensor): the loc of distribution b.
>>> # scale_b (Tensor): the scale distribution b.
>>>
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
>>> ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>>
>>> ans = self.g1.sample()
>>> ans = self.g1.sample((2,3))
... def __init__(self):
... super(net, self).__init__():
... self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
...
... # The following calls are valid in construct.
... def construct(self, value, loc_b, scale_b):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
... # arguments as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing 'prob' by the name of the function.
... ans = self.g1.prob(value)
...
... # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
... ans = self.g1.mean()
... ans = self.g1.mode()
... ans = self.g1.sd()
... ans = self.g1.entropy()
... ans = self.g1.var()
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
... # Args:
... # dist (str): the type of the distributions. Only "Gumbel" is supported.
... # loc_b (Tensor): the loc of distribution b.
... # scale_b (Tensor): the scale distribution b.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
... ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
...
... ans = self.g1.sample()
... ans = self.g1.sample((2,3))
...
"""
def __init__(self,
@@ -53,75 +53,76 @@ class LogNormal(msd.TransformedDistribution):
>>>
>>> # To use a LogNormal distribution in a network.
>>> class net(Cell):
>>> def __init__(self):
>>> super(net, self).__init__():
>>> self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32)
>>> self.n2 = msd.LogNormal(dtype=mstype.float32)
>>>
>>> # The following calls are valid in construct.
>>> def construct(self, value, loc_b, scale_b, loc_a, scale_a):
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
>>> # arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
>>> # the mean of the underlying Normal distribution will be used.
>>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
>>> # the standard deviation of the underlying Normal distribution will be used.
>>>
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function.
>>> ans = self.n1.prob(value)
>>> # Evaluate with respect to distribution b.
>>> ans = self.n1.prob(value, loc_b, scale_b)
>>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
>>> ans = self.n2.prob(value, loc_a, scale_a)
>>>
>>>
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
>>> # the mean of the underlying Normal distribution will be used.
>>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
>>> # the standard deviation of the underlying Normal distribution will be used.
>>>
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.n1.mean() # return 0.0
>>> ans = self.n1.mean(loc_b, scale_b) # return mean_b
>>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
>>> ans = self.n2.mean(loc_a, scale_a)
>>>
>>>
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Normal" is supported.
>>> # loc_b (Tensor): the loc of distribution b.
>>> # scale_b (Tensor): the scale distribution b.
>>> # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None,
>>> # the mean of the underlying Normal distribution will be used.
>>> # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None,
>>> # the standard deviation of the underlying Normal distribution will be used.
>>>
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.n1.kl_loss('Normal', loc_b, scale_b)
>>> ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
>>> # Additional `loc` and `scale` must be passed in since they were not passed in construct.
>>> ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
>>>
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None,
>>> # the mean of the underlying Normal distribution will be used.
>>> # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None,
>>> # the standard deviation of the underlying Normal distribution will be used.
>>> ans = self.n1.sample()
>>> ans = self.n1.sample((2,3))
>>> ans = self.n1.sample((2,3), loc_b, scale_b)
>>> ans = self.n2.sample((2,3), loc_a, scale_a)
| ... def __init__(self): | |||
| ... super(net, self).__init__(): | |||
| ... self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32) | |||
| ... self.n2 = msd.LogNormal(dtype=mstype.float32) | |||
| ... | |||
| ... # The following calls are valid in construct. | |||
| ... def construct(self, value, loc_b, scale_b, loc_a, scale_a): | |||
| ... | |||
| ... # Private interfaces of probability functions corresponding to public interfaces, including | |||
| ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same | |||
| ... # arguments as follows. | |||
| ... # Args: | |||
| ... # value (Tensor): the value to be evaluated. | |||
| ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, | |||
| ... # the mean of the underlying Normal distribution will be used. | |||
| ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, | |||
| ... # the standard deviation of the underlying Normal distribution will be used. | |||
| ... | |||
| ... # Examples of `prob`. | |||
| ... # Similar calls can be made to other probability functions | |||
| ... # by replacing 'prob' by the name of the function. | |||
| ... ans = self.n1.prob(value) | |||
| ... # Evaluate with respect to distribution b. | |||
| ... ans = self.n1.prob(value, loc_b, scale_b) | |||
| ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct. | |||
| ... ans = self.n2.prob(value, loc_a, scale_a) | |||
| ... | |||
| ... | |||
| ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. | |||
| ... # Args: | |||
| ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, | |||
| ... # the mean of the underlying Normal distribution will be used. | |||
| ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, | |||
| ... # the standard deviation of the underlying Normal distribution will be used. | |||
| ... | |||
| ... # Example of `mean`. `sd`, `var`, and `entropy` are similar. | |||
| ... ans = self.n1.mean() # return 0.0 | |||
| ... ans = self.n1.mean(loc_b, scale_b) # return mean_b | |||
| ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct. | |||
| ... ans = self.n2.mean(loc_a, scale_a) | |||
| ... | |||
| ... | |||
| ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same: | |||
| ... # Args: | |||
| ... # dist (str): the type of the distributions. Only "Normal" is supported. | |||
| ... # loc_b (Tensor): the loc of distribution b. | |||
| ... # scale_b (Tensor): the scale of distribution b. | |||
| ... # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None, | |||
| ... # the mean of the underlying Normal distribution will be used. | |||
| ... # scale_a (Tensor): the scale of distribution a. Default: None. If `scale` is passed in as None, | |||
| ... # the standard deviation of the underlying Normal distribution will be used. | |||
| ... | |||
| ... # Examples of `kl_loss`. `cross_entropy` is similar. | |||
| ... ans = self.n1.kl_loss('Normal', loc_b, scale_b) | |||
| ... ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) | |||
| ... # Additional `loc` and `scale` must be passed in since they were not passed in construct. | |||
| ... ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) | |||
| ... | |||
| ... # Examples of `sample`. | |||
| ... # Args: | |||
| ... # shape (tuple): the shape of the sample. Default: () | |||
| ... # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None, | |||
| ... # the mean of the underlying Normal distribution will be used. | |||
| ... # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None, | |||
| ... # the standard deviation of the underlying Normal distribution will be used. | |||
| ... ans = self.n1.sample() | |||
| ... ans = self.n1.sample((2,3)) | |||
| ... ans = self.n1.sample((2,3), loc_b, scale_b) | |||
| ... ans = self.n2.sample((2,3), loc_a, scale_a) | |||
| ... | |||
| """ | |||
| def __init__(self, | |||
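| Note: for two LogNormal distributions, `kl_loss` reduces to the KL divergence between the underlying Normal distributions, which has a closed form. A minimal NumPy sketch of that formula (an illustration only, not the MindSpore implementation): | |||
| >>> import numpy as np | |||
| >>> def normal_kl(mu_a, sd_a, mu_b, sd_b): | |||
| ...     # Closed form of KL(N(mu_a, sd_a^2) || N(mu_b, sd_b^2)). | |||
| ...     return np.log(sd_b / sd_a) + (sd_a**2 + (mu_a - mu_b)**2) / (2 * sd_b**2) - 0.5 | |||
| >>> print(normal_kl(0.0, 1.0, 1.0, 2.0)) | |||
| 0.4431471805599453 | |||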
| @@ -53,50 +53,51 @@ class Logistic(Distribution): | |||
| >>> | |||
| >>> # To use a Logistic distribution in a network. | |||
| >>> class net(Cell): | |||
| >>> def __init__(self): | |||
| >>> super(net, self).__init__(): | |||
| >>> self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32) | |||
| >>> self.l2 = msd.Logistic(dtype=mstype.float32) | |||
| >>> | |||
| >>> # The following calls are valid in construct. | |||
| >>> def construct(self, value, loc_b, scale_b, loc_a, scale_a): | |||
| >>> | |||
| >>> # Private interfaces of probability functions corresponding to public interfaces, including | |||
| >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. | |||
| >>> # Args: | |||
| >>> # value (Tensor): the value to be evaluated. | |||
| >>> # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| >>> # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| >>> | |||
| >>> # Examples of `prob`. | |||
| >>> # Similar calls can be made to other probability functions | |||
| >>> # by replacing 'prob' by the name of the function | |||
| >>> ans = self.l1.prob(value) | |||
| >>> # Evaluate with respect to distribution b. | |||
| >>> ans = self.l1.prob(value, loc_b, scale_b) | |||
| >>> # `loc` and `scale` must be passed in during function calls | |||
| >>> ans = self.l2.prob(value, loc_a, scale_a) | |||
| >>> | |||
| >>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments. | |||
| >>> # Args: | |||
| >>> # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| >>> # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| >>> | |||
| >>> # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar. | |||
| >>> ans = self.l1.mean() # return 0.0 | |||
| >>> ans = self.l1.mean(loc_b, scale_b) # return loc_b | |||
| >>> # `loc` and `scale` must be passed in during function calls. | |||
| >>> ans = self.l2.mean(loc_a, scale_a) | |||
| >>> | |||
| >>> # Examples of `sample`. | |||
| >>> # Args: | |||
| >>> # shape (tuple): the shape of the sample. Default: () | |||
| >>> # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| >>> # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| >>> ans = self.l1.sample() | |||
| >>> ans = self.l1.sample((2,3)) | |||
| >>> ans = self.l1.sample((2,3), loc_b, scale_b) | |||
| >>> ans = self.l2.sample((2,3), loc_a, scale_a) | |||
| ... def __init__(self): | |||
| ... super(net, self).__init__() | |||
| ... self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32) | |||
| ... self.l2 = msd.Logistic(dtype=mstype.float32) | |||
| ... | |||
| ... # The following calls are valid in construct. | |||
| ... def construct(self, value, loc_b, scale_b, loc_a, scale_a): | |||
| ... | |||
| ... # Private interfaces of probability functions corresponding to public interfaces, including | |||
| ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. | |||
| ... # Args: | |||
| ... # value (Tensor): the value to be evaluated. | |||
| ... # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| ... # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| ... | |||
| ... # Examples of `prob`. | |||
| ... # Similar calls can be made to other probability functions | |||
| ... # by replacing 'prob' by the name of the function | |||
| ... ans = self.l1.prob(value) | |||
| ... # Evaluate with respect to distribution b. | |||
| ... ans = self.l1.prob(value, loc_b, scale_b) | |||
| ... # `loc` and `scale` must be passed in during function calls | |||
| ... ans = self.l2.prob(value, loc_a, scale_a) | |||
| ... | |||
| ... # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments. | |||
| ... # Args: | |||
| ... # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| ... # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| ... | |||
| ... # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar. | |||
| ... ans = self.l1.mean() # return 0.0 | |||
| ... ans = self.l1.mean(loc_b, scale_b) # return loc_b | |||
| ... # `loc` and `scale` must be passed in during function calls. | |||
| ... ans = self.l2.mean(loc_a, scale_a) | |||
| ... | |||
| ... # Examples of `sample`. | |||
| ... # Args: | |||
| ... # shape (tuple): the shape of the sample. Default: () | |||
| ... # loc (Tensor): the location of the distribution. Default: self.loc. | |||
| ... # scale (Tensor): the scale of the distribution. Default: self.scale. | |||
| ... ans = self.l1.sample() | |||
| ... ans = self.l1.sample((2,3)) | |||
| ... ans = self.l1.sample((2,3), loc_b, scale_b) | |||
| ... ans = self.l2.sample((2,3), loc_a, scale_a) | |||
| ... | |||
| """ | |||
| def __init__(self, | |||
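| Note: `prob` for Logistic(loc, scale) can be spot-checked against the closed-form density exp(-z) / (scale * (1 + exp(-z))**2), where z = (x - loc) / scale; a hedged NumPy sketch, not the MindSpore code path: | |||
| >>> import numpy as np | |||
| >>> def logistic_pdf(x, loc=0.0, scale=1.0): | |||
| ...     z = (x - loc) / scale | |||
| ...     return np.exp(-z) / (scale * (1.0 + np.exp(-z))**2) | |||
| >>> print(logistic_pdf(0.0))  # peak density of the standard Logistic | |||
| 0.25 | |||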
| @@ -53,66 +53,67 @@ class Normal(Distribution): | |||
| >>> | |||
| >>> # To use a Normal distribution in a network. | |||
| >>> class net(Cell): | |||
| >>> def __init__(self): | |||
| >>> super(net, self).__init__(): | |||
| >>> self.n1 = msd.Nomral(0.0, 1.0, dtype=mstype.float32) | |||
| >>> self.n2 = msd.Normal(dtype=mstype.float32) | |||
| >>> | |||
| >>> # The following calls are valid in construct. | |||
| >>> def construct(self, value, mean_b, sd_b, mean_a, sd_a): | |||
| >>> | |||
| >>> # Private interfaces of probability functions corresponding to public interfaces, including | |||
| >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. | |||
| >>> # Args: | |||
| >>> # value (Tensor): the value to be evaluated. | |||
| >>> # mean (Tensor): the mean of distribution. Default: self._mean_value. | |||
| >>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. | |||
| >>> | |||
| >>> # Examples of `prob`. | |||
| >>> # Similar calls can be made to other probability functions | |||
| >>> # by replacing 'prob' by the name of the function | |||
| >>> ans = self.n1.prob(value) | |||
| >>> # Evaluate with respect to distribution b. | |||
| >>> ans = self.n1.prob(value, mean_b, sd_b) | |||
| >>> # `mean` and `sd` must be passed in during function calls | |||
| >>> ans = self.n2.prob(value, mean_a, sd_a) | |||
| >>> | |||
| >>> | |||
| >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. | |||
| >>> # Args: | |||
| >>> # mean (Tensor): the mean of distribution. Default: self._mean_value. | |||
| >>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. | |||
| >>> | |||
| >>> # Example of `mean`. `sd`, `var`, and `entropy` are similar. | |||
| >>> ans = self.n1.mean() # return 0.0 | |||
| >>> ans = self.n1.mean(mean_b, sd_b) # return mean_b | |||
| >>> # `mean` and `sd` must be passed in during function calls. | |||
| >>> ans = self.n2.mean(mean_a, sd_a) | |||
| >>> | |||
| >>> | |||
| >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same: | |||
| >>> # Args: | |||
| >>> # dist (str): the type of the distributions. Only "Normal" is supported. | |||
| >>> # mean_b (Tensor): the mean of distribution b. | |||
| >>> # sd_b (Tensor): the standard deviation distribution b. | |||
| >>> # mean_a (Tensor): the mean of distribution a. Default: self._mean_value. | |||
| >>> # sd_a (Tensor): the standard deviation distribution a. Default: self._sd_value. | |||
| >>> | |||
| >>> # Examples of `kl_loss`. `cross_entropy` is similar. | |||
| >>> ans = self.n1.kl_loss('Normal', mean_b, sd_b) | |||
| >>> ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) | |||
| >>> # Additional `mean` and `sd` must be passed in. | |||
| >>> ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) | |||
| >>> | |||
| >>> # Examples of `sample`. | |||
| >>> # Args: | |||
| >>> # shape (tuple): the shape of the sample. Default: () | |||
| >>> # mean (Tensor): the mean of the distribution. Default: self._mean_value. | |||
| >>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value. | |||
| >>> ans = self.n1.sample() | |||
| >>> ans = self.n1.sample((2,3)) | |||
| >>> ans = self.n1.sample((2,3), mean_b, sd_b) | |||
| >>> ans = self.n2.sample((2,3), mean_a, sd_a) | |||
| ... def __init__(self): | |||
| ... super(net, self).__init__() | |||
| ... self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32) | |||
| ... self.n2 = msd.Normal(dtype=mstype.float32) | |||
| ... | |||
| ... # The following calls are valid in construct. | |||
| ... def construct(self, value, mean_b, sd_b, mean_a, sd_a): | |||
| ... | |||
| ... # Private interfaces of probability functions corresponding to public interfaces, including | |||
| ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. | |||
| ... # Args: | |||
| ... # value (Tensor): the value to be evaluated. | |||
| ... # mean (Tensor): the mean of distribution. Default: self._mean_value. | |||
| ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. | |||
| ... | |||
| ... # Examples of `prob`. | |||
| ... # Similar calls can be made to other probability functions | |||
| ... # by replacing 'prob' by the name of the function | |||
| ... ans = self.n1.prob(value) | |||
| ... # Evaluate with respect to distribution b. | |||
| ... ans = self.n1.prob(value, mean_b, sd_b) | |||
| ... # `mean` and `sd` must be passed in during function calls | |||
| ... ans = self.n2.prob(value, mean_a, sd_a) | |||
| ... | |||
| ... | |||
| ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. | |||
| ... # Args: | |||
| ... # mean (Tensor): the mean of distribution. Default: self._mean_value. | |||
| ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. | |||
| ... | |||
| ... # Example of `mean`. `sd`, `var`, and `entropy` are similar. | |||
| ... ans = self.n1.mean() # return 0.0 | |||
| ... ans = self.n1.mean(mean_b, sd_b) # return mean_b | |||
| ... # `mean` and `sd` must be passed in during function calls. | |||
| ... ans = self.n2.mean(mean_a, sd_a) | |||
| ... | |||
| ... | |||
| ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same: | |||
| ... # Args: | |||
| ... # dist (str): the type of the distributions. Only "Normal" is supported. | |||
| ... # mean_b (Tensor): the mean of distribution b. | |||
| ... # sd_b (Tensor): the standard deviation of distribution b. | |||
| ... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value. | |||
| ... # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value. | |||
| ... | |||
| ... # Examples of `kl_loss`. `cross_entropy` is similar. | |||
| ... ans = self.n1.kl_loss('Normal', mean_b, sd_b) | |||
| ... ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) | |||
| ... # Additional `mean` and `sd` must be passed in. | |||
| ... ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) | |||
| ... | |||
| ... # Examples of `sample`. | |||
| ... # Args: | |||
| ... # shape (tuple): the shape of the sample. Default: () | |||
| ... # mean (Tensor): the mean of the distribution. Default: self._mean_value. | |||
| ... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value. | |||
| ... ans = self.n1.sample() | |||
| ... ans = self.n1.sample((2,3)) | |||
| ... ans = self.n1.sample((2,3), mean_b, sd_b) | |||
| ... ans = self.n2.sample((2,3), mean_a, sd_a) | |||
| ... | |||
| """ | |||
| def __init__(self, | |||
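| Note: `prob` for a Normal distribution should agree with the closed-form density exp(-(x - mean)**2 / (2 * sd**2)) / (sd * sqrt(2 * pi)); a small NumPy sketch for spot checks: | |||
| >>> import numpy as np | |||
| >>> def normal_pdf(x, mean=0.0, sd=1.0): | |||
| ...     return np.exp(-(x - mean)**2 / (2 * sd**2)) / (sd * np.sqrt(2 * np.pi)) | |||
| >>> print(round(float(normal_pdf(0.0)), 6))  # 1 / sqrt(2 * pi) | |||
| 0.398942 | |||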
| @@ -54,19 +54,20 @@ class TransformedDistribution(Distribution): | |||
| >>> import mindspore.nn.probability.distribution as msd | |||
| >>> import mindspore.nn.probability.bijector as msb | |||
| >>> ln = msd.TransformedDistribution(msb.Exp(), | |||
| >>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) | |||
| >>> | |||
| ... msd.Normal(0.0, 1.0, dtype=mstype.float32)) | |||
| ... | |||
| >>> # To use a transformed distribution in a network. | |||
| >>> class net(Cell): | |||
| >>> def __init__(self): | |||
| >>> super(net, self).__init__(): | |||
| >>> self.ln = msd.TransformedDistribution(msb.Exp(), | |||
| >>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) | |||
| >>> | |||
| >>> def construct(self, value): | |||
| >>> # Similar calls can be made to other functions | |||
| >>> # by replacing 'sample' by the name of the function. | |||
| >>> ans = self.ln.sample(shape=(2, 3)) | |||
| ... def __init__(self): | |||
| ... super(net, self).__init__() | |||
| ... self.ln = msd.TransformedDistribution(msb.Exp(), | |||
| ... msd.Normal(0.0, 1.0, dtype=mstype.float32)) | |||
| ... | |||
| ... def construct(self, value): | |||
| ... # Similar calls can be made to other functions | |||
| ... # by replacing 'sample' by the name of the function. | |||
| ... ans = self.ln.sample(shape=(2, 3)) | |||
| ... | |||
| """ | |||
| def __init__(self, | |||
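| Note: applying the `Exp` bijector to a standard Normal gives a LogNormal, so sampling from this transformed distribution amounts to exponentiating Normal draws. A NumPy sketch of the assumed semantics (not the MindSpore internals): | |||
| >>> import numpy as np | |||
| >>> rng = np.random.RandomState(0) | |||
| >>> normal_samples = rng.normal(loc=0.0, scale=1.0, size=(2, 3)) | |||
| >>> lognormal_samples = np.exp(normal_samples)  # forward map of the Exp bijector | |||
| >>> print(lognormal_samples.shape) | |||
| (2, 3) | |||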
| @@ -52,66 +52,67 @@ class Uniform(Distribution): | |||
| >>> | |||
| >>> # To use a Uniform distribution in a network. | |||
| >>> class net(Cell): | |||
| >>> def __init__(self) | |||
| >>> super(net, self).__init__(): | |||
| >>> self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32) | |||
| >>> self.u2 = msd.Uniform(dtype=mstype.float32) | |||
| >>> | |||
| >>> # All the following calls in construct are valid. | |||
| >>> def construct(self, value, low_b, high_b, low_a, high_a): | |||
| >>> | |||
| >>> # Private interfaces of probability functions corresponding to public interfaces, including | |||
| >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments. | |||
| >>> # Args: | |||
| >>> # value (Tensor): the value to be evaluated. | |||
| >>> # low (Tensor): the lower bound of distribution. Default: self.low. | |||
| >>> # high (Tensor): the higher bound of distribution. Default: self.high. | |||
| >>> | |||
| >>> # Examples of `prob`. | |||
| >>> # Similar calls can be made to other probability functions | |||
| >>> # by replacing 'prob' by the name of the function. | |||
| >>> ans = self.u1.prob(value) | |||
| >>> # Evaluate with respect to distribution b. | |||
| >>> ans = self.u1.prob(value, low_b, high_b) | |||
| >>> # `high` and `low` must be passed in during function calls. | |||
| >>> ans = self.u2.prob(value, low_a, high_a) | |||
| >>> | |||
| >>> | |||
| >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. | |||
| >>> # Args: | |||
| >>> # low (Tensor): the lower bound of distribution. Default: self.low. | |||
| >>> # high (Tensor): the higher bound of distribution. Default: self.high. | |||
| >>> | |||
| >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. | |||
| >>> ans = self.u1.mean() # return 0.5 | |||
| >>> ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2 | |||
| >>> # `high` and `low` must be passed in during function calls. | |||
| >>> ans = self.u2.mean(low_a, high_a) | |||
| >>> | |||
| >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same. | |||
| >>> # Args: | |||
| >>> # dist (str): the type of the distributions. Should be "Uniform" in this case. | |||
| >>> # low_b (Tensor): the lower bound of distribution b. | |||
| >>> # high_b (Tensor): the upper bound of distribution b. | |||
| >>> # low_a (Tensor): the lower bound of distribution a. Default: self.low. | |||
| >>> # high_a (Tensor): the upper bound of distribution a. Default: self.high. | |||
| >>> | |||
| >>> # Examples of `kl_loss`. `cross_entropy` is similar. | |||
| >>> ans = self.u1.kl_loss('Uniform', low_b, high_b) | |||
| >>> ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a) | |||
| >>> # Additional `high` and `low` must be passed in. | |||
| >>> ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a) | |||
| >>> | |||
| >>> | |||
| >>> # Examples of `sample`. | |||
| >>> # Args: | |||
| >>> # shape (tuple): the shape of the sample. Default: () | |||
| >>> # low (Tensor): the lower bound of the distribution. Default: self.low. | |||
| >>> # high (Tensor): the upper bound of the distribution. Default: self.high. | |||
| >>> ans = self.u1.sample() | |||
| >>> ans = self.u1.sample((2,3)) | |||
| >>> ans = self.u1.sample((2,3), low_b, high_b) | |||
| >>> ans = self.u2.sample((2,3), low_a, high_a) | |||
| ... def __init__(self): | |||
| ... super(net, self).__init__() | |||
| ... self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32) | |||
| ... self.u2 = msd.Uniform(dtype=mstype.float32) | |||
| ... | |||
| ... # All the following calls in construct are valid. | |||
| ... def construct(self, value, low_b, high_b, low_a, high_a): | |||
| ... | |||
| ... # Private interfaces of probability functions corresponding to public interfaces, including | |||
| ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments. | |||
| ... # Args: | |||
| ... # value (Tensor): the value to be evaluated. | |||
| ... # low (Tensor): the lower bound of distribution. Default: self.low. | |||
| ... # high (Tensor): the upper bound of distribution. Default: self.high. | |||
| ... | |||
| ... # Examples of `prob`. | |||
| ... # Similar calls can be made to other probability functions | |||
| ... # by replacing 'prob' by the name of the function. | |||
| ... ans = self.u1.prob(value) | |||
| ... # Evaluate with respect to distribution b. | |||
| ... ans = self.u1.prob(value, low_b, high_b) | |||
| ... # `high` and `low` must be passed in during function calls. | |||
| ... ans = self.u2.prob(value, low_a, high_a) | |||
| ... | |||
| ... | |||
| ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. | |||
| ... # Args: | |||
| ... # low (Tensor): the lower bound of distribution. Default: self.low. | |||
| ... # high (Tensor): the upper bound of distribution. Default: self.high. | |||
| ... | |||
| ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. | |||
| ... ans = self.u1.mean() # return 0.5 | |||
| ... ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2 | |||
| ... # `high` and `low` must be passed in during function calls. | |||
| ... ans = self.u2.mean(low_a, high_a) | |||
| ... | |||
| ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same. | |||
| ... # Args: | |||
| ... # dist (str): the type of the distributions. Should be "Uniform" in this case. | |||
| ... # low_b (Tensor): the lower bound of distribution b. | |||
| ... # high_b (Tensor): the upper bound of distribution b. | |||
| ... # low_a (Tensor): the lower bound of distribution a. Default: self.low. | |||
| ... # high_a (Tensor): the upper bound of distribution a. Default: self.high. | |||
| ... | |||
| ... # Examples of `kl_loss`. `cross_entropy` is similar. | |||
| ... ans = self.u1.kl_loss('Uniform', low_b, high_b) | |||
| ... ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a) | |||
| ... # Additional `high` and `low` must be passed in. | |||
| ... ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a) | |||
| ... | |||
| ... | |||
| ... # Examples of `sample`. | |||
| ... # Args: | |||
| ... # shape (tuple): the shape of the sample. Default: () | |||
| ... # low (Tensor): the lower bound of the distribution. Default: self.low. | |||
| ... # high (Tensor): the upper bound of the distribution. Default: self.high. | |||
| ... ans = self.u1.sample() | |||
| ... ans = self.u1.sample((2,3)) | |||
| ... ans = self.u1.sample((2,3), low_b, high_b) | |||
| ... ans = self.u2.sample((2,3), low_a, high_a) | |||
| ... | |||
| """ | |||
| def __init__(self, | |||
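| Note: the `mean` comments above follow from the closed forms for Uniform(low, high): mean = (low + high) / 2 and variance = (high - low)**2 / 12. A one-line check in plain Python: | |||
| >>> low, high = 0.0, 1.0 | |||
| >>> print((low + high) / 2, round((high - low)**2 / 12, 6)) | |||
| 0.5 0.083333 | |||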
| @@ -31,14 +31,14 @@ class SparseToDense(Cell): | |||
| Examples: | |||
| >>> class SparseToDenseCell(nn.Cell): | |||
| >>> def __init__(self, dense_shape): | |||
| >>> super(SparseToDenseCell, self).__init__() | |||
| >>> self.dense_shape = dense_shape | |||
| >>> self.sparse_to_dense = nn.SparseToDense() | |||
| >>> def construct(self, indices, values): | |||
| >>> sparse = SparseTensor(indices, values, self.dense_shape) | |||
| >>> return self.sparse_to_dense(sparse) | |||
| >>> | |||
| ... def __init__(self, dense_shape): | |||
| ... super(SparseToDenseCell, self).__init__() | |||
| ... self.dense_shape = dense_shape | |||
| ... self.sparse_to_dense = nn.SparseToDense() | |||
| ... def construct(self, indices, values): | |||
| ... sparse = SparseTensor(indices, values, self.dense_shape) | |||
| ... return self.sparse_to_dense(sparse) | |||
| ... | |||
| >>> indices = Tensor([[0, 1], [1, 2]]) | |||
| >>> values = Tensor([1, 2], dtype=ms.float32) | |||
| >>> dense_shape = (3, 4) | |||
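| Note: for the inputs above, SparseToDense scatters each value at its (row, col) index into a zero-filled array of `dense_shape`; a NumPy sketch of the expected dense result: | |||
| >>> import numpy as np | |||
| >>> dense = np.zeros((3, 4), dtype=np.float32) | |||
| >>> dense[[0, 1], [1, 2]] = [1, 2]  # value 1 at (0, 1), value 2 at (1, 2) | |||
| >>> print(dense) | |||
| [[0. 1. 0. 0.] | |||
|  [0. 0. 2. 0.] | |||
|  [0. 0. 0. 0.]] | |||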
| @@ -1417,13 +1417,15 @@ class IFMR(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> data = Tensor(np.random.rand(1, 3, 6, 4).astype(np.float32)) | |||
| >>> data_min = Tensor([0.1], mstype.float32) | |||
| >>> data_max = Tensor([0.5], mstype.float32) | |||
| >>> data_min = Tensor([0.1], mindspore.float32) | |||
| >>> data_max = Tensor([0.5], mindspore.float32) | |||
| >>> cumsum = Tensor(np.random.rand(4).astype(np.int32)) | |||
| >>> ifmr = Q.IFMR(min_percentile=0.2, max_percentile=0.9, search_range=(1.0, 2.0), | |||
| >>> search_step=1.0, with_offset=False) | |||
| ... search_step=1.0, with_offset=False) | |||
| >>> output = ifmr(data, data_min, data_max, cumsum) | |||
| ([7.87401572e-03], [0.00000000e+00]) | |||
| >>> print(output) | |||
| (Tensor(shape=[1], dtype=Float32, value= [7.87401572e-03]), | |||
| Tensor(shape=[1], dtype=Float32, value= [0.00000000e+00])) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -148,8 +148,8 @@ class ExpandDims(PrimitiveWithInfer): | |||
| >>> expand_dims = P.ExpandDims() | |||
| >>> output = expand_dims(input_tensor, 0) | |||
| >>> print(output) | |||
| [[[2.0, 2.0], | |||
| [2.0, 2.0]]] | |||
| [[[2. 2.] | |||
| [2. 2.]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -230,8 +230,8 @@ class SameTypeShape(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) | |||
| >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) | |||
| >>> out = P.SameTypeShape()(input_x, input_y) | |||
| >>> print(out) | |||
| >>> output = P.SameTypeShape()(input_x, input_y) | |||
| >>> print(output) | |||
| [[2. 2.] | |||
| [2. 2.]] | |||
| """ | |||
| @@ -342,8 +342,8 @@ class IsSubClass(PrimitiveWithInfer): | |||
| bool, the check result. | |||
| Examples: | |||
| >>> result = P.IsSubClass()(mindspore.int32, mindspore.intc) | |||
| >>> print(result) | |||
| >>> output = P.IsSubClass()(mindspore.int32, mindspore.intc) | |||
| >>> print(output) | |||
| True | |||
| """ | |||
| @@ -379,9 +379,9 @@ class IsInstance(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> a = 1 | |||
| >>> result = P.IsInstance()(a, mindspore.int64) | |||
| >>> print(result) | |||
| True | |||
| >>> output = P.IsInstance()(a, mindspore.int32) | |||
| >>> print(output) | |||
| False | |||
| """ | |||
| @prim_attr_register | |||
| @@ -429,9 +429,9 @@ class Reshape(PrimitiveWithInfer): | |||
| >>> reshape = P.Reshape() | |||
| >>> output = reshape(input_tensor, (3, 2)) | |||
| >>> print(output) | |||
| [[-0.1 0.3] | |||
| [3.6 0.4 ] | |||
| [0.5 -3.2]] | |||
| [[-0.1 0.3] | |||
| [ 3.6 0.4] | |||
| [ 0.5 -3.2]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -632,12 +632,12 @@ class Transpose(PrimitiveWithCheck): | |||
| >>> transpose = P.Transpose() | |||
| >>> output = transpose(input_tensor, perm) | |||
| >>> print(output) | |||
| [[[1. 4.] | |||
| [2. 5.] | |||
| [3. 6.]] | |||
| [[7. 10.] | |||
| [8. 11.] | |||
| [9. 12.]]] | |||
| [[[ 1. 4.] | |||
| [ 2. 5.] | |||
| [ 3. 6.]] | |||
| [[ 7. 10.] | |||
| [ 8. 11.] | |||
| [ 9. 12.]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -668,8 +668,9 @@ class Unique(Primitive): | |||
| Examples: | |||
| >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32) | |||
| >>> out = P.Unique()(x) | |||
| (Tensor([1, 2, 5], mindspore.int32), Tensor([0, 1, 2, 1], mindspore.int32)) | |||
| >>> output = P.Unique()(x) | |||
| >>> print(output) | |||
| (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1])) | |||
| """ | |||
| @prim_attr_register | |||
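| Note: the semantics can be mimicked with `np.unique(..., return_inverse=True)`; NumPy sorts the unique values while the operator keeps first-occurrence order, but the two coincide for this input: | |||
| >>> import numpy as np | |||
| >>> x = np.array([1, 2, 5, 2], dtype=np.int32) | |||
| >>> uniq, inv = np.unique(x, return_inverse=True) | |||
| >>> print(uniq, inv) | |||
| [1 2 5] [0 1 2 1] | |||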
| @@ -696,11 +697,11 @@ class GatherV2(PrimitiveWithCheck): | |||
| >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32) | |||
| >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32) | |||
| >>> axis = 1 | |||
| >>> out = P.GatherV2()(input_params, input_indices, axis) | |||
| >>> print(out) | |||
| [[2.0, 7.0], | |||
| [4.0, 54.0], | |||
| [2.0, 55.0]] | |||
| >>> output = P.GatherV2()(input_params, input_indices, axis) | |||
| >>> print(output) | |||
| [[ 2. 7.] | |||
| [ 4. 54.] | |||
| [ 2. 55.]] | |||
| """ | |||
| @prim_attr_register | |||
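| Note: gathering along an axis matches `np.take`; a NumPy sketch reproducing the output above: | |||
| >>> import numpy as np | |||
| >>> params = np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]], dtype=np.float32) | |||
| >>> print(np.take(params, [1, 2], axis=1)) | |||
| [[ 2.  7.] | |||
|  [ 4. 54.] | |||
|  [ 2. 55.]] | |||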
| @@ -770,9 +771,10 @@ class Padding(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.array([[8], [10]]), mindspore.float32) | |||
| >>> pad_dim_size = 4 | |||
| >>> out = P.Padding(pad_dim_size)(x) | |||
| >>> print(out) | |||
| [[8, 0, 0, 0], [10, 0, 0, 0]] | |||
| >>> output = P.Padding(pad_dim_size)(x) | |||
| >>> print(output) | |||
| [[ 8. 0. 0. 0.] | |||
| [10. 0. 0. 0.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -811,9 +813,10 @@ class UniqueWithPad(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32) | |||
| >>> pad_num = 8 | |||
| >>> out = P.UniqueWithPad()(x, pad_num) | |||
| >>> print(out) | |||
| ([1, 5, 4, 3, 2, 8, 8, 8, 8, 8], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) | |||
| >>> output = P.UniqueWithPad()(x, pad_num) | |||
| >>> print(output) | |||
| (Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]), | |||
| Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -854,13 +857,14 @@ class Split(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> split = P.Split(1, 2) | |||
| >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) | |||
| >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32) | |||
| >>> output = split(x) | |||
| >>> print(output) | |||
| ([[1, 1], | |||
| [2, 2]], | |||
| [[1, 1], | |||
| [2, 2]]) | |||
| (Tensor(shape=[2, 2], dtype=Int32, value= | |||
| [[1, 1], | |||
| [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value= | |||
| [[1, 1], | |||
| [2, 2]])) | |||
| """ | |||
| @prim_attr_register | |||
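| Note: P.Split(1, 2) divides the input into 2 equal chunks along axis 1, matching `np.split`; a NumPy sketch of the first chunk: | |||
| >>> import numpy as np | |||
| >>> x = np.array([[1, 1, 1, 1], [2, 2, 2, 2]], dtype=np.int32) | |||
| >>> print(np.split(x, 2, axis=1)[0]) | |||
| [[1 1] | |||
|  [2 2]] | |||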
| @@ -1025,8 +1029,8 @@ class Fill(PrimitiveWithInfer): | |||
| >>> fill = P.Fill() | |||
| >>> output = fill(mindspore.float32, (2, 2), 1) | |||
| >>> print(output) | |||
| [[1.0, 1.0], | |||
| [1.0, 1.0]] | |||
| [[1. 1.] | |||
| [1. 1.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1156,8 +1160,8 @@ class OnesLike(PrimitiveWithInfer): | |||
| >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) | |||
| >>> output = oneslike(x) | |||
| >>> print(output) | |||
| [[1, 1], | |||
| [1, 1]] | |||
| [[1 1] | |||
| [1 1]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1189,8 +1193,8 @@ class ZerosLike(PrimitiveWithCheck): | |||
| >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32)) | |||
| >>> output = zeroslike(x) | |||
| >>> print(output) | |||
| [[0.0, 0.0], | |||
| [0.0, 0.0]] | |||
| [[0. 0.] | |||
| [0. 0.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1338,7 +1342,8 @@ class InvertPermutation(PrimitiveWithInfer): | |||
| >>> invert = P.InvertPermutation() | |||
| >>> input_data = (3, 4, 0, 2, 1) | |||
| >>> output = invert(input_data) | |||
| >>> output == (2, 4, 3, 0, 1) | |||
| >>> print(output) | |||
| (2, 4, 3, 0, 1) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1400,8 +1405,8 @@ class Argmax(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32) | |||
| >>> index = P.Argmax(output_type=mindspore.int32)(input_x) | |||
| >>> print(index) | |||
| >>> output = P.Argmax(output_type=mindspore.int32)(input_x) | |||
| >>> print(output) | |||
| 1 | |||
| """ | |||
| @@ -1559,9 +1564,9 @@ class ArgMinWithValue(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> input_x = Tensor(np.random.rand(5), mindspore.float32) | |||
| >>> index, output = P.ArgMinWithValue()(input_x) | |||
| >>> print((index, output)) | |||
| 0 0.0496291 | |||
| >>> output = P.ArgMinWithValue()(input_x) | |||
| >>> print(output) | |||
| (Tensor(shape=[], dtype=Int32, value= 2), Tensor(shape=[], dtype=Float32, value= 0.0595638)) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1616,8 +1621,8 @@ class Tile(PrimitiveWithInfer): | |||
| >>> tile = P.Tile() | |||
| >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32) | |||
| >>> multiples = (2, 3) | |||
| >>> result = tile(input_x, multiples) | |||
| >>> print(result) | |||
| >>> output = tile(input_x, multiples) | |||
| >>> print(output) | |||
| [[1. 2. 1. 2. 1. 2.] | |||
| [3. 4. 3. 4. 3. 4.] | |||
| [1. 2. 1. 2. 1. 2.] | |||
| @@ -1693,7 +1698,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer): | |||
| >>> num_segments = 4 | |||
| >>> output = P.UnsortedSegmentSum()(input_x, segment_ids, num_segments) | |||
| >>> print(output) | |||
| [3, 3, 4, 0] | |||
| [3. 3. 4. 0.] | |||
| """ | |||
| @prim_attr_register | |||
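| Note: the printed result matches inputs such as input_x = [1, 2, 3, 4] and segment_ids = [0, 0, 1, 2] (assumed here; the diff omits them). The per-segment accumulation can be sketched with `np.add.at`: | |||
| >>> import numpy as np | |||
| >>> out = np.zeros(4, dtype=np.float32) | |||
| >>> np.add.at(out, [0, 0, 1, 2], np.array([1, 2, 3, 4], dtype=np.float32)) | |||
| >>> print(out) | |||
| [3. 3. 4. 0.] | |||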
| @@ -1767,8 +1772,10 @@ class UnsortedSegmentMin(PrimitiveWithInfer): | |||
| >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32)) | |||
| >>> num_segments = 2 | |||
| >>> unsorted_segment_min = P.UnsortedSegmentMin() | |||
| >>> unsorted_segment_min(input_x, segment_ids, num_segments) | |||
| [[1., 2., 3.], [4., 2., 1.]] | |||
| >>> output = unsorted_segment_min(input_x, segment_ids, num_segments) | |||
| >>> print(output) | |||
| [[1. 2. 3.] | |||
| [4. 2. 1.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1821,8 +1828,10 @@ class UnsortedSegmentMax(PrimitiveWithInfer): | |||
| >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32)) | |||
| >>> num_segments = 2 | |||
| >>> unsorted_segment_max = P.UnsortedSegmentMax() | |||
| >>> unsorted_segment_max(input_x, segment_ids, num_segments) | |||
| [[1., 2., 3.], [4., 5., 6.]] | |||
| >>> output = unsorted_segment_max(input_x, segment_ids, num_segments) | |||
| >>> print(output) | |||
| [[1. 2. 3.] | |||
| [4. 5. 6.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1872,8 +1881,10 @@ class UnsortedSegmentProd(PrimitiveWithInfer): | |||
| >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32)) | |||
| >>> num_segments = 2 | |||
| >>> unsorted_segment_prod = P.UnsortedSegmentProd() | |||
| >>> unsorted_segment_prod(input_x, segment_ids, num_segments) | |||
| [[4., 4., 3.], [4., 5., 6.]] | |||
| >>> output = unsorted_segment_prod(input_x, segment_ids, num_segments) | |||
| >>> print(output) | |||
| [[4. 4. 3.] | |||
| [4. 5. 6.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1935,10 +1946,10 @@ class Concat(PrimitiveWithInfer): | |||
| >>> op = P.Concat() | |||
| >>> output = op((data1, data2)) | |||
| >>> print(output) | |||
| [[0, 1], | |||
| [2, 1], | |||
| [0, 1], | |||
| [2, 1]] | |||
| [[0 1] | |||
| [2 1] | |||
| [0 1] | |||
| [2 1]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -1983,7 +1994,8 @@ class ParallelConcat(PrimitiveWithInfer): | |||
| >>> op = P.ParallelConcat() | |||
| >>> output = op((data1, data2)) | |||
| >>> print(output) | |||
| [[0, 1], [2, 1]] | |||
| [[0 1] | |||
| [2 1]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2066,7 +2078,8 @@ class Pack(PrimitiveWithInfer): | |||
| >>> pack = P.Pack() | |||
| >>> output = pack([data1, data2]) | |||
| >>> print(output) | |||
| [[0, 1], [2, 3]] | |||
| [[0. 1.] | |||
| [2. 3.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2116,7 +2129,8 @@ class Unpack(PrimitiveWithInfer): | |||
| >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32) | |||
| >>> output = unpack(input_x) | |||
| >>> print(output) | |||
| ([1, 1, 1, 1], [2, 2, 2, 2]) | |||
| (Tensor(shape=[4], dtype=Int32, value= [1, 1, 1, 1]), | |||
| Tensor(shape=[4], dtype=Int32, value= [2, 2, 2, 2])) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2169,8 +2183,9 @@ class Slice(PrimitiveWithInfer): | |||
| >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]], | |||
| ... [[3, 3, 3], [4, 4, 4]], | |||
| ... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32)) | |||
| >>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3)) | |||
| >>> print(type) | |||
| >>> slice = P.Slice() | |||
| >>> output = slice(data, (1, 0, 0), (1, 1, 3)) | |||
| >>> print(output) | |||
| [[[3 3 3]]] | |||
| """ | |||
| @@ -2223,7 +2238,8 @@ class ReverseV2(PrimitiveWithInfer): | |||
| >>> op = P.ReverseV2(axis=[1]) | |||
| >>> output = op(input_x) | |||
| >>> print(output) | |||
| [[4, 3, 2, 1], [8, 7, 6, 5]] | |||
| [[4 3 2 1] | |||
| [8 7 6 5]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2261,7 +2277,7 @@ class Rint(PrimitiveWithInfer): | |||
| >>> op = P.Rint() | |||
| >>> output = op(input_x) | |||
| >>> print(output) | |||
| [-2., 0., 2., 2.] | |||
| [-2. 0. 2. 2.] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2321,7 +2337,8 @@ class Select(PrimitiveWithInfer): | |||
| >>> input_cond = Tensor([True, False]) | |||
| >>> input_x = Tensor([2,3], mindspore.float32) | |||
| >>> input_y = Tensor([1,2], mindspore.float32) | |||
| >>> select(input_cond, input_x, input_y) | |||
| >>> output = select(input_cond, input_x, input_y) | |||
| >>> print(output) | |||
| [2. 2.] | |||
| """ | |||
| @@ -2454,10 +2471,8 @@ class StridedSlice(PrimitiveWithInfer): | |||
| ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32) | |||
| >>> slice = P.StridedSlice() | |||
| >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1)) | |||
| >>> output.shape | |||
| (1, 1, 3) | |||
| >>> output | |||
| [[[3, 3, 3]]] | |||
| >>> print(output) | |||
| [[[3. 3. 3.]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2648,13 +2663,13 @@ class DiagPart(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> input_x = Tensor([[1, 0, 0, 0], | |||
| >>> [0, 2, 0, 0], | |||
| >>> [0, 0, 3, 0], | |||
| >>> [0, 0, 0, 4]]) | |||
| ... [0, 2, 0, 0], | |||
| ... [0, 0, 3, 0], | |||
| ... [0, 0, 0, 4]]) | |||
| >>> diag_part = P.DiagPart() | |||
| >>> output = diag_part(input_x) | |||
| >>> print(output) | |||
| [1, 2, 3, 4] | |||
| [1 2 3 4] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2702,10 +2717,10 @@ class Eye(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> eye = P.Eye() | |||
| >>> out_tensor = eye(2, 2, mindspore.int32) | |||
| >>> print(out_tensor) | |||
| [[1, 0], | |||
| [0, 1]] | |||
| >>> output = eye(2, 2, mindspore.int32) | |||
| >>> print(output) | |||
| [[1 0] | |||
| [0 1]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2743,9 +2758,9 @@ class ScatterNd(PrimitiveWithInfer): | |||
| >>> shape = (3, 3) | |||
| >>> output = op(indices, update, shape) | |||
| >>> print(output) | |||
| [[0. 3.2 0.] | |||
| [0. 1.1 0.] | |||
| [0. 0. 0. ]] | |||
| [[0. 3.2 0. ] | |||
| [0. 1.1 0. ] | |||
| [0. 0. 0. ]] | |||
| """ | |||
| @prim_attr_register | |||
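| Note: the output is consistent with indices = [[0, 1], [1, 1]] and update = [3.2, 1.1] (assumed; the diff omits them). ScatterNd writes each update into a zero tensor of the given shape, as in this NumPy sketch: | |||
| >>> import numpy as np | |||
| >>> out = np.zeros((3, 3), dtype=np.float32) | |||
| >>> out[[0, 1], [1, 1]] = [3.2, 1.1]  # scatter updates at the given (row, col) indices | |||
| >>> print(out[0, 1], out[1, 1]) | |||
| 3.2 1.1 | |||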
| @@ -2794,8 +2809,8 @@ class ResizeNearestNeighbor(PrimitiveWithInfer): | |||
| >>> resize = P.ResizeNearestNeighbor((2, 2)) | |||
| >>> output = resize(input_tensor) | |||
| >>> print(output) | |||
| [[[[-0.1 0.3] | |||
| [0.4 0.5 ]]]] | |||
| [[[[-0.1 0.3] | |||
| [ 0.4 0.5]]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2836,7 +2851,7 @@ class GatherNd(PrimitiveWithInfer): | |||
| >>> op = P.GatherNd() | |||
| >>> output = op(input_x, indices) | |||
| >>> print(output) | |||
| [-0.1, 0.5] | |||
| [-0.1 0.5] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2873,8 +2888,9 @@ class TensorScatterUpdate(PrimitiveWithInfer): | |||
| >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32) | |||
| >>> op = P.TensorScatterUpdate() | |||
| >>> output = op(input_x, indices, update) | |||
| [[1.0, 0.3, 3.6], | |||
| [0.4, 2.2, -3.2]] | |||
| >>> print(output) | |||
| [[ 1. 0.3 3.6] | |||
| [ 0.4 2.2 -3.2]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2928,8 +2944,8 @@ class ScatterUpdate(_ScatterOp_Dynamic): | |||
| >>> op = P.ScatterUpdate() | |||
| >>> output = op(input_x, indices, updates) | |||
| >>> print(output) | |||
| [[2.0, 1.2, 1.0], | |||
| [3.0, 1.2, 1.0]] | |||
| [[2. 1.2 1. ] | |||
| [3. 1.2 1. ]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -2969,8 +2985,8 @@ class ScatterNdUpdate(_ScatterNdOp): | |||
| >>> op = P.ScatterNdUpdate() | |||
| >>> output = op(input_x, indices, update) | |||
| >>> print(output) | |||
| [[1. 0.3 3.6] | |||
| [0.4 2.2 -3.2]] | |||
| [[ 1. 0.3 3.6] | |||
| [ 0.4 2.2 -3.2]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3017,7 +3033,8 @@ class ScatterMax(_ScatterOp): | |||
| >>> scatter_max = P.ScatterMax() | |||
| >>> output = scatter_max(input_x, indices, update) | |||
| >>> print(output) | |||
| [[88.0, 88.0, 88.0], [88.0, 88.0, 88.0]] | |||
| [[88. 88. 88.] | |||
| [88. 88. 88.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3058,7 +3075,8 @@ class ScatterMin(_ScatterOp): | |||
| >>> scatter_min = P.ScatterMin() | |||
| >>> output = scatter_min(input_x, indices, update) | |||
| >>> print(output) | |||
| [[0.0, 1.0, 1.0], [0.0, 0.0, 0.0]] | |||
| [[0. 1. 1.] | |||
| [0. 0. 0.]] | |||
| """ | |||
| @@ -3093,7 +3111,8 @@ class ScatterAdd(_ScatterOp_Dynamic): | |||
| >>> scatter_add = P.ScatterAdd() | |||
| >>> output = scatter_add(input_x, indices, updates) | |||
| >>> print(output) | |||
| [[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]] | |||
| [[1. 1. 1.] | |||
| [3. 3. 3.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3170,7 +3189,8 @@ class ScatterMul(_ScatterOp): | |||
| >>> scatter_mul = P.ScatterMul() | |||
| >>> output = scatter_mul(input_x, indices, updates) | |||
| >>> print(output) | |||
| [[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]] | |||
| [[2. 2. 2.] | |||
| [4. 4. 4.]] | |||
| """ | |||
| @@ -3205,7 +3225,8 @@ class ScatterDiv(_ScatterOp): | |||
| >>> scatter_div = P.ScatterDiv() | |||
| >>> output = scatter_div(input_x, indices, updates) | |||
| >>> print(output) | |||
| [[3.0, 3.0, 3.0], [1.0, 1.0, 1.0]] | |||
| [[3. 3. 3.] | |||
| [1. 1. 1.]] | |||
| """ | |||
| @@ -3240,7 +3261,7 @@ class ScatterNdAdd(_ScatterNdOp): | |||
| >>> scatter_nd_add = P.ScatterNdAdd() | |||
| >>> output = scatter_nd_add(input_x, indices, updates) | |||
| >>> print(output) | |||
| [1, 10, 9, 4, 12, 6, 7, 17] | |||
| [ 1. 10. 9. 4. 12. 6. 7. 17.] | |||
| """ | |||
| @@ -3275,7 +3296,7 @@ class ScatterNdSub(_ScatterNdOp): | |||
| >>> scatter_nd_sub = P.ScatterNdSub() | |||
| >>> output = scatter_nd_sub(input_x, indices, updates) | |||
| >>> print(output) | |||
| [1, -6, -3, 4, -2, 6, 7, -1] | |||
| [ 1. -6. -3. 4. -2. 6. 7. -1.] | |||
| """ | |||
| @@ -3307,7 +3328,7 @@ class ScatterNonAliasingAdd(_ScatterNdOp): | |||
| >>> scatter_non_aliasing_add = P.ScatterNonAliasingAdd() | |||
| >>> output = scatter_non_aliasing_add(input_x, indices, updates) | |||
| >>> print(output) | |||
| [1, 10, 9, 4, 12, 6, 7, 17] | |||
| [ 1. 10. 9. 4. 12. 6. 7. 17.] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3347,9 +3368,10 @@ class SpaceToDepth(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32) | |||
| >>> block_size = 2 | |||
| >>> op = P.SpaceToDepth(block_size) | |||
| >>> output = op(x) | |||
| >>> output.asnumpy().shape == (1,12,1,1) | |||
| >>> space_to_depth = P.SpaceToDepth(block_size) | |||
| >>> output = space_to_depth(x) | |||
| >>> print(output.shape) | |||
| (1, 12, 1, 1) | |||
| """ | |||
| @prim_attr_register | |||
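| Note: SpaceToDepth with block_size b rearranges (N, C, H, W) into (N, C*b*b, H/b, W/b); the shape change can be reproduced with a reshape/transpose sketch (the channel ordering below is an assumption, not necessarily the operator's): | |||
| >>> import numpy as np | |||
| >>> x = np.random.rand(1, 3, 2, 2).astype(np.float32) | |||
| >>> n, c, h, w = x.shape | |||
| >>> b = 2 | |||
| >>> y = x.reshape(n, c, h // b, b, w // b, b).transpose(0, 3, 5, 1, 2, 4) | |||
| >>> print(y.reshape(n, c * b * b, h // b, w // b).shape) | |||
| (1, 12, 1, 1) | |||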
| @@ -3404,8 +3426,8 @@ class DepthToSpace(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32) | |||
| >>> block_size = 2 | |||
| >>> op = P.DepthToSpace(block_size) | |||
| >>> output = op(x) | |||
| >>> depth_to_space = P.DepthToSpace(block_size) | |||
| >>> output = depth_to_space(x) | |||
| >>> print(output.shape) | |||
| (1, 3, 2, 2) | |||
| """ | |||
| @@ -3472,9 +3494,12 @@ class SpaceToBatch(PrimitiveWithInfer): | |||
| >>> paddings = [[0, 0], [0, 0]] | |||
| >>> space_to_batch = P.SpaceToBatch(block_size, paddings) | |||
| >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) | |||
| >>> space_to_batch(input_x) | |||
| [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] | |||
| >>> output = space_to_batch(input_x) | |||
| >>> print(output) | |||
| [[[[1.]]] | |||
| [[[2.]]] | |||
| [[[3.]]] | |||
| [[[4.]]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3541,11 +3566,12 @@ class BatchToSpace(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> block_size = 2 | |||
| >>> crops = [[0, 0], [0, 0]] | |||
| >>> op = P.BatchToSpace(block_size, crops) | |||
| >>> batch_to_space = P.BatchToSpace(block_size, crops) | |||
| >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) | |||
| >>> output = op(input_x) | |||
| >>> output = batch_to_space(input_x) | |||
| >>> print(output) | |||
| [[[[1., 2.], [3., 4.]]]] | |||
| [[[[1. 2.] | |||
| [3. 4.]]]] | |||
| """ | |||
| @@ -3620,9 +3646,12 @@ class SpaceToBatchND(PrimitiveWithInfer): | |||
| >>> paddings = [[0, 0], [0, 0]] | |||
| >>> space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings) | |||
| >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) | |||
| >>> space_to_batch_nd(input_x) | |||
| [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] | |||
| >>> output = space_to_batch_nd(input_x) | |||
| >>> print(output) | |||
| [[[[1.]]] | |||
| [[[2.]]] | |||
| [[[3.]]] | |||
| [[[4.]]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3715,7 +3744,8 @@ class BatchToSpaceND(PrimitiveWithInfer): | |||
| >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) | |||
| >>> output = batch_to_space_nd(input_x) | |||
| >>> print(output) | |||
| [[[[1., 2.], [3., 4.]]]] | |||
| [[[[1. 2.] | |||
| [3. 4.]]]] | |||
| """ | |||
| @@ -3791,8 +3821,10 @@ class BroadcastTo(PrimitiveWithInfer): | |||
| >>> shape = (2, 3) | |||
| >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32)) | |||
| >>> broadcast_to = P.BroadcastTo(shape) | |||
| >>> broadcast_to(input_x) | |||
| [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]] | |||
| >>> output = broadcast_to(input_x) | |||
| >>> print(output) | |||
| [[1. 2. 3.] | |||
| [1. 2. 3.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3939,11 +3971,11 @@ class InplaceUpdate(PrimitiveWithInfer): | |||
| >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32) | |||
| >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32) | |||
| >>> inplace_update = P.InplaceUpdate(indices) | |||
| >>> result = inplace_update(x, v) | |||
| >>> print(result) | |||
| [[0.5, 1.0], | |||
| [1.0, 1.5], | |||
| [5.0, 6.0]] | |||
| >>> output = inplace_update(x, v) | |||
| >>> print(output) | |||
| [[0.5 1. ] | |||
| [1. 1.5] | |||
| [5. 6. ]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3997,9 +4029,9 @@ class ReverseSequence(PrimitiveWithInfer): | |||
| >>> reverse_sequence = P.ReverseSequence(seq_dim=1) | |||
| >>> output = reverse_sequence(x, seq_lengths) | |||
| >>> print(output) | |||
| [[1 2 3] | |||
| [5 4 6] | |||
| [9 8 7]] | |||
| [[1. 2. 3.] | |||
| [5. 4. 6.] | |||
| [9. 8. 7.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -4057,16 +4089,16 @@ class EditDistance(PrimitiveWithInfer): | |||
| >>> import mindspore.ops.operations as P | |||
| >>> context.set_context(mode=context.GRAPH_MODE) | |||
| >>> class EditDistance(nn.Cell): | |||
| >>> def __init__(self, hypothesis_shape, truth_shape, normalize=True): | |||
| >>> super(EditDistance, self).__init__() | |||
| >>> self.edit_distance = P.EditDistance(normalize) | |||
| >>> self.hypothesis_shape = hypothesis_shape | |||
| >>> self.truth_shape = truth_shape | |||
| >>> | |||
| >>> def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values): | |||
| >>> return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape, | |||
| >>> truth_indices, truth_values, self.truth_shape) | |||
| >>> | |||
| ... def __init__(self, hypothesis_shape, truth_shape, normalize=True): | |||
| ... super(EditDistance, self).__init__() | |||
| ... self.edit_distance = P.EditDistance(normalize) | |||
| ... self.hypothesis_shape = hypothesis_shape | |||
| ... self.truth_shape = truth_shape | |||
| ... | |||
| ... def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values): | |||
| ... return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape, | |||
| ... truth_indices, truth_values, self.truth_shape) | |||
| ... | |||
| >>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64)) | |||
| >>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32)) | |||
| >>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64)) | |||
| @@ -4074,9 +4106,10 @@ class EditDistance(PrimitiveWithInfer): | |||
| >>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32)) | |||
| >>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64)) | |||
| >>> edit_distance = EditDistance(hypothesis_shape, truth_shape) | |||
| >>> out = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values) | |||
| >>> print(out) | |||
| >>> [[1.0, 1.0], [1.0, 1.0]] | |||
| >>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values) | |||
| >>> print(output) | |||
| [[1. 1.] | |||
| [1. 1.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -4166,9 +4199,15 @@ class Sort(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16) | |||
| >>> sort = P.Sort() | |||
| >>> sort(x) | |||
| ([[1.0, 2.0, 8.0], [3.0, 5.0, 9.0], [4.0, 6.0 ,7.0]], | |||
| [[2, 1, 0], [2, 0, 1], [0, 1, 2]]) | |||
| >>> output = sort(x) | |||
| >>> print(output) | |||
| (Tensor(shape=[3, 3], dtype=Float16, value= | |||
| [[ 1.0000e+00, 2.0000e+00, 8.0000e+00], | |||
| [ 3.0000e+00, 5.0000e+00, 9.0000e+00], | |||
| [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value= | |||
| [[2, 1, 0], | |||
| [2, 0, 1], | |||
| [0, 1, 2]])) | |||
| """ | |||
| @prim_attr_register | |||
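| Note: the values and indices returned by Sort match `np.sort` and `np.argsort` along the last axis; a NumPy sketch: | |||
| >>> import numpy as np | |||
| >>> x = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], dtype=np.float16) | |||
| >>> print(np.sort(x, axis=-1)) | |||
| [[1. 2. 8.] | |||
|  [3. 5. 9.] | |||
|  [4. 6. 7.]] | |||
| >>> print(np.argsort(x, axis=-1)) | |||
| [[2 1 0] | |||
|  [2 0 1] | |||
|  [0 1 2]] | |||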
| @@ -4208,9 +4247,12 @@ class EmbeddingLookup(PrimitiveWithInfer): | |||
| >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32) | |||
| >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32) | |||
| >>> offset = 4 | |||
| >>> out = P.EmbeddingLookup()(input_params, input_indices, offset) | |||
| >>> print(out) | |||
| [[[10, 11], [0 ,0]], [[0, 0], [10, 11]]] | |||
| >>> output = P.EmbeddingLookup()(input_params, input_indices, offset) | |||
| >>> print(output) | |||
| [[[10. 11.] | |||
| [ 0. 0.]] | |||
| [[ 0. 0.] | |||
| [10. 11.]]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -4259,9 +4301,10 @@ class GatherD(PrimitiveWithInfer): | |||
| >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32) | |||
| >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32) | |||
| >>> dim = 1 | |||
| >>> out = P.GatherD()(x, dim, index) | |||
| >>> print(out) | |||
| [[1, 1], [4, 3]] | |||
| >>> output = P.GatherD()(x, dim, index) | |||
| >>> print(output) | |||
| [[1 1] | |||
| [4 3]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -4304,9 +4347,9 @@ class Identity(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64) | |||
| >>> y = P.Identity()(x) | |||
| >>> print(y) | |||
| [1, 2, 3, 4] | |||
| >>> output = P.Identity()(x) | |||
| >>> print(output) | |||
| [1 2 3 4] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -4341,10 +4384,10 @@ class RepeatElements(PrimitiveWithInfer): | |||
| >>> repeat_elements = P.RepeatElements(rep = 2, axis = 0) | |||
| >>> output = repeat_elements(x) | |||
| >>> print(output) | |||
| [[0, 1, 2], | |||
| [0, 1, 2], | |||
| [3, 4, 5], | |||
| [3, 4, 5]], | |||
| [[0 1 2] | |||
| [0 1 2] | |||
| [3 4 5] | |||
| [3 4 5]] | |||
| """ | |||
| @prim_attr_register | |||
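| Note: this is equivalent to `np.repeat` along the chosen axis, assuming x = [[0, 1, 2], [3, 4, 5]] as the input (omitted from the diff): | |||
| >>> import numpy as np | |||
| >>> x = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32) | |||
| >>> print(np.repeat(x, 2, axis=0)) | |||
| [[0 1 2] | |||
|  [0 1 2] | |||
|  [3 4 5] | |||
|  [3 4 5]] | |||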
| @@ -76,16 +76,19 @@ class AllReduce(PrimitiveWithInfer): | |||
| >>> | |||
| >>> init() | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group") | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> return self.allreduce_sum(x) | |||
| >>> | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group") | |||
| ... | |||
| ... def construct(self, x): | |||
| ... return self.allreduce_sum(x) | |||
| ... | |||
| >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) | |||
| >>> net = Net() | |||
| >>> output = net(input_) | |||
| >>> print(output) | |||
| [[4. 5. 6. 0. 0. 0. 0. 0.] | |||
| [0. 0. 0. 0. 0. 0. 0. 0.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -249,17 +252,18 @@ class AllGather(PrimitiveWithInfer): | |||
| >>> from mindspore import Tensor | |||
| >>> | |||
| >>> init() | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.allgather = P.AllGather(group="nccl_world_group") | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> return self.allgather(x) | |||
| >>> | |||
| ... class Net(nn.Cell): | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.allgather = P.AllGather(group="nccl_world_group") | |||
| ... | |||
| ... def construct(self, x): | |||
| ... return self.allgather(x) | |||
| ... | |||
| >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) | |||
| >>> net = Net() | |||
| >>> output = net(input_) | |||
| >>> print(output) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -364,16 +368,17 @@ class ReduceScatter(PrimitiveWithInfer): | |||
| >>> | |||
| >>> init() | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM) | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> return self.reducescatter(x) | |||
| >>> | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.reducescatter = P.ReduceScatter(ReduceOp.SUM) | |||
| ... | |||
| ... def construct(self, x): | |||
| ... return self.reducescatter(x) | |||
| ... | |||
| >>> input_ = Tensor(np.ones([8, 8]).astype(np.float32)) | |||
| >>> net = Net() | |||
| >>> output = net(input_) | |||
| >>> print(output) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -480,16 +485,20 @@ class Broadcast(PrimitiveWithInfer): | |||
| >>> | |||
| >>> init() | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.broadcast = P.Broadcast(1) | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> return self.broadcast((x,)) | |||
| >>> | |||
| >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.broadcast = P.Broadcast(1) | |||
| ... | |||
| ... def construct(self, x): | |||
| ... return self.broadcast((x,)) | |||
| ... | |||
| >>> input_ = Tensor(np.ones([2, 4]).astype(np.int32)) | |||
| >>> net = Net() | |||
| >>> output = net(input_) | |||
| >>> print(output) | |||
| (Tensor(shape=[2, 4], dtype=Int32, value= | |||
| [[1, 1, 1, 1], | |||
| [1, 1, 1, 1]]),) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -51,27 +51,26 @@ class ControlDepend(Primitive): | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.control_depend = P.ControlDepend() | |||
| >>> self.softmax = P.Softmax() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> mul = x * y | |||
| >>> softmax = self.softmax(x) | |||
| >>> ret = self.control_depend(mul, softmax) | |||
| >>> return ret | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.control_depend = P.ControlDepend() | |||
| ... self.softmax = P.Softmax() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... mul = x * y | |||
| ... softmax = self.softmax(x) | |||
| ... ret = self.control_depend(mul, softmax) | |||
| ... return ret | |||
| ... | |||
| >>> x = Tensor(np.ones([4, 5]), dtype=mindspore.float32) | |||
| >>> y = Tensor(np.ones([4, 5]), dtype=mindspore.float32) | |||
| >>> net = Net() | |||
| >>> output = net(x, y) | |||
| >>> print(output) | |||
| [[1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.]] | |||
| >>> print(output.dtype) | |||
| Float32 | |||
| [[1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.] | |||
| [1. 1. 1. 1. 1.]] | |||
| """ | |||
| @prim_attr_register | |||
| @@ -100,29 +99,30 @@ class GeSwitch(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.square = P.Square() | |||
| >>> self.add = P.TensorAdd() | |||
| >>> self.value = Tensor(np.full((1), 3), mindspore.float32) | |||
| >>> self.switch = P.GeSwitch() | |||
| >>> self.merge = P.Merge() | |||
| >>> self.less = P.Less() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> cond = self.less(x, y) | |||
| >>> st1, sf1 = self.switch(x, cond) | |||
| >>> st2, sf2 = self.switch(y, cond) | |||
| >>> add_ret = self.add(st1, st2) | |||
| >>> st3, sf3 = self.switch(self.value, cond) | |||
| >>> sq_ret = self.square(sf3) | |||
| >>> ret = self.merge((add_ret, sq_ret)) | |||
| >>> return ret[0] | |||
| >>> | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.square = P.Square() | |||
| ... self.add = P.TensorAdd() | |||
| ... self.value = Tensor(np.full((1), 3), mindspore.float32) | |||
| ... self.switch = P.GeSwitch() | |||
| ... self.merge = P.Merge() | |||
| ... self.less = P.Less() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... cond = self.less(x, y) | |||
| ... st1, sf1 = self.switch(x, cond) | |||
| ... st2, sf2 = self.switch(y, cond) | |||
| ... add_ret = self.add(st1, st2) | |||
| ... st3, sf3 = self.switch(self.value, cond) | |||
| ... sq_ret = self.square(sf3) | |||
| ... ret = self.merge((add_ret, sq_ret)) | |||
| ... return ret[0] | |||
| ... | |||
| >>> x = Tensor(10.0, dtype=mindspore.float32) | |||
| >>> y = Tensor(5.0, dtype=mindspore.float32) | |||
| >>> net = Net() | |||
| >>> output = net(x, y) | |||
| >>> print(output) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -50,16 +50,17 @@ class ScalarSummary(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class SummaryDemo(nn.Cell): | |||
| >>> def __init__(self,): | |||
| >>> super(SummaryDemo, self).__init__() | |||
| >>> self.summary = P.ScalarSummary() | |||
| >>> self.add = P.TensorAdd() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> name = "x" | |||
| >>> self.summary(name, x) | |||
| >>> x = self.add(x, y) | |||
| >>> return x | |||
| ... def __init__(self,): | |||
| ... super(SummaryDemo, self).__init__() | |||
| ... self.summary = P.ScalarSummary() | |||
| ... self.add = P.TensorAdd() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... name = "x" | |||
| ... self.summary(name, x) | |||
| ... x = self.add(x, y) | |||
| ... return x | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -88,14 +89,15 @@ class ImageSummary(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.summary = P.ImageSummary() | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> name = "image" | |||
| >>> out = self.summary(name, x) | |||
| >>> return out | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.summary = P.ImageSummary() | |||
| ... | |||
| ... def construct(self, x): | |||
| ... name = "image" | |||
| ... out = self.summary(name, x) | |||
| ... return out | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -125,16 +127,17 @@ class TensorSummary(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class SummaryDemo(nn.Cell): | |||
| >>> def __init__(self,): | |||
| >>> super(SummaryDemo, self).__init__() | |||
| >>> self.summary = P.TensorSummary() | |||
| >>> self.add = P.TensorAdd() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> x = self.add(x, y) | |||
| >>> name = "x" | |||
| >>> self.summary(name, x) | |||
| >>> return x | |||
| ... def __init__(self,): | |||
| ... super(SummaryDemo, self).__init__() | |||
| ... self.summary = P.TensorSummary() | |||
| ... self.add = P.TensorAdd() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... x = self.add(x, y) | |||
| ... name = "x" | |||
| ... self.summary(name, x) | |||
| ... return x | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -163,16 +166,17 @@ class HistogramSummary(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class SummaryDemo(nn.Cell): | |||
| >>> def __init__(self,): | |||
| >>> super(SummaryDemo, self).__init__() | |||
| >>> self.summary = P.HistogramSummary() | |||
| >>> self.add = P.TensorAdd() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> x = self.add(x, y) | |||
| >>> name = "x" | |||
| >>> self.summary(name, x) | |||
| >>> return x | |||
| ... def __init__(self,): | |||
| ... super(SummaryDemo, self).__init__() | |||
| ... self.summary = P.HistogramSummary() | |||
| ... self.add = P.TensorAdd() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... x = self.add(x, y) | |||
| ... name = "x" | |||
| ... self.summary(name, x) | |||
| ... return x | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -206,33 +210,34 @@ class InsertGradientOf(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> def clip_gradient(dx): | |||
| >>> ret = dx | |||
| >>> if ret > 1.0: | |||
| >>> ret = 1.0 | |||
| >>> | |||
| >>> if ret < 0.2: | |||
| >>> ret = 0.2 | |||
| >>> | |||
| >>> return ret | |||
| >>> | |||
| ... ret = dx | |||
| ... if ret > 1.0: | |||
| ... ret = 1.0 | |||
| ... | |||
| ... if ret < 0.2: | |||
| ... ret = 0.2 | |||
| ... | |||
| ... return ret | |||
| ... | |||
| >>> clip = P.InsertGradientOf(clip_gradient) | |||
| >>> grad_all = C.GradOperation(get_all=True) | |||
| >>> def InsertGradientOfClipDemo(): | |||
| >>> def clip_test(x, y): | |||
| >>> x = clip(x) | |||
| >>> y = clip(y) | |||
| >>> c = x * y | |||
| >>> return c | |||
| >>> | |||
| >>> @ms_function | |||
| >>> def f(x, y): | |||
| >>> return clip_test(x, y) | |||
| >>> | |||
| >>> def fd(x, y): | |||
| >>> return grad_all(clip_test)(x, y) | |||
| >>> | |||
| >>> print("forward: ", f(1.1, 0.1)) | |||
| >>> print("clip_gradient:", fd(1.1, 0.1)) | |||
| ... def clip_test(x, y): | |||
| ... x = clip(x) | |||
| ... y = clip(y) | |||
| ... c = x * y | |||
| ... return c | |||
| ... | |||
| ... @ms_function | |||
| ... def f(x, y): | |||
| ... return clip_test(x, y) | |||
| ... | |||
| ... def fd(x, y): | |||
| ... return grad_all(clip_test)(x, y) | |||
| ... | |||
| ... print("forward: ", f(1.1, 0.1)) | |||
| ... print("clip_gradient:", fd(1.1, 0.1)) | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
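| The values these prints should report follow from the clip bounds alone: f(1.1, 0.1) = 0.11, and the raw gradients of x * y are (y, x) = (0.1, 1.1), which clip_gradient maps into [0.2, 1.0] as (0.2, 1.0). A quick arithmetic check (my sketch, independent of MindSpore): | |||
| def clip(g): | |||
|     return max(0.2, min(1.0, g)) | |||
| x, y = 1.1, 0.1 | |||
| print("forward:", x * y)             # 0.11000000000000001 | |||
| print("clipped:", clip(y), clip(x))  # 0.2 1.0 | |||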
| @@ -266,21 +271,21 @@ class HookBackward(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> def hook_fn(grad_out): | |||
| >>> print(grad_out) | |||
| >>> | |||
| ... print(grad_out) | |||
| ... | |||
| >>> grad_all = GradOperation(get_all=True) | |||
| >>> hook = P.HookBackward(hook_fn) | |||
| >>> | |||
| >>> def hook_test(x, y): | |||
| >>> z = x * y | |||
| >>> z = hook(z) | |||
| >>> z = z * y | |||
| >>> return z | |||
| >>> | |||
| ... z = x * y | |||
| ... z = hook(z) | |||
| ... z = z * y | |||
| ... return z | |||
| ... | |||
| >>> def backward(x, y): | |||
| >>> return grad_all(hook_test)(x, y) | |||
| >>> | |||
| >>> backward(1, 2) | |||
| ... return grad_all(hook_test)(x, y) | |||
| ... | |||
| >>> output = backward(1, 2) | |||
| >>> print(output) | |||
| """ | |||
| def __init__(self, hook_fn, cell_id=""): | |||
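| The quantities involved are fixed by the chain rule: with z1 = x * y and z = z1 * y, the gradient reaching hook_fn is dz/dz1 = y = 2, and the input gradients at (x, y) = (1, 2) are dz/dx = y**2 = 4 and dz/dy = 2*x*y = 4. A hand check of that arithmetic (my sketch, not the hook machinery): | |||
| x, y = 1.0, 2.0 | |||
| print(y)                  # gradient seen by hook_fn: 2.0 | |||
| print(y ** 2, 2 * x * y)  # input gradients: 4.0 4.0 | |||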
| @@ -316,13 +321,14 @@ class Print(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class PrintDemo(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(PrintDemo, self).__init__() | |||
| >>> self.print = P.Print() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> self.print('Print Tensor x and Tensor y:', x, y) | |||
| >>> return x | |||
| ... def __init__(self): | |||
| ... super(PrintDemo, self).__init__() | |||
| ... self.print = P.Print() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... self.print('Print Tensor x and Tensor y:', x, y) | |||
| ... return x | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -356,15 +362,16 @@ class Assert(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class AssertDemo(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(AssertDemo, self).__init__() | |||
| >>> self.assert1 = P.Assert(summarize=10) | |||
| >>> self.add = P.TensorAdd() | |||
| >>> | |||
| >>> def construct(self, x, y): | |||
| >>> data = self.add(x, y) | |||
| >>> self.assert1(True, [data]) | |||
| >>> return data | |||
| ... def __init__(self): | |||
| ... super(AssertDemo, self).__init__() | |||
| ... self.assert1 = P.Assert(summarize=10) | |||
| ... self.add = P.TensorAdd() | |||
| ... | |||
| ... def construct(self, x, y): | |||
| ... data = self.add(x, y) | |||
| ... self.assert1(True, [data]) | |||
| ... return data | |||
| ... | |||
| """ | |||
| @prim_attr_register | |||
| @@ -55,14 +55,14 @@ class CropAndResize(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class CropAndResizeNet(nn.Cell): | |||
| >>> def __init__(self, crop_size): | |||
| >>> super(CropAndResizeNet, self).__init__() | |||
| >>> self.crop_and_resize = P.CropAndResize() | |||
| >>> self.crop_size = crop_size | |||
| >>> | |||
| >>> def construct(self, x, boxes, box_index): | |||
| >>> return self.crop_and_resize(x, boxes, box_index, self.crop_size) | |||
| >>> | |||
| ... def __init__(self, crop_size): | |||
| ... super(CropAndResizeNet, self).__init__() | |||
| ... self.crop_and_resize = P.CropAndResize() | |||
| ... self.crop_size = crop_size | |||
| ... | |||
| ... def construct(self, x, boxes, box_index): | |||
| ... return self.crop_and_resize(x, boxes, box_index, self.crop_size) | |||
| ... | |||
| >>> BATCH_SIZE = 1 | |||
| >>> NUM_BOXES = 5 | |||
| >>> IMAGE_HEIGHT = 256 | |||
| @@ -74,7 +74,7 @@ class CropAndResize(PrimitiveWithInfer): | |||
| >>> crop_size = (24, 24) | |||
| >>> crop_and_resize = CropAndResizeNet(crop_size=crop_size) | |||
| >>> output = crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_index)) | |||
| >>> output.shape | |||
| >>> print(output.shape) | |||
| (5, 24, 24, 3) | |||
| """ | |||
| @@ -35,6 +35,7 @@ class ScalarCast(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> scalar_cast = P.ScalarCast() | |||
| >>> output = scalar_cast(255.0, mindspore.int32) | |||
| >>> print(output) | |||
| 255 | |||
| """ | |||
| @@ -39,13 +39,14 @@ class Assign(PrimitiveWithCheck): | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y") | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> P.Assign()(self.y, x) | |||
| >>> return self.y | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y") | |||
| ... | |||
| ... def construct(self, x): | |||
| ... P.Assign()(self.y, x) | |||
| ... return self.y | |||
| ... | |||
| >>> x = Tensor([2.0], mindspore.float32) | |||
| >>> net = Net() | |||
| >>> output = net(x) | |||
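| The hunk is cut off before the output, but the result is determined by Assign's contract: the parameter y is overwritten with x and then returned, so net(x) yields a tensor equal to [2.0]. In plain-Python terms (a sketch of the contract, not the primitive): | |||
| y = [1.0] | |||
| x = [2.0] | |||
| y[:] = x  # the effect of P.Assign()(self.y, x) | |||
| print(y)  # [2.0] | |||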
| @@ -78,13 +79,20 @@ class InplaceAssign(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same type as original `variable`. | |||
| Examples: | |||
| >>> def construct(self, x): | |||
| >>> val = x - 1.0 | |||
| >>> ret = x + 2.0 | |||
| >>> return InplaceAssign()(x, val, ret) | |||
| >>> x = Tensor([2.0], mindspore.float32) | |||
| >>> net = Net() | |||
| >>> net(x) | |||
| >>> class Net(nn.Cell): | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.inplace_assign = P.InplaceAssign() | |||
| ... | |||
| ... def construct(self, x): | |||
| ... val = x - 1.0 | |||
| ... ret = x + 2.0 | |||
| ... return self.inplace_assign(x, val, ret) | |||
| ... | |||
| >>> x = Tensor([2.0], mindspore.float32) | |||
| >>> net = Net() | |||
| >>> output = net(x) | |||
| >>> print(output) | |||
| """ | |||
| @prim_attr_register | |||
| def __init__(self): | |||
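| The returned value is plain arithmetic: with x = [2.0], val = x - 1.0 = [1.0] is written back into x, while ret = x + 2.0 = [4.0] is what net(x) returns. A minimal trace of that contract (my sketch; both expressions use the original x, as in the example): | |||
| x = [2.0] | |||
| val = [x[0] - 1.0]  # written into x in place by InplaceAssign | |||
| ret = [x[0] + 2.0]  # passed through as the result | |||
| x[:] = val | |||
| print(ret)  # [4.0] | |||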
| @@ -116,10 +124,10 @@ class BoundingBoxEncode(PrimitiveWithInfer): | |||
| >>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32) | |||
| >>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) | |||
| >>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)) | |||
| >>> boundingbox_encode(anchor_box, groundtruth_box) | |||
| [[5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01] | |||
| >>> output = boundingbox_encode(anchor_box, groundtruth_box) | |||
| >>> print(output) | |||
| [[ 5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01] | |||
| [-1.0000000e+00 2.5000000e-01 0.0000000e+00 4.0551758e-01]] | |||
| """ | |||
| @prim_attr_register | |||
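| The printed deltas can be reproduced with the usual box-delta formula, assuming the "+1" box-size convention (my reconstruction; the kernel may differ in detail): dx = (gcx - acx) / aw, dy = (gcy - acy) / ah, dw = log(gw / aw), dh = log(gh / ah), and the given means/stds leave the values unchanged. The -6.5504e+04 entry is log(0) = -inf saturated at the float16 minimum, because the first ground-truth box is degenerate under this convention: | |||
| import numpy as np | |||
| def encode(anchor, gt): | |||
|     ax1, ay1, ax2, ay2 = anchor | |||
|     gx1, gy1, gx2, gy2 = gt | |||
|     aw, ah = ax2 - ax1 + 1.0, ay2 - ay1 + 1.0 | |||
|     gw, gh = gx2 - gx1 + 1.0, gy2 - gy1 + 1.0 | |||
|     acx, acy = (ax1 + ax2) / 2.0, (ay1 + ay2) / 2.0 | |||
|     gcx, gcy = (gx1 + gx2) / 2.0, (gy1 + gy2) / 2.0 | |||
|     return np.array([(gcx - acx) / aw, (gcy - acy) / ah, np.log(gw / aw), np.log(gh / ah)]) | |||
| print(encode([4, 1, 2, 1], [3, 1, 2, 2]))  # dx=0.5, dy=0.5, dw=-inf (clamped to -65504 in float16), dh=log(2)=0.6931... | |||
| print(encode([2, 2, 2, 3], [1, 2, 1, 4]))  # dx=-1.0, dy=0.25, dw=0.0, dh=log(1.5)=0.4055... | |||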
| @@ -170,9 +178,10 @@ class BoundingBoxDecode(PrimitiveWithInfer): | |||
| >>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) | |||
| >>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), | |||
| ... max_shape=(768, 1280), wh_ratio_clip=0.016) | |||
| >>> boundingbox_decode(anchor_box, deltas) | |||
| [[4.1953125 0. 0. 5.1953125] | |||
| [2.140625 0. 3.859375 60.59375]] | |||
| >>> output = boundingbox_decode(anchor_box, deltas) | |||
| >>> print(output) | |||
| [[ 4.1953125 0. 0. 5.1953125] | |||
| [ 2.140625 0. 3.859375 60.59375 ]] | |||
| """ | |||
| @@ -226,19 +235,19 @@ class CheckValid(PrimitiveWithInfer): | |||
| >>> from mindspore import Tensor | |||
| >>> from mindspore.ops import operations as P | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.check_valid = P.CheckValid() | |||
| >>> def construct(self, x, y): | |||
| >>> valid_result = self.check_valid(x, y) | |||
| >>> return valid_result | |||
| >>> | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.check_valid = P.CheckValid() | |||
| ... def construct(self, x, y): | |||
| ... valid_result = self.check_valid(x, y) | |||
| ... return valid_result | |||
| ... | |||
| >>> bboxes = Tensor(np.linspace(0, 6, 12).reshape(3, 4), mindspore.float32) | |||
| >>> img_metas = Tensor(np.array([2, 1, 3]), mindspore.float32) | |||
| >>> net = Net() | |||
| >>> output = net(bboxes, img_metas) | |||
| >>> print(output) | |||
| [True False False] | |||
| [ True False False] | |||
| """ | |||
| @prim_attr_register | |||
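| The expected mask can be reconstructed by hand: img_metas is (height, width, ratio), so the valid region scales to width * ratio = 3 by height * ratio = 6, and only the first row of bboxes fits inside it. A NumPy re-derivation (the exact boundary convention is my assumption; an off-by-one there does not change this example): | |||
| import numpy as np | |||
| bboxes = np.linspace(0, 6, 12).reshape(3, 4) | |||
| height, width, ratio = 2.0, 1.0, 3.0 | |||
| x1, y1, x2, y2 = bboxes.T | |||
| valid = (x1 >= 0) & (y1 >= 0) & (x2 <= width * ratio - 1) & (y2 <= height * ratio - 1) | |||
| print(valid)  # [ True False False] | |||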
| @@ -292,10 +301,12 @@ class IOU(PrimitiveWithInfer): | |||
| >>> iou = P.IOU() | |||
| >>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16) | |||
| >>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16) | |||
| >>> iou(anchor_boxes, gt_boxes) | |||
| [[0.0, 65504, 65504], | |||
| [0.0, 0.0, 0.0], | |||
| [0.22253, 0.0, 0.0]] | |||
| >>> output = iou(anchor_boxes, gt_boxes) | |||
| >>> print(output) | |||
| [[65000. 65500. -0.] | |||
| [65000. 65500. -0.] | |||
| [ 0. 0. 0.]] | |||
| """ | |||
| @prim_attr_register | |||
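| Because the inputs are random, the matrix shown is illustrative rather than reproducible, and degenerate random boxes can drive entries to the float16 extremes seen above. For reference, the quantity computed is the standard intersection-over-union between every anchor/ground-truth pair; a plain-NumPy version (my sketch, without the "+1" width convention some detectors use): | |||
| import numpy as np | |||
| def iou_matrix(anchors, gts): | |||
|     ix1 = np.maximum(anchors[:, None, 0], gts[None, :, 0]) | |||
|     iy1 = np.maximum(anchors[:, None, 1], gts[None, :, 1]) | |||
|     ix2 = np.minimum(anchors[:, None, 2], gts[None, :, 2]) | |||
|     iy2 = np.minimum(anchors[:, None, 3], gts[None, :, 3]) | |||
|     inter = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None) | |||
|     area_a = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1]) | |||
|     area_g = (gts[:, 2] - gts[:, 0]) * (gts[:, 3] - gts[:, 1]) | |||
|     return inter / (area_a[:, None] + area_g[None, :] - inter)  # 0/0 gives nan where boxes are degenerate | |||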
| @@ -336,19 +347,20 @@ class MakeRefKey(Primitive): | |||
| Examples: | |||
| >>> from mindspore.ops import functional as F | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y") | |||
| >>> self.make_ref_key = P.MakeRefKey("y") | |||
| >>> | |||
| >>> def construct(self, x): | |||
| >>> key = self.make_ref_key() | |||
| >>> ref = F.make_ref(key, x, self.y) | |||
| >>> return ref * x | |||
| >>> | |||
| ... def __init__(self): | |||
| ... super(Net, self).__init__() | |||
| ... self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y") | |||
| ... self.make_ref_key = P.MakeRefKey("y") | |||
| ... | |||
| ... def construct(self, x): | |||
| ... key = self.make_ref_key() | |||
| ... ref = F.make_ref(key, x, self.y) | |||
| ... return ref * x | |||
| ... | |||
| >>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32) | |||
| >>> net = Net() | |||
| >>> net(x) | |||
| >>> output = net(x) | |||
| >>> print(output) | |||
| """ | |||
| @prim_attr_register | |||
| @@ -536,7 +548,9 @@ class PopulationCount(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> population_count = P.PopulationCount() | |||
| >>> x_input = Tensor([0, 1, 3], mindspore.int16) | |||
| >>> population_count(x_input) | |||
| >>> output = population_count(x_input) | |||
| >>> print(output) | |||
| [0 1 2] | |||
| """ | |||
| @prim_attr_register | |||
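| The expected output is easy to verify: PopulationCount returns the number of set bits per element, and 0, 1, 3 have 0, 1, 2 one-bits respectively (3 is 0b11). A cross-check in plain Python (my sketch): | |||
| x = [0, 1, 3] | |||
| print([bin(v).count("1") for v in x])  # [0, 1, 2] | |||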
| @@ -396,16 +396,27 @@ class RandomCategorical(PrimitiveWithInfer): | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| >>> def __init__(self, num_sample): | |||
| >>> super(Net, self).__init__() | |||
| >>> self.random_categorical = P.RandomCategorical(mindspore.int64) | |||
| >>> self.num_sample = num_sample | |||
| >>> def construct(self, logits, seed=0): | |||
| >>> return self.random_categorical(logits, self.num_sample, seed) | |||
| >>> | |||
| ... def __init__(self, num_sample): | |||
| ... super(Net, self).__init__() | |||
| ... self.random_categorical = P.RandomCategorical(mindspore.int64) | |||
| ... self.num_sample = num_sample | |||
| ... def construct(self, logits, seed=0): | |||
| ... return self.random_categorical(logits, self.num_sample, seed) | |||
| ... | |||
| >>> x = np.random.random((10, 5)).astype(np.float32) | |||
| >>> net = Net(8) | |||
| >>> output = net(Tensor(x)) | |||
| >>> print(output) | |||
| [[0 2 1 3 4 2 0 2] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 0 3 4 2 0 2] | |||
| [0 2 1 3 4 3 0 3] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 1 3 4 2 0 2] | |||
| [0 2 0 3 4 2 0 2]] | |||
| """ | |||
| @prim_attr_register | |||
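| Because the operator draws from a pseudo-random stream, the block of indices above is illustrative; other seeds or backends give different samples, though the shape is always (batch, num_sample) = (10, 8). A rough NumPy analogue of the sampling (an assumption about the distribution, not the kernel's algorithm): | |||
| import numpy as np | |||
| rng = np.random.default_rng(0) | |||
| logits = rng.random((10, 5)) | |||
| probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True) | |||
| samples = np.stack([rng.choice(5, size=8, p=p) for p in probs]) | |||
| print(samples.shape)  # (10, 8) | |||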