@@ -132,7 +132,7 @@ class Dropout(Cell):
         >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
         >>> net = nn.Dropout(keep_prob=0.8)
         >>> net.set_train()
-        Dropout<keep_prob=0.8, dtype=Float32>
+        Dropout<keep_prob=0.8>
         >>> output = net(x)
         >>> print(output.shape)
         (2, 2, 3)
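
The doctest above only exercises the output shape, but the training-mode behaviour behind keep_prob follows standard inverted dropout: survivors are scaled by 1/keep_prob so the expected value is preserved. A minimal numpy sketch (the helper name dropout_sketch is illustrative, not MindSpore API):

    import numpy as np

    def dropout_sketch(x, keep_prob=0.8, training=True):
        # Inverted dropout: each element survives with probability keep_prob,
        # and survivors are scaled by 1/keep_prob to preserve the expectation.
        if not training:
            return x
        mask = np.random.uniform(size=x.shape) < keep_prob
        return x * mask / keep_prob

    out = dropout_sketch(np.ones([2, 2, 3]))
    print(out.shape)  # (2, 2, 3); entries are 0.0 or 1.25
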
@@ -42,9 +42,9 @@ class _CellListBase():
     The sequential cell may be iterated using the construct method using for-in statement.
     But there are some scenarios that the construct method built-in does not fit.
     For convenience, we provide an interface that indicates the sequential
-    cell may be interpretated as list of cells, so it can be accessed using
+    cell may be interpreted as list of cells, so it can be accessed using
     iterator or subscript when a sequential cell instantiate is accessed
-    by iterator or subscript , it will be interpretated as a list of cells.
+    by iterator or subscript , it will be interpreted as a list of cells.
     """
     def __init__(self):
         self.__cell_as_list__ = True
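
A short usage sketch of the list-style access this docstring describes, assuming the usual SequentialCell subscript/iterator support that __cell_as_list__ enables:

    import mindspore.nn as nn

    seq = nn.SequentialCell([nn.Conv2d(3, 2, 3), nn.ReLU()])
    print(len(seq))               # 2
    print(type(seq[0]).__name__)  # Conv2d, via subscript access
    for cell in seq:              # iterating yields the contained cells
        print(type(cell).__name__)
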
@@ -151,7 +151,7 @@ class SequentialCell(Cell):
         """Appends a given cell to the end of the list.

         Examples:
-            >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
+            >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid', weight_init="ones")
             >>> bn = nn.BatchNorm2d(2)
             >>> relu = nn.ReLU()
             >>> seq = nn.SequentialCell([conv, bn])
@@ -159,10 +159,10 @@ class SequentialCell(Cell):
             >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
             >>> output = seq(x)
             >>> print(output)
-            [[[[0.08789019 0.08789019]
-              [0.08789019 0.08789019]]
-             [[0.07690391 0.07690391]
-              [0.07690391 0.07690391]]]]
+            [[[[26.999863 26.999863]
+              [26.999863 26.999863]]
+             [[26.999863 26.999863]
+              [26.999863 26.999863]]]]
         """
         if _valid_cell(cell):
             self._cells[str(len(self))] = cell
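
The new expected output is deterministic: with weight_init="ones", every valid-convolution output is the sum of 3 x 3 x 3 = 27 ones, and BatchNorm2d with its freshly initialized moving mean 0 and moving variance 1 maps that to 27 / sqrt(1 + eps) ~ 26.999863. This assumes the default eps of 1e-5 and that the cell runs in inference mode, where the moving statistics are used. A quick numpy check of the arithmetic:

    import numpy as np

    conv_out = float(np.sum(np.ones((3, 3, 3))))     # 27.0 per output location
    bn_out = (conv_out - 0.0) / np.sqrt(1.0 + 1e-5)  # moving mean 0, moving variance 1
    print(bn_out)  # ~26.999865 in float64; the doctest prints the float32 value
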
@@ -390,6 +390,7 @@ class SampledSoftmaxLoss(_Loss):
         ``GPU``

     Examples:
+        >>> mindspore.set_seed(1)
         >>> loss = nn.SampledSoftmaxLoss(num_sampled=4, num_classes=7, num_true=1)
         >>> weights = Tensor(np.random.randint(0, 9, [7, 10]), mindspore.float32)
         >>> biases = Tensor(np.random.randint(0, 9, [7]), mindspore.float32)
@@ -397,7 +398,7 @@ class SampledSoftmaxLoss(_Loss):
         >>> inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)
         >>> output = loss(weights, biases, labels, inputs)
         >>> print(output)
-        [ 4.0181947 46.050743 7.0009117]
+        [4.6051701e+01 1.4000047e+01 6.1989022e-06]
     """

     def __init__(self, num_sampled, num_classes, num_true=1,
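
As a rough picture of what this loss computes, a simplified numpy sketch: each row is scored against its true class plus a few shared sampled negatives, and ordinary softmax cross-entropy is applied over that small subset. The real op also applies a log-expected-count correction and handles accidental hits among the sampled negatives, which this sketch omits:

    import numpy as np

    def sampled_softmax_loss_sketch(weights, biases, labels, inputs, sampled):
        losses = []
        for x, y in zip(inputs, labels):
            classes = np.concatenate([[y], sampled])         # true class first
            logits = weights[classes] @ x + biases[classes]  # scores over the subset
            logits -= logits.max()                           # numerical stability
            losses.append(np.log(np.exp(logits).sum()) - logits[0])
        return np.array(losses)                              # per-row NLL of the true class
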
@@ -154,10 +154,11 @@ def clip_by_global_norm(x, clip_norm=1.0, use_norm=None):
         >>> input_x = (Tensor(x1), Tensor(x2))
         >>> out = clip_by_global_norm(input_x, 1.0)
         >>> print(out)
-        ([[ 2.98142403e-01,  4.47213590e-01],
-          [ 1.49071202e-01,  2.98142403e-01]],
+        (Tensor(shape=[2, 2], dtype=Float32, value=
+        [[ 2.98142403e-01,  4.47213590e-01],
+         [ 1.49071202e-01,  2.98142403e-01]]), Tensor(shape=[2, 2], dtype=Float32, value=
         [[ 1.49071202e-01,  5.96284807e-01],
-         [ 4.47213590e-01,  1.49071202e-01]])
+         [ 4.47213590e-01,  1.49071202e-01]]))
     """
     clip_norm = _check_value(clip_norm)
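
The printed values are consistent with inputs x1 = [[2, 3], [1, 2]] and x2 = [[1, 4], [3, 1]] (an assumption, since the doctest's setup lines are not shown in this hunk): the global norm is sqrt(45) ~ 6.708, so every element is scaled by 1/6.708 ~ 0.149071. A numpy sketch of the clipping rule:

    import numpy as np

    def clip_by_global_norm_sketch(tensors, clip_norm=1.0):
        # Scale all tensors by clip_norm / max(global_norm, clip_norm) so the
        # joint L2 norm never exceeds clip_norm.
        global_norm = np.sqrt(sum(np.sum(np.square(t)) for t in tensors))
        scale = clip_norm / max(global_norm, clip_norm)
        return tuple(t * scale for t in tensors)

    x1 = np.array([[2., 3.], [1., 2.]])
    x2 = np.array([[1., 4.], [3., 1.]])
    print(clip_by_global_norm_sketch((x1, x2))[0])
    # [[0.2981424  0.44721359]
    #  [0.1490712  0.2981424 ]]  -- matches the doctest values
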
@@ -148,7 +148,7 @@ def _axes_int_check(x1_shape, x2_shape, axes):
 def _validate_axes(x1_shape, x2_shape, axes):
     """
     Checks for axes having the correct length according to input, for any value in axis
-    being out of range with given shape and also checking for compatiable axes values
+    being out of range with given shape and also checking for compatible axes values
     with given inputs.
     """
     shapes = [x1_shape, x2_shape]
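
In plain terms, for axes = ((a1, ...), (b1, ...)) the checks are: both axis lists have equal length, every listed axis is in range for its tensor's rank, and each paired dimension matches. A plain-Python restatement of the assumed semantics, not the helper's exact code:

    x1_shape, x2_shape, axes = (2, 3), (3, 4), ((1,), (0,))

    assert len(axes[0]) == len(axes[1]), "axis lists must pair up"
    for shape, ax in zip((x1_shape, x2_shape), axes):
        assert all(-len(shape) <= a < len(shape) for a in ax), "axis out of range"
    assert all(x1_shape[a] == x2_shape[b] for a, b in zip(*axes)), "contracted dims must match"
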
@@ -250,7 +250,7 @@ def tensor_dot(x1, x2, axes):
     x2_type = F.dtype(x2)
     axes = _check_axes(axes)
     _typecheck_input(x1_type, x2_type)
-    # input compability check & axes format update
+    # input compatibility check & axes format update
     axes = _axes_int_check(x1_shape, x2_shape, axes)
     _validate_axes(x1_shape, x2_shape, axes)
     x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape(x1_shape, axes, 0)
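
The reshape/transpose pairs computed here flatten the contracted and free dimensions so the whole contraction reduces to a single matmul; numpy.tensordot performs the equivalent contraction and is a convenient reference for the semantics:

    import numpy as np

    x1 = np.ones((2, 3, 4), dtype=np.float32)
    x2 = np.ones((4, 3, 5), dtype=np.float32)
    # Contract x1's axes (1, 2) against x2's axes (1, 0): result shape (2, 5),
    # every entry 3 * 4 = 12, the size of the contracted block.
    print(np.tensordot(x1, x2, axes=((1, 2), (1, 0))))
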
@@ -297,7 +297,7 @@ def dot(x1, x2):
     Examples:
         >>> input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
         >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
-        >>> output = C.Dot(input_x1, input_x2)
+        >>> output = C.dot(input_x1, input_x2)
         >>> print(output)
         [[[3. 3.]]
          [[3. 3.]]]
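
The corrected call matches numpy's dot semantics for these shapes: the last axis of x1 is contracted with the second-to-last axis of x2, giving shape (2, 1, 2). A numpy check of the doctest values:

    import numpy as np

    out = np.dot(np.ones((2, 3), dtype=np.float32), np.ones((1, 3, 2), dtype=np.float32))
    print(out.shape)  # (2, 1, 2)
    print(out)        # every entry is 3.0, as in the doctest
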
@@ -1938,7 +1938,7 @@ class UnsortedSegmentMin(PrimitiveWithCheck):
         Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.

     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU``

     Examples:
         >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
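
Reference semantics sketched in numpy; the segment_ids [0, 1, 1] and num_segments 2 below are assumed for illustration, since the doctest is cut off here, and empty segments are assumed to be filled with the dtype's maximum value:

    import numpy as np

    def unsorted_segment_min_sketch(x, segment_ids, num_segments):
        out = np.full((num_segments,) + x.shape[1:], np.finfo(x.dtype).max, dtype=x.dtype)
        for row, seg in zip(x, segment_ids):
            out[seg] = np.minimum(out[seg], row)  # elementwise min within each segment
        return out

    x = np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]], dtype=np.float32)
    print(unsorted_segment_min_sketch(x, [0, 1, 1], 2))
    # [[1. 2. 3.]
    #  [4. 2. 1.]]
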
@@ -4110,13 +4110,15 @@ class MatrixInverse(PrimitiveWithInfer):
         Tensor, has the same type and shape as input `x`.

     Examples:
-        >>> x = Tensor(np.random.uniform(-2, 2, (2, 2, 2)), mstype.float32)
+        >>> mindspore.set_seed(1)
+        >>> x = Tensor(np.random.uniform(-2, 2, (2, 2, 2)), mindspore.float32)
         >>> matrix_inverse = P.MatrixInverse(adjoint=False)
-        >>> result = matrix_inverse(x)
-        [[[ 0.6804  0.8111]
-          [-2.3257 -1.0616]
-         [[-0.7074 -0.4963]
-          [0.1896 -1.5285]]]
+        >>> output = matrix_inverse(x)
+        >>> print(output)
+        [[[-0.39052644 -0.43528939]
+          [ 0.98761106 -0.16393748]]
+         [[ 0.52641493 -1.3895369 ]
+          [-1.0693996   1.2040523 ]]]
     """

     @prim_attr_register
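
A quick numpy sanity check of the example; numpy's RNG differs from MindSpore's, so the seeded values will not match the doctest, and this only verifies the inverse property itself:

    import numpy as np

    np.random.seed(1)
    x = np.random.uniform(-2, 2, (2, 2, 2)).astype(np.float32)
    inv = np.linalg.inv(x)  # batched inverse, the numpy analogue of MatrixInverse(adjoint=False)
    print(np.allclose(x @ inv, np.eye(2), atol=1e-4))  # True: x @ x^-1 ~ identity
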