From 2e5b22adde426cf66275ac872ee37974f49111dd Mon Sep 17 00:00:00 2001 From: wangshuide2020 <7511764+wangshuide2020@user.noreply.gitee.com> Date: Tue, 2 Feb 2021 20:42:54 +0800 Subject: [PATCH] fix the example of Unique, SparseApplyProximalAdagrad, BoundingBoxEncode, SGD and Parameter. --- mindspore/common/parameter.py | 9 ++++----- mindspore/nn/layer/normalization.py | 2 +- mindspore/ops/_utils/utils.py | 6 +++--- mindspore/ops/operations/array_ops.py | 4 ++-- mindspore/ops/operations/nn_ops.py | 12 ++++++------ mindspore/ops/operations/other_ops.py | 6 +++--- 6 files changed, 19 insertions(+), 20 deletions(-) diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py index 9887632165..b5e065f45d 100644 --- a/mindspore/common/parameter.py +++ b/mindspore/common/parameter.py @@ -1,4 +1,4 @@ -# Copyright 2020 Huawei Technologies Co., Ltd +# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -95,17 +95,16 @@ class Parameter(Tensor_): ... def __init__(self): ... super(Net, self).__init__() ... self.matmul = P.MatMul() - ... self.weight = Parameter(Tensor(np.ones((1,2))), name="w", requires_grad=True) + ... self.weight = Parameter(Tensor(np.ones((1, 2)), mindspore.float32), name="w", requires_grad=True) ... ... def construct(self, x): ... out = self.matmul(self.weight, x) ... 
return out >>> net = Net() - >>> x = Tensor(np.ones((2,1))) + >>> x = Tensor(np.ones((2, 1)), mindspore.float32) >>> print(net(x)) [[2.]] - >>> net.weight.set_data(Tensor(np.zeros((1,2)))) - Parameter (name=w, shape=(1, 2), dtype=Float64, requires_grad=True) + >>> _ = net.weight.set_data(Tensor(np.zeros((1, 2)), mindspore.float32)) >>> print(net(x)) [[0.]] """ diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 91924541e2..71b0f4551a 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -269,7 +269,7 @@ class BatchNorm1d(_BatchNorm): Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out})`. Supported Platforms: - ``Ascend`` ``GPU`` + ``Ascend`` Raises: TypeError: If `num_features` is not an int. diff --git a/mindspore/ops/_utils/utils.py b/mindspore/ops/_utils/utils.py index e20fef9746..89d0a4550c 100644 --- a/mindspore/ops/_utils/utils.py +++ b/mindspore/ops/_utils/utils.py @@ -1,4 +1,4 @@ -# Copyright 2020 Huawei Technologies Co., Ltd +# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ def get_broadcast_shape(x_shape, y_shape, prim_name): List, the shape that broadcast between tensor x and tensor y. Raises: - ValueError: If tensor x and tensor y are not equal and could't broadcast. + ValueError: If tensor x and tensor y are not equal and couldn't broadcast. 
Examples: >>> x_shape = [1, 2, 3] @@ -66,7 +66,7 @@ def get_broadcast_shape(x_shape, y_shape, prim_name): def get_concat_offset(x_shp, x_type, axis, prim_name): """for concat and concatoffset check args and compute offset""" - validator.check_value_type("shape", x_shp, [tuple], prim_name) + validator.check_value_type("shape", x_shp, [tuple, list], prim_name) validator.check_positive_int(len(x_shp), "input_x rank", prim_name) validator.check_subclass("shape0", x_type[0], mstype.tensor, prim_name) validator.check_positive_int(len(x_shp[0]), "len of x_shp[0]", prim_name) diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index d313ba0e19..0248673da6 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -758,14 +758,14 @@ class Unique(Primitive): >>> class UniqueNet(nn.Cell): ... def __init__(self): ... super(UniqueNet, self).__init__() - ... self.unique_op = P.Unique() + ... self.unique_op = ops.Unique() ... ... def construct(self, x): ... output, indices = self.unique_op(x) ... return output, indices ... 
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32) - >>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + >>> context.set_context(mode=context.GRAPH_MODE) >>> net = UniqueNet() >>> output = net(x) >>> print(output) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 82cc291de4..c354d855e0 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2677,7 +2677,7 @@ class SGD(PrimitiveWithCheck): >>> momentum = Tensor(0.1, mindspore.float32) >>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32) >>> output = sgd(parameters, gradient, learning_rate, accum, momentum, stat) - >>> print(output[0]) + >>> print(output) (Tensor(shape=[4], dtype=Float32, value= [ 1.98989999e+00, -4.90300000e-01, 1.69520009e+00, 3.98009992e+00]),) """ @@ -5629,14 +5629,14 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck): ... >>> net = Net() >>> grad = Tensor(np.array([[1, 1], [1, 1]], np.float32)) - >>> indices = Tensor(np.array([0], np.int32)) + >>> indices = Tensor(np.array([0, 1], np.int32)) >>> output = net(grad, indices) >>> print(output) (Tensor(shape=[2, 2], dtype=Float32, value= - [[ 2.97499990e+00, 6.07499981e+00], - [ 0.00000000e+00, 1.87500000e+00]]), Tensor(shape=[2, 2], dtype=Float32, value= - [[ 6.40000000e+01, 6.40000000e+01], - [ 6.40000000e+01, 6.40000000e+01]])) + [[ 2.09999990e+00, 5.19999981e+00], + [ 0.00000000e+00, 1.00000000e+00]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 1.00000000e+00, 1.00000000e+00], + [ 1.00000000e+00, 1.00000000e+00]])) """ __mindspore_signature__ = ( diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index ac36bc2a78..99c121ebe4 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -1,4 +1,4 @@ -# Copyright 2020 Huawei Technologies Co., Ltd +# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License,
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -133,8 +133,8 @@ class BoundingBoxEncode(PrimitiveWithInfer): >>> boundingbox_encode = ops.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)) >>> output = boundingbox_encode(anchor_box, groundtruth_box) >>> print(output) - [[ -1. 0.25 0. 0.40551758] - [ -1. 0.25 0. 0.40551758]] + [[ -1. 0.25 0. 0.40551758] + [ -1. 0.25 0. 0.40551758]] """ @prim_attr_register