@@ -74,7 +74,7 @@ class Tensor(Tensor_):
         >>> assert t3.dtype == ms.float32
     """
-    def __init__(self, input_data=None, dtype=None, shape=None, init=None):
+    def __init__(self, input_data=None, dtype=None, shape=None, init=None, check_zero_dims=True):
         self.init_finished = False
         # If input data is numpy number, convert it to np array
         if isinstance(input_data, np_types):
@@ -92,6 +92,14 @@ class Tensor(Tensor_):
         if isinstance(shape, numbers.Number):
             shape = (shape,)
+        if check_zero_dims:
+            if input_data is not None and isinstance(input_data, (tuple, list, np.ndarray)) \
+                    and np.array(input_data).ndim > 1 and np.array(input_data).size == 0:
+                raise ValueError("input_data can not contain zero dimension.")
+            if shape is not None:
+                if 0 in shape:
+                    raise ValueError("Shape can not contain zero value.")
         # If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
         if init is None:
             validator.check_value_type('input_data', input_data, (Tensor_, np.ndarray, list, tuple, float, int, bool),
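
Note: a minimal usage sketch of the new check_zero_dims flag, assuming this patch is applied (behavior inferred from the check added above; not part of the diff itself):

import numpy as np
import mindspore as ms

# A 1-D empty input is still accepted: the check only rejects ndim > 1 with size == 0.
t = ms.Tensor([])

# Multi-dimensional input with a zero dimension is rejected by default.
try:
    ms.Tensor(np.ones((1, 0, 3)))
except ValueError as err:
    print(err)  # input_data can not contain zero dimension.

# Internal callers (such as operator infer routines) can opt out of the check.
empty = ms.Tensor(np.array([]).reshape((1, 0, 3)), ms.float32, check_zero_dims=False)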
@@ -465,9 +465,7 @@ Tensor::Tensor(const Tensor &tensor)
       cache_tensor_ptr_(tensor.cache_tensor_ptr_),
       hashmap_tensor_ptr_(tensor.hashmap_tensor_ptr_),
       padding_type_(tensor.padding_type()),
-      device_event_(tensor.device_event_) {
-  CheckShape(tensor.shape_);
-}
+      device_event_(tensor.device_event_) {}
 
 Tensor::Tensor(const Tensor &tensor, TypeId data_type)
     : MetaTensor(data_type, tensor.shape_),
@@ -481,43 +479,29 @@ Tensor::Tensor(const Tensor &tensor, TypeId data_type)
       cache_tensor_ptr_(tensor.cache_tensor_ptr_),
       hashmap_tensor_ptr_(tensor.hashmap_tensor_ptr_),
       padding_type_(tensor.padding_type()),
-      device_event_(tensor.device_event_) {
-  CheckShape(tensor.shape_);
-}
+      device_event_(tensor.device_event_) {}
 
 Tensor::Tensor(TypeId data_type, const ShapeVector &shape, TensorDataPtr data)
-    : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {
-  CheckShape(shape);
-}
+    : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {}
 
 Tensor::Tensor(TypeId data_type, const ShapeVector &shape)
-    : Tensor(data_type, shape, MakeTensorData(data_type, shape)) {
-  CheckShape(shape);
-}
+    : Tensor(data_type, shape, MakeTensorData(data_type, shape)) {}
 
 Tensor::Tensor(TypeId data_type, const ShapeVector &shape, void *data, size_t data_len)
-    : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, data_len)) {
-  CheckShape(shape);
-}
+    : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, data_len)) {}
 
 Tensor::Tensor(TypeId data_type, const ShapeVector &shape, void *data, TypeId src_data_type)
-    : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, src_data_type)) {
-  CheckShape(shape);
-}
+    : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, src_data_type)) {}
 
 Tensor::Tensor(const std::vector<int64_t> &input, const TypePtr &data_type)
     : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {static_cast<int>(input.size())}),
       data_(MakeTensorData(data_type_, shape_, input.data(), input.size())),
-      id_(MakeId()) {
-  CheckShape(shape_);
-}
+      id_(MakeId()) {}
 
 Tensor::Tensor(const std::vector<double> &input, const TypePtr &data_type)
     : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {static_cast<int>(input.size())}),
       data_(MakeTensorData(data_type_, shape_, input.data(), input.size())),
-      id_(MakeId()) {
-  CheckShape(shape_);
-}
+      id_(MakeId()) {}
 
 Tensor::Tensor(int64_t input, const TypePtr &data_type)
     : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {}),
@@ -622,17 +606,6 @@ std::string Tensor::ToStringRepr() const {
   return buf.str();
 }
 
-void Tensor::CheckShape(const ShapeVector &shape) const {
-  // Check tensor's shape, ignore one-dimensional tensor, including empty tensor with shape=(0,).
-  if (shape.size() > 1) {
-    for (const auto &s : shape) {
-      if (s == 0) {
-        MS_EXCEPTION(ValueError) << "Zero is not supported in the shape of Tensor. ";
-      }
-    }
-  }
-}
-
 void Tensor::data_sync(bool need_wait) const {
   if (need_wait) {
     Wait();
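
Note: with CheckShape() and all of its call sites removed, the C++ Tensor constructors no longer reject shapes that contain 0. The zero-dimension validation now lives only in the Python Tensor.__init__ shown earlier, where trusted internal callers can bypass it with check_zero_dims=False while user-facing construction keeps the default check.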
@@ -2981,7 +2981,8 @@ class StridedSlice(PrimitiveWithInfer):
         ret_shape = self._compute_slicing_shape(x['shape'], begin_v, end_v, strides_v)
-        value = None if all(ret_shape) else Tensor(np.array([]).reshape(ret_shape), x['dtype'].element_type())
+        value = None if all(ret_shape) else Tensor(np.array([]).reshape(ret_shape), x['dtype'].element_type(),
+                                                   check_zero_dims=False)
         if "max_value" in x and "min_value" in x:
             validator.check_value_type("min_value", x["min_value"], [tuple, list], self.name)
             validator.check_value_type("max_value", x["max_value"], [tuple, list], self.name)
@@ -67,17 +67,6 @@ def test_tensor():
     assert isinstance(t4, ms.Tensor)
     assert t4.dtype == ms.int64
-
-def test_tensor_empty():
-    t = ms.Tensor(np.ones(0), ms.float32)
-    assert isinstance(t, ms.Tensor)
-    assert t.shape == (0,)
-
-
-def test_tensor_shape_has_zero():
-    with pytest.raises(ValueError):
-        t = ms.Tensor(np.ones((1, 0)), ms.float32)
-        print(t)
 
 
 def test_tensor_type_float16():
     t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16))
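
Note: the two tests deleted above (1-D empty tensor and zero-in-shape rejection) appear to be superseded by the dedicated zero-dimension test file added below, which covers the same behaviors against the new error messages.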
@@ -0,0 +1,59 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import pytest
+
+import mindspore
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops import operations as P
+from mindspore.common.initializer import One
+
+context.set_context(mode=context.GRAPH_MODE)
+
+
+def test_zero_dimension_list():
+    Tensor([])
+    with pytest.raises(ValueError) as ex:
+        Tensor([[]])
+    assert "input_data can not contain zero dimension." in str(ex.value)
+
+
+def test_zero_dimension_np_array():
+    with pytest.raises(ValueError) as ex:
+        Tensor(np.ones((1, 0, 3)))
+    assert "input_data can not contain zero dimension." in str(ex.value)
+
+
+def test_zero_dimension_with_zero_shape():
+    with pytest.raises(ValueError) as ex:
+        Tensor(shape=(1, 0, 3), dtype=mindspore.float32, init=One())
+    assert "Shape can not contain zero value." in str(ex.value)
+
+
+def test_zero_dimension_with_operator():
+    class Net(nn.Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+            self.strided_slice = P.StridedSlice()
+
+        def construct(self, x):
+            a = self.strided_slice(x, (2, 4, 4), (-1, 2, 1), (1, 1, 1))
+            return a
+
+    x = Tensor(np.ones((1, 3, 3)))
+    net = Net()
+    net(x)
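
Note: test_zero_dimension_with_operator appears designed to exercise the StridedSlice change above: with input shape (1, 3, 3) and begin (2, 4, 4), the computed slice is empty, so in GRAPH_MODE the shape-inference path builds its empty placeholder tensor with check_zero_dims=False rather than raising. These cases can presumably be run in isolation with something like pytest -k zero_dimension against this test file.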