/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ir/tensor.h"

#include <atomic>
#include <functional>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "device/device_address.h"
#include "pipeline/static_analysis/abstract_value.h"

namespace mindspore {
namespace tensor {
// Generate a process-unique tensor id of the form "T<n>".
static std::string MakeId() {
  // Use atomic to make id generator thread safe.
  static std::atomic<uint64_t> last_id{1};
  return "T" + std::to_string(last_id.fetch_add(1, std::memory_order_relaxed));
}

// Return the type id of 'data_type', or 'defaultTypeId' when it is null.
static TypeId TypeIdOf(const TypePtr &data_type, TypeId defaultTypeId) {
  return data_type ? data_type->type_id() : defaultTypeId;
}

// Number of elements described by 'shape' (product of dimensions; 1 for an empty/scalar shape).
static size_t SizeOf(const std::vector<int> &shape) {
  return std::accumulate(shape.begin(), shape.end(), size_t(1), std::multiplies<size_t>());
}

// Copy raw 'data', interpreted as elements of 'data_type', into a vector<T>,
// converting element by element.
// NOTE(review): assumes 'data' holds at least SizeOf(shape) elements of
// 'data_type' — the caller must guarantee the buffer size.
template <typename T>
std::vector<T> CopyData(const std::vector<int> &shape, void *data, TypeId data_type) {
  const size_t count = SizeOf(shape);
  switch (data_type) {
    case kNumberTypeBool:
    case kNumberTypeUInt8: {
      auto buf = static_cast<uint8_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeInt8: {
      auto buf = static_cast<int8_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeInt16: {
      auto buf = static_cast<int16_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeInt32: {
      auto buf = static_cast<int32_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeInt64: {
      auto buf = static_cast<int64_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeUInt16: {
      auto buf = static_cast<uint16_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeUInt32: {
      auto buf = static_cast<uint32_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeUInt64: {
      auto buf = static_cast<uint64_t *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeFloat16: {
      auto buf = static_cast<float16 *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeFloat32: {
      auto buf = static_cast<float *>(data);
      return std::vector<T>(buf, buf + count);
    }
    case kNumberTypeFloat64: {
      auto buf = static_cast<double *>(data);
      return std::vector<T>(buf, buf + count);
    }
    default:
      break;
  }
  MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << ".";
}

// Copy 'data_len' bytes of raw data into a vector<T>; the byte length must
// match the shape exactly, otherwise an exception is raised.
template <typename T>
std::vector<T> CopyData(const std::vector<int> &shape, void *data, size_t data_len) {
  size_t size = SizeOf(shape);
  if (size * sizeof(T) != data_len) {
    MS_LOG(EXCEPTION) << "Incorrect tensor input data length " << data_len << ", expect " << size * sizeof(T)
                      << " item size " << sizeof(T);
  }
  auto buf = static_cast<T *>(data);
  return {buf, buf + size};
}

// Tensor data implementation.
template class TensorDataImpl : public TensorData { public: explicit TensorDataImpl(const std::vector &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {} TensorDataImpl(const std::vector &shape, void *data, size_t data_len) : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData(shape, data, data_len)) {} TensorDataImpl(const std::vector &shape, void *data, TypeId data_type) : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData(shape, data, data_type)) {} template TensorDataImpl(const std::vector &shape, InputIt first, InputIt last) : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(first, last) {} template TensorDataImpl(const std::vector &shape, Scalar scalar) : ndim_(shape.size()), data_size_(SizeOf(shape)), data_({static_cast(scalar)}) {} ssize_t size() const override { return static_cast(data_size_); } ssize_t itemsize() const override { return static_cast(sizeof(T)); } ssize_t nbytes() const override { return size() * itemsize(); } ssize_t ndim() const override { return static_cast(ndim_); } void *data() override { static std::vector empty_data(1); if (data_size_ == 0) { // Prevent null pointer for empty shape. return empty_data.data(); } if (data_.empty()) { // Lazy allocation. data_.resize(data_size_); } return data_.data(); } bool equals(const TensorData &other) const override { auto ptr = dynamic_cast *>(&other); if (ptr) { return (ptr == this) || ((ndim_ == ptr->ndim_) && (data_size_ == ptr->data_size_) && (data_ == ptr->data_)); } return false; } std::string ToString() const override { std::ostringstream ss; ss << '['; for (auto value : data_) { ss << value << ','; } ss << ']'; return ss.str(); } private: size_t ndim_{0}; size_t data_size_{0}; std::vector data_; }; template TensorDataPtr MakeTensorData(TypeId data_type, const std::vector &shape, Args... 
args) { switch (data_type) { case kNumberTypeBool: case kNumberTypeUInt8: return std::make_shared>(shape, args...); case kNumberTypeInt8: return std::make_shared>(shape, args...); case kNumberTypeInt16: return std::make_shared>(shape, args...); case kNumberTypeInt32: return std::make_shared>(shape, args...); case kNumberTypeInt64: return std::make_shared>(shape, args...); case kNumberTypeUInt16: return std::make_shared>(shape, args...); case kNumberTypeUInt32: return std::make_shared>(shape, args...); case kNumberTypeUInt64: return std::make_shared>(shape, args...); case kNumberTypeFloat16: return std::make_shared>(shape, args...); case kNumberTypeFloat32: return std::make_shared>(shape, args...); case kNumberTypeFloat64: return std::make_shared>(shape, args...); default: break; } MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << "."; } Tensor::Tensor(const Tensor &tensor) : MetaTensor(tensor), init_flag_(tensor.init_flag_), data_(tensor.data_), dirty_(tensor.dirty_), id_(tensor.id_), device_address_(tensor.device_address_) {} Tensor::Tensor(const Tensor &tensor, TypeId data_type) : MetaTensor(data_type, tensor.shape_), init_flag_(tensor.init_flag_), data_(MakeTensorData(data_type, tensor.shape_, tensor.data_->data(), tensor.data_type_)), dirty_(tensor.dirty_), id_(tensor.id_), device_address_(tensor.device_address_) {} Tensor::Tensor(TypeId data_type, const std::vector &shape, TensorDataPtr data) : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {} Tensor::Tensor(TypeId data_type, const std::vector &shape) : Tensor(data_type, shape, MakeTensorData(data_type, shape)) {} Tensor::Tensor(TypeId data_type, const std::vector &shape, void *data, size_t data_len) : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, data_len)) {} Tensor::Tensor(TypeId data_type, const std::vector &shape, void *data, TypeId src_data_type) : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, 
src_data_type)) {} Tensor::Tensor(const std::vector &input, const TypePtr &data_type) : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {static_cast(input.size())}), data_(MakeTensorData(data_type_, shape_, input.begin(), input.end())), id_(MakeId()) {} Tensor::Tensor(const std::vector &input, const TypePtr &data_type) : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {static_cast(input.size())}), data_(MakeTensorData(data_type_, shape_, input.begin(), input.end())), id_(MakeId()) {} Tensor::Tensor(int64_t input, const TypePtr &data_type) : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {}), data_(MakeTensorData(data_type_, {}, input)), id_(MakeId()) {} Tensor::Tensor(double input, const TypePtr &data_type) : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {}), data_(MakeTensorData(data_type_, {}, input)), id_(MakeId()) {} bool Tensor::operator==(const Tensor &tensor) const { return (&tensor == this || (MetaTensor::operator==(tensor) && data_ == tensor.data_)); } bool Tensor::ValueEqual(const Tensor &tensor) const { return (&tensor == this || (MetaTensor::operator==(tensor) && data_->equals(*tensor.data_))); } // assgin value to this tensor Tensor &Tensor::AssignValue(const Tensor &tensor) { if (this != &tensor) { MetaTensor::operator=(tensor); dirty_ = tensor.is_dirty(); device_address_ = tensor.device_address(); data_ = tensor.data_; id_ = tensor.id(); } return *this; } abstract::AbstractBasePtr Tensor::ToAbstract() { auto tens = shared_from_base(); auto dtype = tens->Dtype(); if (!IsSubType(dtype, kNumber)) { MS_LOG(EXCEPTION) << "Expect tensor type kNumber but got: " << dtype->ToString() << "."; } auto tensor_shape = tens->shape(); auto abs_tensor = std::make_shared(dtype, tensor_shape); abs_tensor->set_value(shared_from_base()); return abs_tensor; } std::string Tensor::GetShapeAndDataTypeInfo() const { std::ostringstream buf; buf << "Tensor shape:[" << shape() << "]" << this->Dtype()->ToString(); return buf.str(); } std::string Tensor::ToString() 
const { const int small_tensor_size = 30; std::ostringstream buf; buf << "Tensor shape:[" << shape() << "]" << this->Dtype()->ToString(); // only print small tensor if (DataSize() < small_tensor_size) { buf << "val:" << data().ToString(); } return buf.str(); } std::string Tensor::ToStringRepr() const { std::ostringstream buf; auto type_ptr = this->Dtype(); MS_EXCEPTION_IF_NULL(type_ptr); buf << "Tensor shape:[" << shape() << "]" << type_ptr->ToString(); buf << "\nval:" << data().ToString(); return buf.str(); } void Tensor::data_sync() const { if (device_address_ != nullptr) { if (!device_address_->SyncDeviceToHost(shape(), static_cast(data().nbytes()), data_type(), data_c())) { MS_LOG(EXCEPTION) << "SyncDeviceToHost when asnumpy."; } } } TypeId Tensor::set_data_type(const TypeId data_type) { if (data_type != data_type_) { data_ = MakeTensorData(data_type, shape_, data_->data(), data_type_); return MetaTensor::set_data_type(data_type); } return data_type; } } // namespace tensor namespace inference { MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector &shape) { return new Tensor(data_type, shape); } Tensor::Tensor(TypeId data_type, const std::vector &shape) { this->tensor_impl_ = std::make_shared(data_type, shape); } Tensor::Tensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } TypeId Tensor::data_type() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->data_type(); } TypeId Tensor::set_data_type(TypeId data_type) { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->set_data_type(data_type); } std::vector Tensor::shape() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->shape(); } size_t Tensor::set_shape(const std::vector &shape) { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->set_shape(shape); } int Tensor::DimensionSize(size_t index) const { MS_ASSERT(this->tensor_impl_ != nullptr); return 
this->tensor_impl_->DimensionSize(index); } int Tensor::ElementsNum() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->ElementsNum(); } std::size_t Tensor::hash() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->hash(); } std::shared_ptr Tensor::tensor() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_; } size_t Tensor::Size() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->data().nbytes(); } void *Tensor::MutableData() const { MS_ASSERT(this->tensor_impl_ != nullptr); return this->tensor_impl_->data_c(); } } // namespace inference } // namespace mindspore