Browse Source

!12892 Parse method fix

From: @lizhenglong1992
Reviewed-by: @liucunwei,@pandoublefeng
Signed-off-by: @liucunwei
tags/v1.2.0-rc1
mindspore-ci-bot Gitee 4 years ago
parent
commit
f96111e6de
5 changed files with 159 additions and 67 deletions
  1. +66
    -46
      mindspore/ccsrc/minddata/dataset/api/execute.cc
  2. +25
    -4
      mindspore/ccsrc/minddata/dataset/core/de_tensor.cc
  3. +54
    -17
      mindspore/ccsrc/minddata/dataset/core/device_tensor.cc
  4. +10
    -0
      mindspore/ccsrc/minddata/dataset/core/device_tensor.h
  5. +4
    -0
      mindspore/ccsrc/minddata/dataset/include/execute.h

+ 66
- 46
mindspore/ccsrc/minddata/dataset/api/execute.cc View File

@@ -41,6 +41,7 @@ namespace dataset {
using json = nlohmann::json;
// Extra per-object context for Execute, kept behind a pointer to keep the public header stable.
struct Execute::ExtraInfo {
// AIPP configuration values collected from Ascend operators; presumably keyed by operator name — confirm against AippCfgGenerator's usage.
std::multimap<std::string, std::vector<uint32_t>> aipp_cfg_;
bool init_with_shared_ptr_ = true; // True when this Execute was initialized from shared_ptr inputs (the default); such inputs are parsed lazily via ParseTransforms_()
};

// FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
@@ -61,15 +62,10 @@ Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType
}

Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation;
// Initialize the op and other context
transforms_.emplace_back(op);

info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
operation = op->Parse(deviceType);
}
ops_.emplace_back(std::move(operation));
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -84,9 +80,12 @@ Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType
}

Execute::Execute(std::reference_wrapper<TensorTransform> op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
// Initialize the transforms_ and other context
std::shared_ptr<TensorOperation> operation = op.get().Parse();
ops_.emplace_back(std::move(operation));

info_ = std::make_shared<ExtraInfo>();
info_->init_with_shared_ptr_ = false;
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -102,15 +101,11 @@ Execute::Execute(std::reference_wrapper<TensorTransform> op, MapTargetDevice dev

// Execute function for the example case: auto decode(new vision::Decode());
Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation;
// Initialize the transforms_ and other context
std::shared_ptr<TensorTransform> smart_ptr_op(op);
transforms_.emplace_back(smart_ptr_op);

info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
operation = op->Parse(deviceType);
}
ops_.emplace_back(std::move(operation));
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -140,18 +135,10 @@ Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDev
}

Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
// Initialize the transforms_ and other context
transforms_ = ops;

info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(ops.begin(), ops.end(), std::back_inserter(ops_),
[](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
return operation->Parse();
});
} else {
for (auto &op : ops) {
ops_.emplace_back(op->Parse(deviceType));
}
}
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -166,8 +153,7 @@ Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDev
}

Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
info_ = std::make_shared<ExtraInfo>();
// Initialize the transforms_ and other context
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
@@ -177,6 +163,9 @@ Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
ops_.emplace_back(op.get().Parse(deviceType));
}
}

info_ = std::make_shared<ExtraInfo>();
info_->init_with_shared_ptr_ = false;
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -192,17 +181,13 @@ Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,

// Execute function for the example vector case: auto decode(new vision::Decode());
Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](TensorTransform *operation) -> std::shared_ptr<TensorOperation> { return operation->Parse(); });
} else {
for (auto &op : ops) {
ops_.emplace_back(op->Parse(deviceType));
}
// Initialize the transforms_ and other context
for (auto &op : ops) {
std::shared_ptr<TensorTransform> smart_ptr_op(op);
transforms_.emplace_back(smart_ptr_op);
}

info_ = std::make_shared<ExtraInfo>();
device_type_ = deviceType;
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
@@ -231,8 +216,14 @@ Execute::~Execute() {
Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) {
// Validate input tensor
CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data");
CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");

// Parse TensorTransform transforms_ into TensorOperation ops_
if (info_->init_with_shared_ptr_) {
RETURN_IF_NOT_OK(ParseTransforms_());
}
CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");

// Validate and build runtime ops
std::vector<std::shared_ptr<TensorOp>> transforms; // record the transformations

@@ -310,9 +301,14 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
for (auto &tensor : input_tensor_list) {
CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data");
}
CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");

// Parse TensorTransform transforms_ into TensorOperation ops_
if (info_->init_with_shared_ptr_) {
RETURN_IF_NOT_OK(ParseTransforms_());
}
CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");

std::map<MapTargetDevice, std::string> env_list = {
{MapTargetDevice::kCpu, "kCpu"}, {MapTargetDevice::kGpu, "kGpu"}, {MapTargetDevice::kAscend310, "kAscend310"}};

@@ -491,6 +487,9 @@ Status AippInfoCollection(std::map<std::string, std::string> *aipp_options, cons
std::string Execute::AippCfgGenerator() {
std::string config_location = "./aipp.cfg";
#ifdef ENABLE_ACL
if (info_->init_with_shared_ptr_) {
ParseTransforms_();
}
std::vector<uint32_t> paras; // Record the parameters value of each Ascend operators
for (int32_t i = 0; i < ops_.size(); i++) {
// Validate operator ir
@@ -499,10 +498,7 @@ std::string Execute::AippCfgGenerator() {
MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] is null";
return "";
}
if (ops_[i]->ValidateParams() != Status::OK()) {
MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] has wrong parameters";
return "";
}

// Define map between operator name and parameter name
ops_[i]->to_json(&ir_info);
std::multimap<std::string, std::string> op_list = {{vision::kDvppCropJpegOperation, "size"},
@@ -587,6 +583,30 @@ std::string Execute::AippCfgGenerator() {
return config_location;
}

// Predicate used when scanning the transform list: true for an empty (null) TensorTransform pointer.
bool IsEmptyPtr(std::shared_ptr<TensorTransform> api_ptr) { return !api_ptr; }

// Convert every cached TensorTransform in transforms_ into a TensorOperation and
// append it to ops_, targeting the device chosen at construction time.
// Fails fast if any cached transform is a null pointer.
Status Execute::ParseTransforms_() {
  const bool has_null_transform =
    std::any_of(transforms_.begin(), transforms_.end(),
                [](const std::shared_ptr<TensorTransform> &transform) { return transform == nullptr; });
  if (has_null_transform) {
    std::string err_msg = "Your input TensorTransforms contain at least one nullptr, please check your input";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }

  for (const auto &transform : transforms_) {
    // CPU parsing uses the default Parse() overload; other targets need the device type.
    if (device_type_ == MapTargetDevice::kCpu) {
      ops_.emplace_back(transform->Parse());
    } else {
      ops_.emplace_back(transform->Parse(device_type_));
    }
  }

  return Status::OK();
}

Status Execute::validate_device_() {
if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310) {
std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";


+ 25
- 4
mindspore/ccsrc/minddata/dataset/core/de_tensor.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -61,6 +61,12 @@ DETensor::DETensor(std::shared_ptr<dataset::DeviceTensor> device_tensor_impl, bo
const std::string &DETensor::Name() const { return name_; }

// Report the element type of this tensor, converted to the public mindspore::DataType enum.
enum mindspore::DataType DETensor::DataType() const {
#ifndef ENABLE_ANDROID
// Device-resident tensors report the type recorded on the device tensor impl.
if (is_device_) {
ASSERT_NULL(device_tensor_impl_);
return static_cast<mindspore::DataType>(DETypeToMSType(device_tensor_impl_->DeviceDataType()));
}
#endif
ASSERT_NULL(tensor_impl_);
return static_cast<mindspore::DataType>(DETypeToMSType(tensor_impl_->type()));
}
@@ -81,19 +87,34 @@ const std::vector<int64_t> &DETensor::Shape() const { return shape_; }
// Return a non-owning view of the tensor's host buffer (the no-op deleter means the caller does not take ownership).
std::shared_ptr<const void> DETensor::Data() const {
#ifndef ENABLE_ANDROID
if (is_device_) {
// NOTE(review): this capture shows both the pre-change and post-change bodies of this
// branch side by side (the diff +/- markers were lost). The next line is the removed
// version; the two after it are the replacement (log an error and return nullptr,
// since Data() always returns host data). Confirm against the actual file.
return std::shared_ptr<const void>(device_tensor_impl_->GetDeviceBuffer(), [](const void *) {});
MS_LOG(ERROR) << "Data() always return the data on the host.";
return nullptr;
}
#endif
return std::shared_ptr<const void>(tensor_impl_->GetBuffer(), [](const void *) {});
}

// Return a mutable pointer to the tensor's buffer: the device buffer when the
// tensor lives on device, otherwise the host buffer.
void *DETensor::MutableData() {
#ifndef ENABLE_ANDROID
if (is_device_) {
ASSERT_NULL(device_tensor_impl_);
return static_cast<void *>(device_tensor_impl_->GetDeviceMutableBuffer());
}
#endif
ASSERT_NULL(tensor_impl_);
// NOTE(review): the two return lines below are the removed and added versions of the
// same statement (diff markers lost in this capture); only the static_cast form exists
// in the post-change file — verify against the source.
return tensor_impl_->GetMutableBuffer();
return static_cast<void *>(tensor_impl_->GetMutableBuffer());
}

// True when this tensor's data resides on a device rather than on the host.
bool DETensor::IsDevice() const { return is_device_; }

std::shared_ptr<mindspore::MSTensor::Impl> DETensor::Clone() const { return std::make_shared<DETensor>(tensor_impl_); }
// Produce a new DETensor impl handle that shares this tensor's underlying
// implementation (device-side impl when is_device_, host-side impl otherwise).
std::shared_ptr<mindspore::MSTensor::Impl> DETensor::Clone() const {
#ifndef ENABLE_ANDROID
if (is_device_) {
ASSERT_NULL(device_tensor_impl_);
return std::make_shared<DETensor>(device_tensor_impl_, is_device_);
}
#endif
return std::make_shared<DETensor>(tensor_impl_);
}
} // namespace dataset
} // namespace mindspore

+ 54
- 17
mindspore/ccsrc/minddata/dataset/core/device_tensor.cc View File

@@ -20,27 +20,11 @@

namespace mindspore {
namespace dataset {
Status DeviceTensor::SetYuvStrideShape_(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
const uint32_t &heightStride) {
YUV_shape_ = {width, widthStride, height, heightStride};
return Status::OK();
}

std::vector<uint32_t> DeviceTensor::GetYuvStrideShape() { return YUV_shape_; }

Status DeviceTensor::SetAttributes(uint8_t *data_ptr, const uint32_t &dataSize, const uint32_t &width,
const uint32_t &widthStride, const uint32_t &height, const uint32_t &heightStride) {
device_data_ = data_ptr;
CHECK_FAIL_RETURN_UNEXPECTED(device_data_ != nullptr, "Fail to get the device data.");
SetSize_(dataSize);
SetYuvStrideShape_(width, widthStride, height, heightStride);
return Status::OK();
}

// Construct a DeviceTensor with the given shape and element type. The char-data
// allocator is backed by the global memory pool, and the device-side data type
// is initialized to mirror the host type.
DeviceTensor::DeviceTensor(const TensorShape &shape, const DataType &type) : Tensor(shape, type) {
// grab the mem pool from global context and create the allocator for char data area
std::shared_ptr<MemoryPool> global_pool = GlobalContext::Instance()->mem_pool();
data_allocator_ = std::make_unique<Allocator<unsigned char>>(global_pool);
device_data_type_ = type;
}

Status DeviceTensor::CreateEmpty(const TensorShape &shape, const DataType &type, std::shared_ptr<DeviceTensor> *out) {
@@ -64,10 +48,63 @@ Status DeviceTensor::CreateEmpty(const TensorShape &shape, const DataType &type,
return Status::OK();
}

// Create a DeviceTensor wrapping an existing block of device memory.
//
// @param shape Tensor shape; must be fully known.
// @param type Element type; must not be DE_UNKNOWN.
// @param data_ptr Pointer to the device buffer; must not be null (not copied or owned).
// @param dataSize Size of the device buffer; must be > 0.
// @param attributes YUV stride geometry: {width, widthStride, height, heightStride}.
// @param out Receives the newly created DeviceTensor.
// @return Status error when validation or allocation fails.
Status DeviceTensor::CreateFromDeviceMemory(const TensorShape &shape, const DataType &type, uint8_t *data_ptr,
                                            const uint32_t &dataSize, const std::vector<uint32_t> &attributes,
                                            std::shared_ptr<DeviceTensor> *out) {
  CHECK_FAIL_RETURN_UNEXPECTED(shape.known(), "Invalid shape.");
  CHECK_FAIL_RETURN_UNEXPECTED(type != DataType::DE_UNKNOWN, "Invalid data type.");
  CHECK_FAIL_RETURN_UNEXPECTED(data_ptr != nullptr, "Data pointer is NULL");
  CHECK_FAIL_RETURN_UNEXPECTED(dataSize > 0, "Invalid data size");
  // Guard the attributes[0..3] accesses below; the original code indexed the
  // vector unconditionally, which is out-of-bounds UB for short inputs.
  constexpr size_t kYuvAttributeSize = 4;
  CHECK_FAIL_RETURN_UNEXPECTED(attributes.size() >= kYuvAttributeSize,
                               "Invalid attributes: expect {width, widthStride, height, heightStride}");

  const DeviceTensorAlloc *alloc = GlobalContext::Instance()->device_tensor_allocator();
  *out = std::allocate_shared<DeviceTensor>(*alloc, shape, type);

  // if it's a string tensor and it has no elements, Just initialize the shape and type.
  if (!type.IsNumeric() && shape.NumOfElements() == 0) {
    return Status::OK();
  }

  CHECK_FAIL_RETURN_UNEXPECTED(type.IsNumeric(), "Number of elements is not 0. The type should be numeric.");

  int64_t byte_size = (*out)->SizeInBytes();

  // Don't allocate if we have a tensor with no elements.
  if (byte_size != 0) {
    RETURN_IF_NOT_OK((*out)->AllocateBuffer(byte_size));
  }

  // SetAttributes returns a Status; propagate it directly instead of feeding a
  // Status into the boolean CHECK macro as the original did.
  RETURN_IF_NOT_OK(
    (*out)->SetAttributes(data_ptr, dataSize, attributes[0], attributes[1], attributes[2], attributes[3]));

  return Status::OK();
}

// Non-owning pointer to the device-side buffer (may be null before SetAttributes).
uint8_t *DeviceTensor::GetDeviceBuffer() { return device_data_; }

// Mutable alias of the device-side buffer; returns the same pointer as GetDeviceBuffer.
uint8_t *DeviceTensor::GetDeviceMutableBuffer() { return device_data_; }

// Element type of the device-side data, set at construction.
DataType DeviceTensor::DeviceDataType() const { return device_data_type_; }

// Recorded size of the device-side data, as set via SetAttributes/SetSize_ (presumably bytes — confirm with callers).
uint32_t DeviceTensor::DeviceDataSize() { return size_; }

// Record the YUV stride geometry as {width, widthStride, height, heightStride}.
Status DeviceTensor::SetYuvStrideShape_(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
const uint32_t &heightStride) {
YUV_shape_ = {width, widthStride, height, heightStride};
return Status::OK();
}

// Return a copy of the stored YUV stride geometry: {width, widthStride, height, heightStride}.
std::vector<uint32_t> DeviceTensor::GetYuvStrideShape() { return YUV_shape_; }

// Attach an externally-owned device buffer and its YUV geometry to this tensor.
// The buffer is not copied or owned here; fails when data_ptr is null.
Status DeviceTensor::SetAttributes(uint8_t *data_ptr, const uint32_t &dataSize, const uint32_t &width,
const uint32_t &widthStride, const uint32_t &height, const uint32_t &heightStride) {
device_data_ = data_ptr;
CHECK_FAIL_RETURN_UNEXPECTED(device_data_ != nullptr, "Fail to get the device data.");
SetSize_(dataSize);
SetYuvStrideShape_(width, widthStride, height, heightStride);
return Status::OK();
}

Status DeviceTensor::SetSize_(const uint32_t &new_size) {
size_ = new_size;
return Status::OK();


+ 10
- 0
mindspore/ccsrc/minddata/dataset/core/device_tensor.h View File

@@ -39,12 +39,20 @@ class DeviceTensor : public Tensor {

static Status CreateEmpty(const TensorShape &shape, const DataType &type, std::shared_ptr<DeviceTensor> *out);

static Status CreateFromDeviceMemory(const TensorShape &shape, const DataType &type, uint8_t *data_ptr,
const uint32_t &dataSize, const std::vector<uint32_t> &attributes,
std::shared_ptr<DeviceTensor> *out);

uint8_t *GetDeviceBuffer();

uint8_t *GetDeviceMutableBuffer();

std::vector<uint32_t> GetYuvStrideShape();

uint32_t DeviceDataSize();

DataType DeviceDataType() const;

bool HasDeviceData() { return device_data_ != nullptr; }

private:
@@ -58,6 +66,8 @@ class DeviceTensor : public Tensor {
uint8_t *device_data_;

uint32_t size_;

DataType device_data_type_;
};

} // namespace dataset


+ 4
- 0
mindspore/ccsrc/minddata/dataset/include/execute.h View File

@@ -67,8 +67,12 @@ class Execute {
std::string AippCfgGenerator();

private:
Status ParseTransforms_();

Status validate_device_();

std::vector<std::shared_ptr<TensorTransform>> transforms_;

std::vector<std::shared_ptr<TensorOperation>> ops_;

MapTargetDevice device_type_;


Loading…
Cancel
Save