
!12672 Aipp config file generation

From: @lizhenglong1992
Reviewed-by: 
Signed-off-by:
Tag: v1.2.0-rc1
Committer: mindspore-ci-bot
Commit: c33d767314
15 changed files with 394 additions and 16 deletions
  1. +183 -1   mindspore/ccsrc/minddata/dataset/api/execute.cc
  2. +9 -0     mindspore/ccsrc/minddata/dataset/api/vision.cc
  3. +3 -1     mindspore/ccsrc/minddata/dataset/core/device_tensor.h
  4. +6 -0     mindspore/ccsrc/minddata/dataset/include/execute.h
  5. +2 -0     mindspore/ccsrc/minddata/dataset/include/vision_lite.h
  6. +3 -2     mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/CMakeLists.txt
  7. +1 -0     mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc
  8. +1 -0     mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc
  9. +39 -0    mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.cc
  10. +51 -0   mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.h
  11. +48 -0   mindspore/ccsrc/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc
  12. +20 -0   mindspore/ccsrc/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h
  13. +1 -0    mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc
  14. +1 -0    mindspore/ccsrc/minddata/dataset/kernels/tensor_op.h
  15. +26 -12  tests/st/cpp/dataset/test_de.cc

mindspore/ccsrc/minddata/dataset/api/execute.cc  (+183 -1)

@@ -14,6 +14,8 @@
* limitations under the License.
*/

#include <algorithm>
#include <fstream>
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/core/de_tensor.h"
#include "minddata/dataset/core/device_resource.h"
@@ -30,15 +32,22 @@
#endif
#ifdef ENABLE_ACL
#include "minddata/dataset/core/ascend_resource.h"
#include "minddata/dataset/kernels/ir/vision/ascend_vision_ir.h"
#endif

namespace mindspore {
namespace dataset {

using json = nlohmann::json;
struct Execute::ExtraInfo {
std::multimap<std::string, std::vector<uint32_t>> aipp_cfg_;
};

// FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType) {
ops_.emplace_back(std::move(op));
device_type_ = deviceType;
info_ = std::make_shared<ExtraInfo>();
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
@@ -54,6 +63,7 @@ Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType
Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation;
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
@@ -96,6 +106,7 @@ Execute::Execute(TensorTransform op, MapTargetDevice deviceType) {
Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation;
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
@@ -117,6 +128,7 @@ Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) {

Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDevice deviceType)
: ops_(std::move(ops)), device_type_(deviceType) {
info_ = std::make_shared<ExtraInfo>();
#ifdef ENABLE_ACL
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
@@ -131,6 +143,7 @@ Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDev

Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(ops.begin(), ops.end(), std::back_inserter(ops_),
[](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
@@ -156,6 +169,7 @@ Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDev

Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
@@ -181,6 +195,7 @@ Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
// Execute function for the example vector case: auto decode(new vision::Decode());
Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
info_ = std::make_shared<ExtraInfo>();
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
@@ -268,7 +283,11 @@ Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor
device_input = std::move(device_output);
}
CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
*output = mindspore::MSTensor(std::make_shared<DETensor>(device_input, true));
std::shared_ptr<mindspore::dataset::Tensor> host_output;
// Need to optimize later, waiting for computing department development, hence we pop data temporarily.
RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));
*output = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
// *output = mindspore::MSTensor(std::make_shared<DETensor>(device_input, true)); Use in the future
#endif
}
return Status::OK();
@@ -346,6 +365,169 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
return Status::OK();
}

std::vector<uint32_t> AippSizeFilter(const std::vector<uint32_t> &resize_para, const std::vector<uint32_t> &crop_para) {
std::vector<uint32_t> aipp_size;
if (resize_para.size() == 0) {
aipp_size = crop_para;
} else if (crop_para.size() == 0) {
aipp_size = resize_para;
} else {
if (resize_para.size() == 1) {
aipp_size = *min_element(crop_para.begin(), crop_para.end()) < *resize_para.begin() ? crop_para : resize_para;
} else {
aipp_size =
*min_element(resize_para.begin(), resize_para.end()) < *min_element(crop_para.begin(), crop_para.end())
? resize_para
: crop_para;
}
}
return aipp_size;
}

std::vector<uint32_t> AippMeanFilter(const std::vector<uint32_t> &normalize_para) {
std::vector<uint32_t> aipp_mean;
if (normalize_para.size() == 6) {
std::transform(normalize_para.begin(), normalize_para.begin() + 3, std::back_inserter(aipp_mean),
[](uint32_t i) { return static_cast<uint32_t>(i / 10000); });
} else {
aipp_mean = {0, 0, 0};
}
return aipp_mean;
}

std::vector<float> AippStdFilter(const std::vector<uint32_t> &normalize_para) {
std::vector<float> aipp_std;
if (normalize_para.size() == 6) {
auto zeros = std::find(std::begin(normalize_para), std::end(normalize_para), 0);
if (zeros == std::end(normalize_para)) {
std::transform(normalize_para.begin() + 3, normalize_para.end(), std::back_inserter(aipp_std),
[](uint32_t i) { return static_cast<float>(10000 / i); });
} else {
MS_LOG(WARNING) << "Detect 0 in std vector, please verify your input";
aipp_std = {1.0, 1.0, 1.0};
}
} else {
aipp_std = {1.0, 1.0, 1.0};
}
return aipp_std;
}

Status AippInfoCollection(std::map<std::string, std::string> *aipp_options, const std::vector<uint32_t> &aipp_size,
const std::vector<uint32_t> &aipp_mean, const std::vector<float> &aipp_std) {
aipp_options->insert(std::make_pair("related_input_rank", "0"));
aipp_options->insert(std::make_pair("src_image_size_w", std::to_string(aipp_size[1])));
aipp_options->insert(std::make_pair("src_image_size_h", std::to_string(aipp_size[1])));
aipp_options->insert(std::make_pair("crop", "false"));
aipp_options->insert(std::make_pair("input_format", "YUV420SP_U8"));
aipp_options->insert(std::make_pair("aipp_mode", "static"));
aipp_options->insert(std::make_pair("csc_switch", "true"));
aipp_options->insert(std::make_pair("rbuv_swap_switch", "false"));
std::vector<int32_t> color_space_matrix = {256, 0, 359, 256, -88, -183, 256, 454, 0};
int count = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
std::string key_word = "matrix_r" + std::to_string(i) + "c" + std::to_string(j);
aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_matrix[count])));
++count;
}
}
std::vector<uint32_t> color_space_bias = {0, 128, 128};
for (int i = 0; i < 3; i++) {
std::string key_word = "input_bias_" + std::to_string(i);
aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_bias[i])));
}
for (int i = 0; i < aipp_mean.size(); i++) {
std::string key_word = "mean_chn_" + std::to_string(i);
aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_mean[i])));
}
for (int i = 0; i < aipp_mean.size(); i++) {
std::string key_word = "min_chn_" + std::to_string(i);
aipp_options->insert(std::make_pair(key_word, "0.0"));
}
for (int i = 0; i < aipp_std.size(); i++) {
std::string key_word = "var_reci_chn_" + std::to_string(i);
aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_std[i])));
}
return Status::OK();
}

std::string Execute::AippCfgGenerator() {
std::string config_location = "./aipp.cfg";
#ifdef ENABLE_ACL
std::vector<uint32_t> paras; // Record the parameters value of each Ascend operators
for (int32_t i = 0; i < ops_.size(); i++) {
json ir_info;
if (ops_[i] == nullptr) {
MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] is null";
return "";
}
if (ops_[i]->ValidateParams() != Status::OK()) {
MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] has wrong parameters";
return "";
}
ops_[i]->to_json(&ir_info);
std::multimap<std::string, std::string> op_list = {{vision::kDvppCropJpegOperation, "size"},
{vision::kDvppDecodeResizeOperation, "size"},
{vision::kDvppDecodeResizeCropOperation, "crop_size"},
{vision::kDvppDecodeResizeCropOperation, "resize_size"},
{vision::kDvppNormalizeOperation, "mean"},
{vision::kDvppNormalizeOperation, "std"},
{vision::kDvppResizeJpegOperation, "size"}};
for (auto pos = op_list.equal_range(ops_[i]->Name()); pos.first != pos.second; ++pos.first) {
auto paras_key_word = pos.first->second;
paras = ir_info[paras_key_word].get<std::vector<uint32_t>>();
info_->aipp_cfg_.insert(std::make_pair(ops_[i]->Name(), paras));
}
}
std::ofstream outfile;
outfile.open(config_location, std::ofstream::out);
if (!outfile.is_open()) {
MS_LOG(ERROR) << "Fail to open Aipp config file, please verify your system config(including authority)"
<< "We will return empty string which represent the location of Aipp config file in this case";
std::string except = "";
return except;
}
if (device_type_ == MapTargetDevice::kAscend310) {
// Process resize parameters and crop parameters to find out the final size of input data
std::vector<uint32_t> resize_paras;
std::vector<uint32_t> crop_paras;
auto iter = info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation);
if (iter != info_->aipp_cfg_.end()) {
resize_paras = iter->second;
}
iter = info_->aipp_cfg_.find(vision::kDvppCropJpegOperation);
if (iter != info_->aipp_cfg_.end()) {
crop_paras = iter->second;
if (crop_paras.size() == 1) {
crop_paras.emplace_back(crop_paras[0]);
}
}
std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);
// Process normalization parameters to find out the final normalization parameters for Aipp module
std::vector<uint32_t> normalize_paras;
if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;
++pos.first) {
auto mean_or_std = pos.first->second;
normalize_paras.insert(normalize_paras.end(), mean_or_std.begin(), mean_or_std.end());
}
}
std::vector<uint32_t> aipp_mean = AippMeanFilter(normalize_paras);
std::vector<float> aipp_std = AippStdFilter(normalize_paras);
std::map<std::string, std::string> aipp_options;
AippInfoCollection(&aipp_options, aipp_size, aipp_mean, aipp_std);
std::string tab_char(4, ' ');
outfile << "aipp_op {" << std::endl;
for (auto &option : aipp_options) {
outfile << tab_char << option.first << " : " << option.second << std::endl;
}
outfile << "}";
outfile.close();
}
#endif
return config_location;
}

Status Execute::validate_device_() {
if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310) {
std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";


mindspore/ccsrc/minddata/dataset/api/vision.cc  (+9 -0)

@@ -191,6 +191,15 @@ Normalize::Normalize(std::vector<float> mean, std::vector<float> std) : mean_(me

std::shared_ptr<TensorOperation> Normalize::Parse() { return std::make_shared<NormalizeOperation>(mean_, std_); }

std::shared_ptr<TensorOperation> Normalize::Parse(const MapTargetDevice &env) {
if (env == MapTargetDevice::kAscend310) {
#ifdef ENABLE_ACL
return std::make_shared<DvppNormalizeOperation>(mean_, std_);
#endif
}
return std::make_shared<NormalizeOperation>(mean_, std_);
}

#ifndef ENABLE_ANDROID
// NormalizePad Transform Operation.
NormalizePad::NormalizePad(const std::vector<float> &mean, const std::vector<float> &std, const std::string &dtype)


mindspore/ccsrc/minddata/dataset/core/device_tensor.h  (+3 -1)

@@ -53,8 +53,10 @@ class DeviceTensor : public Tensor {
Status SetYuvStrideShape_(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
const uint32_t &heightStride);

std::vector<uint32_t> YUV_shape_;
std::vector<uint32_t> YUV_shape_; // YUV_shape_ = {width, widthStride, height, heightStride}

uint8_t *device_data_;

uint32_t size_;
};



mindspore/ccsrc/minddata/dataset/include/execute.h  (+6 -0)

@@ -19,6 +19,7 @@

#include <string>
#include <vector>
#include <map>
#include <memory>
#include "include/api/context.h"
#include "include/api/types.h"
@@ -63,6 +64,8 @@ class Execute {

Status DeviceMemoryRelease();

std::string AippCfgGenerator();

private:
Status validate_device_();

@@ -71,6 +74,9 @@ class Execute {
MapTargetDevice device_type_;

std::shared_ptr<DeviceResource> device_resource_;

struct ExtraInfo;
std::shared_ptr<ExtraInfo> info_;
};

} // namespace dataset


mindspore/ccsrc/minddata/dataset/include/vision_lite.h  (+2 -0)

@@ -155,6 +155,8 @@ class Normalize : public TensorTransform {
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;

std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;

private:
std::vector<float> mean_;
std::vector<float> std_;


mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/CMakeLists.txt  (+3 -2)

@@ -6,6 +6,7 @@ add_library(kernels-dvpp-image OBJECT
dvpp_decode_resize_crop_jpeg_op.cc
dvpp_decode_resize_jpeg_op.cc
dvpp_decode_jpeg_op.cc
dvpp_resize_jpeg_op.cc
dvpp_decode_png_op.cc)
dvpp_decode_png_op.cc
dvpp_normalize_op.cc
dvpp_resize_jpeg_op.cc)
add_dependencies(kernels-dvpp-image dvpp-utils)

mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.cc  (+1 -0)

@@ -145,5 +145,6 @@ Status DvppCropJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &
processor_->SetCropParas(crop_width_, crop_height_);
return Status::OK();
}

} // namespace dataset
} // namespace mindspore

mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.cc  (+1 -0)

@@ -134,5 +134,6 @@ Status DvppDecodeResizeJpegOp::SetAscendResource(const std::shared_ptr<DeviceRes
processor_->SetResizeParas(resized_width_, resized_height_);
return Status::OK();
}

} // namespace dataset
} // namespace mindspore

mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.cc  (+39 -0)

@@ -0,0 +1,39 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <algorithm>
#include "minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.h"

namespace mindspore {
namespace dataset {
Status DvppNormalizeOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
std::vector<uint32_t> yuv_shape = input->GetYuvStrideShape();
(*output)->SetAttributes(input->GetDeviceBuffer(), input->DeviceDataSize(), yuv_shape[0], yuv_shape[1], yuv_shape[2],
yuv_shape[3]);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the output result from device memory!";
RETURN_STATUS_UNEXPECTED(error);
}
return Status::OK();
}

Status DvppNormalizeOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) { return Status::OK(); }

} // namespace dataset
} // namespace mindspore

mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.h  (+51 -0)

@@ -0,0 +1,51 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_NORMALIZE_JPEG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_NORMALIZE_JPEG_OP_H

#include <memory>
#include <string>
#include <vector>
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
#include "mindspore/core/utils/log_adapter.h"

namespace mindspore {
namespace dataset {
class DvppNormalizeOp : public TensorOp {
public:
explicit DvppNormalizeOp(std::vector<float> mean, std::vector<float> std) : mean_(mean), std_(std) {}

~DvppNormalizeOp() = default;

Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;

std::string Name() const override { return kDvppNormalizeOp; }

Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;

private:
std::vector<float> mean_;
std::vector<float> std_;
};

} // namespace dataset
} // namespace mindspore

#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_NORMALIZE_JPEG_OP_H

mindspore/ccsrc/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc  (+48 -0)

@@ -26,6 +26,7 @@
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_normalize_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.h"

namespace mindspore {
@@ -241,6 +242,53 @@ Status DvppDecodePngOperation::ValidateParams() { return Status::OK(); }

std::shared_ptr<TensorOp> DvppDecodePngOperation::Build() { return std::make_shared<DvppDecodePngOp>(); }

// DvppNormalize
DvppNormalizeOperation::DvppNormalizeOperation(const std::vector<float> &mean, const std::vector<float> &std)
: mean_(mean), std_(std) {}

Status DvppNormalizeOperation::ValidateParams() {
if (mean_.size() != 3) {
std::string err_msg = "DvppNormalization:: mean expecting size 3, got size: " + std::to_string(mean_.size());
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
if (std_.size() != 3) {
std::string err_msg = "DvppNormalization: std expecting size 3, got size: " + std::to_string(std_.size());
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
if (*min_element(mean_.begin(), mean_.end()) < 0 || *max_element(mean_.begin(), mean_.end()) > 256) {
std::string err_msg =
"Normalization can take parameters in range [0, 256] according to math theory of mean and sigma, got mean "
"vector" +
std::to_string(std_.size());
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
if (*min_element(std_.begin(), std_.end()) < 0 || *max_element(std_.begin(), std_.end()) > 256) {
std::string err_msg =
"Normalization can take parameters in range [0, 256] according to math theory of mean and sigma, got mean "
"vector" +
std::to_string(std_.size());
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_SYNTAX_ERROR(err_msg);
}
return Status::OK();
}

std::shared_ptr<TensorOp> DvppNormalizeOperation::Build() {
std::shared_ptr<DvppNormalizeOp> tensor_op = std::make_shared<DvppNormalizeOp>(mean_, std_);
return tensor_op;
}

Status DvppNormalizeOperation::to_json(nlohmann::json *out_json) {
nlohmann::json args;
args["mean"] = mean_;
args["std"] = std_;
*out_json = args;
return Status::OK();
}

// DvppResizeOperation
DvppResizeJpegOperation::DvppResizeJpegOperation(const std::vector<uint32_t> &resize) : resize_(resize) {}



mindspore/ccsrc/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h  (+20 -0)

@@ -40,6 +40,7 @@ constexpr char kDvppDecodeResizeOperation[] = "DvppDecodeResize";
constexpr char kDvppDecodeResizeCropOperation[] = "DvppDecodeResizeCrop";
constexpr char kDvppDecodeJpegOperation[] = "DvppDecodeJpeg";
constexpr char kDvppDecodePngOperation[] = "DvppDecodePng";
constexpr char kDvppNormalizeOperation[] = "DvppNormalize";
constexpr char kDvppResizeJpegOperation[] = "DvppResizeJpeg";

/* ####################################### Derived TensorOperation classes ################################# */
@@ -121,6 +122,25 @@ class DvppDecodePngOperation : public TensorOperation {
std::string Name() const override { return kDvppDecodePngOperation; }
};

class DvppNormalizeOperation : public TensorOperation {
public:
explicit DvppNormalizeOperation(const std::vector<float> &mean, const std::vector<float> &std);

~DvppNormalizeOperation() = default;

std::shared_ptr<TensorOp> Build() override;

Status ValidateParams() override;

std::string Name() const override { return kDvppNormalizeOperation; }

Status to_json(nlohmann::json *out_json) override;

private:
std::vector<float> mean_;
std::vector<float> std_;
};

class DvppResizeJpegOperation : public TensorOperation {
public:
explicit DvppResizeJpegOperation(const std::vector<uint32_t> &resize);


mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc  (+1 -0)

@@ -75,5 +75,6 @@ Status TensorOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resour
return Status(StatusCode::kMDUnexpectedError,
"This is a CPU operator which doesn't have Ascend Resource. Please verify your context");
}

} // namespace dataset
} // namespace mindspore

mindspore/ccsrc/minddata/dataset/kernels/tensor_op.h  (+1 -0)

@@ -66,6 +66,7 @@ constexpr char kDvppDecodeResizeCropJpegOp[] = "DvppDecodeResizeCropJpegOp";
constexpr char kDvppDecodeResizeJpegOp[] = "DvppDecodeResizeJpegOp";
constexpr char kDvppDecodeJpegOp[] = "DvppDecodeJpegOp";
constexpr char kDvppDecodePngOp[] = "DvppDecodePngOp";
constexpr char kDvppNormalizeOp[] = "DvppNormalizeOp";
constexpr char kDvppResizeJpegOp[] = "DvppResizeJpegOp";
constexpr char kEqualizeOp[] = "EqualizeOp";
constexpr char kHwcToChwOp[] = "HWC2CHWOp";


tests/st/cpp/dataset/test_de.cc  (+26 -12)

@@ -81,7 +81,7 @@ TEST_F(TestDE, TestDvpp) {

// Check image info
ASSERT_TRUE(rc.IsOk());
ASSERT_EQ(image.Shape().size(), 2);
ASSERT_EQ(image.Shape().size(), 3);
int32_t real_h = 0;
int32_t real_w = 0;
int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
@@ -92,9 +92,15 @@ TEST_F(TestDE, TestDvpp) {
real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
}
/* Use in the future
ASSERT_EQ(image.Shape()[0], real_h); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], real_w);
ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
*/
ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], 1);
ASSERT_EQ(image.Shape()[2], 1);
ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
#endif
}

@@ -119,7 +125,7 @@ TEST_F(TestDE, TestDvppSinkMode) {

// Check image info
ASSERT_TRUE(rc.IsOk());
ASSERT_EQ(image.Shape().size(), 2);
ASSERT_EQ(image.Shape().size(), 3);
int32_t real_h = 0;
int32_t real_w = 0;
int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
@@ -130,14 +136,15 @@ TEST_F(TestDE, TestDvppSinkMode) {
real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
}
ASSERT_EQ(image.Shape()[0], real_h); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], real_w);
ASSERT_EQ(image.DataSize(), 1.5 * real_w * real_h);
ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], 1);
ASSERT_EQ(image.Shape()[2], 1);
ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
Transform.DeviceMemoryRelease();
#endif
}

TEST_F(TestDE, TestDvppDecodeResizeCrop) {
TEST_F(TestDE, TestDvppDecodeResizeCropNormalize) {
#ifdef ENABLE_ACL
std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
@@ -146,18 +153,24 @@ TEST_F(TestDE, TestDvppDecodeResizeCrop) {
// Define dvpp transform
std::vector<int32_t> crop_paras = {416};
std::vector<int32_t> resize_paras = {512};
std::vector<float> mean = {0.485 * 255, 0.456 * 255, 0.406 * 255};
std::vector<float> std = {0.229 * 255, 0.224 * 255, 0.225 * 255};
auto decode(new vision::Decode());
auto resize(new vision::Resize(resize_paras));
auto centercrop(new vision::CenterCrop(crop_paras));
std::vector<TensorTransform *> transforms = {decode, resize, centercrop};
mindspore::dataset::Execute Transform(transforms, MapTargetDevice::kAscend310);
auto normalize(new vision::Normalize(mean, std));
std::vector<TensorTransform *> trans_lists = {decode, resize, centercrop, normalize};
mindspore::dataset::Execute Transform(trans_lists, MapTargetDevice::kAscend310);

std::string aipp_cfg = Transform.AippCfgGenerator();
ASSERT_EQ(aipp_cfg, "./aipp.cfg");

// Apply transform on images
Status rc = Transform(image, &image);

// Check image info
ASSERT_TRUE(rc.IsOk());
ASSERT_EQ(image.Shape().size(), 2);
ASSERT_EQ(image.Shape().size(), 3);
int32_t real_h = 0;
int32_t real_w = 0;
int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
@@ -168,9 +181,10 @@ TEST_F(TestDE, TestDvppDecodeResizeCrop) {
real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
}
ASSERT_EQ(image.Shape()[0], real_h); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], real_w);
ASSERT_EQ(image.DataSize(), 1.5 * real_w * real_h);
ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], 1);
ASSERT_EQ(image.Shape()[2], 1);
ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
Transform.DeviceMemoryRelease();
#endif
}
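
Also for reference (not part of the diff): a sketch of roughly what the generated ./aipp.cfg looks like for a decode + resize + crop + normalize pipeline such as the test above. The fixed keys and values follow AippInfoCollection() in execute.cc; the angle-bracket entries depend on the concrete resize/crop/normalize parameters and are placeholders, and the real file emits keys in std::map's sorted order.

// Illustrative sketch only: approximate content of the generated ./aipp.cfg.
// Fixed keys/values mirror AippInfoCollection() in execute.cc; <...> placeholders
// depend on the pipeline parameters; only one channel of the mean/min/var
// triples and one row of the csc matrix are shown here.
const char *kAippCfgSketch = R"cfg(
aipp_op {
    related_input_rank : 0
    src_image_size_w : <final width>
    src_image_size_h : <final height>
    crop : false
    input_format : YUV420SP_U8
    aipp_mode : static
    csc_switch : true
    rbuv_swap_switch : false
    matrix_r0c0 : 256
    matrix_r0c1 : 0
    matrix_r0c2 : 359
    input_bias_0 : 0
    input_bias_1 : 128
    input_bias_2 : 128
    mean_chn_0 : <mean[0]>
    min_chn_0 : 0.0
    var_reci_chn_0 : <1 / std[0]>
}
)cfg";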
