| @@ -23,7 +23,9 @@ | |||
| #include "include/model.h" | |||
| #include "include/context.h" | |||
| #include "include/lite_session.h" | |||
| namespace { | |||
| // Cap on how many elements of an output tensor are echoed to stdout after inference. | |||
| // NOTE(review): the use site compares with `i <= kNumPrintOfOutData`, so up to 51 values print — confirm intended. | |||
| constexpr int kNumPrintOfOutData = 50; | |||
| } | |||
| std::string RealPath(const char *path) { | |||
| const size_t max = 4096; | |||
| if (path == nullptr) { | |||
| @@ -90,7 +92,7 @@ void GenerateRandomData(int size, void *data, Distribution distribution) { | |||
| std::mt19937 random_engine; | |||
| int elements_num = size / sizeof(T); | |||
| (void)std::generate_n(static_cast<T *>(data), elements_num, | |||
| [&]() { return static_cast<T>(distribution(random_engine)); }); | |||
| [&distribution, &random_engine]() { return static_cast<T>(distribution(random_engine)); }); | |||
| } | |||
| int GenerateInputDataWithRandom(std::vector<mindspore::tensor::MSTensor *> inputs) { | |||
| @@ -129,7 +131,7 @@ int Run(mindspore::session::LiteSession *session) { | |||
| << " tensor elements num is:" << tensor.second->ElementsNum() << std::endl; | |||
| auto out_data = reinterpret_cast<float *>(tensor.second->MutableData()); | |||
| std::cout << "output data is:"; | |||
| for (int i = 0; i < tensor.second->ElementsNum() && i <= 50; i++) { | |||
| for (int i = 0; i < tensor.second->ElementsNum() && i <= kNumPrintOfOutData; i++) { | |||
| std::cout << out_data[i] << " "; | |||
| } | |||
| std::cout << std::endl; | |||
| @@ -659,16 +659,16 @@ int RunCallback(const char *model_path) { | |||
| GetInputsByTensorNameAndSetData(session); | |||
| // Definition of callback function before forwarding operator. | |||
| auto before_call_back = [&](const std::vector<mindspore::tensor::MSTensor *> &before_inputs, | |||
| const std::vector<mindspore::tensor::MSTensor *> &before_outputs, | |||
| const mindspore::CallBackParam &call_param) { | |||
| auto before_call_back = [](const std::vector<mindspore::tensor::MSTensor *> &before_inputs, | |||
| const std::vector<mindspore::tensor::MSTensor *> &before_outputs, | |||
| const mindspore::CallBackParam &call_param) { | |||
| std::cout << "Before forwarding " << call_param.node_name << " " << call_param.node_type << std::endl; | |||
| return true; | |||
| }; | |||
| // Definition of callback function after forwarding operator. | |||
| auto after_call_back = [&](const std::vector<mindspore::tensor::MSTensor *> &after_inputs, | |||
| const std::vector<mindspore::tensor::MSTensor *> &after_outputs, | |||
| const mindspore::CallBackParam &call_param) { | |||
| auto after_call_back = [](const std::vector<mindspore::tensor::MSTensor *> &after_inputs, | |||
| const std::vector<mindspore::tensor::MSTensor *> &after_outputs, | |||
| const mindspore::CallBackParam &call_param) { | |||
| std::cout << "After forwarding " << call_param.node_name << " " << call_param.node_type << std::endl; | |||
| return true; | |||
| }; | |||
| @@ -62,7 +62,7 @@ char *ReadFile(const char *file, size_t *size) { | |||
| ifs.seekg(0, std::ios::end); | |||
| *size = ifs.tellg(); | |||
| std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]); | |||
| auto buf = std::make_unique<char[]>(*size); | |||
| if (buf == nullptr) { | |||
| MS_LOG(ERROR) << "malloc buf failed, file: " << real_path; | |||
| ifs.close(); | |||
| @@ -31,11 +31,21 @@ constexpr const char *ANDROID_LOG_TAG = "MS_LITE"; | |||
| #endif | |||
// Maps a log-level environment string ("0".."3") to its integer level.
// Any other value — including a null pointer — falls back to level 2,
// the logger's default.
//
// The stripped diff had left both the old compact `if` chain and the new
// braced chain in the body; this is the deduplicated version.
int StrToInt(const char *env) {
  constexpr int kDefaultLogLevel = 2;
  if (env == nullptr) {
    return kDefaultLogLevel;
  }
  // Accept exactly one character in ['0', '3']; longer strings such as
  // "10" are rejected, matching the original strcmp-based comparisons.
  if (env[0] >= '0' && env[0] <= '3' && env[1] == '\0') {
    return env[0] - '0';
  }
  return kDefaultLogLevel;
}
| @@ -20,7 +20,6 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| std::vector<StringPack> ParseTensorBuffer(Tensor *tensor) { | |||
| if (tensor->data_c() == nullptr) { | |||
| MS_LOG(ERROR) << "Tensor data is null, cannot be parsed"; | |||
| @@ -193,7 +192,7 @@ uint64_t HashStringLen0to16(const char *s, size_t len) { | |||
| uint8_t c = s[len - 1]; | |||
| uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8); | |||
| uint32_t z = len + (static_cast<uint32_t>(c) << 2); | |||
| return ShiftMix(y * k2 ^ z * k0) * k2; | |||
| return ShiftMix((y * k2) ^ (z * k0)) * k2; | |||
| } | |||
| return k2; | |||
| } | |||
| @@ -23,9 +23,9 @@ namespace mindspore::lite { | |||
| int Executor::Run(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, | |||
| const std::vector<kernel::LiteKernel *> &kernels, mindspore::Allocator *allocator, | |||
| const KernelCallBack &before, const KernelCallBack &after) { | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(allocator != nullptr); | |||
| auto ret = CheckTensorsInvalid(in_tensors); | |||
| if (RET_OK != ret) { | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "CheckInputs failed"; | |||
| return ret; | |||
| } | |||
| @@ -44,9 +44,9 @@ int Executor::Run(const std::vector<Tensor *> &in_tensors, const std::vector<Ten | |||
| while (!kernel_queue.empty()) { | |||
| auto cur_kernel = kernel_queue.front(); | |||
| kernel_queue.pop(); | |||
| MS_ASSERT(nullptr != cur_kernel); | |||
| MS_ASSERT(cur_kernel != nullptr); | |||
| ret = cur_kernel->Execute(before, after); | |||
| if (RET_OK != ret) { | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "run kernel failed, name: " << cur_kernel->name(); | |||
| return ret; | |||
| } | |||
| @@ -123,10 +123,10 @@ void MindrtExecutor::TransferGraphOutput() { | |||
| int MindrtExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, | |||
| const std::vector<kernel::LiteKernel *> &kernels, mindspore::Allocator *allocator, | |||
| const KernelCallBack &before, const KernelCallBack &after) { | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(allocator != nullptr); | |||
| if (kernels.front()->type() != schema::PrimitiveType_Merge) { | |||
| auto ret = CheckTensorsInvalid(in_tensors); | |||
| if (RET_OK != ret) { | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "CheckInputs failed"; | |||
| return ret; | |||
| } | |||
| @@ -137,7 +137,7 @@ int MindrtExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::vect | |||
| } | |||
| auto ret = MindrtRun<Tensor>(input_data_, &output_data_, &before, &after); | |||
| if (RET_OK != ret) { | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "MindrtRun failed"; | |||
| return ret; | |||
| } | |||
| @@ -35,7 +35,7 @@ static int RunKernel(void *data, int index, float lhs_scale, float rhs_scale) { | |||
| auto kernel = executor->GetReadyKernel(index); | |||
| auto ret = kernel->Execute(); | |||
| executor->SetResult(index, ret); | |||
| if (0 != ret) { | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "run kernel failed, name: " << kernel->name(); | |||
| return 0; | |||
| } | |||
| @@ -46,7 +46,7 @@ static int RunKernel(void *data, int index, float lhs_scale, float rhs_scale) { | |||
| int ParallelExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, | |||
| const std::vector<kernel::LiteKernel *> &kernels, mindspore::Allocator *allocator, | |||
| const KernelCallBack &before, const KernelCallBack &after) { | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(allocator != nullptr); | |||
| for (auto &inTensor : in_tensors) { | |||
| if (inTensor == nullptr) { | |||
| MS_LOG(ERROR) << "Graph input tensor is nullptr"; | |||
| @@ -69,7 +69,7 @@ int ParallelExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::ve | |||
| std::vector<kernel::LiteKernel *> newReadyKernels; | |||
| while (!readyKernels.empty()) { | |||
| results.resize(readyKernels.size(), RET_OK); | |||
| if (0 != thread_pool_->ParallelLaunch(RunKernel, this, readyKernels.size())) { | |||
| if (thread_pool_->ParallelLaunch(RunKernel, this, readyKernels.size()) != 0) { | |||
| MS_LOG(ERROR) << "ParallelLaunch failed "; | |||
| return RET_ERROR; | |||
| } | |||
| @@ -94,8 +94,6 @@ int ParallelExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::ve | |||
| readyKernels.clear(); | |||
| readyKernels = std::move(newReadyKernels); | |||
| } | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite | |||
| @@ -25,7 +25,9 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
namespace {
// Upper bound for a single tensor-buffer allocation: 300 MiB.
// A typed, scoped constexpr replaces the legacy #define; keeping both (as the
// stripped diff did) would make the preprocessor rewrite this declaration
// into invalid code.
constexpr int kMaxMallocSize = 1024 * 1024 * 300;
}  // namespace
| // Constructs a tensor descriptor: records dtype/format/category and takes the | |||
| // shape by move. No data buffer is allocated here (see MallocData). | |||
| Tensor::Tensor(const TypeId data_type, std::vector<int> shape, const schema::Format &format, Category category) | |||
| : data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {} | |||
| @@ -73,7 +75,7 @@ Tensor *Tensor::CopyTensor(const Tensor &src_tensor, bool copy_data) { | |||
| } | |||
| Tensor::~Tensor() { | |||
| if (nullptr != this->data_ && this->own_data_) { | |||
| if (this->data_ != nullptr && this->own_data_) { | |||
| if (this->allocator_ != nullptr) { | |||
| this->allocator_->Free(this->data_); | |||
| } else { | |||
| @@ -287,7 +289,7 @@ void Tensor::set_root_tensor(Tensor *tensor) { | |||
| } | |||
| int Tensor::MallocData(const AllocatorPtr allocator) { | |||
| if (nullptr != this->data_) { | |||
| if (this->data_ != nullptr) { | |||
| return RET_OK; | |||
| } | |||
| if (allocator != nullptr) { | |||
| @@ -303,7 +305,7 @@ int Tensor::MallocData(const AllocatorPtr allocator) { | |||
| } else { | |||
| this->data_ = allocator_->Malloc(data_size); | |||
| } | |||
| if (nullptr == this->data_) { | |||
| if (this->data_ == nullptr) { | |||
| MS_LOG(ERROR) << "Malloc tensor data failed, size=" << data_size; | |||
| return RET_ERROR; | |||
| } | |||
| @@ -312,13 +314,13 @@ int Tensor::MallocData(const AllocatorPtr allocator) { | |||
| } | |||
| void Tensor::FreeData() { | |||
| if (nullptr == this->data_) { | |||
| if (this->data_ == nullptr) { | |||
| return; | |||
| } | |||
| if (!this->own_data_) { | |||
| return; | |||
| } | |||
| if (nullptr == allocator_) { | |||
| if (allocator_ == nullptr) { | |||
| free(this->data_); | |||
| this->data_ = nullptr; | |||
| } else { | |||
| @@ -23,7 +23,6 @@ | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite { | |||
| // Builds a TensorList: the base Tensor carries the outer shape (type fixed to | |||
| // kObjectTypeTensorType, format fixed to NHWC); element_shape_ (moved in) | |||
| // describes the shape shared by the contained tensors. | |||
| TensorList::TensorList(std::vector<int> shape, std::vector<int> element_shape, Category category) | |||
| : Tensor(kObjectTypeTensorType, std::move(shape), schema::Format::Format_NHWC, category), | |||
| element_shape_(std::move(element_shape)) {} | |||
| @@ -300,5 +299,4 @@ STATUS TensorList::Decode(const int *data) { | |||
| } | |||
| bool TensorList::IsConst() const { return this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR; } | |||
| } // namespace mindspore::lite | |||
| @@ -238,8 +238,8 @@ int AnfExporter::SetGraphInputIndex(const std::unique_ptr<schema::MetaGraphT> &m | |||
| int AnfExporter::SetGraphoutputIndex(const CNodePtr &cnode, const size_t subgraph_index, | |||
| const std::unique_ptr<schema::MetaGraphT> &meta_graphT, | |||
| schema::CNodeT *return_node) { | |||
| MS_ASSERT(nullptr != meta_graphT); | |||
| MS_ASSERT(nullptr != return_node); | |||
| MS_ASSERT(meta_graphT != nullptr); | |||
| MS_ASSERT(return_node != nullptr); | |||
| for (size_t i = 1; i < cnode->inputs().size(); i++) { | |||
| auto input_node = cnode->input(i); | |||
| if (input_node == nullptr) { | |||
| @@ -556,8 +556,8 @@ int AnfExporter::ConvertInputValueNode(const CNodePtr &cnode, size_t index, cons | |||
| int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT, | |||
| schema::CNodeT *fb_node) { | |||
| MS_ASSERT(nullptr != meta_graphT); | |||
| MS_ASSERT(nullptr != fb_node); | |||
| MS_ASSERT(meta_graphT != nullptr); | |||
| MS_ASSERT(fb_node != nullptr); | |||
| if (cnode->inputs().size() <= 1) { | |||
| return RET_OK; | |||
| } | |||
| @@ -36,6 +36,9 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| namespace { | |||
| // Bound passed to std::min() to cap how many compared string entries are printed. | |||
| constexpr int kNumPrintMin = 5; | |||
| } | |||
| static const char *DELIM_COLON = ":"; | |||
| static const char *DELIM_COMMA = ","; | |||
| static const char *DELIM_SLASH = "/"; | |||
| @@ -376,7 +379,7 @@ int Benchmark::CompareStringData(const std::string &name, tensor::MSTensor *tens | |||
| std::vector<std::string> calib_strings = iter->second->strings_data; | |||
| std::vector<std::string> output_strings = MSTensorToStrings(tensor); | |||
| size_t compare_num = std::min(calib_strings.size(), output_strings.size()); | |||
| size_t print_num = std::min(compare_num, static_cast<size_t>(5)); | |||
| size_t print_num = std::min(compare_num, static_cast<size_t>(kNumPrintMin)); | |||
| std::cout << "Data of node " << name << " : " << std::endl; | |||
| for (size_t i = 0; i < compare_num; i++) { | |||
| @@ -703,8 +706,7 @@ void BenchmarkFlags::InitInputDataList() { | |||
| } | |||
| void BenchmarkFlags::InitResizeDimsList() { | |||
| std::string content; | |||
| content = this->resize_dims_in_; | |||
| std::string content = this->resize_dims_in_; | |||
| std::vector<int> shape; | |||
| auto shape_strs = StringSplit(content, std::string(DELIM_COLON)); | |||
| for (const auto &shape_str : shape_strs) { | |||
| @@ -268,7 +268,7 @@ int AnfTransform::DoQuantize(const FuncGraphPtr &old_graph, const converter::Fla | |||
| } | |||
| FuncGraphPtr AnfTransform::TransformFuncGraph(const FuncGraphPtr &old_graph, const converter::Flags *config) { | |||
| MS_ASSERT(nullptr != old_graph); | |||
| MS_ASSERT(old_graph != nullptr); | |||
| if (config == nullptr) { | |||
| MS_LOG(ERROR) << "config should be specified"; | |||
| return nullptr; | |||
| @@ -107,7 +107,7 @@ schema::MetaGraphT *Converter::Convert(const std::unique_ptr<converter::Flags> & | |||
| int RunConverter(int argc, const char **argv) { | |||
| std::ostringstream oss; | |||
| std::unique_ptr<converter::Flags> flags(new (std::nothrow) converter::Flags); | |||
| auto flags = std::make_unique<converter::Flags>(); | |||
| if (flags == nullptr) { | |||
| oss.clear(); | |||
| oss << "NEW FLAGS ERROR:" << RET_MEMORY_FAILED << " " << GetErrorInfo(RET_MEMORY_FAILED); | |||
| @@ -112,7 +112,6 @@ STATUS FusionPass::MatchPatterns(schema::MetaGraphT *graph) { | |||
| STATUS FusionPass::MatchOnePattern(schema::MetaGraphT *graph, FusionPattern *pattern) { | |||
| MS_ASSERT(graph != nullptr); | |||
| MS_ASSERT(pattern != nullptr); | |||
| // std::vector<std::unordered_map<std::string, Path *>> patternMatchPaths; | |||
| auto outputOp = pattern->GetPatternOp(pattern->GetOutput()); | |||
| if (outputOp == nullptr) { | |||
| MS_LOG(ERROR) << "Can not find the output of the pattern"; | |||
| @@ -23,8 +23,6 @@ | |||
| namespace mindspore { | |||
| namespace lite { | |||
| // using namespace std; | |||
| // Creates a named fusion pattern; the name is moved into the member to avoid a copy. | |||
| FusionPattern::FusionPattern(std::string name) { this->name = std::move(name); } | |||
| FusionPattern::~FusionPattern() = default; | |||
| @@ -50,7 +48,7 @@ FusionPattern &FusionPattern::AddPatternOp(const std::string &id, const std::vec | |||
| hasError = true; | |||
| } | |||
| std::shared_ptr<PatternOp> op(new PatternOp()); | |||
| auto op = std::make_shared<PatternOp>(); | |||
| if (op == nullptr) { | |||
| MS_LOG(ERROR) << "new an object failed"; | |||
| hasError = true; | |||
| @@ -44,7 +44,7 @@ STATUS MulAddFusionPass::DefinePattern() { | |||
| baOp->types = {schema::PrimitiveType_AddFusion}; | |||
| baOp->left = mulOp; | |||
| std::unique_ptr<FusionPattern> fusionPattern(new (std::nothrow) FusionPattern("MulAddFusion")); | |||
| auto fusionPattern = std::make_unique<FusionPattern>("MulAddFusion"); | |||
| if (fusionPattern == nullptr) { | |||
| MS_LOG(ERROR) << "new fusionPattern failed"; | |||
| return RET_ERROR; | |||
| @@ -137,7 +137,7 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt | |||
| MS_ASSERT(addNode != nullptr); | |||
| // replace mulNode as scale | |||
| mulNode->primitive->value.type = schema::PrimitiveType_ScaleFusion; | |||
| std::unique_ptr<ScaleFusionT> scaleParam(new (std::nothrow) ScaleFusionT()); | |||
| auto scaleParam = std::make_unique<ScaleFusionT>(); | |||
| if (scaleParam == nullptr) { | |||
| MS_LOG(ERROR) << "new transposeParam failed"; | |||
| return RET_ERROR; | |||
| @@ -160,7 +160,7 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt | |||
| } | |||
| } else { | |||
| // replace addnode as activation | |||
| std::unique_ptr<ActivationT> activationParam(new ActivationT()); | |||
| auto activationParam = std::make_unique<ActivationT>(); | |||
| MS_ASSERT(addNode->primitive != nullptr); | |||
| MS_ASSERT(addNode->primitive->value.AsAddFusion() != nullptr); | |||
| activationParam->activation_type = addNode->primitive->value.AsAddFusion()->activation_type; | |||
| @@ -97,7 +97,7 @@ STATUS QuantCastFusionPass::DefinePattern() { | |||
| dstOp->types = {schema::PrimitiveType_QuantDTypeCast}; | |||
| dstOp->left = srcOp; | |||
| std::unique_ptr<FusionPattern> fusionPattern(new (std::nothrow) FusionPattern(kQuantCastFusionPattern)); | |||
| auto fusionPattern = std::make_unique<FusionPattern>(kQuantCastFusionPattern); | |||
| if (fusionPattern == nullptr) { | |||
| MS_LOG(ERROR) << "new fusionPattern failde"; | |||
| return RET_ERROR; | |||
| @@ -122,7 +122,7 @@ STATUS QuantCastFusionPass::DefinePattern() { | |||
| dstOp->types = {schema::PrimitiveType_QuantDTypeCast}; | |||
| dstOp->left = formatOp; | |||
| std::unique_ptr<FusionPattern> fusionPattern(new (std::nothrow) FusionPattern(kQuantCastPassFusionPattern)); | |||
| auto fusionPattern = std::make_unique<FusionPattern>(kQuantCastPassFusionPattern); | |||
| if (fusionPattern == nullptr) { | |||
| MS_LOG(ERROR) << "new fusionPattern failde"; | |||
| return RET_ERROR; | |||
| @@ -74,7 +74,7 @@ STATUS BatchNormConvertScalePass::ConvertBNToScale(MetaGraphT *graph, const std: | |||
| MS_ASSERT(graph != nullptr); | |||
| MS_ASSERT(bnNode != nullptr); | |||
| bnNode->primitive->value.type = schema::PrimitiveType_ScaleFusion; | |||
| std::unique_ptr<ScaleFusionT> scaleParam(new (std::nothrow) ScaleFusionT()); | |||
| auto scaleParam = std::make_unique<ScaleFusionT>(); | |||
| if (scaleParam == nullptr) { | |||
| MS_LOG(ERROR) << "new scaleParam failed"; | |||
| return RET_ERROR; | |||
| @@ -101,7 +101,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std | |||
| MS_ASSERT(graph != nullptr); | |||
| MS_ASSERT(bnNode != nullptr); | |||
| GetTransParam(graph, bnNode); | |||
| newScaleWeightTensor = std::unique_ptr<TensorT>(new (std::nothrow) TensorT); | |||
| newScaleWeightTensor = std::make_unique<TensorT>(); | |||
| if (newScaleWeightTensor == nullptr) { | |||
| MS_LOG(ERROR) << "new weightTensor failed"; | |||
| return RET_ERROR; | |||
| @@ -123,7 +123,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std | |||
| return RET_ERROR; | |||
| } | |||
| newScaleBiasTensor = std::unique_ptr<TensorT>(new (std::nothrow) TensorT); | |||
| newScaleBiasTensor = std::make_unique<TensorT>(); | |||
| if (newScaleBiasTensor == nullptr) { | |||
| MS_LOG(ERROR) << "new weightTensor failed"; | |||
| return RET_ERROR; | |||
| @@ -267,7 +267,7 @@ NodeIter DTypeTransPass::InsertDTypeTransNode(schema::MetaGraphT *graph, NodeIte | |||
| } else { | |||
| tile_name = exist_node_name + "_post"; | |||
| } | |||
| auto trans_node = std::unique_ptr<CNodeT>(new (std::nothrow) CNodeT); | |||
| auto trans_node = std::make_unique<CNodeT>(); | |||
| if (trans_node == nullptr) { | |||
| MS_LOG(ERROR) << "new TransNode failed"; | |||
| *error_code = RET_ERROR; | |||
| @@ -78,7 +78,7 @@ STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor, int32_t index) | |||
| STATUS ComputeDataToInt32(const std::unique_ptr<TensorT> &tensor) { | |||
| MS_ASSERT(tensor != nullptr); | |||
| auto bShapeSize = GetShapeSize(*(tensor)); | |||
| std::unique_ptr<int32_t[]> qDatas(new (std::nothrow) int32_t[bShapeSize]); | |||
| auto qDatas = std::make_unique<int32_t[]>(bShapeSize); | |||
| if (qDatas == nullptr) { | |||
| MS_LOG(ERROR) << "new qDatas failed"; | |||
| return RET_ERROR; | |||