| @@ -12,6 +12,8 @@ set(MS_VERSION_REVISION 0) | |||
| set(DIR_PREFIX mindspore-lite) | |||
| set(MS_VERSION ${MS_VERSION_MAJOY}.${MS_VERSION_MINOR}.${MS_VERSION_REVISION}) | |||
| set(MAIN_DIR ${DIR_PREFIX}-${MS_VERSION}) | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}") | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}") | |||
| if (SUPPORT_GPU) | |||
| set(PROCESS_UNIT gpu) | |||
| @@ -14,8 +14,8 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_INCLUDE_MS_TENSOR_H_ | |||
| #define MINDSPORE_INCLUDE_MS_TENSOR_H_ | |||
| #ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ | |||
| #define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ | |||
| #include <utility> | |||
| #include <vector> | |||
| @@ -105,4 +105,4 @@ class MS_API MSTensor { | |||
| }; | |||
| } // namespace tensor | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_INCLUDE_MS_TENSOR_H_ | |||
| #endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ | |||
| @@ -14,27 +14,21 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_CAST_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_CAST_CONST_FOLD_PASS_H | |||
| #ifndef MINDSPORE_LITE_INCLUDE_VERSION_H_ | |||
| #define MINDSPORE_LITE_INCLUDE_VERSION_H_ | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include <string> | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class CastConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| CastConstFoldPass() : ConstFoldPass(OpT_Cast) {} | |||
| ~CastConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| /// \brief Global method to get a version string. | |||
| /// | |||
| /// \return The version string of MindSpore Lite. | |||
| std::string Version() { | |||
| return "MindSpore Lite " + std::to_string(MS_VERSION_MAJOY) + "." + std::to_string(MS_VERSION_MINOR) + "." + | |||
| std::to_string(MS_VERSION_REVISION); | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_CAST_CONST_FOLD_PASS_H | |||
| #endif  // MINDSPORE_LITE_INCLUDE_VERSION_H_ | |||
| @@ -31,7 +31,7 @@ public class LiteSession { | |||
| private long sessionPtr; | |||
| LiteSession() { | |||
| public LiteSession() { | |||
| this.sessionPtr = 0; | |||
| } | |||
| @@ -0,0 +1,25 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| package com.mindspore.lite; | |||
/**
 * Exposes the MindSpore Lite version string to Java callers.
 *
 * <p>The native library is loaded once, when this class is first referenced.
 */
public class Version {
    static {
        // Loads libmindspore-lite-jni, which provides the native version() implementation
        // (see Java_com_mindspore_lite_Version_version on the C++ side).
        System.loadLibrary("mindspore-lite-jni");
    }

    /**
     * @return the version string reported by the native MindSpore Lite runtime.
     */
    public static native String version();
}
| @@ -0,0 +1,23 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include <jni.h> | |||
| #include "common/ms_log.h" | |||
| #include "include/version.h" | |||
| extern "C" JNIEXPORT jstring JNICALL Java_com_mindspore_lite_Version_version(JNIEnv *env, jobject thiz) { | |||
| return env->NewStringUTF(mindspore::lite::Version().c_str()); | |||
| } | |||
| @@ -113,8 +113,6 @@ int Executor::TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format | |||
| MS_ASSERT(nullptr != tensor); | |||
| MS_ASSERT(nullptr != allocator); | |||
| MS_ASSERT(4 == tensor->shape().size()); | |||
| // auto src_format = tensor->GetFormat(); | |||
| // todo | |||
| MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to " | |||
| << schema::EnumNameFormat(dst_format) << " in uint8"; | |||
| return RET_ERROR; | |||
| @@ -212,9 +212,7 @@ int LiteSession::CompileGraph(Model *model) { | |||
| return RET_OK; | |||
| } | |||
| std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() const { | |||
| return this->input_vec_; | |||
| } | |||
| std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() const { return this->input_vec_; } | |||
| int LiteSession::RunGraph(const session::KernelCallBack &before, const session::KernelCallBack &after) { | |||
| MS_EXCEPTION_IF_NULL(this->context_); | |||
| @@ -60,17 +60,17 @@ class LiteSession : public session::LiteSession { | |||
| int ConvertTensors(const lite::Model *model); | |||
| void InitGraphInOutTensors(const lite::Model *model); | |||
| // init this->inputs_ | |||
| void InitGraphInputTensors(const lite::Model *model); | |||
| // init this->input_vec_ | |||
| void InitGraphInputMSTensors(const lite::Model *model); | |||
| // init this->outputs_ | |||
| void InitGraphOutputTensors(const lite::Model *model); | |||
| // init this->input_map_ | |||
| void InitGraphInputMap(const lite::Model *model); | |||
| // init this->output_map_ | |||
| void InitGraphOutputMap(const lite::Model *model); | |||
| // resize inputs | |||
| int ResizeInputs(const std::vector<mindspore::tensor::MSTensor *> &inputs); | |||
| protected: | |||
| @@ -380,34 +380,6 @@ int ModelImpl::BuildOps() { | |||
| auto srcPrim = cNode->primitive(); | |||
| this->ops_[name] = CopyPrimitive(srcPrim); | |||
| // flatbuffers::FlatBufferBuilder fbb(1024); | |||
| // schema::Conv2DBuilder conv2DBuilder(fbb); | |||
| // conv2DBuilder.add_padMode(srcPrim->value_as_Conv2D()->padMode()); | |||
| // conv2DBuilder.add_channelOut(srcPrim->value_as_Conv2D()->channelOut()); | |||
| // conv2DBuilder.add_channelIn(srcPrim->value_as_Conv2D()->channelIn()); | |||
| // conv2DBuilder.add_strideH(srcPrim->value_as_Conv2D()->strideH()); | |||
| // conv2DBuilder.add_strideW(srcPrim->value_as_Conv2D()->strideW()); | |||
| // conv2DBuilder.add_dilateH(srcPrim->value_as_Conv2D()->dilateH()); | |||
| // conv2DBuilder.add_dilateW(srcPrim->value_as_Conv2D()->dilateW()); | |||
| // conv2DBuilder.add_kernelH(srcPrim->value_as_Conv2D()->kernelH()); | |||
| // conv2DBuilder.add_kernelW(srcPrim->value_as_Conv2D()->kernelW()); | |||
| // conv2DBuilder.add_padUp(srcPrim->value_as_Conv2D()->padUp()); | |||
| // conv2DBuilder.add_padDown(srcPrim->value_as_Conv2D()->padDown()); | |||
| // conv2DBuilder.add_padLeft(srcPrim->value_as_Conv2D()->padLeft()); | |||
| // conv2DBuilder.add_padRight(srcPrim->value_as_Conv2D()->padRight()); | |||
| // conv2DBuilder.add_format(srcPrim->value_as_Conv2D()->format()); | |||
| // conv2DBuilder.add_group(srcPrim->value_as_Conv2D()->group()); | |||
| // conv2DBuilder.add_activationType(srcPrim->value_as_Conv2D()->activationType()); | |||
| // schema::PrimitiveBuilder primBuilder(fbb); | |||
| // primBuilder.add_value_type(srcPrim->value_type()); | |||
| // primBuilder.add_value(conv2DBuilder.Finish()); | |||
| // | |||
| // fbb.Finish(conv2DBuilder.Finish()); | |||
| // auto buf = fbb.GetBufferPointer(); | |||
| // auto conv2D = flatbuffers::GetRoot<schema::Conv2D>(buf); | |||
| // fbb.Clear(); | |||
| // | |||
| // return const_cast<mindspore::predict::OpDef *>(opDef); | |||
| } | |||
| return 0; | |||
| } | |||
| @@ -137,7 +137,6 @@ int Pooling::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso | |||
| MS_LOG(ERROR) << "unsupported round mode."; | |||
| } | |||
| } | |||
| // todo: fmk type | |||
| auto input_shape = input->shape(); | |||
| input_shape.at(1) = output_h; | |||
| input_shape.at(2) = output_w; | |||
| @@ -1,7 +1,5 @@ | |||
| /** | |||
| * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). | |||
| * | |||
| * Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| @@ -1,7 +1,5 @@ | |||
| /** | |||
| * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). | |||
| * | |||
| * Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| @@ -33,7 +33,6 @@ class ParamValueLite : public Value { | |||
| size_t tensor_size() const { return tensor_size_; } | |||
| void set_tensor_size(size_t size) { tensor_size_ = size; } | |||
| // todo | |||
| void *tensor_addr() const { return tensor_addr_; } | |||
| void set_tensor_addr(void *addr) { tensor_addr_ = addr; } | |||
| @@ -254,7 +254,6 @@ OpParameter *PopulatePreluParameter(const mindspore::lite::PrimitiveC *primitive | |||
| OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primitive) { | |||
| auto pooling_primitive = dynamic_cast<const mindspore::lite::Pooling *>(primitive); | |||
| // todo use malloc instead | |||
| auto *pooling_param = new (std::nothrow) PoolingParameter(); | |||
| if (pooling_param == nullptr) { | |||
| MS_LOG(ERROR) << "new PoolingParameter failed."; | |||
| @@ -357,7 +356,6 @@ OpParameter *PopulateConvParameter(const mindspore::lite::PrimitiveC *primitive) | |||
| auto conv_primitive = dynamic_cast<const mindspore::lite::Conv2D *>(primitive); | |||
| conv_param->kernel_h_ = conv_primitive->GetKernelH(); | |||
| conv_param->kernel_w_ = conv_primitive->GetKernelW(); | |||
| // todo format | |||
| conv_param->group_ = conv_primitive->GetGroup(); | |||
| conv_param->stride_h_ = conv_primitive->GetStrideH(); | |||
| conv_param->stride_w_ = conv_primitive->GetStrideW(); | |||
| @@ -447,7 +445,6 @@ OpParameter *PopulateDeconvDwParameter(const mindspore::lite::PrimitiveC *primit | |||
| auto conv_primitive = dynamic_cast<const mindspore::lite::DeDepthwiseConv2D *>(primitive); | |||
| conv_param->kernel_h_ = conv_primitive->GetKernelH(); | |||
| conv_param->kernel_w_ = conv_primitive->GetKernelW(); | |||
| // todo format, group | |||
| conv_param->stride_h_ = conv_primitive->GetStrideH(); | |||
| conv_param->stride_w_ = conv_primitive->GetStrideW(); | |||
| @@ -20,7 +20,6 @@ | |||
| using mindspore::schema::Format; | |||
| namespace mindspore::kernel { | |||
| LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { | |||
| // todo | |||
| if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { | |||
| return PackNHWCToNC4HW4Fp32; | |||
| } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { | |||
| @@ -39,7 +38,6 @@ LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format ds | |||
| } | |||
| LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { | |||
| // todo | |||
| if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { | |||
| return PackNHWCToNHWC4Int8; | |||
| } else { | |||
| @@ -48,7 +46,6 @@ LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format ds | |||
| } | |||
| LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) { | |||
| // todo | |||
| switch (data_type) { | |||
| case kNumberTypeInt8: | |||
| return LayoutTransformInt8(src_format, dst_format); | |||
| @@ -236,7 +236,6 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector<kernel::LiteKer | |||
| kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector<tensor::Tensor *> &in_tensors, | |||
| const std::vector<tensor::Tensor *> &out_tensors, | |||
| const mindspore::lite::PrimitiveC *primitive) { | |||
| // todo: support NPU, APU | |||
| MS_ASSERT(nullptr != primitive); | |||
| auto data_type = in_tensors.front()->data_type(); | |||
| kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, static_cast<schema::PrimitiveType>(primitive->Type())}; | |||
| @@ -77,7 +77,6 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) { | |||
| return false; | |||
| } | |||
| auto value_node = utils::cast<ValueNodePtr>(indexNode); | |||
| map_remove_get_item_[tuple_get_item_node->input(1)->fullname_with_scope()] = GetValue<int>(value_node->value()); | |||
| } else { | |||
| inputs.emplace_back(cnode->input(i)); | |||
| } | |||
| @@ -201,12 +200,7 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) { | |||
| primT->value.type == schema::PrimitiveType_MakeTuple) { | |||
| continue; | |||
| } | |||
| map_remove_get_item_.clear(); | |||
| RemoveIfMakeTuple(cnode); | |||
| // if (!RemoveIfTupleGetItem(cnode)) { | |||
| // MS_LOG(ERROR) << "RemoveIfTupleGetItem failed"; | |||
| // return nullptr; | |||
| // } | |||
| if (primT->value.type == schema::PrimitiveType_Return) { | |||
| AddOutPutIfReturn(meta_graphT, cnode); | |||
| @@ -54,7 +54,6 @@ class AnfExporter { | |||
| private: | |||
| std::map<std::string, int> node_id_map_; | |||
| std::vector<schema::CNodeT *> graph_input_nodes_; | |||
| std::map<std::string, int> map_remove_get_item_; | |||
| }; | |||
| schema::MetaGraphT *Export(const FuncGraphPtr &func_graph); | |||
| @@ -158,7 +158,6 @@ int AnfImporterFromMetaGraphT::ConverterCNode() { | |||
| MS_LOG(ERROR) << "Can't find input node."; | |||
| return RET_ERROR; | |||
| } | |||
| // todo: CheckInputNodeType, the first node should be op; | |||
| op_inputs.push_back(node); | |||
| } | |||
| auto new_cnode = func_graph_->NewCNode(op_inputs); | |||
| @@ -72,577 +72,6 @@ static std::unordered_map<int, TypeId> kDefaultValueSwitchMap{ | |||
| {onnx::TensorProto_DataType_STRING, kObjectTypeString}, | |||
| }; | |||
| #if 0 | |||
| std::shared_ptr<ValueTuple> ParserScalarAttrValue(const std::string &attr_name, | |||
| const std::unordered_map<string, ValuePtr> &kv) { | |||
| std::string str = attr_name; | |||
| auto replace = [&](const string &orgStr, const string &newStr) { | |||
| std::string::size_type pos(0); | |||
| while ((pos = str.find(orgStr)) != std::string::npos) { | |||
| str.replace(pos, orgStr.length(), newStr); | |||
| } | |||
| return str; | |||
| }; | |||
| // remove "scalar:" | |||
| str = replace("scalar:", ""); | |||
| // remove "Tuple" | |||
| str = replace("Tuple", ""); | |||
| // remove "List" | |||
| str = replace("List", ""); | |||
| std::stack<std::string> rules; | |||
| std::stack<ValuePtr> value; | |||
| int num = 0, count = 0; | |||
| for (size_t i = 0; i < str.length(); i++) { | |||
| if (str[i] == '[') { | |||
| rules.push("["); | |||
| } else if (str[i] == ']') { | |||
| // rules | |||
| std::vector<ValuePtr> vec; | |||
| while (rules.top() != "[") { | |||
| rules.pop(); | |||
| vec.push_back(value.top()); | |||
| value.pop(); | |||
| } | |||
| // pop "[" | |||
| rules.pop(); | |||
| // make tuple for names | |||
| std::string res = "dummy"; | |||
| // make tuple for values | |||
| reverse(vec.begin(), vec.end()); | |||
| auto vt = std::make_shared<ValueTuple>(vec); | |||
| if (rules.empty() && value.empty()) { | |||
| return vt; | |||
| } | |||
| rules.push(res); | |||
| value.push(vt); | |||
| } else if (str[i] == ',') { | |||
| continue; | |||
| } else { | |||
| count++; | |||
| if (str[i + 1] == '[' || str[i + 1] == ']' || str[i + 1] == ',') { | |||
| auto value_name = str.substr(i - count + 1, count); | |||
| value.push(kv.at(value_name)); | |||
| rules.push(value_name); | |||
| count = 0; | |||
| num++; | |||
| } | |||
| } | |||
| } | |||
| return {}; | |||
| } | |||
| std::shared_ptr<abstract::AbstractTuple> | |||
| ParserAttrShape(const std::string &attr_name, const std::unordered_map<string, abstract::AbstractTensorPtr> &kv) { | |||
| std::string str = attr_name; | |||
| auto replace = [&](const string &orgStr, const string &newStr) { | |||
| std::string::size_type pos(0); | |||
| while ((pos = str.find(orgStr)) != std::string::npos) { | |||
| str.replace(pos, orgStr.length(), newStr); | |||
| } | |||
| return str; | |||
| }; | |||
| // remove "scalar:" | |||
| str = replace("shape:", ""); | |||
| // remove "Tuple" | |||
| str = replace("Tuple", ""); | |||
| // remove "List" | |||
| str = replace("List", ""); | |||
| std::stack<std::string> rules; | |||
| std::stack<abstract::AbstractBasePtr> value; | |||
| int num = 0, count = 0; | |||
| for (size_t i = 0; i < str.length(); i++) { | |||
| if (str[i] == '[') { | |||
| rules.push("["); | |||
| } else if (str[i] == ']') { | |||
| // rules | |||
| std::vector<abstract::AbstractBasePtr> vec; | |||
| while (rules.top() != "[") { | |||
| rules.pop(); | |||
| vec.push_back(value.top()); | |||
| value.pop(); | |||
| } | |||
| // pop "[" | |||
| rules.pop(); | |||
| // make tuple for names | |||
| std::string res = "dummy"; | |||
| // make tuple for values | |||
| reverse(vec.begin(), vec.end()); | |||
| auto vt = std::make_shared<abstract::AbstractTuple>(vec); | |||
| if (rules.empty() && value.empty()) { | |||
| return vt; | |||
| } | |||
| rules.push(res); | |||
| value.push(vt); | |||
| } else if (str[i] == ',') { | |||
| continue; | |||
| } else { | |||
| count++; | |||
| if (str[i + 1] == '[' || str[i + 1] == ']' || str[i + 1] == ',') { | |||
| auto value_name = str.substr(i - count + 1, count); | |||
| value.push(kv.at(value_name)); | |||
| rules.push(value_name); | |||
| count = 0; | |||
| num++; | |||
| } | |||
| } | |||
| } | |||
| return {}; | |||
| } | |||
| #define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \ | |||
| ValuePtr ParseAttrInScalar_##type##_##valuetype(const onnx::TensorProto &attr_tensor) { \ | |||
| if (attr_tensor.type##_data_size() == 1) { \ | |||
| auto value = static_cast<valuetype>(attr_tensor.type##_data(0)); \ | |||
| return MakeValue<valuetype>(value); \ | |||
| } else { \ | |||
| MS_LOG(ERROR) << "size of scalar tensor doesn't equal 1!"; \ | |||
| } \ | |||
| return {}; \ | |||
| } | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(double, double) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(float, float) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(string, string) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(int32, int32) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(int32, bool) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(int64, int64) | |||
| PARSE_ONNXATTR_IN_SCALAR_FORM(uint64, uint64) | |||
| bool AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node, | |||
| const onnx::ValueInfoProto &value_proto) { | |||
| MS_EXCEPTION_IF_NULL(node); | |||
| if (!value_proto.has_type() || !value_proto.has_name()) { | |||
| MS_LOG(ERROR) << "onnx ValueInfoProto has no type or name! "; | |||
| return false; | |||
| } | |||
| node->set_name(value_proto.name()); | |||
| const auto &type_proto = value_proto.type(); | |||
| if (!type_proto.has_tensor_type()) { | |||
| MS_LOG(ERROR) << "onnx TypeProto has no tesor_type! "; | |||
| return false; | |||
| } | |||
| const onnx::TypeProto_Tensor &tensor_typeproto = type_proto.tensor_type(); | |||
| if (!tensor_typeproto.has_elem_type() || !tensor_typeproto.has_shape()) { | |||
| MS_LOG(ERROR) << "onnx TypeProto_Tensor has no elem_type or shape! "; | |||
| return false; | |||
| } | |||
| const onnx::TensorShapeProto &tensor_shape = tensor_typeproto.shape(); | |||
| std::vector<int> shape; | |||
| for (int i = 0; i < tensor_shape.dim_size(); ++i) { | |||
| shape.push_back(tensor_shape.dim(i).dim_value()); | |||
| } | |||
| if (kDefaultValueSwitchMap.find(tensor_typeproto.elem_type()) == kDefaultValueSwitchMap.end()) { | |||
| MS_LOG(ERROR) << "onnx TypeProto_Tensor elem_type is not support yet!"; | |||
| return false; | |||
| } | |||
| auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[tensor_typeproto.elem_type()]); | |||
| auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape); | |||
| node->set_abstract(abstract_tensor); | |||
| if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) { | |||
| tensor::Tensor *tensor_info = new tensor::Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); | |||
| MS_EXCEPTION_IF_NULL(tensor_info); | |||
| tensor_info->MallocData(); | |||
| const onnx::TensorProto initialize_proto = default_para_map_[value_proto.name()]; | |||
| std::string initial_data = initialize_proto.raw_data(); | |||
| auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->Data()); | |||
| MS_EXCEPTION_IF_NULL(tensor_data_buf); | |||
| auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), initial_data.data(), initial_data.size()); | |||
| if (EOK != ret) { | |||
| MS_LOG(ERROR) << "memcpy_s error"; | |||
| return false; | |||
| } | |||
| ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); | |||
| MS_EXCEPTION_IF_NULL(param_value); | |||
| param_value->set_tensor_addr(tensor_data_buf); | |||
| param_value->set_tensor_size(tensor_info->Size()); | |||
| node->set_default_param(param_value); | |||
| } | |||
| anfnode_build_map_[value_proto.name()] = node; | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, | |||
| const onnx::GraphProto &importProto) { | |||
| MS_EXCEPTION_IF_NULL(outputFuncGraph); | |||
| MS_LOG(INFO) << "Parameters had default paramerer size is: " << importProto.initializer_size(); | |||
| for (int i = 0; i < importProto.initializer_size(); ++i) { | |||
| const onnx::TensorProto &initializer_proto = importProto.initializer(i); | |||
| if (!initializer_proto.has_name()) { | |||
| MS_LOG(ERROR) << "initializer vector of onnx GraphProto has no name at index: " << i; | |||
| return false; | |||
| } | |||
| default_para_map_[initializer_proto.name()] = initializer_proto; | |||
| } | |||
| MS_LOG(INFO) << "all parameters size: " << importProto.input_size(); | |||
| for (int i = 0; i < importProto.input_size(); ++i) { | |||
| const onnx::ValueInfoProto &input_proto = importProto.input(i); | |||
| if (!BuildParameterForFuncGraph(outputFuncGraph->add_parameter(), input_proto)) { | |||
| MS_LOG(ERROR) << "Build parameter for funcgraph fail at index: " << i; | |||
| return false; | |||
| } | |||
| } | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name, | |||
| const onnx::TensorProto &attr_tensor) { | |||
| MS_EXCEPTION_IF_NULL(prim); | |||
| const int attr_tensor_type = attr_tensor.data_type(); | |||
| if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) { | |||
| MS_LOG(ERROR) << "Obtain attr in type-form has not support input type:" << attr_tensor_type; | |||
| return false; | |||
| } | |||
| prim->AddAttr(attr_name, TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type])); | |||
| return true; | |||
| } | |||
| ValuePtr AnfImporterFromProtobuf::ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor) { | |||
| const int attr_tensor_type = attr_tensor.data_type(); | |||
| switch (attr_tensor_type) { | |||
| case onnx::TensorProto_DataType_STRING: { | |||
| return ParseAttrInScalar_string_string(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_INT32: { | |||
| return ParseAttrInScalar_int32_int32(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_INT64: { | |||
| return ParseAttrInScalar_int64_int64(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_UINT64: { | |||
| return ParseAttrInScalar_uint64_uint64(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_FLOAT: { | |||
| return ParseAttrInScalar_float_float(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_DOUBLE: { | |||
| return ParseAttrInScalar_double_double(attr_tensor); | |||
| } | |||
| case onnx::TensorProto_DataType_BOOL: { | |||
| return ParseAttrInScalar_int32_bool(attr_tensor); | |||
| } | |||
| default: | |||
| MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; | |||
| return {}; | |||
| } | |||
| return {}; | |||
| } | |||
| bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name, | |||
| const onnx::TensorProto &attr_tensor) { | |||
| MS_EXCEPTION_IF_NULL(prim); | |||
| MS_LOG(ERROR) << "parse attr type don't support attr type is tensor"; | |||
| return false; | |||
| } | |||
| bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto) { | |||
| MS_EXCEPTION_IF_NULL(prim); | |||
| const std::string &attr_name = attr_proto.name(); | |||
| if (!attr_proto.has_ref_attr_name()) { | |||
| MS_LOG(ERROR) << "CNode parse attr type has no ref_attr_name"; | |||
| return false; | |||
| } | |||
| const std::string &ref_attr_name = attr_proto.ref_attr_name(); | |||
| string type; | |||
| std::size_t pos(0); | |||
| if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("scalar:").length() - 1); | |||
| } else if ((pos = ref_attr_name.find("type:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("type:").length() - 1); | |||
| } else if ((pos = ref_attr_name.find("tensor:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("tensor:").length() - 1); | |||
| } | |||
| std::unordered_map<std::string, ValuePtr> kv; | |||
| for (int i = 0; i < attr_proto.tensors_size(); i++) { | |||
| const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); | |||
| switch (kParseTypeSwitchMap[type]) { | |||
| case FORM_PARSE_TYPE: { | |||
| return ObtainCNodeAttrInTypeForm(prim, attr_name, attr_tensor); | |||
| } | |||
| case FORM_PARSE_SCALAR: { | |||
| auto res = ObtainCNodeAttrInScalarForm(attr_tensor); | |||
| kv.insert(std::pair<string, ValuePtr>(attr_tensor.name(), res)); | |||
| break; | |||
| } | |||
| case FORM_PARSE_TENSOR: { | |||
| return ObtainCNodeAttrInTensorForm(prim, attr_name, attr_tensor); | |||
| } | |||
| default: | |||
| MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; | |||
| return false; | |||
| } | |||
| } | |||
| if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) { | |||
| if (kv.size() == 1) { | |||
| std::unordered_map<std::string, ValuePtr>::iterator iter = kv.begin(); | |||
| prim->AddAttr(attr_name, iter->second); | |||
| } else { | |||
| auto res = ParserScalarAttrValue(ref_attr_name, kv); | |||
| prim->AddAttr(attr_name, res); | |||
| } | |||
| } | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &value_node_name, | |||
| const onnx::TensorProto &attr_tensor) { | |||
| const int attr_tensor_type = attr_tensor.data_type(); | |||
| std::vector<int> shape; | |||
| for (int i = 0; i < attr_tensor.dims_size(); ++i) { | |||
| shape.push_back(attr_tensor.dims(i)); | |||
| } | |||
| tensor::TensorPtr tensor_info = std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape); | |||
| tensor_info->MallocData(); | |||
| const std::string &tensor_buf = attr_tensor.raw_data(); | |||
| auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->Data()); | |||
| auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size()); | |||
| if (EOK != ret) { | |||
| MS_LOG(ERROR) << "memcpy_s error"; | |||
| return false; | |||
| } | |||
| auto new_value_node = NewValueNode(MakeValue(tensor_info)); | |||
| MS_EXCEPTION_IF_NULL(new_value_node); | |||
| auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type]); | |||
| auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape); | |||
| new_value_node->set_abstract(abstract_tensor); | |||
| anfnode_build_map_[value_node_name] = new_value_node; | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::ObtainValueNodeInTypeForm(const std::string &value_node_name, | |||
| const onnx::TensorProto &attr_tensor) { | |||
| const int attr_tensor_type = attr_tensor.data_type(); | |||
| if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) { | |||
| MS_LOG(ERROR) << "Obtain ValueNode attr in type-form has not support input type: " << attr_tensor_type; | |||
| return false; | |||
| } | |||
| auto new_value_node = NewValueNode(TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type])); | |||
| abstract::AbstractTypePtr abs_type = std::make_shared<abstract::AbstractType>(std::make_shared<TypeType>()); | |||
| new_value_node->set_abstract(abs_type); | |||
| anfnode_build_map_[value_node_name] = new_value_node; | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::GetAttrValueForValueNode(const std::string &value_node_name, | |||
| const onnx::AttributeProto &attr_proto) { | |||
| const std::string &attr_name = attr_proto.name(); | |||
| if (!attr_proto.has_ref_attr_name()) { | |||
| MS_LOG(ERROR) << "CNode parse attr type has no ref_attr_name"; | |||
| return false; | |||
| } | |||
| const std::string &ref_attr_name = attr_proto.ref_attr_name(); | |||
| string type; | |||
| std::size_t pos(0); | |||
| if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("scalar:").length() - 1); | |||
| } else if ((pos = ref_attr_name.find("type:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("type:").length() - 1); | |||
| } else if ((pos = ref_attr_name.find("tensor:")) != std::string::npos) { | |||
| type = ref_attr_name.substr(pos, string("tensor:").length() - 1); | |||
| } | |||
| std::unordered_map<std::string, ValuePtr> kv; | |||
| for (int i = 0; i < attr_proto.tensors_size(); i++) { | |||
| const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); | |||
| switch (kParseTypeSwitchMap[type]) { | |||
| case FORM_PARSE_TYPE: { | |||
| return ObtainValueNodeInTypeForm(value_node_name, attr_tensor); | |||
| } | |||
| case FORM_PARSE_SCALAR: { | |||
| auto res = ObtainCNodeAttrInScalarForm(attr_tensor); | |||
| kv.insert(std::pair<string, ValuePtr>(attr_tensor.name(), res)); | |||
| break; | |||
| } | |||
| case FORM_PARSE_TENSOR: { | |||
| return ObtainValueNodeInTensorForm(value_node_name, attr_tensor); | |||
| } | |||
| default: | |||
| MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; | |||
| return false; | |||
| } | |||
| } | |||
| ValueNodePtr new_value_node; | |||
| if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) { | |||
| if (kv.size() == 1) { | |||
| std::unordered_map<std::string, ValuePtr>::iterator iter = kv.begin(); | |||
| new_value_node = NewValueNode(iter->second); | |||
| new_value_node->set_abstract(iter->second->ToAbstract()); | |||
| } else { | |||
| auto value_ptr = ParserScalarAttrValue(ref_attr_name, kv); | |||
| new_value_node = NewValueNode(value_ptr); | |||
| new_value_node->set_abstract(value_ptr->ToAbstract()); | |||
| } | |||
| anfnode_build_map_[value_node_name] = new_value_node; | |||
| } | |||
| return true; | |||
| } | |||
| bool AnfImporterFromProtobuf::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto) { | |||
| const std::string &value_node_name = node_proto.output(0); | |||
| const onnx::AttributeProto &attr_proto = node_proto.attribute(0); | |||
| if (!attr_proto.has_ref_attr_name()) { | |||
| MS_LOG(ERROR) << "parse ValueNode don't have ref_attr_name"; | |||
| return false; | |||
| } | |||
| return GetAttrValueForValueNode(value_node_name, attr_proto); | |||
| } | |||
| std::unordered_map<std::string, abstract::AbstractTensorPtr> | |||
| AnfImporterFromProtobuf::GetAbstractForCNode(const onnx::AttributeProto &attr_proto) { | |||
| std::unordered_map<std::string, abstract::AbstractTensorPtr> kv; | |||
| for (int i = 0; i < attr_proto.tensors_size(); i++) { | |||
| std::vector<int> shape_vec; | |||
| const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); | |||
| for (int j = 0; j < attr_tensor.dims_size(); ++j) { | |||
| shape_vec.push_back(attr_tensor.dims(j)); | |||
| } | |||
| auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[attr_tensor.data_type()]); | |||
| auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vec); | |||
| kv.insert(std::pair<string, abstract::AbstractTensorPtr>(attr_tensor.name(), abstract_tensor)); | |||
| } | |||
| return kv; | |||
| } | |||
// Build a CNode in `outputFuncGraph` from an onnx NodeProto and register it in
// anfnode_build_map_ under the node's first output name.
// Returns nullptr when the op type is missing, an attribute fails to parse, or
// any input name has not been built yet (nodes must arrive in topological order).
CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph,
                                                         const onnx::NodeProto &node_proto) {
  MS_EXCEPTION_IF_NULL(outputFuncGraph);
  if (!node_proto.has_op_type()) {
    MS_LOG(ERROR) << "Get CNode op_type failed!";
    return nullptr;
  }
  const std::string &node_name = node_proto.output(0);
  // NOTE(review): the exporter appears to stash the full scope name in the
  // protobuf `domain` field — confirm against the exporter side.
  const std::string &fullname_with_scope = node_proto.domain();
  const std::string &node_type = node_proto.op_type();
  PrimitivePtr prim = std::make_shared<mindspore::Primitive>(node_type);
  MS_EXCEPTION_IF_NULL(prim);
  prim->set_instance_name(node_type);
  // kv collects output abstracts (from "shape:" attributes); every other
  // attribute is decoded into `prim`.
  std::unordered_map<std::string, abstract::AbstractTensorPtr> kv;
  string shape_ref_attr_name;
  for (int i = 0; i < node_proto.attribute_size(); ++i) {
    const onnx::AttributeProto &attr_proto = node_proto.attribute(i);
    if (attr_proto.ref_attr_name().find("shape:") != string::npos) {
      shape_ref_attr_name = attr_proto.ref_attr_name();
      kv = GetAbstractForCNode(attr_proto);
      continue;
    }
    if (!GetAttrValueForCNode(prim, attr_proto)) {
      MS_LOG(ERROR) << "Get CNode attr failed!";
      return nullptr;
    }
  }
  // CNode inputs: the primitive first, then previously built input nodes.
  std::vector<AnfNodePtr> inputs;
  inputs.clear();
  inputs.push_back(NewValueNode(prim));
  for (int i = 0; i < node_proto.input_size(); ++i) {
    const std::string &input_name = node_proto.input(i);
    if (anfnode_build_map_.find(input_name) == anfnode_build_map_.end()) {
      MS_LOG(ERROR) << node_name << " input " << i << input_name << "can't find in nodes have parsed";
      return nullptr;
    }
    inputs.push_back(anfnode_build_map_[input_name]);
  }
  CNodePtr cnode_ptr = outputFuncGraph->NewCNode(inputs);
  MS_EXCEPTION_IF_NULL(cnode_ptr);
  // Attach an abstract: no shape attrs -> tuple of input abstracts;
  // exactly one -> that tensor; several -> resolved via ParserAttrShape.
  if (0 == kv.size()) {
    AbstractBasePtrList elem;
    for (size_t index = 1; index < cnode_ptr->inputs().size(); ++index) {
      elem.push_back(cnode_ptr->input(index)->abstract());
    }
    cnode_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
  } else if (1 == kv.size()) {
    std::unordered_map<std::string, abstract::AbstractTensorPtr>::iterator iter = kv.begin();
    cnode_ptr->set_abstract(iter->second);
  } else {
    auto abstract = ParserAttrShape(shape_ref_attr_name, kv);
    cnode_ptr->set_abstract(abstract);
  }
  cnode_ptr->set_fullname_with_scope(fullname_with_scope);
  anfnode_build_map_[node_name] = cnode_ptr;
  return cnode_ptr;
}
// Attach the Return node of `outputFuncGraph`.
// Multiple graph outputs: wrap them in a MakeTuple and return that tuple.
// Single output: return `cnode_ptr` (the last CNode built) directly, with an
// abstract reconstructed from the output's declared type/shape.
// Always returns true; nullptr arguments trigger MS_EXCEPTION instead.
bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph,
                                                      const onnx::GraphProto &importProto,
                                                      const CNodePtr &cnode_ptr) {
  MS_EXCEPTION_IF_NULL(outputFuncGraph);
  MS_EXCEPTION_IF_NULL(cnode_ptr);
  std::vector<AnfNodePtr> inputs;
  if (importProto.output_size() > 1) {
    // MakeTuple over every named output; each must already be in anfnode_build_map_.
    inputs.clear();
    inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
    AbstractBasePtrList elem;
    for (int out_size = 0; out_size < importProto.output_size(); ++out_size) {
      const onnx::ValueInfoProto &output_node = importProto.output(out_size);
      const std::string &out_tuple = output_node.name();
      inputs.push_back(anfnode_build_map_[out_tuple]);
      elem.push_back(anfnode_build_map_[out_tuple]->abstract());
    }
    auto maketuple_ptr = outputFuncGraph->NewCNode(inputs);
    maketuple_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
    inputs.clear();
    inputs.push_back(NewValueNode(prim::kPrimReturn));
    inputs.push_back(maketuple_ptr);
    auto return_node = outputFuncGraph->NewCNode(inputs);
    MS_EXCEPTION_IF_NULL(return_node);
    outputFuncGraph->set_return(return_node);
    MS_LOG(INFO) << "Construct funcgraph finined, all success.";
  } else {
    // Single output: rebuild its abstract from the declared elem_type and dims.
    const onnx::ValueInfoProto &output_node = importProto.output(0);
    const onnx::TypeProto &output_typeproto = output_node.type();
    int output_type = output_typeproto.tensor_type().elem_type();
    std::vector<int> output_shape;
    for (int i = 0; i < output_typeproto.tensor_type().shape().dim_size(); ++i) {
      output_shape.push_back(output_typeproto.tensor_type().shape().dim(i).dim_value());
    }
    auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[output_type]);
    auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, output_shape);
    inputs.clear();
    inputs.push_back(NewValueNode(prim::kPrimReturn));
    inputs.push_back(cnode_ptr);
    auto return_node = outputFuncGraph->NewCNode(inputs);
    MS_EXCEPTION_IF_NULL(return_node);
    return_node->set_abstract(abstract_tensor);
    outputFuncGraph->set_return(return_node);
    MS_LOG(INFO) << "Construct funcgraph finined, all success!";
  }
  return true;
}
| bool AnfImporterFromProtobuf::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, | |||
| const onnx::GraphProto &importProto) { | |||
| MS_EXCEPTION_IF_NULL(outputFuncGraph); | |||
| MS_LOG(INFO) << "The CNdoe size : " << importProto.node_size(); | |||
| CNodePtr cnode_ptr = nullptr; | |||
| for (int i = 0; i < importProto.node_size(); ++i) { | |||
| const onnx::NodeProto &node_proto = importProto.node(i); | |||
| const std::string &node_type = node_proto.op_type(); | |||
| if (node_type == kConstantValueNode) { | |||
| if (!BuildValueNodeForFuncGraph(node_proto)) { | |||
| MS_LOG(ERROR) << "Build ValueNode for funcgraph fail at index: : " << i; | |||
| return false; | |||
| } | |||
| continue; | |||
| } | |||
| cnode_ptr = BuildCNodeForFuncGraph(outputFuncGraph, node_proto); | |||
| if (cnode_ptr == nullptr) { | |||
| MS_LOG(ERROR) << "Build CNode for funcgraph fail at index: : " << i; | |||
| return false; | |||
| } | |||
| } | |||
| BuildReturnForFuncGraph(outputFuncGraph, importProto, cnode_ptr); | |||
| return true; | |||
| } | |||
| #else | |||
| #define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \ | |||
| void ParseAttrInScalar_##type##_##valuetype(const PrimitivePtr &prim, const std::string &attr_name, \ | |||
| const onnx::TensorProto &attr_tensor) { \ | |||
| @@ -1050,7 +479,6 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out | |||
| return nullptr; | |||
| } | |||
| auto primitiveT = std::make_unique<schema::PrimitiveT>(); | |||
| // auto * primitiveTValue = new PrimitiveTValue(primitiveT.release()); | |||
| std::shared_ptr<PrimitiveTValue> primitiveTValuePtr = std::make_shared<PrimitiveTValue>(primitiveT.release()); | |||
| primitiveTValuePtr->SetQuantType(quantType); | |||
| node_parser->Populate(prim, primitiveTValuePtr.get(), inputs); | |||
| @@ -1171,7 +599,6 @@ bool AnfImporterFromProtobuf::ImportNodesForGraph(const FuncGraphPtr &outputFunc | |||
| BuildReturnForFuncGraph(outputFuncGraph, importProto, cnode_ptr); | |||
| return true; | |||
| } | |||
| #endif | |||
| bool AnfImporterFromProtobuf::BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, | |||
| const schema::QuantType &quantType) { | |||
| @@ -82,16 +82,8 @@ int Benchmark::ReadInputFile() { | |||
| } | |||
| if (this->_flags->inDataType == kImage) { | |||
| // int cvFlags; | |||
| // if (inTensor->Channel() == 3) { | |||
| // cvFlags = 0; // cv::IMREAD_COLOR; | |||
| // } else if (inTensor->Channel() == 1) { | |||
| // cvFlags = 1; // cv::IMREAD_GRAYSCALE; | |||
| // } else { | |||
| // MS_LOG(ERROR) << "Image mode only support imgChannel == 1 or 3, imgChannel : %lld", (long | |||
| // long)inTensor->Channel(); return RET_PARAM_INVALID; | |||
| // } | |||
| // todo fill inTensor->GetData() | |||
| MS_LOG(ERROR) << "Not supported image input"; | |||
| return RET_ERROR; | |||
| } else { | |||
| for (auto i = 0; i < _flags->input_data_list.size(); i++) { | |||
| auto cur_tensor = msInputs.at(i); | |||
| @@ -99,13 +91,12 @@ int Benchmark::ReadInputFile() { | |||
| size_t size; | |||
| char *binBuf = ReadFile(_flags->input_data_list[i].c_str(), &size); | |||
| if (binBuf == nullptr) { | |||
| MS_LOG(ERROR) << "ReadFile return nullptr"; | |||
| return RET_ERROR; | |||
| MS_LOG(ERROR) << "ReadFile return nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| auto tensorDataSize = cur_tensor->Size(); | |||
| if (size != tensorDataSize) { | |||
| std::cerr << "Input binary file size error, required: %zu, in fact: %zu" << tensorDataSize | |||
| << size << std::endl; | |||
| std::cerr << "Input binary file size error, required: %zu, in fact: %zu" << tensorDataSize << size << std::endl; | |||
| MS_LOG(ERROR) << "Input binary file size error, required: " << tensorDataSize << ", in fact: " << size; | |||
| return RET_ERROR; | |||
| } | |||
| @@ -15,6 +15,10 @@ | |||
| */ | |||
| #include "tools/benchmark/benchmark.h" | |||
| #include "include/version.h" | |||
| int main(int argc, const char **argv) { return mindspore::lite::RunBenchmark(argc, argv); } | |||
| int main(int argc, const char **argv) { | |||
| MS_LOG(INFO) << mindspore::lite::Version(); | |||
| return mindspore::lite::RunBenchmark(argc, argv); | |||
| } | |||
| @@ -175,7 +175,7 @@ STATUS IsolateNode(schema::MetaGraphT *graphT, CNodeT *node) { | |||
| } | |||
| } | |||
| // todo whether need to remove weightInputTensores | |||
| // whether need to remove weightInputTensores | |||
| // remove all node's outputTensors | |||
| RemoveTensor(graphT, outputTensorIdxes); | |||
| node->inputIndex.clear(); | |||
| @@ -51,7 +51,6 @@ class NodeUtils { | |||
| schema::TensorT *output, std::vector<int32_t> &stride); | |||
| }; | |||
| // todo check this | |||
| enum kTransFilterType { | |||
| kKCHW2HWCK, // 0 | |||
| kKCHW2KHWC, | |||
| @@ -82,7 +82,6 @@ MetaGraphT *Converter::Convert(const converter::Flags *flag) { | |||
| MS_LOG(ERROR) << "Parse to metaGraph return nullptr"; | |||
| return nullptr; | |||
| } | |||
| // todo hangangqiang | |||
| graph = ModelParser::Fb2Anf(meta_graph); | |||
| } | |||
| if (graph == nullptr) { | |||
| @@ -1,50 +0,0 @@ | |||
# Runtime operator sources reused by the converter's const-fold passes:
# core tensor/context/allocator plumbing plus the CPU kernels that the
# individual *_const_fold_pass.cc files instantiate at convert time.
set(OP_SRC
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/tensor.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/context.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/runtime/allocator.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/op.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/op_common.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/op_factory.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/op_registry.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/common/op_func_comm.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/common/op_nc4hw4_comm.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/add.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/cast.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/concat.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/fp32/add_fp32.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/fp32/concat_fp32.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/uint8/add_uint8.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/uint8/concat_uint8.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/expand_dim.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/mul.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/range.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/reshape.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/uint8/reshape_uint8.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/rsqrt.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/shape.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/slice.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/stack.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/strided_slice.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/sub.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/tile.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/operator/cpu/creator/transpose.cc
        )
# Object library bundling the kernels above with one const-fold pass per op;
# linked into the converter rather than installed on its own.
add_library(const_fold_mid OBJECT
        ${OP_SRC}
        ${CMAKE_CURRENT_SOURCE_DIR}/const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/add_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/cast_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/concat_v2_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/expand_dims_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/mul_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/range_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/reshape_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/rsqrt_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/shape_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/slice_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/stack_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/strided_slice_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/sub_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/tile_const_fold_pass.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/transpose_const_fold_pass.cc)
| @@ -1,97 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/add_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/add.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS AddConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
// Instantiate an OpAdd CPU kernel whose element type matches the inputs, then
// run shape inference and Init so the kernel is ready for DoFold::Execute.
// Returns RET_ERROR on unsupported dtype, allocation failure, or kernel errors.
STATUS AddConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {
  InnerContext ctx;
  OpDesc desc{};
  desc.type = OpT_Add;
  desc.arch = kCPU;
  MS_ASSERT(inputs.size() == kArithOpInputNum);
  auto inTensor0 = inputs.at(kArithOpInputTensorIndex0);
  auto inTensor1 = inputs.at(kArithOpInputTensorIndex1);
  MS_ASSERT(inTensor0 != nullptr);
  MS_ASSERT(inTensor1 != nullptr);
  DataType dataType;
  // Take the dtype from the higher-rank operand — presumably the other side
  // may be a scalar/1-D broadcast operand with the same dtype.
  // TODO(review): confirm both operands always share a dtype.
  if (inTensor0->GetNDim() > 1) {
    dataType = inTensor0->GetDataType();
  } else {
    dataType = inTensor1->GetDataType();
  }
  // `op` is a member of the base pass; it is consumed by DoFold after CreateOp.
  switch (dataType) {
    case DataType_DT_UINT8: {
      op = new (std::nothrow) OpAdd<uint8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc);
    } break;
    case DataType_DT_INT32: {
      op = new (std::nothrow) OpAdd<int32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc);
    } break;
    case DataType_DT_FLOAT: {
      op = new (std::nothrow) OpAdd<float>(inputs, outputs, *PackOpDefT(node), &ctx, desc);
    } break;
    case DataType_DT_INT8: {
      op = new (std::nothrow) OpAdd<int8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc);
    } break;
    case DataType_DT_UINT32: {
      op = new (std::nothrow) OpAdd<uint32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc);
    } break;
    default: {
      MS_LOGE("Unsupported dataType: %d", dataType);
      return RET_ERROR;
    }
  }
  if (op == nullptr) {
    MS_LOGE("new OpAdd return nullptr");
    return RET_ERROR;
  }
  // NOTE(review): `op` is not deleted on the error paths below — verify the
  // base class releases it, otherwise these paths leak the kernel.
  auto ret = op->InferShape(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpAdd InferShape Failed");
    return RET_ERROR;
  }
  ret = op->Init(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpAdd Init Failed");
    return RET_ERROR;
  }
  return RET_OK;
}
// Execute the OpAdd kernel prepared by CreateOp on the constant inputs and
// copy the computed result back into the node's output TensorDefT in the graph.
STATUS AddConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) {
  MS_ASSERT(op != nullptr);
  auto ret = op->Execute(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpAdd Execute Failed");
    return RET_ERROR;
  }
  if (node->outputIndex.size() != kArithOpOutputNum) {
    MS_LOGE("The number of output for add must be %u, nodeName: %s", kArithOpOutputNum, node->name.c_str());
    return RET_ERROR;
  }
  // Write the folded data into the graph tensor; `outputTensor` is read later
  // by the base pass to decide whether folding actually produced data.
  this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get();
  CopyTensor2TensorDefT(outputs.front(), this->outputTensor);
  return RET_OK;
}
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_ADD_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_ADD_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "converter/common/tensor_util.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Const-folding pass for Add: when every input of an Add node is a constant
// tensor, evaluates the addition at convert time (CreateOp + DoFold) and
// stores the result in the node's output tensor.
class AddConstFoldPass : public ConstFoldPass {
 public:
  AddConstFoldPass() : ConstFoldPass(OpT_Add) {}
  ~AddConstFoldPass() override = default;
  // Delegates to the generic ConstFoldPass driver.
  STATUS Run(GraphNode *graphNode) override;
  // Builds and initializes the dtype-matched OpAdd kernel.
  STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override;
  // Executes the kernel and writes the result into the output tensor.
  STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override;
};
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_ADD_CONST_FOLD_PASS_H | |||
| @@ -1,68 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/cast_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/cast.h" | |||
| #define CAST_OUTPUT_NUM 1 | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS CastConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
// Instantiate the OpCast CPU kernel, then run shape inference and Init so it
// is ready for DoFold::Execute. Returns RET_ERROR on allocation or kernel
// failure.
STATUS CastConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {
  InnerContext ctx;
  OpDesc desc{};
  desc.type = OpT_Cast;
  desc.arch = kCPU;
  // `op` is a member of the base pass; consumed by DoFold after CreateOp.
  op = new (std::nothrow) OpCast(inputs, outputs, *PackOpDefT(node), &ctx, desc);
  if (op == nullptr) {
    MS_LOGE("new OpCast return nullptr");
    return RET_ERROR;
  }
  // NOTE(review): `op` is not deleted on the error paths below — verify the
  // base class releases it, otherwise these paths leak the kernel.
  auto ret = op->InferShape(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpCast InferShape Failed");
    return RET_ERROR;
  }
  ret = op->Init(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpCast Init Failed");
    return RET_ERROR;
  }
  return RET_OK;
}
// Execute the OpCast kernel prepared by CreateOp on the constant input and
// copy the converted data into the node's output TensorDefT in the graph.
STATUS CastConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) {
  MS_ASSERT(op != nullptr);
  auto ret = op->Execute(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpCast Execute Failed");
    return RET_ERROR;
  }
  if (node->outputIndex.size() != CAST_OUTPUT_NUM) {
    MS_LOGE("The number of output for cast must be %u, nodeName: %s", CAST_OUTPUT_NUM, node->name.c_str());
    return RET_ERROR;
  }
  // Write the folded data into the graph tensor for the base pass to inspect.
  this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get();
  CopyTensor2TensorDefT(outputs.front(), this->outputTensor);
  return RET_OK;
}
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,65 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/concat_v2_const_fold_pass.h" | |||
| #include "src/operator/cpu/creator/concat.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS ConcatV2ConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
// Instantiate the OpConcat CPU kernel, then run shape inference and Init so
// it is ready for DoFold::Execute. Returns RET_ERROR on allocation or kernel
// failure.
STATUS ConcatV2ConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {
  InnerContext ctx;
  OpDesc desc{};
  desc.type = OpT_Concat;
  desc.arch = kCPU;
  // `op` is a member of the base pass; consumed by DoFold after CreateOp.
  op = new (std::nothrow) OpConcat(inputs, outputs, *PackOpDefT(node), &ctx, desc);
  if (op == nullptr) {
    MS_LOGE("new OpConcat return nullptr");
    return RET_ERROR;
  }
  // NOTE(review): `op` is not deleted on the error paths below — verify the
  // base class releases it, otherwise these paths leak the kernel.
  auto ret = op->InferShape(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpConcat InferShape Failed");
    return RET_ERROR;
  }
  ret = op->Init(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpConcat Init Failed");
    return RET_ERROR;
  }
  return RET_OK;
}
// Execute the OpConcat kernel prepared by CreateOp on the constant inputs and
// copy the concatenated data into the node's output TensorDefT in the graph.
STATUS ConcatV2ConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) {
  MS_ASSERT(op != nullptr);
  auto ret = op->Execute(inputs, outputs);
  if (ret != RET_OK) {
    MS_LOGE("OpConcat Execute Failed");
    return RET_ERROR;
  }
  if (node->outputIndex.size() != kConcatOutputNum) {
    MS_LOGE("The number of output for concat must be %u, nodeName: %s", kConcatOutputNum, node->name.c_str());
    return RET_ERROR;
  }
  // Write the folded data into the graph tensor for the base pass to inspect.
  this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get();
  CopyTensor2TensorDefT(outputs.front(), this->outputTensor);
  return RET_OK;
}
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,110 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_CONCAT_V2_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_CONCAT_V2_CONST_FOLD_PASS_H | |||
| #include <vector> | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "converter/common/tensor_util.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "securec/include/securec.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class ConcatV2ConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| ConcatV2ConstFoldPass() : ConstFoldPass(OpT_Concat) {} | |||
| ~ConcatV2ConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| private: | |||
| template <typename T> | |||
| STATUS DoConcat(SubGraphDefT *subGraph, const std::vector<uint32_t> &inTensorIdxes, int axis) { | |||
| MS_ASSERT(this->outputTensor != nullptr); | |||
| std::vector<TensorDefT *> inTensors; | |||
| std::vector<T *> inDatas; | |||
| for (size_t i = 0; i < inTensorIdxes.size(); i++) { | |||
| auto &inTensor = subGraph->allTensors.at(inTensorIdxes.at(i)); | |||
| MS_ASSERT(inTensor != nullptr); | |||
| inTensors.emplace_back(inTensor.get()); | |||
| void *inData = inTensor->data.data(); | |||
| MS_ASSERT(inData != nullptr); | |||
| T *castedInData = static_cast<T *>(inData); | |||
| MS_ASSERT(castedInData != nullptr); | |||
| inDatas.emplace_back(castedInData); | |||
| } | |||
| auto &inShape = subGraph->allTensors.at(inTensorIdxes.at(0))->dims; | |||
| std::vector<int32_t> outputDims; | |||
| for (size_t i = 0; i < inShape.size(); i++) { | |||
| if (i == axis) { | |||
| int32_t axisDim = 0; | |||
| for (size_t j = 0; j < inTensors.size(); j++) { | |||
| axisDim += inTensors.at(j)->dims.at(i); | |||
| } | |||
| outputDims.push_back(axisDim); | |||
| continue; | |||
| } | |||
| outputDims.push_back(inShape.at(i)); | |||
| } | |||
| size_t outShapeSize = 1; | |||
| for (auto dim : outputDims) { | |||
| outShapeSize *= dim; | |||
| } | |||
| size_t elementSize = GetElementSize(subGraph->allTensors.at(inTensorIdxes.at(0))->dataType); | |||
| this->outputTensor->dims = outputDims; | |||
| this->outputTensor->data.clear(); | |||
| this->outputTensor->data.resize(outShapeSize * elementSize); | |||
| void *outData = this->outputTensor->data.data(); | |||
| MS_ASSERT(outData != nullptr); | |||
| T *castedOutData = static_cast<T *>(outData); | |||
| size_t copyBlockTile = 1; | |||
| for (int i = axis + 1; i < inShape.size(); i++) { | |||
| copyBlockTile *= inShape[i]; | |||
| } | |||
| std::vector<size_t> inCopyBlocks; | |||
| size_t outCopyBlock = 0; | |||
| for (size_t i = 0; i < inTensors.size(); i++) { | |||
| inCopyBlocks.emplace_back(copyBlockTile * (inTensors.at(i)->dims.at(axis))); | |||
| outCopyBlock += inCopyBlocks.back(); | |||
| } | |||
| size_t outIndex = 0; | |||
| while (outIndex < outShapeSize) { | |||
| for (size_t i = 0; i < inDatas.size(); i++) { | |||
| ::memcpy_s(castedOutData + outIndex, inCopyBlocks.at(i), inDatas.at(i), inCopyBlocks.at(i)); | |||
| outIndex += inCopyBlocks.at(i); | |||
| inDatas.at(i) += inCopyBlocks.at(i); | |||
| } | |||
| } | |||
| return RET_OK; | |||
| } | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_CONCAT_V2_CONST_FOLD_PASS_H | |||
| @@ -1,207 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/const_fold_pass.h" | |||
| #include <vector> | |||
| #include "utils/log_adapter.h" | |||
| #include "converter/common/graph_util.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS ConstFoldPass::Run(GraphNode *graphNode) { | |||
| MS_ASSERT(graphNode != nullptr); | |||
| auto subGraph = graphNode->subGraph; | |||
| auto node = graphNode->opDef; | |||
| MS_ASSERT(subGraph != nullptr); | |||
| MS_ASSERT(node != nullptr); | |||
| if (GetOpType(*node) != opType) { | |||
| return RET_OK; | |||
| } | |||
| if (!IsFoldable(subGraph, node)) { | |||
| MS_LOGD("All input should be ConstTensor, node : %s"); | |||
| return RET_OK; | |||
| } | |||
| for (uint32_t i : node->inputIndex) { | |||
| TensorDefT *tensorDefT = subGraph->allTensors.at(i).get(); | |||
| MS_ASSERT(tensorDefT != nullptr); | |||
| auto tensor = CopyTensorDefT2Tensor(tensorDefT); | |||
| if (tensor == nullptr) { | |||
| MS_LOGE("Pack TensorDefT return nullptr"); | |||
| FreeTensors(); | |||
| return RET_ERROR; | |||
| } | |||
| inputs.emplace_back(tensor); | |||
| } | |||
| for (uint32_t i : node->outputIndex) { | |||
| TensorDefT *tensorDefT = subGraph->allTensors.at(i).get(); | |||
| MS_ASSERT(tensorDefT != nullptr); | |||
| auto tensor = CopyTensorDefT2Tensor(tensorDefT, false); | |||
| if (tensor == nullptr) { | |||
| MS_LOGE("Pack TensorDefT return nullptr"); | |||
| FreeTensors(); | |||
| return RET_ERROR; | |||
| } | |||
| outputs.emplace_back(tensor); | |||
| } | |||
| auto status = CreateOp(subGraph, node); | |||
| if (status != RET_OK) { | |||
| MS_LOGE("CreateOp error: %d, node: %s", status, node->name.c_str()); | |||
| FreeTensors(); | |||
| return status; | |||
| } | |||
| for (auto &outputTensor : outputs) { | |||
| auto statusTmp = outputTensor->MallocData(); | |||
| if (statusTmp != RET_OK) { | |||
| MS_LOGE("OutTensor MallocData error: %d, nodeName: %s", statusTmp, node->name.c_str()); | |||
| FreeTensors(); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| status = DoFold(subGraph, node); | |||
| if (status != RET_OK) { | |||
| MS_LOGE("DoFold error: %d, node: %s", status, node->name.c_str()); | |||
| FreeTensors(); | |||
| return status; | |||
| } | |||
| if (this->outputTensor->data.empty()) { | |||
| MS_LOGI("outputTensor's data has not been set, node : %s", node->name.c_str()); | |||
| FreeTensors(); | |||
| return RET_OK; | |||
| } | |||
| this->outputTensor->refCount = schema::NodeType_ValueNode; | |||
| bool isSubNode = false; | |||
| for (auto &inNode : subGraph->nodes) { | |||
| if (inNode->name == node->name) { | |||
| isSubNode = true; | |||
| break; | |||
| } | |||
| } | |||
| if (!isSubNode) { | |||
| MS_LOGE("Node %s is not in subGraph %s", node->name.c_str(), subGraph->name.c_str()); | |||
| return RET_PARAM_INVALID; | |||
| } else { | |||
| status = RemoveTensor(subGraph, node->inputIndex); | |||
| if (status != RET_OK) { | |||
| MS_LOGE("RemoveTensor failed, node : %s", node->name.c_str()); | |||
| FreeTensors(); | |||
| return status; | |||
| } | |||
| // we can not erase nodes in iter loop, so just isolate the node | |||
| node->inputIndex.clear(); | |||
| node->outputIndex.clear(); | |||
| } | |||
| FreeTensors(); | |||
| return RET_OK; | |||
| } | |||
| OpDef *ConstFoldPass::PackOpDefT(const OpDefT *opDefT) { | |||
| flatbuffers::FlatBufferBuilder builder(1024); | |||
| auto offset = OpDef::Pack(builder, opDefT); | |||
| builder.Finish(offset); | |||
| auto buf = builder.GetBufferPointer(); | |||
| auto opDef = flatbuffers::GetRoot<mindspore::predict::OpDef>(buf); | |||
| return const_cast<mindspore::predict::OpDef *>(opDef); | |||
| } | |||
| Tensor *ConstFoldPass::CopyTensorDefT2Tensor(const TensorDefT *tensorDefT, bool needCopyData) { | |||
| if (tensorDefT == nullptr) { | |||
| MS_LOGE("tensorDefT is null"); | |||
| return nullptr; | |||
| } | |||
| std::vector<int64_t> dims; | |||
| for (size_t i = 0; i < tensorDefT->dims.size(); i++) { | |||
| dims.emplace_back(tensorDefT->dims.at(i)); | |||
| } | |||
| auto tensor = new (std::nothrow) Tensor(tensorDefT->dataType, dims, tensorDefT->format, nullptr); | |||
| if (tensor == nullptr) { | |||
| MS_LOGE("new tensor error"); | |||
| return nullptr; | |||
| } | |||
| if (needCopyData) { | |||
| auto status = tensor->MallocData(); | |||
| if (status != RET_OK) { | |||
| MS_LOGE("malloc tensor data error: %d", status); | |||
| delete (tensor); | |||
| return nullptr; | |||
| } | |||
| size_t dataLength = tensor->GetDataSize(); | |||
| status = ::memcpy_s(tensor->GetData(), dataLength, tensorDefT->data.data(), dataLength); | |||
| if (status != 0) { | |||
| MS_LOGE("memcpy_s error: %d", status); | |||
| delete (tensor); | |||
| return nullptr; | |||
| } | |||
| } | |||
| return tensor; | |||
| } | |||
| STATUS ConstFoldPass::CopyTensor2TensorDefT(const Tensor *tensor, TensorDefT *tensorDefT) { | |||
| MS_ASSERT(tensorDefT != nullptr); | |||
| if (tensor == nullptr) { | |||
| MS_LOGE("tensor is null"); | |||
| return RET_ERROR; | |||
| } | |||
| tensorDefT->dims.clear(); | |||
| for (size_t i = 0; i < tensor->GetNDim(); i++) { | |||
| tensorDefT->dims.emplace_back(tensor->GetDims().at(i)); | |||
| } | |||
| tensorDefT->dataType = tensor->GetDataType(); | |||
| tensorDefT->format = tensor->GetFormat(); | |||
| size_t dataLength = tensor->GetDataSize(); | |||
| tensorDefT->data.resize(dataLength); | |||
| auto ret = ::memcpy_s(tensorDefT->data.data(), dataLength, tensor->GetData(), dataLength); | |||
| if (ret != 0) { | |||
| MS_LOGE("memcpy_s error: %d", ret); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| bool ConstFoldPass::IsFoldable(SubGraphDefT *subGraph, OpDefT *node) { | |||
| bool isFoldable = true; | |||
| for (auto tensorIdx : node->inputIndex) { | |||
| auto &tensor = subGraph->allTensors.at(tensorIdx); | |||
| if (tensor->refCount != schema::NodeType_ValueNode || tensor->data.empty()) { | |||
| isFoldable = false; | |||
| break; | |||
| } | |||
| } | |||
| return isFoldable; | |||
| } | |||
| void ConstFoldPass::FreeTensors() { | |||
| for (auto tensor : inputs) { | |||
| if (tensor != nullptr) { | |||
| delete (tensor); | |||
| } | |||
| } | |||
| inputs.clear(); | |||
| for (auto tensor : outputs) { | |||
| if (tensor != nullptr) { | |||
| delete (tensor); | |||
| } | |||
| } | |||
| outputs.clear(); | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,64 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_CONST_FOLD_PASS_H | |||
| #include <vector> | |||
| #include "mindspore/lite/tools/converter/optimizer.h" | |||
| #include "include/tensor.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "converter/common/converter_op_utils.h" | |||
| #include "securec/include/securec.h" | |||
| #include "src/op.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class ConstFoldPass : public NodePass { | |||
| public: | |||
| explicit ConstFoldPass(schema::PrimitiveType opType) : opType(opType) {} | |||
| ~ConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| protected: | |||
| bool IsFoldable(SubGraphDefT *subGraph, OpDefT *node); | |||
| virtual STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) = 0; | |||
| virtual STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) = 0; | |||
| protected: | |||
| OpDef *PackOpDefT(const OpDefT *opDefT); | |||
| Tensor *CopyTensorDefT2Tensor(const TensorDefT *tensorDefT, bool needCopyData = true); | |||
| STATUS CopyTensor2TensorDefT(const Tensor *tensor, TensorDefT *tensorDefT); | |||
| void FreeTensors(); | |||
| protected: | |||
| schema::PrimitiveType opType; | |||
| TensorDefT *outputTensor = nullptr; | |||
| std::vector<Tensor *> inputs; | |||
| std::vector<Tensor *> outputs; | |||
| OpBase *op = nullptr; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/expand_dims_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/expand_dim.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Thin override: all folding logic lives in the shared ConstFoldPass::Run driver.
STATUS ExpandDimsConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
| STATUS ExpandDimsConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_ExpandDims; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpExpandDim(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpExpandDim return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpExpandDim InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpExpandDim Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS ExpandDimsConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpExpandDim Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kExpandDimsOutputNum) { | |||
| MS_LOGE("The number of output for expandDim must be %u, nodeName: %s", kExpandDimsOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,40 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_EXPANDDIMS_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_EXPANDDIMS_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class ExpandDimsConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| ExpandDimsConstFoldPass() : ConstFoldPass(OpT_ExpandDims) {} | |||
| ~ExpandDimsConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_EXPANDDIMS_CONST_FOLD_PASS_H | |||
| @@ -1,101 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/mul_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "converter/common/tensor_util.h" | |||
| #include "converter/common/converter_op_utils.h" | |||
| #include "src/operator/cpu/creator/mul.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Thin override: all folding logic lives in the shared ConstFoldPass::Run driver.
STATUS MulConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
| STATUS MulConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Mul; | |||
| desc.arch = kCPU; | |||
| MS_ASSERT(inputs.size() == kArithOpInputNum); | |||
| auto inTensor0 = inputs.at(kArithOpInputTensorIndex0); | |||
| auto inTensor1 = inputs.at(kArithOpInputTensorIndex1); | |||
| MS_ASSERT(inTensor0 != nullptr); | |||
| MS_ASSERT(inTensor1 != nullptr); | |||
| DataType dataType; | |||
| if (inTensor0->GetNDim() > 1) { | |||
| dataType = inTensor0->GetDataType(); | |||
| } else { | |||
| dataType = inTensor1->GetDataType(); | |||
| } | |||
| op = nullptr; | |||
| switch (dataType) { | |||
| case DataType_DT_UINT8: { | |||
| op = new (std::nothrow) OpMul<uint8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_INT32: { | |||
| op = new (std::nothrow) OpMul<int32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_FLOAT: { | |||
| op = new (std::nothrow) OpMul<float>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_INT8: { | |||
| op = new (std::nothrow) OpMul<int8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_UINT32: { | |||
| op = new (std::nothrow) OpMul<uint32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| default: { | |||
| MS_LOGE("Unsupported dataType: %d", dataType); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpMul return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpMul InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpMul Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS MulConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpMul Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kArithOpOutputNum) { | |||
| MS_LOGE("The number of output for mul must be %u, nodeName: %s", kArithOpOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_MUL_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_MUL_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "converter/common/tensor_util.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class MulConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| MulConstFoldPass() : ConstFoldPass(OpT_Mul) {} | |||
| ~MulConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_MUL_CONST_FOLD_PASS_H | |||
| @@ -1,68 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/range_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/range.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| #define kRangeOutputNum 1 | |||
| STATUS RangeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS RangeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Range; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpRange(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpAdd return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpAdd InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpAdd Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS RangeConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpAdd Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kRangeOutputNum) { | |||
| MS_LOGE("The number of range for range must be %u, nodeName: %s", kRangeOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_RANGE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_RANGE_CONST_FOLD_PASS_H | |||
| #include <cmath> | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class RangeConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| RangeConstFoldPass() : ConstFoldPass(OpT_Range) {} | |||
| ~RangeConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_RANGE_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/reshape_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/reshape.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Thin override: all folding logic lives in the shared ConstFoldPass::Run driver.
STATUS ReshapeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
| STATUS ReshapeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Reshape; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpReshape(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpReshape return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpReshape InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpReshape Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS ReshapeConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpReshape Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kReshapeOutputNum) { | |||
| MS_LOGE("The number of output for Reshape must be %u, nodeName: %s", kReshapeOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,43 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_RESHAPE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_RESHAPE_CONST_FOLD_PASS_H | |||
| #include <vector> | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class ReshapeConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| ReshapeConstFoldPass() : ConstFoldPass(OpT_Reshape) {} | |||
| ~ReshapeConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| private: | |||
| STATUS CalNewShape(const TensorDefT &inTensor, std::vector<int64_t> &outShape); | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_RESHAPE_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/rsqrt_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/fp32/rsqrt_fp32.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Thin override: all folding logic lives in the shared ConstFoldPass::Run driver.
STATUS RsqrtConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
| STATUS RsqrtConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Rsqrt; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) RsqrtFp32(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpRsqrt return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpRsqrt InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpRsqrt Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS RsqrtConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpRsqrt Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kRsqrtOutputNum) { | |||
| MS_LOGE("The number of output for Rsqrt must be %u, nodeName: %s", kRsqrtOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_RSQRT_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_RSQRT_CONST_FOLD_PASS_H | |||
| #include <cmath> | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class RsqrtConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| RsqrtConstFoldPass() : ConstFoldPass(OpT_Rsqrt) {} | |||
| ~RsqrtConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_RSQRT_CONST_FOLD_PASS_H | |||
| @@ -1,65 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/shape_const_fold_pass.h" | |||
| #include "src/operator/cpu/creator/shape.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
// Thin override: all folding logic lives in the shared ConstFoldPass::Run driver.
STATUS ShapeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
| STATUS ShapeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Shape; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpShape(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpShape return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpShape InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpShape Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS ShapeConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpShape Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kShapeOutputNum) { | |||
| MS_LOGE("The number of output for shape must be %u, nodeName: %s", kShapeOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,40 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_SHAPE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_SHAPE_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class ShapeConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| ShapeConstFoldPass() : ConstFoldPass(OpT_Shape) {} | |||
| ~ShapeConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_SHAPE_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/slice_const_fold_pass.h" | |||
| #include "src/operator/cpu/creator/slice.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| // todo if slice op has placeholder tensor | |||
| STATUS SliceConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS SliceConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Slice; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpSlice(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpSlice return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSlice InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSlice Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS SliceConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSlice Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kSliceOutputNum) { | |||
| MS_LOGE("The number of output for slice must be %u, nodeName: %s", kSliceOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_SLICE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_SLICE_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| // This Op only supports 1-4D cases | |||
| class SliceConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| SliceConstFoldPass() : ConstFoldPass(OpT_Slice) {} | |||
| ~SliceConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_SLICE_CONST_FOLD_PASS_H | |||
| @@ -1,65 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/stack_const_fold_pass.h" | |||
| #include "src/operator/cpu/creator/stack.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS StackConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS StackConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Stack; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpStack(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpStack return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStack InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStack Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS StackConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStack Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kStackOutputNum) { | |||
| MS_LOGE("The number of output for stack must be %u, nodeName: %s", kStackOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,42 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_STACK_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_STACK_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "securec/include/securec.h" | |||
| #include "utils/log_adapter.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class StackConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| StackConstFoldPass() : ConstFoldPass(OpT_Stack) {} | |||
| ~StackConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_STACK_CONST_FOLD_PASS_H | |||
| @@ -1,65 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/strided_slice_const_fold_pass.h" | |||
| #include "src/operator/cpu/creator/strided_slice.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS StridedSliceConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS StridedSliceConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Slice; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpStridedSlice(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpStridedSlice return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStridedSlice InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStridedSlice Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS StridedSliceConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpStridedSlice Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kStridedSliceOutputNum) { | |||
| MS_LOGE("The number of output for slice must be %u, nodeName: %s", kStridedSliceOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_STRIDED_SLICE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_STRIDED_SLICE_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| // This Op only supports 1-4D cases | |||
| class StridedSliceConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| StridedSliceConstFoldPass() : ConstFoldPass(OpT_StridedSlice) {} | |||
| ~StridedSliceConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_STRIDED_SLICE_CONST_FOLD_PASS_H | |||
| @@ -1,100 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/sub_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "converter/common/tensor_util.h" | |||
| #include "converter/common/converter_op_utils.h" | |||
| #include "src/operator/cpu/creator/sub.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS SubConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS SubConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Sub; | |||
| desc.arch = kCPU; | |||
| MS_ASSERT(inputs.size() == kArithOpInputNum); | |||
| auto inTensor0 = inputs.at(kArithOpInputTensorIndex0); | |||
| auto inTensor1 = inputs.at(kArithOpInputTensorIndex1); | |||
| MS_ASSERT(inTensor0 != nullptr); | |||
| MS_ASSERT(inTensor1 != nullptr); | |||
| DataType dataType; | |||
| if (inTensor0->GetNDim() > 1) { | |||
| dataType = inTensor0->GetDataType(); | |||
| } else { | |||
| dataType = inTensor1->GetDataType(); | |||
| } | |||
| switch (dataType) { | |||
| case DataType_DT_UINT8: { | |||
| op = new (std::nothrow) OpSub<uint8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_INT32: { | |||
| op = new (std::nothrow) OpSub<int32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_FLOAT: { | |||
| op = new (std::nothrow) OpSub<float>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_INT8: { | |||
| op = new (std::nothrow) OpSub<int8_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| case DataType_DT_UINT32: { | |||
| op = new (std::nothrow) OpSub<uint32_t>(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| } break; | |||
| default: { | |||
| MS_LOGE("Unsupported dataType: %d", dataType); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpSub return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSub InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSub Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS SubConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpSub Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kArithOpOutputNum) { | |||
| MS_LOGE("The number of output for sub must be %u, nodeName: %s", kArithOpOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_SUB_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_SUB_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "converter/common/tensor_util.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class SubConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| SubConstFoldPass() : ConstFoldPass(OpT_Sub) {} | |||
| ~SubConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_SUB_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/tile_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/tile.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS TileConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS TileConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Tile; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpTile(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpTile return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTile InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTile Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS TileConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTile Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kTileOutputNum) { | |||
| MS_LOGE("The number of output for tile must be %u, nodeName: %s", kTileOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,42 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_TILE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_TILE_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "securec/include/securec.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class TileConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| TileConstFoldPass() : ConstFoldPass(OpT_Tile) {} | |||
| ~TileConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_TILE_CONST_FOLD_PASS_H | |||
| @@ -1,66 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "tools/converter/legacy_optimizer/const_fold/transpose_const_fold_pass.h" | |||
| #include "utils/log_adapter.h" | |||
| #include "src/operator/cpu/creator/transpose.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| STATUS TransposeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } | |||
| STATUS TransposeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { | |||
| InnerContext ctx; | |||
| OpDesc desc{}; | |||
| desc.type = OpT_Transpose; | |||
| desc.arch = kCPU; | |||
| op = new (std::nothrow) OpTranspose(inputs, outputs, *PackOpDefT(node), &ctx, desc); | |||
| if (op == nullptr) { | |||
| MS_LOGE("new OpTranspose return nullptr"); | |||
| return RET_ERROR; | |||
| } | |||
| auto ret = op->InferShape(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTranspose InferShape Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| ret = op->Init(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTranspose Init Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| STATUS TransposeConstFoldPass::DoFold(SubGraphDefT *subGraph, OpDefT *node) { | |||
| MS_ASSERT(op != nullptr); | |||
| auto ret = op->Execute(inputs, outputs); | |||
| if (ret != RET_OK) { | |||
| MS_LOGE("OpTranspose Execute Failed"); | |||
| return RET_ERROR; | |||
| } | |||
| if (node->outputIndex.size() != kTransposeOutputNum) { | |||
| MS_LOGE("The number of output for transpose must be %u, nodeName: %s", kTransposeOutputNum, node->name.c_str()); | |||
| return RET_ERROR; | |||
| } | |||
| this->outputTensor = subGraph->allTensors.at(node->outputIndex.front()).get(); | |||
| CopyTensor2TensorDefT(outputs.front(), this->outputTensor); | |||
| return RET_OK; | |||
| } | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| @@ -1,41 +0,0 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_TRANSPOSE_CONST_FOLD_PASS_H | |||
| #define MINDSPORE_PREDICT_TRANSPOSE_CONST_FOLD_PASS_H | |||
| #include "converter/optimizer/const_fold/const_fold_pass.h" | |||
| #include "converter/common/tensor_util.h" | |||
| namespace mindspore { | |||
| namespace lite { | |||
| class TransposeConstFoldPass : public ConstFoldPass { | |||
| public: | |||
| TransposeConstFoldPass() : ConstFoldPass(OpT_Transpose) {} | |||
| ~TransposeConstFoldPass() override = default; | |||
| STATUS Run(GraphNode *graphNode) override; | |||
| STATUS CreateOp(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| STATUS DoFold(SubGraphDefT *subGraph, OpDefT *node) override; | |||
| }; | |||
| } // namespace lite | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_PREDICT_TRANSPOSE_CONST_FOLD_PASS_H | |||
| @@ -295,7 +295,6 @@ STATUS BatchNormFoldFusionPass::CheckPath(MetaGraphT *graph, | |||
| MS_ASSERT(mulFold->inputIndex.size() == 3); | |||
| MS_ASSERT(preConv->inputIndex.front() == convNode->inputIndex.front()); | |||
| MS_ASSERT(preConv->inputIndex.at(1) == mulFold->inputIndex.front()); | |||
| // todo | |||
| return RET_OK; | |||
| } | |||
| @@ -40,7 +40,7 @@ struct Path { | |||
| struct PatternOp { | |||
| std::string id; // id of op in pattern | |||
| std::vector<schema::PrimitiveType> types; // type of matchable op | |||
| // TODO(...): only support node with no more than two preNode now | |||
| // only support node with no more than two preNode now | |||
| // avoid loop reference | |||
| std::shared_ptr<PatternOp> left; // left input patternOp of this patternOp | |||
| std::shared_ptr<PatternOp> right; // right input patternOp of this patternOp | |||
| @@ -49,7 +49,6 @@ STATUS QuantCastFusionPass::DoFusion(MetaGraphT *graph, const std::string &patte | |||
| auto dstNode = graph->nodes.at(dstPath->nodeIdx).get(); | |||
| MS_ASSERT(dstNode != nullptr); | |||
| // todo check | |||
| if (srcNode->inputIndex.empty() && srcNode->outputIndex.empty()) { | |||
| MS_LOG(DEBUG) << "srcNode " << srcNode->name.c_str() << " has been removed"; | |||
| return RET_NO_CHANGE; | |||
| @@ -74,9 +74,9 @@ STATUS WeightFormatTransformPass::QuantDataFormatTrans(MetaGraphT *graph) { | |||
| // node->primitive->value.AsConv2D()->format = schema::Format_NHWC; | |||
| weightTensor->format = curDstFormat; | |||
| } else { | |||
| MS_LOG(WARNING) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; | |||
| // todo(00445839): consider varible weight condition | |||
| return ERROR; | |||
| } | |||
| } | |||
| } | |||
| @@ -112,9 +112,9 @@ STATUS WeightFormatTransformPass::NonQuantDataFormatTrans(MetaGraphT *graph) { | |||
| // node->attr.AsConv2D()->format = Format_NCHW; | |||
| weightTensor->format = curDstFormat; | |||
| } else { | |||
| MS_LOG(WARNING) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; | |||
| // todo(00445839): consider varible weight condition | |||
| return ERROR; | |||
| } | |||
| } else { // weight should be CKHW | |||
| Format curDstFormat; | |||
| @@ -128,9 +128,9 @@ STATUS WeightFormatTransformPass::NonQuantDataFormatTrans(MetaGraphT *graph) { | |||
| // node->attr.AsDepthwiseConv2D()->format = Format_NCHW; | |||
| weightTensor->format = curDstFormat; | |||
| } else { | |||
| MS_LOG(WARNING) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" | |||
| << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; | |||
| // todo(00445839): consider varible weight condition | |||
| return ERROR; | |||
| } | |||
| } | |||
| } | |||
| @@ -72,11 +72,11 @@ STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod | |||
| } | |||
| } | |||
| if (attribute_name == "ceil_mode") { | |||
| MS_ASSERT(false); // todo (h00500767) | |||
| attr->roundMode = schema::RoundMode_CEIL; | |||
| } | |||
| if (attribute_name == "dilations") { | |||
| MS_ASSERT(false); // todo pooling op not support dilations now | |||
| MS_LOG(ERROR) << "pooling op not support dilations now"; | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| if (op != nullptr) { | |||
| @@ -311,7 +311,6 @@ STATUS AwareQuantizer::SetAttrToConvolution(const schema::MetaGraphT *subGraph, | |||
| } | |||
| STATUS AwareQuantizer::GenerateQuantParam() { | |||
| // todo why? | |||
| MS_ASSERT(graph->inputIndex.size() == 1); | |||
| // set graphInputNode input | |||
| for (auto graphInputIndex : graph->inputIndex) { | |||
| @@ -469,7 +469,7 @@ QuantParamCalcRegister::QuantParamCalcRegister() { | |||
| _registerMap[schema::PrimitiveType_Pooling] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_Resize] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_Reshape] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_Shape] = linearCalcer; // todo if shape influence postNode's output quantParam | |||
| _registerMap[schema::PrimitiveType_Shape] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_SoftMax] = new CalcToSet(0, 1); | |||
| _registerMap[schema::PrimitiveType_Squeeze] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_RealDiv] = new CalcRealDiv(); | |||
| @@ -481,7 +481,6 @@ QuantParamCalcRegister::QuantParamCalcRegister() { | |||
| _registerMap[schema::PrimitiveType_FullConnection] = commonCalcer; | |||
| _registerMap[schema::PrimitiveType_Nchw2Nhwc] = linearCalcer; | |||
| _registerMap[schema::PrimitiveType_Nhwc2Nchw] = linearCalcer; | |||
| // todo | |||
| // detection_postprocess op's quant param will not infer only fetch from preNode or postNode | |||
| // because we will not insert quantTransNode after this node in tflite_graph_8bit model if input data is float. | |||
| // if quantTransNode is inserted after detection_postprocess node, there will be some errors | |||