From 29f068912f745d8dfcfaa2d7f3d32828a4af5ec8 Mon Sep 17 00:00:00 2001 From: mengyuanli Date: Mon, 26 Apr 2021 17:12:46 +0800 Subject: [PATCH] 1.add partial and call kernel for scheduler. 2.add some tools function --- .../cpu/nnacl/partial_fusion_parameter.h | 29 ++++++++++++++ .../core/mindrt/include/actor/op_actor.h | 14 +++---- mindspore/lite/src/common/prim_util.cc | 16 ++++++++ mindspore/lite/src/common/prim_util.h | 2 + mindspore/lite/src/common/tensor_util.cc | 2 +- mindspore/lite/src/lite_kernel.cc | 32 ++++++++-------- mindspore/lite/src/lite_kernel_util.cc | 35 +++++++++++++++++ mindspore/lite/src/lite_kernel_util.h | 7 ++++ mindspore/lite/src/lite_mindrt.cc | 2 +- .../lite/src/ops/populate/call_populate.cc | 34 +++++++++++++++++ .../lite/src/ops/populate/partial_populate.cc | 5 +-- .../lite/src/runtime/kernel/arm/base/call.cc | 38 +++++++++++++++++++ .../lite/src/runtime/kernel/arm/base/call.h | 38 +++++++++++++++++++ .../runtime/kernel/arm/base/partial_fusion.cc | 37 ++++++++++++++++++ .../runtime/kernel/arm/base/partial_fusion.h | 38 +++++++++++++++++++ .../kernel/arm/fp32/arithmetic_fp32.cc | 5 ++- mindspore/lite/src/sub_graph_kernel.cc | 6 +++ mindspore/lite/src/sub_graph_kernel.h | 2 + mindspore/lite/src/tensor.cc | 2 + mindspore/lite/src/tensor.h | 2 + 20 files changed, 316 insertions(+), 30 deletions(-) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/partial_fusion_parameter.h create mode 100644 mindspore/lite/src/ops/populate/call_populate.cc create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/call.cc create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/call.h create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.cc create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.h diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/partial_fusion_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/partial_fusion_parameter.h new file mode 100644 
index 0000000000..dfed17ca85 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/partial_fusion_parameter.h @@ -0,0 +1,29 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_PARTIAL_FUSION_H_ +#define MINDSPORE_NNACL_PARTIAL_FUSION_H_ + +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/nnacl_utils.h" + +typedef struct PartialParameter { + OpParameter op_parameter_; + int sub_graph_index_; +} PartialParameter; + +#endif // MINDSPORE_NNACL_PARTIAL_FUSION_H_ diff --git a/mindspore/core/mindrt/include/actor/op_actor.h b/mindspore/core/mindrt/include/actor/op_actor.h index f6685ae1ca..631fc6ecf5 100644 --- a/mindspore/core/mindrt/include/actor/op_actor.h +++ b/mindspore/core/mindrt/include/actor/op_actor.h @@ -54,7 +54,7 @@ using OpDataPtr = std::shared_ptr>; template struct OpContext { uuids::uuid *sequential_num_; - std::vector> *outputData_; + std::vector> *output_data_; std::vector> *results_; const void *kernel_call_back_before_; const void *kernel_call_back_after_; @@ -97,14 +97,14 @@ class OpActor : public ActorBase { }; template -Future> MindrtAsyncRun(const std::vector> &inputData, OpContext *context) { +Future> MindrtAsyncRun(const std::vector> &input_data, OpContext *context) { std::list> futures; for (auto promise : *(context->results_)) { futures.push_back(promise.GetFuture()); } Future> collect = mindspore::Collect(futures);
- for (auto data : inputData) { + for (auto data : input_data) { Async(data->op_id_, &mindspore::OpActor::RunOpData, data, context); } @@ -112,18 +112,18 @@ Future> MindrtAsyncRun(const std::vector> &inputData } template -int MindrtRun(const std::vector> &inputData, std::vector> *outputData, +int MindrtRun(const std::vector> &input_data, std::vector> *output_data, const void *kernel_call_back_before, const void *kernel_call_back_after) { OpContext context; - std::vector> promises(outputData->size()); + std::vector> promises(output_data->size()); uuids::uuid uid; context.sequential_num_ = &uid; context.results_ = &promises; - context.outputData_ = outputData; + context.output_data_ = output_data; context.kernel_call_back_before_ = kernel_call_back_before; context.kernel_call_back_after_ = kernel_call_back_after; - auto collect = MindrtAsyncRun(inputData, &context); + auto collect = MindrtAsyncRun(input_data, &context); collect.Wait(); if (!collect.IsOK()) { return -1; diff --git a/mindspore/lite/src/common/prim_util.cc b/mindspore/lite/src/common/prim_util.cc index e8cad919a6..b029cdf0a1 100644 --- a/mindspore/lite/src/common/prim_util.cc +++ b/mindspore/lite/src/common/prim_util.cc @@ -64,6 +64,22 @@ bool IsPartialNode(const void *primitive) { return false; } +bool IsCallNode(const void *primitive) { + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + return reinterpret_cast(primitive)->value_type() == schema::PrimitiveType_Call; + } + return false; +} + +bool IsSwitchNode(const void *primitive) { + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + return reinterpret_cast(primitive)->value_type() == schema::PrimitiveType_Switch; + } + return false; +} + int GetPartialGraphIndex(const void *primitive) { int index = -1; int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); diff --git a/mindspore/lite/src/common/prim_util.h 
b/mindspore/lite/src/common/prim_util.h index f414a2d644..fadb8e601d 100644 --- a/mindspore/lite/src/common/prim_util.h +++ b/mindspore/lite/src/common/prim_util.h @@ -24,6 +24,8 @@ const char *PrimitiveTypeName(int type); const char *PrimitiveCurVersionTypeName(int type); int GenPrimVersionKey(int primitive_type, int schema_version); bool IsPartialNode(const void *primitive); +bool IsCallNode(const void *primitive); +bool IsSwitchNode(const void *primitive); int GetPartialGraphIndex(const void *primitive); bool IsWhileNode(const void *primitive); int GetWhileBodySubgraphIndex(const void *primitive); diff --git a/mindspore/lite/src/common/tensor_util.cc b/mindspore/lite/src/common/tensor_util.cc index a747abec65..0213397381 100644 --- a/mindspore/lite/src/common/tensor_util.cc +++ b/mindspore/lite/src/common/tensor_util.cc @@ -213,7 +213,7 @@ int CheckTensorsInvalid(const std::vector &tensors) { return RET_ERROR; } if (tensor->data_type() != kObjectTypeTensorType && tensor->data_c() == nullptr) { - MS_LOG(ERROR) << "Graph input tensor is nullptr " << tensors; + MS_LOG(ERROR) << "Graph input tensor data is nullptr " << tensor->tensor_name(); return RET_ERROR; } auto shape = tensor->shape(); diff --git a/mindspore/lite/src/lite_kernel.cc b/mindspore/lite/src/lite_kernel.cc index b28d06ad1a..51a123e852 100644 --- a/mindspore/lite/src/lite_kernel.cc +++ b/mindspore/lite/src/lite_kernel.cc @@ -188,23 +188,25 @@ void LiteKernel::FindInoutKernels(const std::vector &scope // clean io kernels this->in_kernels_.clear(); this->out_kernels_.clear(); - // find io kernels - for (auto *scope_kernel : scope_kernels) { - if (scope_kernel == this) { - continue; - } - for (auto *tensor : this->in_tensors_) { - if (lite::IsContain(scope_kernel->out_tensors(), tensor)) { - if (!lite::IsContain(this->in_kernels(), scope_kernel)) { - this->AddInKernel(scope_kernel); - } + // find io kernels; TODO: optimize the time complexity of this lookup + for (auto *tensor : this->in_tensors_) { + for (auto *scope_kernel : scope_kernels) {
+ if (scope_kernel == this) { + continue; + } + if (lite::IsContain(scope_kernel->out_tensors(), tensor) && !lite::IsContain(this->in_kernels(), scope_kernel)) { + this->AddInKernel(scope_kernel); } } - for (auto *tensor : this->out_tensors_) { - if (lite::IsContain(scope_kernel->in_tensors(), tensor)) { - if (!lite::IsContain(this->out_kernels(), scope_kernel)) { - this->AddOutKernel(scope_kernel); - } + } + + for (auto *tensor : this->out_tensors_) { + for (auto *scope_kernel : scope_kernels) { + if (scope_kernel == this) { + continue; + } + if (lite::IsContain(scope_kernel->in_tensors(), tensor) && !lite::IsContain(this->out_kernels(), scope_kernel)) { + this->AddOutKernel(scope_kernel); } } } diff --git a/mindspore/lite/src/lite_kernel_util.cc b/mindspore/lite/src/lite_kernel_util.cc index 2d72fcdbb2..97b662ba5a 100644 --- a/mindspore/lite/src/lite_kernel_util.cc +++ b/mindspore/lite/src/lite_kernel_util.cc @@ -17,6 +17,7 @@ #include "src/lite_kernel_util.h" #include #include +#include "src/sub_graph_kernel.h" namespace mindspore::kernel { using mindspore::lite::RET_ERROR; @@ -187,4 +188,38 @@ void LiteKernelUtil::InitTensorInitRefCount(const std::vector &inputs) { return -1; } +bool LiteKernelUtil::IsSwitchCall(kernel::LiteKernel *kernel) { + auto *subgraph_kernel = reinterpret_cast(kernel); + if (subgraph_kernel == nullptr) { + return false; + } + for (auto &node : subgraph_kernel->nodes()) { + if (node->Type() == schema::PrimitiveType_Switch && + InputsContainsSpecificNode(node, schema::PrimitiveType_PartialFusion) && node->out_kernels().size() == 1 && + node->out_kernels().front()->Type() == schema::PrimitiveType_Call) { + return true; + } + } + + return false; +} + +kernel::LiteKernel *LiteKernelUtil::GetInputsSpecificNode(const kernel::LiteKernel *kernel, + const schema::PrimitiveType &primitive_type) { + for (auto input : kernel->in_kernels()) { + if (input->Type() == primitive_type) { + return input; + } + } + return nullptr; +} + +bool 
LiteKernelUtil::InputsContainsSpecificNode(const kernel::LiteKernel *kernel, + const schema::PrimitiveType &primitive_type) { + if (GetInputsSpecificNode(kernel, primitive_type)) { + return true; + } + return false; +} + } // namespace mindspore::kernel diff --git a/mindspore/lite/src/lite_kernel_util.h b/mindspore/lite/src/lite_kernel_util.h index ff7380df23..4a3d20c8b0 100644 --- a/mindspore/lite/src/lite_kernel_util.h +++ b/mindspore/lite/src/lite_kernel_util.h @@ -35,6 +35,13 @@ class LiteKernelUtil { static void InitTensorInitRefCount(const std::vector &kernels); static int SetInput(const LiteKernel &kernelMod, const std::vector &inputs); + + static bool IsSwitchCall(kernel::LiteKernel *kernel); + + static kernel::LiteKernel *GetInputsSpecificNode(const kernel::LiteKernel *kernel, + const schema::PrimitiveType &primitive_type); + + static bool InputsContainsSpecificNode(const kernel::LiteKernel *kernel, const schema::PrimitiveType &primitive_type); }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/lite_mindrt.cc b/mindspore/lite/src/lite_mindrt.cc index 91262ce8de..7750df45ae 100644 --- a/mindspore/lite/src/lite_mindrt.cc +++ b/mindspore/lite/src/lite_mindrt.cc @@ -48,7 +48,7 @@ int LiteOpActor::CompileArrow() { void LiteOpActor::AsyncOutput(OpContext *context) { for (auto op_arrow : output_op_arrows_) { - auto data = context->outputData_->at(op_arrow->from_output_index_); + auto data = context->output_data_->at(op_arrow->from_output_index_); Async(op_arrow->to_op_id_, &mindspore::OpActor::RunOpData, data, context); } return; diff --git a/mindspore/lite/src/ops/populate/call_populate.cc b/mindspore/lite/src/ops/populate/call_populate.cc new file mode 100644 index 0000000000..79bb34871f --- /dev/null +++ b/mindspore/lite/src/ops/populate/call_populate.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +using mindspore::schema::PrimitiveType_Call; + +namespace mindspore { +namespace lite { +OpParameter *PopulateCallParameter(const void *prim) { + OpParameter *call_parameter = reinterpret_cast(malloc(sizeof(OpParameter))); + if (call_parameter == nullptr) { + MS_LOG(ERROR) << "malloc CallParameter failed."; + return nullptr; + } + memset(call_parameter, 0, sizeof(OpParameter)); + auto primitive = static_cast(prim); + call_parameter->type_ = primitive->value_type(); + return reinterpret_cast(call_parameter); +} +REG_POPULATE(PrimitiveType_Call, PopulateCallParameter, SCHEMA_CUR) +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/partial_populate.cc b/mindspore/lite/src/ops/populate/partial_populate.cc index 4ff60edffb..f088fb9d70 100644 --- a/mindspore/lite/src/ops/populate/partial_populate.cc +++ b/mindspore/lite/src/ops/populate/partial_populate.cc @@ -14,14 +14,11 @@ * limitations under the License. 
*/ #include "src/ops/populate/populate_register.h" +#include "nnacl/partial_fusion_parameter.h" using mindspore::schema::PrimitiveType_PartialFusion; namespace mindspore { namespace lite { -typedef struct PartialParameter { - OpParameter op_parameter_; - int sub_graph_index_; -} PartialParameter; OpParameter *PopulatePartialParameter(const void *prim) { PartialParameter *partial_parameter = reinterpret_cast(malloc(sizeof(PartialParameter))); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/call.cc b/mindspore/lite/src/runtime/kernel/arm/base/call.cc new file mode 100644 index 0000000000..ebacb7b214 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/base/call.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/runtime/kernel/arm/base/call.h" +#include "src/kernel_registry.h" +#include "include/errorcode.h" +#include "src/tensorlist.h" +#include "src/common/utils.h" + +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_Call; + +// this file is useless when move create actor before schedule. 
+namespace mindspore::kernel { +int CallCPUKernel::Init() { return RET_OK; } +int CallCPUKernel::ReSize() { return RET_OK; } +int CallCPUKernel::Run() { return RET_OK; } + +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Call, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Call, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Call, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Call, LiteKernelCreator) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/call.h b/mindspore/lite/src/runtime/kernel/arm/base/call.h new file mode 100644 index 0000000000..11e0d186ac --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/base/call.h @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CALL_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CALL_H_ + +#include +#include "src/runtime/kernel/arm/base/carry_data.h" +#include "src/tensor.h" +#include "src/tensorlist.h" + +// this file is useless when move create actor before schedule. 
+namespace mindspore::kernel { +class CallCPUKernel : public LiteKernel { + public: + CallCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} + ~CallCPUKernel() override = default; + int Init() override; + int ReSize() override; + int Run() override; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CALL_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.cc b/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.cc new file mode 100644 index 0000000000..0a7949c30b --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/runtime/kernel/arm/base/partial_fusion.h" +#include "src/kernel_registry.h" +#include "include/errorcode.h" +#include "src/tensorlist.h" +#include "src/common/utils.h" + +// this file is going to be removed when move create actor before schedule. 
+using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_PartialFusion; + +namespace mindspore::kernel { +int PartialFusionKernel::Init() { return RET_OK; } +int PartialFusionKernel::ReSize() { return RET_OK; } +int PartialFusionKernel::Run() { return RET_OK; } +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PartialFusion, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_PartialFusion, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_PartialFusion, LiteKernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_PartialFusion, LiteKernelCreator) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.h b/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.h new file mode 100644 index 0000000000..79d35a656e --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/base/partial_fusion.h @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_PARTIAL_FUSION_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_PARTIAL_FUSION_H_ + +#include +#include "src/runtime/kernel/arm/base/carry_data.h" +#include "src/tensor.h" +#include "src/tensorlist.h" + +// this file is going to be removed when move create actor before schedule. 
+namespace mindspore::kernel { +class PartialFusionKernel : public LiteKernel { + public: + PartialFusionKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} + ~PartialFusionKernel() override = default; + int Init() override; + int ReSize() override; + int Run() override; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_PARTIAL_FUSION_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc index 76418256af..78eeedde44 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc @@ -72,7 +72,8 @@ int ArithmeticCPUKernel::CheckDataType() { auto in0_dataType = in_tensors_.at(0)->data_type(); auto in1_dataType = in_tensors_.at(1)->data_type(); if (in0_dataType != in1_dataType) { - MS_LOG(ERROR) << "The dataTypes of input tensor0 and input tensor1 should be the same."; + MS_LOG(ERROR) << "The dataTypes of input tensor0 and input tensor1 should be the same. 
input 0 dataType: " + << in0_dataType << " input 1 dataType: " << in1_dataType; return RET_ERROR; } return RET_OK; @@ -408,7 +409,7 @@ int ArithmeticsRun(void *cdata, int task_id) { int ArithmeticCPUKernel::Run() { if (CheckDataType() != RET_OK) { - MS_LOG(ERROR) << "ArithmeticCPUKernel check dataType failed."; + MS_LOG(ERROR) << "ArithmeticCPUKernel check dataType failed, kernel name: " << this->name(); return RET_ERROR; } if (!input0_broadcast_) { diff --git a/mindspore/lite/src/sub_graph_kernel.cc b/mindspore/lite/src/sub_graph_kernel.cc index d2fa37fd35..d2dae3a8dc 100644 --- a/mindspore/lite/src/sub_graph_kernel.cc +++ b/mindspore/lite/src/sub_graph_kernel.cc @@ -23,6 +23,7 @@ #include "src/common/version_manager.h" #include "src/runtime/infer_manager.h" #include "src/common/tensor_util.h" +#include "src/common/utils.h" namespace mindspore::kernel { using mindspore::lite::RET_ERROR; @@ -141,6 +142,11 @@ void SubGraphKernel::InitOutTensorInitRefCount() { node->InitOutTensorInitRefCount(); } } +void SubGraphKernel::DropNode(LiteKernel *node) { + lite::VectorErase(&nodes_, node); + lite::VectorErase(&in_nodes_, node); + lite::VectorErase(&out_nodes_, node); +} int CpuSubGraph::Prepare() { auto ret = SubGraphKernel::Prepare(); diff --git a/mindspore/lite/src/sub_graph_kernel.h b/mindspore/lite/src/sub_graph_kernel.h index 728eb5ad24..65f7720607 100644 --- a/mindspore/lite/src/sub_graph_kernel.h +++ b/mindspore/lite/src/sub_graph_kernel.h @@ -112,6 +112,8 @@ class SubGraphKernel : public LiteKernel { std::vector nodes() { return this->nodes_; } + void DropNode(LiteKernel *node); + protected: std::vector nodes_{}; // entry nodes in nodes diff --git a/mindspore/lite/src/tensor.cc b/mindspore/lite/src/tensor.cc index 047da46e8e..e0cdef9954 100644 --- a/mindspore/lite/src/tensor.cc +++ b/mindspore/lite/src/tensor.cc @@ -348,6 +348,8 @@ void *Tensor::MutableData() { return this->data_; } +void Tensor::IncRefCount() { ++ref_count_; } + void Tensor::DecRefCount() { if 
(this->IsConst() || this->IsGraphInput()) { return; diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h index 1fb28d6661..cd667316ba 100644 --- a/mindspore/lite/src/tensor.h +++ b/mindspore/lite/src/tensor.h @@ -145,6 +145,8 @@ class Tensor : public mindspore::tensor::MSTensor { void ResetRefCount() { this->ref_count_ = this->init_ref_count_; } + void IncRefCount(); + void DecRefCount(); std::string ToString() const;