@@ -188,6 +188,7 @@ int NPUManager::AddModel(void *model_buf, uint32_t size, const std::string &mode
   model_desc_.push_back(desc);
   mc_builder_->MemBufferDestroy(buffer);
+  index_++;
   return RET_OK;
 }
@@ -214,4 +215,6 @@ int NPUManager::LoadOMModel() {
 }
 std::shared_ptr<hiai::AiModelMngerClient> NPUManager::GetClient() { return client_; }
+int NPUManager::index() { return index_; }
 }  // namespace mindspore::lite
@@ -43,6 +43,8 @@ class NPUManager {
   // provide to executor.
   std::shared_ptr<hiai::AiModelMngerClient> GetClient();
+  int index();
  private:
   void CheckSupportNPU();
@@ -53,6 +55,8 @@ class NPUManager {
   std::string GetExecutorPath();
  private:
+  int index_ = 0;
   bool is_npu_check_executor = false;
   bool is_support_npu = false;
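
Taken together, the npu_manager hunks give NPUManager a running model counter: index_ starts at 0, is incremented once per successful AddModel() call, and is exposed read-only through index() so callers can derive unique names from it. Below is a minimal illustrative sketch of that counter pattern using a toy class, not the real NPUManager; the hiai types and the rest of AddModel() are deliberately omitted.

#include <cstdint>
#include <string>

// Toy stand-in for the pattern above: a manager that counts loaded models so
// each new subgraph/OM model can get a unique numeric suffix.
class ToyModelManager {
 public:
  // Mirrors NPUManager::AddModel(): on success the counter advances by one.
  int AddModel(const void *model_buf, uint32_t size, const std::string &model_name) {
    (void)model_buf;
    (void)size;
    (void)model_name;  // buffer handling elided in this sketch
    index_++;
    return 0;  // RET_OK
  }
  // Mirrors NPUManager::index(): read-only view of how many models were added.
  int index() const { return index_; }

 private:
  int index_ = 0;  // first model/subgraph gets suffix "0"
};
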
@@ -52,7 +52,7 @@ domi::ModelBufferData *SubGraphNpuKernel::BuildIRModel() {
   domi::HiaiIrBuild ir_build;
   auto om_model_buff = new (std::nothrow) domi::ModelBufferData;
   if (om_model_buff == nullptr) {
-    MS_LOG(ERROR) << "om model buffer is nullptr.";
+    MS_LOG(ERROR) << "OM model buffer is nullptr.";
     return nullptr;
   }
   if (!ir_build.CreateModelBuff(model, *om_model_buff)) {
@@ -155,9 +155,8 @@ int SubGraphNpuKernel::BuildNPUOutputOp() {
   return RET_OK;
 }
-void SubGraphNpuKernel::SetIndex(int index) { this->index_ = index; }
-std::string SubGraphNpuKernel::GetOMModelName() { return this->name_ + std::to_string(index_) + ".om"; }
+std::string SubGraphNpuKernel::GetOMModelName() { return this->name_ + ".om"; }
 int SubGraphNpuKernel::Init() {
   model_buffer_data_ = BuildIRModel();
   if (model_buffer_data_ == nullptr) {
@@ -34,7 +34,6 @@ class SubGraphNpuKernel : public SubGraphKernel {
                     const lite::InnerContext *ctx = nullptr)
       : SubGraphKernel(inputs, outputs, inKernels, outKernels, nodes, ctx) {
     subgraph_type_ = kNpuSubGraph;
-    this->name_ = "NpuSubGraph";
   }
   ~SubGraphNpuKernel() override = default;
@@ -56,8 +55,6 @@ class SubGraphNpuKernel : public SubGraphKernel {
     return RET_ERROR;
   }
-  void SetIndex(int index);
  private:
   domi::ModelBufferData *BuildIRModel();
@@ -72,8 +69,6 @@ class SubGraphNpuKernel : public SubGraphKernel {
   std::string GetOMModelName();
  private:
-  int index_;
   domi::ModelBufferData *model_buffer_data_;
   std::vector<ge::Operator> subgraph_input_op_;
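
The subgraph_npu_kernel hunks drop the kernel's own index_ and SetIndex() and stop hard-coding name_ = "NpuSubGraph" in the constructor: uniqueness now comes entirely from the name the scheduler assigns, and GetOMModelName() simply appends ".om" to it. A toy sketch of that responsibility split (not the real MindSpore classes):

#include <string>

class ToySubGraphKernel {
 public:
  // The scheduler is expected to pass in an already-unique name,
  // e.g. "NPUSubgraph0", "NPUSubgraph1", ...
  void set_name(const std::string &name) { name_ = name; }
  // After this change the OM file name is derived purely from name_,
  // so no per-kernel index is needed anymore.
  std::string GetOMModelName() const { return name_ + ".om"; }

 private:
  std::string name_;
};
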
@@ -34,7 +34,7 @@ int FloorNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, cons
     return RET_ERROR;
   }
   op_->set_input_x(*npu_inputs[0]);
-  return REK_OK;
+  return RET_OK;
 }
 ge::Operator *mindspore::kernel::FloorNPUKernel::GetNPUOp() { return this->op_; }
@@ -237,7 +237,7 @@ int Scheduler::ConstructSubGraphs(std::vector<kernel::LiteKernel *> *kernels) {
     }
     auto cur_sub_graph_type = mindspore::lite::Scheduler::GetKernelSubGraphType(head_kernel);
     auto sub_kernels = FindAllSubGraphKernels(head_kernel, &is_kernel_sinked);
-    auto subgraph = CreateSubGraphKernel(sub_kernels, cur_sub_graph_type, kernels->size());
+    auto subgraph = CreateSubGraphKernel(sub_kernels, cur_sub_graph_type);
     if (subgraph == nullptr) {
       MS_LOG(ERROR) << "Create SubGraphKernel failed";
       return RET_ERROR;
@@ -248,7 +248,7 @@ int Scheduler::ConstructSubGraphs(std::vector<kernel::LiteKernel *> *kernels) {
 }
 kernel::SubGraphKernel *Scheduler::CreateSubGraphKernel(const std::vector<kernel::LiteKernel *> &kernels,
-                                                        kernel::SubGraphType type, int index) {
+                                                        kernel::SubGraphType type) {
   if (type == kernel::kApuSubGraph) {
     return nullptr;
   }
@@ -267,10 +267,15 @@ kernel::SubGraphKernel *Scheduler::CreateSubGraphKernel(const std::vector<kernel
   }
   if (type == kernel::kNpuSubGraph) {
 #if SUPPORT_NPU
-    auto sub_kernel =
-      new kernel::SubGraphNpuKernel(input_tensors, output_tensors, input_kernels, output_kernels, kernels, context_);
-    sub_kernel->SetIndex(index);
+    auto sub_kernel = new (std::nothrow)
+      kernel::SubGraphNpuKernel(input_tensors, output_tensors, input_kernels, output_kernels, kernels, context_);
+    if (sub_kernel == nullptr) {
+      MS_LOG(ERROR) << "NPU subgraph new failed.";
+      return nullptr;
+    }
+    sub_kernel->set_name("NPUSubgraph" + std::to_string(NPUManager::GetInstance()->index()));
     if (sub_kernel->Init() != RET_OK) {
+      MS_LOG(ERROR) << "NPU subgraph init failed.";
       return nullptr;
     }
     return sub_kernel;
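
The scheduler now allocates the NPU subgraph with new (std::nothrow), checks the result before touching it, names it "NPUSubgraph" plus the NPUManager counter, and logs when Init() fails. The sketch below illustrates that allocate-check-name pattern with stand-in types; the counter variable and logging here are placeholders for NPUManager::GetInstance()->index() and MS_LOG(ERROR) in the real code.

#include <iostream>
#include <new>
#include <string>

struct ToyKernel {
  std::string name;
  void set_name(const std::string &n) { name = n; }
  int Init() { return 0; }  // RET_OK in the real code
};

static int g_next_index = 0;  // placeholder for NPUManager::GetInstance()->index()

ToyKernel *CreateKernel() {
  // new (std::nothrow) returns nullptr instead of throwing on allocation
  // failure, so the error can flow through the usual nullptr checks.
  auto *kernel = new (std::nothrow) ToyKernel;
  if (kernel == nullptr) {
    std::cerr << "kernel new failed." << std::endl;
    return nullptr;
  }
  // Unique, stable name derived from a process-wide counter.
  kernel->set_name("NPUSubgraph" + std::to_string(g_next_index++));
  if (kernel->Init() != 0) {
    std::cerr << "kernel init failed." << std::endl;
    delete kernel;
    return nullptr;
  }
  return kernel;
}
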
@@ -46,7 +46,7 @@ class Scheduler {
   int ConstructSubGraphs(std::vector<kernel::LiteKernel *> *kernels);
   kernel::SubGraphKernel *CreateSubGraphKernel(const std::vector<kernel::LiteKernel *> &kernels,
-                                               kernel::SubGraphType type, int index);
+                                               kernel::SubGraphType type);
   std::vector<kernel::LiteKernel *> FindAllSubGraphKernels(
     kernel::LiteKernel *head_kernel, std::map<const kernel::LiteKernel *, bool> *sinked_kernel_map);