@@ -49,11 +49,12 @@ int Executor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_
     MS_LOG(ERROR) << "CheckInputs failed";
     return ret;
   }
-  MS_ASSERT(std::all_of(kernels.begin(), kernels.end(), [](kernel::LiteKernel *kernel) {
-    return std::all_of(kernel->in_tensors().begin(), kernel->in_tensors().end(), [](Tensor *in_tensor) {
-      return in_tensor->IsConst() || in_tensor->IsGraphInput() || in_tensor->ref_count() == 0;
-    });
-  }));
+  // clear ref_count
+  for (auto *kernel : kernels) {
+    for (auto *tensor : kernel->in_tensors()) {
+      tensor->set_ref_count(0);
+    }
+  }
   std::queue<kernel::LiteKernel *> kernel_queue;
   for (auto kernel : kernels) {
     if (kernel->IsReady(kernel->in_tensors())) {
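The hunk above drops a debug-only MS_ASSERT (which merely checked that every kernel input was constant, a graph input, or already at ref_count 0) and instead resets the counters unconditionally, so repeated calls to Run start from clean per-tensor reference counts in every build type. The toy program below uses a made-up ToyTensor, not the real Tensor/LiteKernel classes, and only sketches why a counter left over from a previous run has to be cleared before scheduling:

#include <cassert>
#include <vector>

// Illustrative stand-in only; ref_count here mimics "remaining consumers of this tensor".
struct ToyTensor {
  int ref_count = 0;
};

int main() {
  ToyTensor activation;
  std::vector<ToyTensor *> kernel_inputs = {&activation};

  for (int run = 0; run < 2; ++run) {
    // Mirror of the new loop in the diff: without this reset, run 1 would start
    // from whatever value run 0 happened to leave behind.
    for (auto *tensor : kernel_inputs) {
      tensor->ref_count = 0;
    }
    activation.ref_count += 1;  // producer publishes the tensor for one consumer
    activation.ref_count -= 1;  // consumer releases it after executing
    assert(activation.ref_count == 0);
  }
  return 0;
}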
@@ -115,7 +115,7 @@ int LiteKernel::PreProcess() {
   for (auto *output : this->out_tensors()) {
     MS_ASSERT(output != nullptr);
-    if (output->ElementsNum() >= MAX_MALLOC_SIZE / static_cast<int>(sizeof(int64_t))) {
+    if (output->ElementsNum() >= lite::MAX_MALLOC_SIZE / static_cast<int>(sizeof(int64_t))) {
       MS_LOG(ERROR) << "The size of output tensor is too big";
       return RET_ERROR;
     }
@@ -251,6 +251,9 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) {
       }
       auto tensor_name = in_node->name_ + std::to_string(i);
       this->input_map_[tensor_name] = in_tensor;
+      if (!in_tensor->tensor_name().empty()) {
+        this->input_map_[in_tensor->tensor_name()] = in_tensor;
+      }
     }
   }
 }
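With this hunk, each graph input ends up under two keys in input_map_: the synthesized node-name-plus-index key that was already there, and the tensor's own name whenever it is non-empty. A small self-contained sketch of the resulting lookup behaviour, using a plain std::unordered_map and invented names (the real map type and tensor class are not shown in this hunk):

#include <cassert>
#include <string>
#include <unordered_map>

struct FakeTensor {
  std::string name;
};

int main() {
  FakeTensor in_tensor{"graph_input_0"};
  std::unordered_map<std::string, FakeTensor *> input_map;

  // Key that was already registered: node name + input index.
  input_map["Conv2D-op1" + std::to_string(0)] = &in_tensor;
  // New in this diff: also register the tensor's own (non-empty) name.
  if (!in_tensor.name.empty()) {
    input_map[in_tensor.name] = &in_tensor;
  }

  // Both keys now resolve to the same tensor, so inputs can be fetched by their
  // original tensor name instead of only the synthesized one.
  assert(input_map.at("graph_input_0") == input_map.at("Conv2D-op10"));
  return 0;
}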
@@ -49,6 +49,10 @@ void *DefaultAllocator::Malloc(size_t size) {
     MS_LOG(ERROR) << "MallocData out of max_size, size: " << size;
     return nullptr;
   }
+  if (this->GetTotalSize() >= MAX_THREAD_POOL_SIZE) {
+    MS_LOG(ERROR) << "Memory pool is exhausted";
+    return nullptr;
+  }
   Lock();
   auto iter = freeList_.lower_bound(size);
   if (iter != freeList_.end() && (iter->second->size >= size) && (iter->second->size < (size << shiftFactor_))) {
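The added guard makes DefaultAllocator::Malloc fail fast once the pool's total size reaches MAX_THREAD_POOL_SIZE, so callers can now get a nullptr back even for an otherwise reasonable request. The toy pool below (illustrative names, not the MindSpore Lite allocator) mirrors that caller-visible contract:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Stand-in for the cap introduced in the allocator header later in this diff.
constexpr int64_t kPoolCap = static_cast<int64_t>(3000) * 1024 * 1024;

struct ToyPool {
  int64_t total = 0;
  void *Malloc(size_t size) {
    if (total >= kPoolCap) {
      return nullptr;  // pool exhausted: mirrors the new early return in the diff
    }
    total += static_cast<int64_t>(size);
    return ::operator new(size);
  }
};

int main() {
  ToyPool pool;
  void *buffer = pool.Malloc(1024);
  if (buffer == nullptr) {
    // Callers must treat nullptr as a recoverable "pool is full" error,
    // not only as a bad request size.
    std::cerr << "allocation failed, pool exhausted\n";
    return 1;
  }
  ::operator delete(buffer);
  return 0;
}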
@@ -39,7 +39,6 @@ class Allocator {
   virtual void Free(void *ptr) = 0;
   virtual void SetContext(const AllocatorContext &ctx) {}
   virtual size_t GetTotalSize() { return 0; }
-  virtual void Clear() {}
   static std::shared_ptr<Allocator> Create();
   virtual void *Prepare(void *ptr) { return ptr; }
   std::string name;
@@ -53,7 +52,7 @@ class DefaultAllocator : public Allocator {
   void *Malloc(size_t size) override;
   void Free(void *ptr) override;
   size_t GetTotalSize() override;
-  void Clear() override;
+  void Clear();
 
  private:
   void Lock();
@@ -72,7 +71,8 @@ class DefaultAllocator : public Allocator {
   bool lockFlag_ = false;
 };
-#define MAX_MALLOC_SIZE (2000 * 1024 * 1024)
+constexpr int64_t MAX_MALLOC_SIZE = static_cast<size_t>(2000) * 1024 * 1024;
+constexpr int64_t MAX_THREAD_POOL_SIZE = static_cast<size_t>(3000) * 1024 * 1024;
 }  // namespace mindspore::lite
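Turning the macro into namespace-scoped constexpr constants is what forces the lite:: qualifier added in the LiteKernel::PreProcess hunk earlier: a #define is substituted textually everywhere, while a constexpr name lives inside mindspore::lite and needs qualified lookup from other namespaces. A compile-only sketch with stand-in namespaces (demo_lite and demo_kernel are invented for illustration):

#include <cstdint>

namespace demo_lite {  // stand-in for mindspore::lite
constexpr int64_t MAX_MALLOC_SIZE = static_cast<int64_t>(2000) * 1024 * 1024;
}  // namespace demo_lite

namespace demo_kernel {  // stand-in for the kernel code's namespace
constexpr bool TooBig(int64_t elements) {
  // With the old #define, a bare MAX_MALLOC_SIZE would have compiled here;
  // the namespaced constexpr needs the demo_lite:: (i.e. lite::) qualifier.
  return elements >= demo_lite::MAX_MALLOC_SIZE / static_cast<int64_t>(sizeof(int64_t));
}
}  // namespace demo_kernel

// 2000 * 1024 * 1024 bytes / 8 bytes per int64_t element = 262144000 elements.
static_assert(demo_kernel::TooBig(262144000), "at the element-count ceiling");
static_assert(!demo_kernel::TooBig(262143999), "just below the ceiling");

int main() { return 0; }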
@@ -42,7 +42,7 @@ class OpenCLAllocator : public Allocator {
   void Free(void *ptr) override;
   size_t GetTotalSize() override;
-  void Clear() override;
+  void Clear();
   void *GetImage(void *host_ptr);
   void *GetBuffer(void *host_ptr);
   void *MapBuffer(void *host_ptr, int flags, void *command_queue = nullptr, bool sync = true);
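Together with the Allocator and DefaultAllocator hunks above, this removes Clear() from the abstract Allocator interface, so DefaultAllocator and OpenCLAllocator now expose it as a plain, non-virtual member. Code that only holds an Allocator pointer can no longer call Clear(); it needs the concrete allocator type. A hedged sketch with stand-in classes (BaseAllocator and CpuAllocator are invented here, not the real MindSpore Lite types):

#include <cstddef>
#include <memory>

struct BaseAllocator {  // stand-in for Allocator after this diff: no virtual Clear()
  virtual ~BaseAllocator() = default;
  virtual void *Malloc(size_t size) = 0;
  virtual void Free(void *ptr) = 0;
};

struct CpuAllocator : BaseAllocator {  // stand-in for DefaultAllocator
  void *Malloc(size_t size) override { return ::operator new(size); }
  void Free(void *ptr) override { ::operator delete(ptr); }
  void Clear() { /* release everything kept in the free list */ }
};

int main() {
  auto cpu = std::make_shared<CpuAllocator>();
  std::shared_ptr<BaseAllocator> base = cpu;

  // base->Clear();  // no longer compiles: Clear() is not part of the base interface
  cpu->Clear();      // callers go through the concrete allocator instead
  base->Free(base->Malloc(16));
  return 0;
}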