
executor.cc 12 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/session/executor.h"

#include <algorithm>
#include <exception>

#include "runtime/device/kernel_runtime_manager.h"
#include "backend/session/executor_manager.h"
#include "utils/comm_manager.h"
#include "utils/scoped_long_running.h"
namespace mindspore {
namespace session {
namespace {
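// Walk the (possibly nested) output VectorRef and refresh each output tensor:
// bind the kernel's device address, update the host shape for dynamic-shape
// nodes, and eagerly sync device data back to host for tensors that need it.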
void UpdateOutputTensors(const VectorRef *outputs,
                         const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (auto item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      auto vector_ref = utils::cast<VectorRef>(item);
      UpdateOutputTensors(&vector_ref, tensor_to_node);
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      auto tensor = utils::cast<tensor::TensorPtr>(item);
      MS_EXCEPTION_IF_NULL(tensor);
      auto iter = tensor_to_node.find(tensor);
      if (iter != tensor_to_node.end()) {
        auto &node = iter->second.first;
        auto &output_index = iter->second.second;
        auto address = AnfAlgo::GetMutableOutputAddr(node, output_index);
        tensor->set_device_address(address);
        if (AnfAlgo::IsDynamicShape(node)) {
          auto updated_shape = AnfAlgo::GetOutputInferShape(node, output_index);
          ShapeVector int_shape;
          std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
          tensor->set_shape(int_shape);
        }
      }
      if (tensor->NeedSyncDeviceToHostImmediately()) {
        tensor->data_sync(false);
        tensor->set_device_address(nullptr);
        tensor->set_sync_status(kNeedSyncHostToDevice);
      }
    }
  }
}

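// Clear the "need wait" flag on every tensor in the outputs so that consumers
// blocked in Tensor::Wait() are released.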
void NotifyOutputTensors(const VectorRef *outputs) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (auto item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      auto vector_ref = utils::cast<VectorRef>(item);
      NotifyOutputTensors(&vector_ref);
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      auto tensor = utils::cast<tensor::TensorPtr>(item);
      MS_EXCEPTION_IF_NULL(tensor);
      tensor->SetNeedWait(false);
    }
  }
}

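// Return true if the nested outputs contain at least one tensor; a graph with
// no tensor outputs (e.g. a dataset graph) is handled on the synchronous path.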
bool TensorInVector(const VectorRef *outputs) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (auto item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      auto vector_ref = utils::cast<VectorRef>(item);
      if (TensorInVector(&vector_ref)) {
        return true;
      }
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      return true;
    }
  }
  return false;
}
}  // namespace

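// Task bodies executed on the worker thread: each task forwards to the
// corresponding *Impl method on its session.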
void CompileNodesTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  MS_EXCEPTION_IF_NULL(segment_);
  graph_id_ = session_->CompileGraphImpl(segment_->nodes_, output_nodes_);
}

void CompileGraphTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  graph_id_ = session_->CompileGraphImpl(NOT_NULL(func_graph_));
}

void BuildGraphTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  session_->BuildGraphImpl(graph_id_);
}

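// Run a graph; even if execution throws, the input tensors are always
// unlocked and output waiters notified so that dependent tasks cannot
// deadlock. Captured exceptions are re-raised on the calling thread later.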
void RunGraphTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  try {
    auto graph = session_->GetGraph(graph_id_);
    MS_EXCEPTION_IF_NULL(graph);
    graph->ResetGraphRunningStatus();
    session_->RunGraphImpl(graph_id_, input_tensors_, &outputs_);
    graph->OnRunGraphFinished();
    UpdateOutputTensors(&outputs_, tensor_to_node_);
  } catch (const std::exception &e) {
    MsException::GetInstance().SetException();
  }
  for (auto &tensor : input_need_lock_tensors_) {
    tensor->SetNeedWait(false);
  }
  NotifyOutputTensors(&outputs_);
  ExecutorManager::Instance().OnRunGraphFinished();
}

void BuildOpTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  session_->BuildOpImpl(*op_run_info_, graph_info_, input_tensors_, tensors_mask_);
}

void RunOpTask::Run() {
  MS_EXCEPTION_IF_NULL(session_);
  session_->RunOpImpl(*op_run_info_, graph_info_, input_tensors_, &outputs_);
}

void CreateCommGroupTask::Run() { result_ = CommManager::GetInstance().CreateGroupSync(group_name_, ranks_); }

void DestroyCommGroupTask::Run() { result_ = CommManager::GetInstance().DestroyGroup(group_name_); }

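// The executor owns a dedicated worker thread that drains ready_tasks_; the
// public entry points below enqueue tasks for it instead of running them
// inline.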
Executor::Executor(const std::string &device_name, uint32_t device_id) {
  device_name_ = device_name;
  device_id_ = device_id;
  worker_ = std::make_shared<std::thread>(&Executor::WorkerLoop, this);
}

Executor::~Executor() { WorkerJoin(); }

void Executor::WorkerJoin() {
  // Avoid the worker thread joining itself, which would deadlock.
  if (worker_->joinable() && worker_->get_id() != std::this_thread::get_id()) {
    {
      std::unique_lock<std::mutex> lock(task_mutex_);
      auto task = std::make_shared<ExitTask>();
      ready_tasks_.push(task);
      task_cond_var_.notify_all();
    }
    worker_->join();
  }
}

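// Main loop of the worker thread: pop one task at a time, run it, record it
// in done_tasks_, and wake synchronous callers. An ExitTask ends the loop.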
void Executor::WorkerLoop() {
  while (true) {
    std::shared_ptr<Task> task;
    {
      std::unique_lock<std::mutex> lock(task_mutex_);
      task_cond_var_.wait(lock, [this] { return !ready_tasks_.empty(); });
      task = ready_tasks_.front();
      ready_tasks_.pop();
    }
    if (task->type_ == kExit) {
      OnWorkerExit();
      return;
    }
    try {
      task->Run();
    } catch (const std::exception &e) {
      MsException::GetInstance().SetException();
    }
    {
      std::unique_lock<std::mutex> lock(task_mutex_);
      done_tasks_.emplace_back(task);
    }
    if (task->type_ != kRunGraph || task->sync_run_) {
      sync_cond_var_.notify_all();
    }
  }
}

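// Move every pending RunGraphTask whose dependencies are now satisfied out of
// pending_tasks_ and return them.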
std::vector<std::shared_ptr<RunGraphTask>> Executor::GetNewReadyTasks() {
  std::vector<std::shared_ptr<RunGraphTask>> new_ready_tasks;
  std::unique_lock<std::mutex> lock(pending_task_mutex_);
  for (auto iter = pending_tasks_.begin(); iter != pending_tasks_.end();) {
    auto task = *iter;
    if (IsTaskReady(task)) {
      new_ready_tasks.emplace_back(task);
      iter = pending_tasks_.erase(iter);
    } else {
      ++iter;
    }
  }
  return new_ready_tasks;
}

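// Called when a graph finishes running: promote newly ready pending tasks to
// the ready queue and wake callers blocked on graph re-entry.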
void Executor::OnRunGraphFinished() {
  auto new_ready_tasks = GetNewReadyTasks();
  std::unique_lock<std::mutex> lock(task_mutex_);
  for (auto &task : new_ready_tasks) {
    ready_tasks_.push(task);
  }
  if (!new_ready_tasks.empty()) {
    task_cond_var_.notify_all();
  }
  reenter_cond_var_.notify_all();
}

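// A task is ready once none of its inputs are still waiting to be produced
// and the graph's pre-graphs have finished.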
bool Executor::IsTaskReady(const std::shared_ptr<RunGraphTask> &task) {
  MS_EXCEPTION_IF_NULL(task);
  for (auto &input : task->input_need_wait_tensors_) {
    MS_EXCEPTION_IF_NULL(input);
    if (input->NeedWait()) {
      return false;
    }
  }
  auto session = task->session_;
  MS_EXCEPTION_IF_NULL(session);
  auto graph = session->GetGraph(task->graph_id_);
  if (graph != nullptr) {
    return graph->IsPreGraphFinished();
  }
  return true;
}

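// Enqueue a task and block until the worker signals completion, then check
// for (and re-raise) any exception captured on the worker thread.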
void Executor::SyncRunTask(const std::shared_ptr<Task> &task) {
  std::unique_lock<std::mutex> lock(task_mutex_);
  ready_tasks_.push(task);
  done_tasks_.clear();
  task_cond_var_.notify_all();
  sync_cond_var_.wait(lock);
  MsException::GetInstance().CheckException();
}

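// Public entry points: each wraps its arguments in the matching task type and
// dispatches it to the worker thread.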
GraphId Executor::CompileGraph(const SessionPtr &session, const GraphSegmentPtr &segment,
                               const AnfNodePtrList &outputs) {
  auto task = std::make_shared<CompileNodesTask>();
  task->session_ = session;
  task->segment_ = segment;
  task->output_nodes_ = outputs;
  SyncRunTask(task);
  return task->graph_id_;
}

GraphId Executor::CompileGraph(const SessionPtr &session, NotNull<FuncGraphPtr> func_graph) {
  auto task = std::make_shared<CompileGraphTask>();
  task->session_ = session;
  task->func_graph_ = func_graph;
  SyncRunTask(task);
  return task->graph_id_;
}

void Executor::BuildGraph(const SessionPtr &session, GraphId graphId) {
  auto task = std::make_shared<BuildGraphTask>();
  task->session_ = session;
  task->graph_id_ = graphId;
  SyncRunTask(task);
}

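// Synchronous graph execution: output tensors are created up front and the
// caller blocks until the worker has run the graph.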
void Executor::RunGraph(const SessionPtr &session, const GraphId &graph_id,
                        const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  MS_EXCEPTION_IF_NULL(session);
  MS_EXCEPTION_IF_NULL(outputs);
  auto task = std::make_shared<RunGraphTask>();
  task->session_ = session;
  task->graph_id_ = graph_id;
  task->input_tensors_ = inputs;
  session->CreateOutputTensors(graph_id, inputs, outputs, &task->tensor_to_node_);
  task->outputs_ = *outputs;
  task->sync_run_ = true;
  mindspore::ScopedLongRunning long_running;
  SyncRunTask(task);
}

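// Asynchronous graph execution: inputs produced by other graphs are tracked
// as wait dependencies, and the task is either queued as pending (if its
// dependencies are unmet) or pushed straight to the ready queue.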
void Executor::RunGraphAsync(const SessionPtr &session, const GraphId &graph_id,
                             const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  MS_EXCEPTION_IF_NULL(session);
  MS_EXCEPTION_IF_NULL(outputs);
  auto task = std::make_shared<RunGraphTask>();
  task->session_ = session;
  task->graph_id_ = graph_id;
  task->input_tensors_ = inputs;
  task->input_need_lock_tensors_ = session->GetInputNeedLockTensors(graph_id, inputs);
  for (auto &tensor : inputs) {
    if (tensor->NeedWait()) {
      if (tensor->IsGraphOutput()) {
        task->input_need_wait_tensors_.emplace_back(tensor);
      } else {
        mindspore::ScopedLongRunning long_running;
        tensor->Wait();
      }
    }
  }
  for (auto &tensor : task->input_need_lock_tensors_) {
    tensor->SetNeedWait(true);
  }
  session->CreateOutputTensors(graph_id, inputs, outputs, &task->tensor_to_node_);
  // Maintain a copy of the output vector.
  task->outputs_ = *outputs;
  // Run synchronously when the graph has no tensor output (e.g. a dataset graph).
  if (!TensorInVector(outputs)) {
    task->sync_run_ = true;
    mindspore::ScopedLongRunning long_running;
    SyncRunTask(task);
    return;
  }
  auto graph = session->GetGraph(task->graph_id_);
  if (graph != nullptr) {
    if (!graph->IsPostGraphFinished()) {
      mindspore::ScopedLongRunning long_running;
      std::unique_lock<std::mutex> lock(reenter_mutex_);
      reenter_cond_var_.wait(lock, [graph] { return graph->IsPostGraphFinished(); });
    }
  }
  bool ready = IsTaskReady(task);
  if (!ready) {
    std::unique_lock<std::mutex> lock(pending_task_mutex_);
    pending_tasks_.push_back(task);
    return;
  }
  std::unique_lock<std::mutex> lock(task_mutex_);
  ready_tasks_.push(task);
  done_tasks_.clear();
  task_cond_var_.notify_all();
}

void Executor::BuildOp(const SessionPtr &session, OpRunInfo *op_run_info, const GraphInfo &graph_info,
                       const std::vector<tensor::TensorPtr> &input_tensors, const std::vector<int64_t> &tensors_mask) {
  auto task = std::make_shared<BuildOpTask>();
  task->session_ = session;
  task->op_run_info_ = op_run_info;
  task->graph_info_ = graph_info;
  task->input_tensors_ = input_tensors;
  task->tensors_mask_ = tensors_mask;
  SyncRunTask(task);
}

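// Single-op execution path: any input tensor still being produced is waited
// on before the op task is dispatched.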
void Executor::RunOp(const SessionPtr &session, OpRunInfo *op_run_info, const GraphInfo &graph_info,
                     const std::vector<tensor::TensorPtr> &input_tensors, VectorRef *outputs) {
  auto task = std::make_shared<RunOpTask>();
  task->session_ = session;
  task->op_run_info_ = op_run_info;
  task->graph_info_ = graph_info;
  task->input_tensors_ = input_tensors;
  for (auto &tensor : input_tensors) {
    if (tensor->NeedWait()) {
      tensor->Wait();
    }
  }
  SyncRunTask(task);
  *outputs = task->outputs_;
}

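// Communication group management also goes through the worker thread, so it
// is serialized with the graph and op tasks in the same queue.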
bool Executor::CreateCommGroup(const std::string &group_name, std::vector<uint32_t> ranks) {
  auto task = std::make_shared<CreateCommGroupTask>();
  task->group_name_ = group_name;
  task->ranks_ = ranks;
  SyncRunTask(task);
  return task->result_;
}

bool Executor::DestroyCommGroup(const std::string &group_name) {
  auto task = std::make_shared<DestroyCommGroupTask>();
  task->group_name_ = group_name;
  SyncRunTask(task);
  return task->result_;
}

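// On worker exit, release the kernel runtime for Ascend devices.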
void Executor::OnWorkerExit() {
  if (device_name_ == kAscendDevice) {
    device::KernelRuntimeManager::Instance().ReleaseKernelRuntime(kAscendDevice, device_id_);
  }
}
}  // namespace session
}  // namespace mindspore