kernel_runtime.cc

  1. /**
  2. * Copyright 2019-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "runtime/device/kernel_runtime.h"
  17. #include <functional>
  18. #include <utility>
  19. #include <vector>
  20. #include <set>
  21. #include "backend/optimizer/common/helper.h"
  22. #include "backend/session/anf_runtime_algorithm.h"
  23. #include "backend/session/kernel_graph.h"
  24. #include "common/trans.h"
  25. #include "debug/data_dump/dump_json_parser.h"
  26. #include "frontend/operator/ops.h"
  27. #include "ir/value.h"
  28. #include "utils/ms_context.h"
  29. #include "utils/ms_utils.h"
  30. #include "utils/shape_utils.h"
  31. #include "utils/utils.h"
  32. #include "frontend/parallel/context.h"
  33. #include "debug/env_config_parser.h"
  34. #include "pipeline/pynative/pynative_profiling.h"
  35. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  36. #include "ps/ps_cache/ps_cache_manager.h"
  37. #endif
  38. using mindspore::kernel::Address;
  39. using mindspore::kernel::AddressPtr;
  40. namespace mindspore {
  41. namespace device {
  42. namespace {
  43. std::vector<AnfNodePtr> GetGraphInputs(const session::KernelGraph *graph) {
  44. MS_EXCEPTION_IF_NULL(graph);
  45. auto graph_inputs = graph->inputs();
  46. std::vector<AnfNodePtr> result(graph_inputs.begin(), graph_inputs.end());
  47. std::set<AnfNodePtr> inputs_set(graph_inputs.begin(), graph_inputs.end());
  48. auto kernels = graph->execution_order();
  49. for (auto &kernel : kernels) {
  50. MS_EXCEPTION_IF_NULL(kernel);
  51. auto input_num = AnfAlgo::GetInputTensorNum(kernel);
  52. for (size_t i = 0; i < input_num; ++i) {
  53. auto input_node = kernel->input(i + 1);
  54. auto input_real_node = AnfAlgo::VisitKernelWithReturnType(input_node, 0).first;
  55. MS_EXCEPTION_IF_NULL(input_real_node);
  56. if (input_real_node->isa<Parameter>() && inputs_set.find(input_real_node) == inputs_set.end()) {
  57. (void)inputs_set.insert(input_real_node);
  58. (void)result.emplace_back(input_real_node);
  59. }
  60. }
  61. }
  62. return result;
  63. }
  64. } // namespace
  65. constexpr size_t kMinInputSize = 2;
  66. KernelRuntime::~KernelRuntime() {}
  67. bool KernelRuntime::Load(session::KernelGraph *graph, bool is_task_sink) { return true; }
  68. bool KernelRuntime::LoadData(session::KernelGraph *) { return false; }
  69. bool KernelRuntime::NodeOutputDeviceAddressExist(const AnfNodePtr &kernel, size_t index) {
  70. MS_EXCEPTION_IF_NULL(kernel);
  71. if (AnfAlgo::OutputAddrExist(kernel, index)) {
  72. const auto &address = AnfAlgo::GetOutputAddr(kernel, index);
  73. MS_EXCEPTION_IF_NULL(address);
  74. return address->DeviceType() == GetTargetDeviceAddressType();
  75. }
  76. return false;
  77. }
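// Graph-level memory assignment: reset the dynamic memory pool, assign static memory (inputs, value nodes,
// graph outputs), then dynamic memory for kernel outputs and workspaces, and finally refresh ref-node output addresses.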
  78. void KernelRuntime::AssignMemory(session::KernelGraph *graph) {
  79. auto context_ptr = MsContext::GetInstance();
  80. MS_EXCEPTION_IF_NULL(context_ptr);
  81. MS_EXCEPTION_IF_NULL(mem_manager_);
  82. mem_manager_->ResetDynamicMemory();
  83. AssignStaticMemory(graph);
  84. AssignDynamicMemory(graph);
  85. UpdateRefNodeOutputMem(graph);
  86. }
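// Single-op (PyNative) pre-allocation: create DeviceAddress placeholders (without a device pointer) for every kernel
// output, and bind each graph input parameter either to the input tensor's existing device address or to a new
// placeholder address that is marked for host-to-device sync.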
  87. void KernelRuntime::RunOpMallocPre(const session::KernelGraph &graph,
  88. const std::vector<tensor::TensorPtr> &input_tensors) {
  89. const auto &nodes = graph.execution_order();
  90. // Malloc for Node output
  91. for (const auto &node : nodes) {
  92. auto output_num = AnfAlgo::GetOutputTensorNum(node);
  93. for (size_t i = 0; i < output_num; ++i) {
  94. MS_EXCEPTION_IF_NULL(node);
  95. auto runtime_info = node->user_data<session::OpRuntimeInfo>();
  96. MS_EXCEPTION_IF_NULL(runtime_info);
  97. auto const &output_format = runtime_info->output_format(i);
  98. auto output_type = runtime_info->output_type(i);
  99. auto tensor_size = runtime_info->output_tensor_size(i);
  100. // Create DeviceAddress without ptr.
  101. // Get real device ptr after KernelBuild finish.
  102. auto device_address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type);
  103. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  104. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  105. }
  106. }
  107. // Malloc for graph input
  108. if (input_tensors.size() != graph.inputs().size()) {
  109. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  110. << " should be equal to graph input parameter size " << graph.inputs().size();
  111. }
  112. for (size_t input_index = 0; input_index < graph.inputs().size(); ++input_index) {
  113. auto item = graph.inputs()[input_index];
  114. MS_EXCEPTION_IF_NULL(item);
  115. if (!item->isa<Parameter>()) {
  116. continue;
  117. }
  118. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  119. for (size_t index = 0; index < output_size; index++) {
  120. auto current_tensor = input_tensors[input_index];
  121. MS_EXCEPTION_IF_NULL(current_tensor);
  122. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  123. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  124. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  125. continue;
  126. }
  127. auto op_runtime_info = item->user_data<session::OpRuntimeInfo>();
  128. MS_EXCEPTION_IF_NULL(op_runtime_info);
  129. TypeId output_type_id = op_runtime_info->output_type(index);
  130. auto output_tensor_size = op_runtime_info->output_tensor_size(index);
  131. auto output_format = op_runtime_info->output_format(index);
  132. auto device_address =
  133. CreateDeviceAddress(nullptr, output_tensor_size, output_format, output_type_id, {item, index});
  134. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  135. current_tensor->set_device_address(device_address);
  136. current_tensor->set_sync_status(kNeedSyncHostToDevice);
  137. }
  138. }
  139. }
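// Single-op memory assignment: allocate input and value-node memory, then output and workspace memory for each kernel
// in execution order, and update ref-node output addresses.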
  140. void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, session::KernelGraph *graph,
  141. const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  142. MS_EXCEPTION_IF_NULL(graph);
  143. MS_EXCEPTION_IF_NULL(mem_manager_);
  144. mem_manager_->ResetDynamicMemory();
  145. RunOpAssignInputMemory(input_tensors, graph);
  146. AssignStaticMemoryValueNode(graph);
  147. for (const auto &cnode : graph->execution_order()) {
  148. RunOpAssignOutputMemory(cnode, tensor_to_node);
  149. RunOpAssignWorkSpaceMemory(cnode);
  150. }
  151. UpdateRefNodeOutputMem(graph);
  152. }
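// Release per-op memory bindings by clearing the output and workspace addresses attached to parameters, value nodes and kernels.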
  153. void KernelRuntime::RunOpClearMemory(const session::KernelGraph *graph) const {
  154. MS_EXCEPTION_IF_NULL(graph);
  155. // clear input parameter memory resource
  156. for (const auto &input_node : graph->inputs()) {
  157. MS_EXCEPTION_IF_NULL(input_node);
  158. AnfAlgo::SetOutputAddr(nullptr, 0, input_node.get());
  159. }
  160. // clear input value node memory resource
  161. for (const auto &value_node : graph->graph_value_nodes()) {
  162. MS_EXCEPTION_IF_NULL(value_node);
  163. AnfAlgo::SetOutputAddr(nullptr, 0, value_node.get());
  164. }
  165. for (const auto &cnode : graph->execution_order()) {
  166. MS_EXCEPTION_IF_NULL(cnode);
  167. // clear output memory resource
  168. size_t output_num = AnfAlgo::GetOutputTensorNum(cnode);
  169. for (size_t index = 0; index < output_num; ++index) {
  170. AnfAlgo::SetOutputAddr(nullptr, index, cnode.get());
  171. }
  172. // clear workspace memory resource
  173. auto kernel_mod = AnfAlgo::GetKernelMod(cnode);
  174. MS_EXCEPTION_IF_NULL(kernel_mod);
  175. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  176. for (size_t index = 0; index < workspace_lists.size(); ++index) {
  177. AnfAlgo::SetWorkspaceAddr(nullptr, index, cnode.get());
  178. }
  179. }
  180. }
  181. #ifdef ENABLE_DEBUGGER
  182. bool KernelRuntime::DumpDataEnabled() {
  183. auto &dump_json_parser = DumpJsonParser::GetInstance();
  184. return dump_json_parser.e2e_dump_enabled();
  185. }
  186. bool KernelRuntime::DumpDataEnabledIteration() {
  187. auto &dump_json_parser = DumpJsonParser::GetInstance();
  188. if (!dump_json_parser.e2e_dump_enabled()) {
  189. return false;
  190. }
  191. auto cur_iter = dump_json_parser.cur_dump_iter();
  192. if (dump_json_parser.IsDumpIter(cur_iter)) {
  193. return true;
  194. }
  195. return false;
  196. }
  197. #endif
  198. void KernelRuntime::AssignStaticMemory(session::KernelGraph *graph) {
  199. AssignStaticMemoryInput(graph);
  200. AssignStaticMemoryValueNode(graph);
  201. AssignStaticMemoryOutput(graph);
  202. }
  203. void KernelRuntime::RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors,
  204. const session::KernelGraph *graph) {
  205. MS_EXCEPTION_IF_NULL(graph);
  206. MS_EXCEPTION_IF_NULL(mem_manager_);
  207. if (input_tensors.size() != graph->inputs().size()) {
  208. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  209. << " should be equal to graph input parameter size " << graph->inputs().size();
  210. }
  211. for (size_t input_index = 0; input_index < graph->inputs().size(); ++input_index) {
  212. auto item = graph->inputs()[input_index];
  213. MS_EXCEPTION_IF_NULL(item);
  214. if (!item->isa<Parameter>()) {
  215. continue;
  216. }
  217. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  218. for (size_t index = 0; index < output_size; index++) {
  219. auto current_tensor = input_tensors[input_index];
  220. MS_EXCEPTION_IF_NULL(current_tensor);
  221. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  222. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  223. if (output_address->ptr_ == nullptr) {
  224. mem_manager_->MallocMemFromMemPool(output_address, output_address->size());
  225. }
  226. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  227. continue;
  228. }
  229. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  230. if (output_type_id == kTypeUnknown) {
  231. output_type_id = AnfAlgo::GetOutputInferDataType(item, index);
  232. }
  233. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  234. auto device_address =
  235. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  236. MS_EXCEPTION_IF_NULL(device_address);
  237. MS_EXCEPTION_IF_NULL(mem_manager_);
  238. auto ret = mem_manager_->MallocMemFromMemPool(device_address, tensor_size);
  239. if (!ret) {
  240. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  241. }
  242. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  243. }
  244. }
  245. }
  246. void KernelRuntime::RunOpAssignOutputMemory(
  247. const AnfNodePtr &kernel, const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  248. MS_EXCEPTION_IF_NULL(kernel);
  249. MS_EXCEPTION_IF_NULL(mem_manager_);
  250. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  251. MS_EXCEPTION_IF_NULL(kernel_mod);
  252. auto output_sizes = kernel_mod->GetOutputSizeList();
  253. if (output_sizes.empty()) {
  254. return;
  255. }
  256. // Use device_address Allocated in RunOpMallocPre.
  257. for (auto &iter : tensor_to_node) {
  258. auto device_address = iter.first->device_address();
  259. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(device_address), iter.second.second,
  260. iter.second.first.get());
  261. }
  262. for (size_t i = 0; i < output_sizes.size(); ++i) {
  263. if (AnfAlgo::OutputAddrExist(kernel, i, false)) {
  264. auto address = AnfAlgo::GetMutableOutputAddr(kernel, i, false);
  265. MS_EXCEPTION_IF_NULL(address);
  266. if (address->ptr() == nullptr) {
  267. MS_EXCEPTION_IF_NULL(mem_manager_);
  268. mem_manager_->MallocMemFromMemPool(address, address->size());
  269. }
  270. continue;
  271. }
  272. if (AnfAlgo::GetCNodeName(kernel) == kApplyMomentumOpName) {
  273. auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i);
  274. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  275. continue;
  276. }
  277. std::string output_format = AnfAlgo::GetOutputFormat(kernel, i);
  278. auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
  279. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {kernel, i});
  280. device_address->set_host_shape(trans::GetRuntimePaddingShape(kernel, i));
  281. MS_EXCEPTION_IF_NULL(device_address);
  282. auto ret = mem_manager_->MallocMemFromMemPool(device_address, output_sizes[i]);
  283. if (!ret) {
  284. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << output_sizes[i];
  285. }
  286. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  287. }
  288. }
  289. void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) {
  290. MS_EXCEPTION_IF_NULL(kernel);
  291. MS_EXCEPTION_IF_NULL(mem_manager_);
  292. if (kernel->isa<CNode>()) {
  293. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  294. MS_EXCEPTION_IF_NULL(kernel_mod);
  295. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  296. for (size_t i = 0; i < workspace_lists.size(); ++i) {
  297. auto device_address = CreateDeviceAddress(nullptr, workspace_lists[i], "", kTypeUnknown);
  298. MS_EXCEPTION_IF_NULL(device_address);
  299. auto ret = mem_manager_->MallocMemFromMemPool(device_address, workspace_lists[i]);
  300. if (!ret) {
  301. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << workspace_lists[i];
  302. }
  303. AnfAlgo::SetWorkspaceAddr(device_address, i, kernel.get());
  304. }
  305. }
  306. }
  307. void KernelRuntime::RunOpAssignOutputNodeMemory(const ValuePtr &pre_output_value, session::KernelGraph *graph) {
  308. if (pre_output_value == nullptr) {
  309. return;
  310. }
  311. std::vector<tensor::TensorPtr> pre_output_tensors;
  312. TensorValueToTensor(pre_output_value, &pre_output_tensors);
  313. MS_EXCEPTION_IF_NULL(graph);
  314. auto output_nodes = graph->outputs();
  315. if (pre_output_tensors.size() != output_nodes.size()) {
  316. MS_LOG(EXCEPTION) << "The size of pre output tensors [" << pre_output_tensors.size()
  317. << "] is not equal to the size of output nodes of graph [" << output_nodes.size() << "]";
  318. }
  319. // share output address with pre output tensors
  320. for (size_t i = 0; i < output_nodes.size(); ++i) {
  321. auto output_node_with_index = AnfAlgo::VisitKernel(output_nodes[i], 0);
  322. auto output_node = output_node_with_index.first;
  323. MS_EXCEPTION_IF_NULL(output_node);
  324. if (!output_node->isa<CNode>()) {
  325. if (output_node->isa<Parameter>()) {
  326. auto param = output_node->cast<ParameterPtr>();
  327. if (param != nullptr && !param->has_default()) {
  328. MS_LOG(EXCEPTION) << "The output parameter should be a real parameter!";
  329. }
  330. }
  331. continue;
  332. }
  333. auto real_output_cnode = output_node->cast<CNodePtr>();
  334. MS_EXCEPTION_IF_NULL(real_output_cnode);
  335. MS_EXCEPTION_IF_NULL(pre_output_tensors[i]);
  336. if (pre_output_tensors[i]->device_address() == nullptr) {
  337. MS_LOG(INFO) << "The address of pre output tensor [" << i << "] is a nullptr!";
  338. continue;
  339. }
  340. if (opt::IsNopNode(real_output_cnode)) {
  341. if (real_output_cnode->inputs().size() < kMinInputSize) {
  342. MS_LOG(EXCEPTION) << "The input size of output node: " << real_output_cnode->DebugString()
  343. << " should be larger than one!";
  344. }
  345. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  346. output_node_with_index.second, real_output_cnode->input(1).get());
  347. } else {
  348. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  349. output_node_with_index.second, output_node_with_index.first.get());
  350. }
  351. }
  352. }
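// Assign static memory for graph input parameters. Parameters served by the parameter-server embedding cache reuse the
// hash-table address instead of allocating new device memory; unused or already-assigned parameters are skipped.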
  353. void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) {
  354. MS_EXCEPTION_IF_NULL(graph);
  355. MS_EXCEPTION_IF_NULL(mem_manager_);
  356. MS_LOG(INFO) << "AssignStaticMemoryInput start for graph " << graph->graph_id();
  357. auto graph_inputs = GetGraphInputs(graph);
  358. auto graph_valid_input = graph->valid_inputs();
  359. graph_inputs.insert(graph_inputs.end(), graph->child_graph_result().begin(), graph->child_graph_result().end());
  360. std::vector<AnfNodePtr> need_alloc_nodes;
  361. auto add_need_alloc_nodes = [&need_alloc_nodes, graph, this](const AnfNodePtr &node) {
  362. MS_EXCEPTION_IF_NULL(node);
  363. if (!node->isa<Parameter>()) {
  364. return;
  365. }
  366. if (NodeOutputDeviceAddressExist(node, 0)) {
  367. return;
  368. }
  369. auto input_param = node->cast<ParameterPtr>();
  370. if (input_param != nullptr && !input_param->IsUsedByRealKernelInGraph(graph->graph_id())) {
  371. return;
  372. }
  373. need_alloc_nodes.push_back(node);
  374. };
  375. for (size_t i = 0; i < graph_inputs.size(); ++i) {
  376. auto input_node = graph_inputs[i];
  377. MS_EXCEPTION_IF_NULL(input_node);
  378. if (i < graph_valid_input.size() && !graph_valid_input[i]) {
  379. continue;
  380. }
  381. if (AnfAlgo::CheckPrimitiveType(input_node, prim::kPrimMakeTuple)) {
  382. auto outs = AnfAlgo::GetAllOutput(input_node);
  383. for (auto &out : outs) {
  384. MS_EXCEPTION_IF_NULL(out);
  385. add_need_alloc_nodes(out);
  386. }
  387. }
  388. add_need_alloc_nodes(input_node);
  389. }
  390. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  391. bool ps_cache_check = false;
  392. #endif
  393. for (auto &item : need_alloc_nodes) {
  394. MS_EXCEPTION_IF_NULL(item);
  395. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  396. for (size_t index = 0; index < output_size; index++) {
  397. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  398. // if a graph output is a weight that doesn't link to any cnode, its data type will be unknown
  399. if (output_type_id == kTypeUnknown) {
  400. MS_LOG(WARNING) << "It is not recommended to use an isolated weight parameter as the output of the graph";
  401. continue;
  402. }
  403. DeviceAddressPtr device_address = nullptr;
  404. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  405. const std::string &param_name = item->fullname_with_scope();
  406. if (ps::ps_cache_instance.IsHashTable(param_name)) {
  407. MS_LOG(INFO) << "Parameter(" << param_name << ")"
  408. << " enables the embeddingLookup cache in parameter server training mode.";
  409. // PS embeddingLookup cache check.
  410. if (!ps_cache_check) {
  411. CheckIfSupportPSEmbeddingCache(graph);
  412. ps_cache_check = true;
  413. }
  414. const auto &address = ps::ps_cache_instance.QueryHashTableAddr(param_name);
  415. MS_EXCEPTION_IF_NULL(address.addr);
  416. device_address = CreateDeviceAddress(address.addr, address.size, AnfAlgo::GetOutputFormat(item, index),
  417. output_type_id, {item, index});
  418. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  419. continue;
  420. }
  421. #endif
  422. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  423. device_address =
  424. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  425. MS_LOG(INFO) << "Assign Static Memory for Input node, size:" << tensor_size
  426. << " node:" << item->fullname_with_scope() << " index: " << index;
  427. if (mem_manager_->MallocMem(kStaticMem, tensor_size, device_address, graph->graph_id()) == nullptr) {
  428. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
  429. }
  430. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  431. }
  432. }
  433. MS_LOG(INFO) << "AssignStaticMemoryInput end";
  434. }
  435. void KernelRuntime::AssignStaticMemoryOutput(const session::KernelGraph *graph) {
  436. MS_EXCEPTION_IF_NULL(graph);
  437. MS_LOG(INFO) << "AssignStaticMemoryOutput start for graph " << graph->graph_id();
  438. auto nodes = AnfAlgo::GetAllOutput(graph->output(), {prim::kPrimTupleGetItem});
  439. std::vector<session::KernelWithIndex> non_communication_op;
  440. // Assign Communicate Op Memory firstly.
  441. for (const auto &node : nodes) {
  442. auto kernel_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0, true);
  443. MS_EXCEPTION_IF_NULL(kernel_with_index.first);
  444. if (!kernel_with_index.first->isa<CNode>() || !AnfAlgo::IsRealKernel(kernel_with_index.first)) {
  445. continue;
  446. }
  447. if (AnfAlgo::IsCommunicationOp(kernel_with_index.first)) {
  448. AssignCommunicationNodeMem(kStaticMem, kernel_with_index.first);
  449. } else {
  450. non_communication_op.emplace_back(kernel_with_index);
  451. }
  452. }
  453. for (const auto &item_with_index : non_communication_op) {
  454. MS_EXCEPTION_IF_NULL(item_with_index.first);
  455. MS_LOG(DEBUG) << "AssignNodeOutputMem for " << item_with_index.first->fullname_with_scope();
  456. AssignNodeOutputMem(kStaticMem, item_with_index.first, SizeToInt(item_with_index.second));
  457. }
  458. MS_LOG(INFO) << "AssignStaticMemoryOutput end";
  459. }
  460. void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph *graph) {
  461. MS_EXCEPTION_IF_NULL(graph);
  462. auto &kernels = graph->execution_order();
  463. for (auto &kernel : kernels) {
  464. MS_EXCEPTION_IF_NULL(kernel);
  465. auto output_num = AnfAlgo::GetOutputTensorNum(kernel);
  466. if (output_num == 0) {
  467. MS_LOG(DEBUG) << "This kernel has no output size.";
  468. continue;
  469. }
  470. for (size_t i = 0; i < output_num; ++i) {
  471. session::AnfWithOutIndex out_pair(kernel, i);
  472. if (graph->IsInRefOutputMap(out_pair)) {
  473. auto origin_pair = graph->GetRefCorrespondOutput(out_pair);
  474. MS_EXCEPTION_IF_NULL(origin_pair.first);
  475. auto origin_node_output_addr = AnfAlgo::GetMutableOutputAddr(origin_pair.first, origin_pair.second);
  476. MS_EXCEPTION_IF_NULL(origin_node_output_addr);
  477. auto cur_node_output_addr = AnfAlgo::GetMutableOutputAddr(kernel, i);
  478. if (origin_node_output_addr.get() != cur_node_output_addr.get()) {
  479. MS_LOG(DEBUG) << "REF address is not the same, the ref node output address needs updating";
  480. MS_LOG(DEBUG) << "REF origin op is " << origin_pair.first->DebugString() << ", output index is "
  481. << origin_pair.second << ", cur op is " << kernel->DebugString() << ", out index is " << i;
  482. AnfAlgo::SetOutputAddr(origin_node_output_addr, i, kernel.get());
  483. }
  484. }
  485. }
  486. }
  487. }
  488. void KernelRuntime::AssignCommunicationNodeMem(MemType type, const AnfNodePtr &node) {
  489. AssignCommunicationNodeInputMem(type, node);
  490. AssignCommunicationNodeOutputMem(type, node);
  491. AssignWorkSpaceMem(type, node);
  492. }
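// Build per-kernel pre/post events to synchronize communication kernels on communication_stream_ with the compute
// stream: the compute stream records a pre event that the communication stream waits on, and the first compute kernel
// that consumes the communication op's output (or the op itself, if none is found) waits on the post event.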
  493. void KernelRuntime::GenKernelEvents(const session::KernelGraph *graph) {
  494. MS_EXCEPTION_IF_NULL(graph);
  495. auto &kernels = graph->execution_order();
  496. if (kernels.empty() || graph_kernel_events_map_.find(graph->graph_id()) != graph_kernel_events_map_.end()) {
  497. return;
  498. }
  499. auto kernel_events =
  500. std::pair<std::vector<std::vector<std::function<void()>>>, std::vector<std::vector<std::function<void()>>>>();
  501. auto &kernel_pre_run_events = kernel_events.first;
  502. auto &kernel_post_run_events = kernel_events.second;
  503. kernel_pre_run_events.resize(kernels.size());
  504. kernel_post_run_events.resize(kernels.size());
  505. for (size_t i = 0; i < kernels.size(); ++i) {
  506. auto &kernel = kernels[i];
  507. if (!AnfAlgo::IsCommunicationOp(kernel)) {
  508. continue;
  509. }
  510. auto pre_event = CreateDeviceEvent();
  511. auto post_event = CreateDeviceEvent();
  512. MS_EXCEPTION_IF_NULL(pre_event);
  513. MS_EXCEPTION_IF_NULL(post_event);
  514. pre_event->set_wait_stream(communication_stream_);
  515. pre_event->set_record_stream(stream_);
  516. post_event->set_wait_stream(stream_);
  517. post_event->set_record_stream(communication_stream_);
  518. kernel_pre_run_events[i].emplace_back([pre_event]() {
  519. pre_event->RecordEvent();
  520. pre_event->WaitEvent();
  521. });
  522. kernel_post_run_events[i].emplace_back([post_event]() { post_event->RecordEvent(); });
  523. bool found_nearest_child = false;
  524. for (size_t j = i + 1; j < kernels.size(); ++j) {
  525. auto &child = kernels[j];
  526. MS_EXCEPTION_IF_NULL(child);
  527. if (AnfAlgo::IsCommunicationOp(child)) {
  528. continue;
  529. }
  530. auto input_size = child->inputs().size() - 1;
  531. for (size_t k = 0; k < input_size; ++k) {
  532. auto kernel_index = AnfAlgo::VisitKernelWithReturnType(AnfAlgo::GetInputNode(child, k), 0, true);
  533. if (kernel_index.first == kernel) {
  534. found_nearest_child = true;
  535. break;
  536. }
  537. }
  538. if (found_nearest_child) {
  539. kernel_pre_run_events[j].emplace_back([post_event]() { post_event->WaitEvent(); });
  540. break;
  541. }
  542. }
  543. if (!found_nearest_child) {
  544. kernel_post_run_events[i].emplace_back([post_event]() { post_event->WaitEvent(); });
  545. }
  546. }
  547. graph_kernel_events_map_[graph->graph_id()] = std::move(kernel_events);
  548. }
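// Outputs of a communication op are allocated as one contiguous block; each output address is set to an aligned offset into it.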
  549. void KernelRuntime::AssignCommunicationNodeOutputMem(MemType type, const AnfNodePtr &node) {
  550. MS_EXCEPTION_IF_NULL(node);
  551. MS_EXCEPTION_IF_NULL(mem_manager_);
  552. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  553. MS_EXCEPTION_IF_NULL(kernel_mod);
  554. auto output_sizes = kernel_mod->GetOutputSizeList();
  555. if (output_sizes.empty()) {
  556. MS_LOG(INFO) << "This kernel[" << node->DebugString() << "] has no output size.";
  557. return;
  558. }
  559. auto context_ptr = MsContext::GetInstance();
  560. MS_EXCEPTION_IF_NULL(context_ptr);
  561. size_t total_size = 0;
  562. size_t output_index = 0;
  563. std::vector<size_t> align_size_list;
  564. for (uint64_t mem_size : output_sizes) {
  565. if (AnfAlgo::OutputAddrExist(node, output_index++)) {
  566. MS_LOG(INFO) << "Communication op " << node->fullname_with_scope() << " has output device address";
  567. return;
  568. }
  569. if (context_ptr->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
  570. mem_size = MemoryManager::GetCommonAlignSize(mem_size);
  571. }
  572. total_size += mem_size;
  573. align_size_list.emplace_back(mem_size);
  574. }
  575. if (align_size_list.empty()) {
  576. return;
  577. }
  578. if (type == kSomasReuseDynamicMem) {
  579. bool not_reuse = KernelMemNotReuse(node);
  580. if (not_reuse) {
  581. type = kDynamicMem;
  582. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  583. }
  584. }
  585. uint8_t *output_ptr = nullptr;
  586. for (size_t j = 0; j < align_size_list.size(); ++j) {
  587. std::string output_format = AnfAlgo::GetOutputFormat(node, j);
  588. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, j);
  589. auto address = CreateDeviceAddress(nullptr, output_sizes[j], output_format, output_type, {node, j});
  590. MS_EXCEPTION_IF_NULL(address);
  591. if (output_ptr == nullptr) {
  592. output_ptr = mem_manager_->MallocOutputMem(node, 0, type, total_size, address, true);
  593. MS_EXCEPTION_IF_NULL(output_ptr);
  594. } else {
  595. address->set_ptr(output_ptr);
  596. }
  597. AnfAlgo::SetOutputAddr(address, j, node.get());
  598. output_ptr += align_size_list[j];
  599. }
  600. }
  601. bool KernelRuntime::KernelMemNotReuse(const AnfNodePtr &node) { return false; }
  602. DeviceAddressPtr KernelRuntime::PreAssignCNodeMemory(const AnfNodePtr &anf_node, size_t index) {
  603. MS_EXCEPTION_IF_NULL(anf_node);
  604. if (!anf_node->isa<CNode>()) {
  605. MS_LOG(EXCEPTION) << "anf_node should be a cnode";
  606. }
  607. auto cnode = anf_node->cast<CNodePtr>();
  608. MS_EXCEPTION_IF_NULL(cnode);
  609. if (opt::IsNopNode(cnode)) {
  610. const size_t kNopNodeInputSize = 2;
  611. if (cnode->size() != kNopNodeInputSize) {
  612. MS_LOG(EXCEPTION) << cnode->fullname_with_scope() << " has invalid input size: " << cnode->size();
  613. }
  614. auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, index);
  615. return PreAssignCNodeMemory(input_node_with_index.first, input_node_with_index.second);
  616. }
  617. auto kernel_mod = AnfAlgo::GetKernelMod(anf_node);
  618. MS_EXCEPTION_IF_NULL(kernel_mod);
  619. auto output_sizes = kernel_mod->GetOutputSizeList();
  620. if (output_sizes.size() <= index) {
  621. MS_LOG(EXCEPTION) << "Previous node output size " << output_sizes.size() << " <= node index " << index;
  622. }
  623. std::string output_format = AnfAlgo::GetOutputFormat(anf_node, index);
  624. auto output_type = AnfAlgo::GetOutputDeviceDataType(anf_node, index);
  625. auto address = CreateDeviceAddress(nullptr, output_sizes[index], output_format, output_type, {anf_node, index});
  626. AnfAlgo::SetOutputAddr(address, index, anf_node.get());
  627. return address;
  628. }
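// Inputs of a communication op are likewise packed into one contiguous, aligned block; each producing CNode output is
// pre-assigned a DeviceAddress and then pointed into the block.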
  629. void KernelRuntime::AssignCommunicationNodeInputMem(MemType type, const AnfNodePtr &node) {
  630. auto context_ptr = MsContext::GetInstance();
  631. MS_EXCEPTION_IF_NULL(context_ptr);
  632. MS_EXCEPTION_IF_NULL(node);
  633. MS_EXCEPTION_IF_NULL(mem_manager_);
  634. size_t total_size = 0;
  635. std::vector<std::pair<DeviceAddressPtr, size_t>> addr_size;
  636. size_t input_num = AnfAlgo::GetInputTensorNum(node);
  637. for (size_t i = 0; i < input_num; ++i) {
  638. auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(node, i, true);
  639. auto input_node = input_node_with_index.first;
  640. MS_EXCEPTION_IF_NULL(input_node);
  641. if (AnfAlgo::OutputAddrExist(input_node, input_node_with_index.second)) {
  642. MS_LOG(INFO) << "Communication op " << input_node->fullname_with_scope() << " has input device address";
  643. return;
  644. }
  645. DeviceAddressPtr address = nullptr;
  646. if (input_node->isa<CNode>()) {
  647. address = PreAssignCNodeMemory(input_node, input_node_with_index.second);
  648. } else {
  649. MS_LOG(EXCEPTION) << "Communication node inputs only support CNode";
  650. }
  651. MS_EXCEPTION_IF_NULL(address);
  652. auto mem_size = MemoryManager::GetCommonAlignSize(address->size());
  653. total_size += mem_size;
  654. addr_size.emplace_back(address, mem_size);
  655. }
  656. if (addr_size.empty()) {
  657. return;
  658. }
  659. if (type == kSomasReuseDynamicMem) {
  660. bool not_reuse = KernelMemNotReuse(node);
  661. if (not_reuse) {
  662. type = kDynamicMem;
  663. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s input.";
  664. }
  665. }
  666. auto cnode = node->cast<CNodePtr>();
  667. MS_EXCEPTION_IF_NULL(cnode);
  668. if (cnode->inputs().size() < kMinInputSize) {
  669. // a communication node's inputs should contain the node itself and at least one real input
  670. MS_LOG(ERROR) << "No inputs for " << cnode->fullname_with_scope();
  671. return;
  672. }
  673. auto first_input_node = cnode->input(1);
  674. auto prenode_index = AnfAlgo::VisitKernelWithReturnType(first_input_node, 0, true);
  675. uint8_t *input_ptr = mem_manager_->MallocOutputMem(prenode_index.first, prenode_index.second, type, total_size,
  676. addr_size[0].first, true);
  677. for (const auto &iter : addr_size) {
  678. MS_EXCEPTION_IF_NULL(iter.first);
  679. iter.first->set_ptr(input_ptr);
  680. input_ptr += iter.second;
  681. }
  682. }
  683. void KernelRuntime::AssignNodeOutputMem(MemType type, const AnfNodePtr &node, int index) {
  684. MS_EXCEPTION_IF_NULL(node);
  685. MS_EXCEPTION_IF_NULL(mem_manager_);
  686. if (type == kSomasReuseDynamicMem) {
  687. bool not_reuse = KernelMemNotReuse(node);
  688. if (not_reuse) {
  689. type = kDynamicMem;
  690. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  691. }
  692. }
  693. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  694. MS_EXCEPTION_IF_NULL(kernel_mod);
  695. auto output_sizes = kernel_mod->GetOutputSizeList();
  696. if (output_sizes.empty()) {
  697. return;
  698. }
  699. for (size_t i = 0; i < output_sizes.size(); ++i) {
  700. if ((kGetAllOuts != index) && (SizeToInt(i) != index)) {
  701. continue;
  702. }
  703. if (NodeOutputDeviceAddressExist(node, i)) {
  704. MS_LOG(INFO) << "Already malloc index:" << i;
  705. continue;
  706. }
  707. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope() << " output memory size:" << output_sizes[i];
  708. if (type == kStaticMem) {
  709. MS_LOG(INFO) << "Assign Static Memory for Output node, size:" << output_sizes[i]
  710. << " node:" << node->fullname_with_scope();
  711. }
  712. std::string output_format = AnfAlgo::GetOutputFormat(node, i);
  713. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, i);
  714. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {node, i});
  715. MS_EXCEPTION_IF_NULL(device_address);
  716. uint8_t *ptr = mem_manager_->MallocOutputMem(node, i, type, output_sizes[i], device_address, false);
  717. MS_EXCEPTION_IF_NULL(ptr);
  718. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  719. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  720. }
  721. }
  722. DeviceAddressPtr KernelRuntime::AssignExtraStaticMem(const TensorPtr &tensor, const AnfNodePtr &node, int index) {
  723. MS_EXCEPTION_IF_NULL(node);
  724. MS_EXCEPTION_IF_NULL(mem_manager_);
  725. auto i = SizeToInt(index);
  726. auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  727. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope()
  728. << "Assign Static Memory for Output node, size:" << tensor_address->size();
  729. auto device_address = CreateDeviceAddress(nullptr, tensor_address->size(), tensor_address->format(),
  730. tensor_address->type_id(), {node, i});
  731. MS_EXCEPTION_IF_NULL(device_address);
  732. uint8_t *ptr = mem_manager_->MallocOutputMem(node, i, kStaticMem, tensor_address->size(), device_address, false);
  733. MS_EXCEPTION_IF_NULL(ptr);
  734. return device_address;
  735. }
  736. void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const ValuePtr &node_value,
  737. size_t output_idx) {
  738. MS_EXCEPTION_IF_NULL(value_node);
  739. MS_EXCEPTION_IF_NULL(node_value);
  740. MS_EXCEPTION_IF_NULL(mem_manager_);
  741. auto ms_context = MsContext::GetInstance();
  742. MS_EXCEPTION_IF_NULL(ms_context);
  743. std::vector<tensor::TensorPtr> tensors;
  744. TensorValueToTensor(node_value, &tensors);
  745. // Graph id should be passed to record static memory if profiling is enabled.
  746. auto kernel_info = dynamic_cast<device::KernelInfo *>(value_node->kernel_info());
  747. MS_EXCEPTION_IF_NULL(kernel_info);
  748. uint32_t graph_id = kernel_info->graph_id();
  749. for (const auto &tensor : tensors) {
  750. if (tensor == nullptr) {
  751. MS_LOG(WARNING) << "Tensor is null";
  752. return;
  753. }
  754. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  755. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  756. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address()), output_idx++,
  757. value_node.get());
  758. continue;
  759. }
  760. size_t tensor_size = LongToSize(tensor->data().nbytes());
  761. auto node_size = AnfAlgo::GetOutputTensorMemSize(value_node, output_idx);
  762. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(value_node, output_idx);
  763. if (output_type_id == kTypeUnknown) {
  764. output_type_id = AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  765. }
  766. auto output_format = AnfAlgo::GetOutputFormat(value_node, output_idx);
  767. DeviceAddressPtr address =
  768. CreateDeviceAddress(nullptr, node_size, output_format, output_type_id, {value_node, output_idx});
  769. MS_EXCEPTION_IF_NULL(address);
  770. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
  771. !mem_manager_->MallocMemFromMemPool(address, node_size)) {
  772. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << node_size;
  773. } else {
  774. MS_LOG(INFO) << "Assign Static Memory for Value node, size:" << node_size
  775. << " node:" << value_node->fullname_with_scope();
  776. if (mem_manager_->MallocMem(kStaticMem, node_size, address, graph_id) == nullptr) {
  777. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << node_size;
  778. }
  779. }
  780. AnfAlgo::SetOutputAddr(address, output_idx, value_node.get());
  781. if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(),
  782. tensor->data_c(), tensor->device_info().host_format_)) {
  783. MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail!" << value_node->DebugString()
  784. << ", node format is " << AnfAlgo::GetOutputFormat(value_node, output_idx)
  785. << ", node dtype is " << AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  786. }
  787. }
  788. }
  789. void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) {
  790. MS_EXCEPTION_IF_NULL(graph);
  791. MS_EXCEPTION_IF_NULL(mem_manager_);
  792. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode start for graph " << graph->graph_id();
  793. auto ms_context = MsContext::GetInstance();
  794. MS_EXCEPTION_IF_NULL(ms_context);
  795. // order the value nodes
  796. std::map<std::string, ValueNodePtr> value_nodes_map;
  797. for (auto &node : graph->graph_value_nodes()) {
  798. MS_EXCEPTION_IF_NULL(node);
  799. value_nodes_map[node->fullname_with_scope()] = node;
  800. }
  801. for (auto &item : value_nodes_map) {
  802. auto value_node = item.second;
  803. MS_EXCEPTION_IF_NULL(value_node);
  804. if (NodeOutputDeviceAddressExist(value_node, 0)) {
  805. MS_LOG(DEBUG) << "value_node[" << value_node->DebugString() << "] address already exist";
  806. // TODO(jojo): PyNative Infer?
  807. auto device_address = AnfAlgo::GetMutableOutputAddr(value_node, 0);
  808. if (device_address->ptr_ == nullptr) {
  809. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
  810. if (!mem_manager_->MallocMemFromMemPool(device_address, device_address->size_)) {
  811. MS_LOG(EXCEPTION) << "MallocMemFromMemPool failed";
  812. }
  813. } else {
  814. if (mem_manager_->MallocMem(kStaticMem, device_address->size_, device_address, graph->graph_id()) == nullptr) {
  815. MS_LOG(EXCEPTION) << "MallocMem kStaticMem failed";
  816. }
  817. }
  818. }
  819. continue;
  820. }
  821. auto &node_value = value_node->value();
  822. MS_EXCEPTION_IF_NULL(node_value);
  823. MS_LOG(DEBUG) << "Malloc memory for " << value_node->fullname_with_scope();
  824. if (node_value->isa<Tensor>() || node_value->isa<ValueTuple>()) {
  825. AssignValueNodeTensor(value_node, node_value, 0);
  826. } else if (node_value->isa<StringImm>()) {
  827. auto value = GetValue<std::string>(node_value);
  828. size_t tensor_size = value.size();
  829. DeviceAddressPtr address = nullptr;
  830. address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeUInt8);
  831. MS_EXCEPTION_IF_NULL(address);
  832. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
  833. !mem_manager_->MallocMemFromMemPool(address, tensor_size)) {
  834. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  835. } else {
  836. MS_LOG(INFO) << "Assign Static Memory for Value node, size:" << tensor_size
  837. << " node:" << value_node->fullname_with_scope();
  838. if (mem_manager_->MallocMem(kStaticMem, tensor_size, address, graph->graph_id()) == nullptr) {
  839. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem
  840. << ", tensor size is: " << tensor_size;
  841. }
  842. }
  843. AnfAlgo::SetOutputAddr(address, 0, value_node.get());
  844. ShapeVector shape = {1, SizeToLong(tensor_size)};
  845. if (!address->SyncHostToDevice(shape, tensor_size, kNumberTypeUInt8, value.data())) {
  846. MS_LOG(EXCEPTION) << "kValueNode SyncHostToDevice fail!";
  847. }
  848. }
  849. }
  850. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode end";
  851. }
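// Dynamic memory assignment for the whole graph: with memory reuse enabled, SOMAS-planned reuse memory is allocated
// (kSomasReuseDynamicMem); otherwise plain dynamic memory is used. Communication nodes are assigned first, then the
// remaining compute nodes get output and workspace memory.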
  852. void KernelRuntime::AssignDynamicMemory(session::KernelGraph *graph) {
  853. MS_EXCEPTION_IF_NULL(graph);
  854. MS_EXCEPTION_IF_NULL(mem_manager_);
  855. auto context_ptr = MsContext::GetInstance();
  856. MS_EXCEPTION_IF_NULL(context_ptr);
  857. bool is_enable_mem_reuse = EnvConfigParser::GetInstance().GetSysMemreuse();
  858. auto mem_type = kDynamicMem;
  859. auto &dump_json_parser = DumpJsonParser::GetInstance();
  860. if (dump_json_parser.e2e_dump_enabled() && dump_json_parser.dump_mode() == 0) {
  861. mindspore::EnvConfigParser::GetInstance().SetSysMemreuse(false);
  862. is_enable_mem_reuse = false;
  863. MS_LOG(INFO) << "Disable Memory Reuse when e2e dump is enabled and dump mode is set to dump all kernels";
  864. }
  865. if (is_enable_mem_reuse) {
  866. MS_LOG(INFO) << "Memory Reuse is enabled...";
  867. mem_manager_->MallocSomasDynamicMem(graph);
  868. mem_type = kSomasReuseDynamicMem;
  869. } else {
  870. MS_LOG(INFO) << "Memory Reuse is disabled...";
  871. }
  872. auto &execution_nodes = graph->execution_order();
  873. std::vector<CNodePtr> compute_nodes;
  874. // communication nodes first
  875. for (auto &node : execution_nodes) {
  876. if (AnfAlgo::IsCommunicationOp(node)) {
  877. // skip if the memory is already allocated
  878. AssignCommunicationNodeMem(mem_type, node);
  879. } else {
  880. compute_nodes.emplace_back(node);
  881. }
  882. }
  883. // then compute nodes
  884. for (auto &node : compute_nodes) {
  885. AssignNodeOutputMem(mem_type, node, kGetAllOuts);
  886. AssignWorkSpaceMem(mem_type, node);
  887. }
  888. }
  889. void KernelRuntime::AssignWorkSpaceMem(MemType type, const AnfNodePtr &node) {
  890. MS_EXCEPTION_IF_NULL(node);
  891. MS_EXCEPTION_IF_NULL(mem_manager_);
  892. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  893. MS_EXCEPTION_IF_NULL(kernel_mod);
  894. size_t index = 0;
  895. for (auto &size : kernel_mod->GetWorkspaceSizeList()) {
  896. if (AnfAlgo::WorkspaceAddrExist(node, index)) {
  897. MS_LOG(INFO) << "Op " << node->fullname_with_scope() << " has workspace device address";
  898. return;
  899. }
  900. auto ptr = mem_manager_->MallocWorkSpaceMem(node, index, type, size);
  901. AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get());
  902. index++;
  903. }
  904. }
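// Collect the launch arguments (input/workspace/output Address lists) for a kernel from the device addresses assigned
// above; AtomicAddrClean nodes are redirected to GenAddrCleanLaunchArgs.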
  905. void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel,
  906. AddressPtrList *kernel_inputs, AddressPtrList *const kernel_workspaces,
  907. AddressPtrList *kernel_outputs) {
  908. MS_EXCEPTION_IF_NULL(kernel);
  909. MS_EXCEPTION_IF_NULL(kernel_inputs);
  910. MS_EXCEPTION_IF_NULL(kernel_workspaces);
  911. MS_EXCEPTION_IF_NULL(kernel_outputs);
  912. auto cnode = kernel->cast<CNodePtr>();
  913. MS_EXCEPTION_IF_NULL(cnode);
  914. if (AnfAlgo::GetCNodeName(cnode) == kAtomicAddrCleanOpName) {
  915. return GenAddrCleanLaunchArgs(cnode, kernel_inputs);
  916. }
  917. auto ms_context = MsContext::GetInstance();
  918. MS_EXCEPTION_IF_NULL(ms_context);
  919. auto visit_nop_node = (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode);
  920. size_t input_num = AnfAlgo::GetInputTensorNum(kernel);
  921. for (size_t i = 0; i < input_num; ++i) {
  922. auto op_name = AnfAlgo::GetCNodeName(cnode);
  923. constexpr auto none_placeholder_index = 3;
  924. if (op_name == kDynamicRNNOpName && i == none_placeholder_index) {
  925. continue;
  926. }
  927. if (op_name == kDynamicGRUV2OpName) {
  928. auto none_index = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode, "placeholder_index");
  929. auto item = std::find(none_index.begin(), none_index.end(), i);
  930. if (item != none_index.end()) {
  931. continue;
  932. }
  933. }
  934. auto real_input = AnfAlgo::GetRealInputIndex(kernel, i);
  935. auto device_address = AnfAlgo::GetPrevNodeOutputAddr(kernel, real_input, visit_nop_node);
  936. MS_EXCEPTION_IF_NULL(device_address);
  937. kernel::AddressPtr input = std::make_shared<kernel::Address>();
  938. MS_EXCEPTION_IF_NULL(input);
  939. input->addr = device_address->ptr_;
  940. MS_EXCEPTION_IF_NULL(input->addr);
  941. input->size = device_address->size_;
  942. kernel_inputs->emplace_back(input);
  943. }
  944. for (size_t i = 0; i < kernel_mod.GetOutputSizeList().size(); ++i) {
  945. auto device_address = AnfAlgo::GetOutputAddr(kernel, i, visit_nop_node);
  946. kernel::AddressPtr output = std::make_shared<kernel::Address>();
  947. MS_EXCEPTION_IF_NULL(output);
  948. output->addr = device_address->ptr_;
  949. MS_EXCEPTION_IF_NULL(output->addr);
  950. output->size = device_address->size_;
  951. kernel_outputs->emplace_back(output);
  952. }
  953. for (size_t i = 0; i < kernel_mod.GetWorkspaceSizeList().size(); ++i) {
  954. auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
  955. kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
  956. MS_EXCEPTION_IF_NULL(workspace);
  957. workspace->addr = device_address->ptr_;
  958. MS_EXCEPTION_IF_NULL(workspace->addr);
  959. workspace->size = device_address->size_;
  960. kernel_workspaces->emplace_back(workspace);
  961. }
  962. }
  963. void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs) {
  964. MS_EXCEPTION_IF_NULL(cnode);
  965. MS_EXCEPTION_IF_NULL(kernel_inputs);
  966. if (cnode->inputs().size() != 2) {
  967. MS_LOG(EXCEPTION) << "Atomic addr clean node's input size is not equal to 2.";
  968. }
  969. MS_EXCEPTION_IF_NULL(cnode->inputs()[1]);
  970. auto pre_node = (cnode->inputs()[1])->cast<CNodePtr>();
  971. // set clean output address
  972. if (AnfAlgo::HasNodeAttr(kAttrAtomicOutputIndexs, pre_node)) {
  973. #if defined(__APPLE__)
  974. auto clean_output_indexes = AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicOutputIndexs);
  975. #else
  976. auto clean_output_indexes = AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicOutputIndexs);
  977. #endif
  978. for (auto index : clean_output_indexes) {
  979. auto device_address = AnfAlgo::GetOutputAddr(pre_node, index);
  980. kernel::AddressPtr input = std::make_shared<kernel::Address>();
  981. MS_EXCEPTION_IF_NULL(input);
  982. input->addr = device_address->ptr_;
  983. MS_EXCEPTION_IF_NULL(input->addr);
  984. input->size = device_address->size_;
  985. kernel_inputs->emplace_back(input);
  986. }
  987. MS_LOG(DEBUG) << "AtomicAddClean clean output size:" << clean_output_indexes.size();
  988. }
  989. // set clean workspace address
  990. if (AnfAlgo::HasNodeAttr(kAttrAtomicWorkspaceIndexs, pre_node)) {
  991. #if defined(__APPLE__)
  992. auto clean_workspaces_indexes = AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicWorkspaceIndexs);
  993. #else
  994. auto clean_workspaces_indexes = AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicWorkspaceIndexs);
  995. #endif
  996. for (const auto &index : clean_workspaces_indexes) {
  997. auto device_address = AnfAlgo::GetWorkspaceAddr(pre_node, index);
  998. kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
  999. MS_EXCEPTION_IF_NULL(workspace);
  1000. workspace->addr = device_address->ptr_;
  1001. MS_EXCEPTION_IF_NULL(workspace->addr);
  1002. workspace->size = device_address->size_;
  1003. kernel_inputs->emplace_back(workspace);
  1004. }
  1005. }
  1006. }
  1007. void KernelRuntime::LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &kernel_events,
  1008. size_t index) const {
  1009. if (index >= kernel_events.size()) {
  1010. return;
  1011. }
  1012. for (auto &event : kernel_events[index]) {
  1013. event();
  1014. }
  1015. }
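// Launch a kernel wrapped between two device time events so the elapsed device time can be reported to the PyNative profiler.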
  1016. bool KernelRuntime::LaunchKernelWithPynativeProfiling(kernel::KernelMod *kernel_mod, const std::string &op_name,
  1017. const std::vector<AddressPtr> &inputs,
  1018. const std::vector<AddressPtr> &workspace,
  1019. const std::vector<AddressPtr> &outputs, void *stream) {
  1020. MS_EXCEPTION_IF_NULL(kernel_mod);
  1021. MS_EXCEPTION_IF_NULL(stream);
  1022. float cost_time = 0;
  1023. auto start = CreateDeviceTimeEvent();
  1024. auto end = CreateDeviceTimeEvent();
  1025. MS_EXCEPTION_IF_NULL(start);
  1026. MS_EXCEPTION_IF_NULL(end);
  1027. start->set_record_stream(stream);
  1028. end->set_record_stream(stream);
  1029. start->RecordEvent();
  1030. bool ret = kernel_mod->Launch(inputs, workspace, outputs, stream);
  1031. end->RecordEvent();
  1032. start->SyncEvent();
  1033. end->SyncEvent();
  1034. start->ElapsedTime(&cost_time, end.get());
  1035. auto launch_end_time = GetTime();
  1036. double launch_start_time = launch_end_time - cost_time / kBasicTimeTransferUnit;
  1037. auto op_launch_start_time_end_time = std::make_pair(launch_start_time, launch_end_time);
  1038. PynativeProfiler::SetOpNameAndLaunchTime(std::make_pair(op_name, op_launch_start_time_end_time));
  1039. if (!ret) {
  1040. MS_LOG(EXCEPTION) << "Launch kernel failed, kernel name is : " << op_name;
  1041. }
  1042. return ret;
  1043. }
  1044. void KernelRuntime::DebugStreamSync(const CNodePtr &kernel) {
  1045. auto ms_context = MsContext::GetInstance();
  1046. MS_EXCEPTION_IF_NULL(ms_context);
  1047. auto enable_sync_run = ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_SYNCHRONIZE);
  1048. if (enable_sync_run) {
  1049. if (!SyncStream()) {
  1050. MS_LOG(EXCEPTION) << "Op " << kernel->fullname_with_scope() << " run failed!";
  1051. }
  1052. }
  1053. }
  1054. bool KernelRuntime::LaunchKernel(const AnfNodePtr &kernel) {
  1055. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  1056. MS_EXCEPTION_IF_NULL(kernel_mod);
  1057. AddressPtrList kernel_inputs;
  1058. AddressPtrList kernel_workspaces;
  1059. AddressPtrList kernel_outputs;
  1060. GenLaunchArgs(*kernel_mod, kernel, &kernel_inputs, &kernel_workspaces, &kernel_outputs);
  1061. bool ret;
  1062. if (AnfAlgo::IsCommunicationOp(kernel)) {
  1063. if (pynative_mode_profiling_flag_) {
  1064. ret = LaunchKernelWithPynativeProfiling(kernel_mod, kernel->fullname_with_scope(), kernel_inputs,
  1065. kernel_workspaces, kernel_outputs, communication_stream_);
  1066. } else {
  1067. ret = kernel_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, communication_stream_);
  1068. }
  1069. } else {
  1070. if (pynative_mode_profiling_flag_) {
  1071. ret = LaunchKernelWithPynativeProfiling(kernel_mod, kernel->fullname_with_scope(), kernel_inputs,
  1072. kernel_workspaces, kernel_outputs, stream_);
  1073. } else {
  1074. ret = kernel_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, stream_);
  1075. }
  1076. }
  1077. return ret;
  1078. }
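// Launch every kernel of the graph in execution order: dynamic-shape kernels go through InferShape/UpdateArgs/Execute
// with a stream sync, "nop_op" kernels only forward their input addresses, and all other kernels go through LaunchKernel.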
bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph) {
  const auto &kernels = graph.execution_order();
  std::vector<DynamicKernelPtr> dynamic_kernel_list;
  auto iter = graph_dynamic_kernel_map_.find(graph.graph_id());
  if (iter != graph_dynamic_kernel_map_.end()) {
    dynamic_kernel_list = iter->second;
  }
  if (!dynamic_kernel_list.empty() && dynamic_kernel_list.size() != kernels.size()) {
    MS_LOG(EXCEPTION) << "The size of dynamic kernels " << dynamic_kernel_list.size()
                      << " should be equal to the size of kernels " << kernels.size();
  }
  std::vector<std::vector<std::function<void()>>> kernel_pre_run_events;
  std::vector<std::vector<std::function<void()>>> kernel_post_run_events;
  auto events_iter = graph_kernel_events_map_.find(graph.graph_id());
  if (events_iter != graph_kernel_events_map_.end()) {
    kernel_pre_run_events = events_iter->second.first;
    kernel_post_run_events = events_iter->second.second;
  }
  for (size_t i = 0; i < kernels.size(); ++i) {
    LaunchKernelEvent(kernel_pre_run_events, i);
    if (!dynamic_kernel_list.empty() && dynamic_kernel_list[i] != nullptr &&
        dynamic_kernel_list[i]->is_dynamic_shape()) {
      dynamic_kernel_list[i]->InferShape();
      dynamic_kernel_list[i]->UpdateArgs();
      dynamic_kernel_list[i]->Execute();
      if (!SyncStream()) {
        MS_LOG(ERROR) << "SyncStream failed";
        return false;
      }
      dynamic_kernel_list[i]->PostExecute();
    } else {
      auto &kernel = kernels[i];
      MS_EXCEPTION_IF_NULL(kernel);
      // Skip transpose kernels with the "nop_op" attr, which are not hidden or removed in the PyNative infer
      // scenario. Such transpose kernels are generated in TransDataSplit to support specific Transdata cases and are
      // not supposed to be executed. This hard-coded check should be removed once the new Transdata scheme is
      // implemented.
      if (AnfAlgo::HasNodeAttr("nop_op", kernel)) {
        for (size_t idx = 0; idx < AnfAlgo::GetOutputTensorNum(kernel); idx += 1) {
          auto real_input = AnfAlgo::GetRealInputIndex(kernel, idx);
          auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, real_input);
          AnfAlgo::SetOutputAddr(device_address, idx, kernel.get());
        }
        continue;
      }
      auto ret = LaunchKernel(kernel);
      if (!ret) {
        MS_LOG(ERROR) << "Launch kernel failed.";
        return false;
      }
      KernelLaunchProfiling(kernel->fullname_with_scope());
      DebugStreamSync(kernel);
    }
    LaunchKernelEvent(kernel_post_run_events, i);
  }
  return true;
}
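// Launch all kernels of a graph; in graph mode the stream is synchronized once after the whole graph has been
// launched.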
bool KernelRuntime::LaunchKernels(const session::KernelGraph *graph) {
  MS_EXCEPTION_IF_NULL(graph);
  if (!LaunchKernelMod(*graph)) {
    MS_LOG(ERROR) << "LaunchKernelMod failed!";
    return false;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kGraphMode) {
    if (!SyncStream()) {
      MS_LOG(ERROR) << "SyncStream failed";
      return false;
    }
  }
  return true;
}
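// Clear the runtime resources bound to a graph; this common implementation only logs the request.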
void KernelRuntime::ClearGraphRuntimeResource(uint32_t graph_id) {
  MS_LOG(INFO) << "Clear graph:" << graph_id << " runtime resource";
}
#if ((defined ENABLE_CPU) && (!defined _WIN32))
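// Locate the first GatherV2/SparseGatherV2 kernel whose parameter uses the parameter-server embedding cache, and
// return the node its input index comes from together with the cache size, for later consistency checks.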
void KernelRuntime::GetFirstPSEmbeddingCache(const session::KernelGraph *graph,
                                             AnfNodePtr *const first_cache_input_index,
                                             size_t *const first_cache_size) {
  MS_EXCEPTION_IF_NULL(graph);
  for (const auto &kernel : graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    auto param_name = input_param.first->fullname_with_scope();
    if (!ps::ps_cache_instance.IsHashTable(param_name)) {
      continue;
    }
    auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
    while (input_index.first->isa<CNode>() && (AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode =
      AnfAlgo::IsGraphKernel(input_index.first) ? AnfAlgo::GetOutputOfGraphkernel(input_index) : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (!cnode->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "The input index of the embeddingLookup should be a CNode, but got "
                        << cnode->fullname_with_scope();
    }
    auto input_index_node_name = AnfAlgo::GetCNodeName(cnode);
    if (input_index_node_name != kGetNextOpName) {
      bool full_batch = parallel::ParallelContext::GetInstance()->full_batch();
      if ((!full_batch && (input_index_node_name != kUniqueOpName)) ||
          (full_batch && (input_index_node_name != kMinimumOpName))) {
        MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope()
                      << ") cache is from " << cnode->fullname_with_scope();
        MS_LOG(EXCEPTION) << "The embeddingLookup whose input index isn't from dataset doesn't support cache in "
                             "parameter server training mode.";
      }
    }
    *first_cache_input_index = cnode;
    *first_cache_size = size;
    MS_LOG(INFO) << "The input index of the first embeddingLookup cache is from " << cnode->fullname_with_scope()
                 << ", the cache size is " << size;
    return;
  }
}
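// For a cached SparseGatherV2, require that its input indices come from a Unique kernel whose own input traces back
// (possibly through Cast ops) directly to GetNext.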
void KernelRuntime::CheckSparsePSEmbeddingCache(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  auto pre_node = AnfAlgo::GetPrevNodeOutput(node, 1, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    MS_LOG(EXCEPTION) << "The input_indices of kernel[SparseGatherV2] must be unique in parameter server cache mode";
  }
  pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (AnfAlgo::GetCNodeName(pre_node.first) == kCastOpName)) {
    pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (AnfAlgo::GetCNodeName(pre_node.first) != kGetNextOpName)) {
    MS_LOG(EXCEPTION) << "The input indices of kernel[Unique] must be produced from dataset directly and the indices "
                         "value can not be changed before delivering to kernel[Unique] in parameter server cache "
                         "mode.";
  }
}
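// Validate the embedding-cache configuration of the graph: every lookup whose indices come from the dataset must
// enable the cache, all cached lookups must share the same cache size, and a cached lookup may not take its indices
// from anywhere other than the dataset.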
void KernelRuntime::CheckIfSupportPSEmbeddingCache(const session::KernelGraph *graph) {
  MS_EXCEPTION_IF_NULL(graph);
  AnfNodePtr first_cache_input_index = nullptr;
  size_t first_cache_size = 0;
  GetFirstPSEmbeddingCache(graph, &first_cache_input_index, &first_cache_size);
  MS_EXCEPTION_IF_NULL(first_cache_input_index);
  for (const auto &kernel : graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    if (!input_param.first->isa<Parameter>()) {
      continue;
    }
    auto param_name = input_param.first->fullname_with_scope();
    if (ps::ps_cache_instance.IsHashTable(param_name) && (kernel_name == kSparseGatherV2OpName)) {
      CheckSparsePSEmbeddingCache(kernel);
    }
    while (input_index.first->isa<CNode>() && (AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode =
      AnfAlgo::IsGraphKernel(input_index.first) ? AnfAlgo::GetOutputOfGraphkernel(input_index) : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (cnode == first_cache_input_index) {
      if (!ps::ps_cache_instance.IsHashTable(param_name)) {
        MS_LOG(ERROR) << "The embeddingLookup(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
        MS_LOG(EXCEPTION) << "All the embeddingLookups whose input indices are from dataset must enable cache at the "
                             "same time when one of them enables cache in parameter server training mode.";
      }
      auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
      if (size != first_cache_size) {
        MS_LOG(ERROR) << "The cache size(" << size << ") of embeddingLookup(" << kernel->fullname_with_scope()
                      << ") is not the same as other embeddingLookup cache size(" << first_cache_size << ").";
        MS_LOG(EXCEPTION) << "The cache sizes of embeddingLookups are not the same in parameter server training mode.";
      }
    } else if (ps::ps_cache_instance.IsHashTable(param_name)) {
      MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope()
                    << ") cache is from " << cnode->fullname_with_scope();
      MS_LOG(EXCEPTION) << "The embeddingLookup whose input index isn't from dataset doesn't support cache in "
                           "parameter server training mode.";
    } else if (cnode->isa<CNode>() && (AnfAlgo::GetCNodeName(cnode) == kGetNextOpName)) {
      MS_LOG(ERROR) << "The EmbeddingLookup kernel(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
      MS_LOG(EXCEPTION) << "All EmbeddingLookup kernels whose input indices are from dataset must enable cache at "
                           "the same time and parameter 'sparse' must be equal to the value of 'enable_sparse' in "
                           "context setting in parameter server training mode.";
    }
  }
}
#endif
}  // namespace device
}  // namespace mindspore