kernel_runtime.cc (79 kB)

  1. /**
  2. * Copyright 2019-2022 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "runtime/device/kernel_runtime.h"
  17. #include <functional>
  18. #include <utility>
  19. #include <vector>
  20. #include <set>
  21. #include "backend/common/optimizer/helper.h"
  22. #include "backend/common/session/anf_runtime_algorithm.h"
  23. #include "backend/common/session/kernel_graph.h"
  24. #include "utils/ms_device_shape_transfer.h"
  25. #include "debug/data_dump/dump_json_parser.h"
  26. #include "frontend/operator/ops.h"
  27. #include "ir/value.h"
  28. #include "utils/ms_context.h"
  29. #include "utils/ms_utils.h"
  30. #include "utils/shape_utils.h"
  31. #include "utils/utils.h"
  32. #include "frontend/parallel/context.h"
  33. #include "debug/env_config_parser.h"
  34. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  35. #include "ps/ps_cache/ps_cache_manager.h"
  36. #endif
  37. using mindspore::kernel::Address;
  38. using mindspore::kernel::AddressPtr;
  39. namespace mindspore {
  40. namespace device {
  41. constexpr size_t kAtomicCleanInputSize = 2;
  42. namespace {
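  // Collect the graph's declared inputs plus any Parameter nodes that kernels read but that are missing from graph.inputs().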
  43. std::vector<AnfNodePtr> GetGraphInputs(const session::KernelGraph &graph) {
  44. auto graph_inputs = graph.inputs();
  45. std::vector<AnfNodePtr> result(graph_inputs.begin(), graph_inputs.end());
  46. std::set<AnfNodePtr> inputs_set(graph_inputs.begin(), graph_inputs.end());
  47. auto kernels = graph.execution_order();
  48. for (auto &kernel : kernels) {
  49. MS_EXCEPTION_IF_NULL(kernel);
  50. auto input_num = AnfAlgo::GetInputTensorNum(kernel);
  51. for (size_t i = 0; i < input_num; ++i) {
  52. auto input_node = kernel->input(i + 1);
  53. auto input_real_node = AnfAlgo::VisitKernelWithReturnType(input_node, 0).first;
  54. MS_EXCEPTION_IF_NULL(input_real_node);
  55. if (input_real_node->isa<Parameter>() && inputs_set.find(input_real_node) == inputs_set.end()) {
  56. (void)inputs_set.insert(input_real_node);
  57. (void)result.emplace_back(input_real_node);
  58. }
  59. }
  60. }
  61. return result;
  62. }
  63. } // namespace
  64. constexpr size_t kMinInputSize = 2;
  65. KernelRuntime::~KernelRuntime() {
  66. stream_ = nullptr;
  67. independent_stream_ = nullptr;
  68. communication_stream_ = nullptr;
  69. }
  70. bool KernelRuntime::Load(const session::KernelGraph &, bool) {
  71. MS_LOG(INFO) << "Call default load.";
  72. return true;
  73. }
  74. bool KernelRuntime::LoadData(const session::KernelGraph &) {
  75. MS_LOG(INFO) << "Call default load data.";
  76. return false;
  77. }
  78. bool KernelRuntime::NodeOutputDeviceAddressExist(const AnfNodePtr &kernel, size_t index) {
  79. MS_EXCEPTION_IF_NULL(kernel);
  80. if (AnfAlgo::OutputAddrExist(kernel, index)) {
  81. const auto &address = AnfAlgo::GetOutputAddr(kernel, index);
  82. MS_EXCEPTION_IF_NULL(address);
  83. return address->DeviceType() == GetTargetDeviceAddressType();
  84. }
  85. return false;
  86. }
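  // Entry point for whole-graph memory assignment. With the memory scheduler enabled, only value nodes and
  // communication ops are pre-assigned and node addresses are reset; otherwise static then dynamic memory is assigned,
  // and ref-node outputs are synchronized afterwards.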
  87. void KernelRuntime::AssignMemory(const session::KernelGraph &graph) {
  88. auto context_ptr = MsContext::GetInstance();
  89. MS_EXCEPTION_IF_NULL(context_ptr);
  90. if (UseMemScheduler()) {
  91. AssignStaticMemoryValueNode(graph);
  92. ResetNodeAddress(graph);
  93. AssignCommunicationMem(graph);
  94. } else {
  95. MS_EXCEPTION_IF_NULL(mem_manager_);
  96. mem_manager_->ResetDynamicMemory();
  97. AssignStaticMemory(graph);
  98. AssignDynamicMemory(graph);
  99. }
  100. UpdateRefNodeOutputMem(graph);
  101. }
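  // Collect device addresses and aligned sizes for a communication op's inputs (creating addresses on demand)
  // so they can later be allocated as one contiguous block.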
  102. void KernelRuntime::GetCommunicationInputInfo(const AnfNodePtr &node, size_t *total_size,
  103. DeviceAddressPtrList *address_list,
  104. std::vector<size_t> *align_size_list) const {
  105. MS_EXCEPTION_IF_NULL(node);
  106. MS_EXCEPTION_IF_NULL(total_size);
  107. MS_EXCEPTION_IF_NULL(address_list);
  108. MS_EXCEPTION_IF_NULL(align_size_list);
  109. size_t input_num = AnfAlgo::GetInputTensorNum(node);
  110. for (size_t i = 0; i < input_num; ++i) {
  111. auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(node, i, true);
  112. auto input_node = input_node_with_index.first;
  113. MS_EXCEPTION_IF_NULL(input_node);
  114. DeviceAddressPtr address = nullptr;
  115. if (AnfAlgo::OutputAddrExist(input_node, input_node_with_index.second)) {
  116. address = AnfAlgo::GetMutableOutputAddr(input_node, input_node_with_index.second);
  117. } else {
  118. address = PreAssignCNodeMemory(input_node, input_node_with_index.second);
  119. }
  120. MS_EXCEPTION_IF_NULL(address);
  121. auto align_size = MemoryManager::GetCommonAlignSize(address->size());
  122. *total_size += align_size;
  123. address_list->emplace_back(address);
  124. align_size_list->emplace_back(align_size);
  125. }
  126. }
  127. void KernelRuntime::AssignCommunicationInputFromMemoryPool(const AnfNodePtr &node) const {
  128. if (!AnfAlgo::IsCommunicationOp(node)) {
  129. return;
  130. }
  131. MS_EXCEPTION_IF_NULL(node);
  132. MS_EXCEPTION_IF_NULL(mem_manager_);
  133. size_t total_size = 0;
  134. DeviceAddressPtrList address_list;
  135. std::vector<size_t> align_size_list;
  136. GetCommunicationInputInfo(node, &total_size, &address_list, &align_size_list);
  137. if (align_size_list.empty()) {
  138. MS_LOG(WARNING) << "No inputs for " << node->fullname_with_scope();
  139. return;
  140. }
  141. if (!mem_manager_->MallocContinuousMemFromMemPool(address_list, total_size, align_size_list)) {
  142. MS_LOG(EXCEPTION) << "Allocate continuous memory failed, total_size:" << total_size;
  143. }
  144. }
  145. void KernelRuntime::GetCommunicationOutputInfo(const AnfNodePtr &node, size_t *total_size,
  146. DeviceAddressPtrList *address_list,
  147. std::vector<size_t> *align_size_list) const {
  148. MS_EXCEPTION_IF_NULL(node);
  149. MS_EXCEPTION_IF_NULL(total_size);
  150. MS_EXCEPTION_IF_NULL(align_size_list);
  151. MS_EXCEPTION_IF_NULL(address_list);
  152. const auto kernel_mod = AnfAlgo::GetKernelMod(node);
  153. MS_EXCEPTION_IF_NULL(kernel_mod);
  154. const auto output_size_list = kernel_mod->GetOutputSizeList();
  155. for (size_t i = 0; i < output_size_list.size(); ++i) {
  156. DeviceAddressPtr address = nullptr;
  157. if (AnfAlgo::OutputAddrExist(node, i)) {
  158. address = AnfAlgo::GetMutableOutputAddr(node, i);
  159. } else {
  160. const std::string output_format = AnfAlgo::GetOutputFormat(node, i);
  161. const auto output_type = AnfAlgo::GetOutputDeviceDataType(node, i);
  162. const auto tensor_size = AnfAlgo::GetOutputTensorMemSize(node, i);
  163. address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type, {node, i});
  164. AnfAlgo::SetOutputAddr(address, i, node.get());
  165. }
  166. MS_EXCEPTION_IF_NULL(address);
  167. auto align_size = MemoryManager::GetCommonAlignSize(address->size());
  168. *total_size += align_size;
  169. align_size_list->emplace_back(align_size);
  170. address_list->emplace_back(address);
  171. }
  172. }
  173. void KernelRuntime::AssignCommunicationOutputFromMemoryPool(const AnfNodePtr &node) const {
  174. if (!AnfAlgo::IsCommunicationOp(node)) {
  175. return;
  176. }
  177. MS_EXCEPTION_IF_NULL(node);
  178. MS_EXCEPTION_IF_NULL(mem_manager_);
  179. size_t total_size = 0;
  180. std::vector<size_t> align_size_list;
  181. std::vector<DeviceAddressPtr> address_list;
  182. GetCommunicationOutputInfo(node, &total_size, &address_list, &align_size_list);
  183. if (align_size_list.empty()) {
  184. MS_LOG(WARNING) << "No output for " << node->fullname_with_scope();
  185. return;
  186. }
  187. if (!mem_manager_->MallocContinuousMemFromMemPool(address_list, total_size, align_size_list)) {
  188. MS_LOG(EXCEPTION) << "Allocate continuous memory failed, total_size:" << total_size;
  189. }
  190. }
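  // For single-op execution: pre-create DeviceAddresses without device pointers for every kernel output and
  // graph input; the real pointers are bound after kernel build.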
  191. void KernelRuntime::RunOpMallocPre(const session::KernelGraph &graph,
  192. const std::vector<tensor::TensorPtr> &input_tensors) {
  193. const auto &nodes = graph.execution_order();
  194. // Malloc for Node output
  195. for (const auto &node : nodes) {
  196. auto output_num = AnfAlgo::GetOutputTensorNum(node);
  197. for (size_t i = 0; i < output_num; ++i) {
  198. MS_EXCEPTION_IF_NULL(node);
  199. auto runtime_info = node->user_data<session::OpRuntimeInfo>();
  200. MS_EXCEPTION_IF_NULL(runtime_info);
  201. auto const &output_format = runtime_info->output_format(i);
  202. auto output_type = runtime_info->output_type(i);
  203. auto tensor_size = runtime_info->output_tensor_size(i);
  204. // Create DeviceAddress without ptr.
  205. // Get the real device ptr after KernelBuild finishes.
  206. auto device_address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type);
  207. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  208. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  209. }
  210. }
  211. // Malloc for graph input
  212. if (input_tensors.size() != graph.inputs().size()) {
  213. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  214. << " should be equal to graph input parameter size " << graph.inputs().size();
  215. }
  216. for (size_t input_index = 0; input_index < graph.inputs().size(); ++input_index) {
  217. auto item = graph.inputs()[input_index];
  218. MS_EXCEPTION_IF_NULL(item);
  219. if (!item->isa<Parameter>()) {
  220. continue;
  221. }
  222. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  223. for (size_t index = 0; index < output_size; index++) {
  224. auto current_tensor = input_tensors[input_index];
  225. MS_EXCEPTION_IF_NULL(current_tensor);
  226. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  227. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  228. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  229. continue;
  230. }
  231. auto op_runtime_info = item->user_data<session::OpRuntimeInfo>();
  232. MS_EXCEPTION_IF_NULL(op_runtime_info);
  233. TypeId output_type_id = op_runtime_info->output_type(index);
  234. auto output_tensor_size = op_runtime_info->output_tensor_size(index);
  235. auto output_format = op_runtime_info->output_format(index);
  236. auto device_address =
  237. CreateDeviceAddress(nullptr, output_tensor_size, output_format, output_type_id, {item, index});
  238. device_address->set_from_persistent_mem(current_tensor->is_parameter());
  239. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  240. current_tensor->set_device_address(device_address);
  241. current_tensor->set_sync_status(kNeedSyncHostToDevice);
  242. }
  243. }
  244. }
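  // Recreate empty device addresses for every kernel's inputs, outputs and workspaces so the memory scheduler
  // can manage the actual allocations.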
  245. void KernelRuntime::ResetNodeAddress(const session::KernelGraph &kernel_graph) {
  246. auto kernels = kernel_graph.execution_order();
  247. for (auto &kernel : kernels) {
  248. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  249. MS_EXCEPTION_IF_NULL(kernel_mod);
  250. size_t input_num = AnfAlgo::GetInputTensorNum(kernel);
  251. for (size_t j = 0; j < input_num; ++j) {
  252. auto input_index = AnfAlgo::GetRealInputIndex(kernel, j);
  253. KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(kernel, input_index, true);
  254. auto index = kernel_with_index.second;
  255. auto &input_node = kernel_with_index.first;
  256. if (NodeOutputDeviceAddressExist(input_node, index)) {
  257. continue;
  258. }
  259. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(input_node, index);
  260. if (output_type_id == kTypeUnknown) {
  261. MS_LOG(WARNING) << "It is not recommended to use an isolated weight parameter as a graph output";
  262. continue;
  263. }
  264. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(input_node, index);
  265. auto device_address = CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(input_node, index),
  266. output_type_id, {input_node, index});
  267. AnfAlgo::SetOutputAddr(device_address, index, input_node.get());
  268. }
  269. auto output_sizes = kernel_mod->GetOutputSizeList();
  270. for (size_t i = 0; i < output_sizes.size(); ++i) {
  271. auto output_format = AnfAlgo::GetOutputFormat(kernel, i);
  272. auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
  273. AnfAlgo::SetOutputAddr(CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type), i,
  274. kernel.get());
  275. }
  276. auto workspace_sizes = kernel_mod->GetWorkspaceSizeList();
  277. for (size_t i = 0; i < workspace_sizes.size(); ++i) {
  278. AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(nullptr, workspace_sizes[i], kOpFormat_DEFAULT, kNumberTypeFloat32),
  279. i, kernel.get());
  280. }
  281. }
  282. }
  283. void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors,
  284. const session::KernelGraph &graph, bool is_gradient_out,
  285. const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  286. MS_EXCEPTION_IF_NULL(mem_manager_);
  287. mem_manager_->ResetDynamicMemory();
  288. for (const auto &node : graph.execution_order()) {
  289. AssignCommunicationOutputFromMemoryPool(node);
  290. AssignCommunicationInputFromMemoryPool(node);
  291. }
  292. RunOpAssignInputMemory(input_tensors, graph);
  293. AssignStaticMemoryValueNode(graph);
  294. for (const auto &node : graph.execution_order()) {
  295. RunOpAssignOutputMemory(node, tensor_to_node, is_gradient_out);
  296. RunOpAssignWorkSpaceMemory(node);
  297. }
  298. UpdateRefNodeOutputMem(graph);
  299. }
  300. void KernelRuntime::RunOpClearMemory(const session::KernelGraph &graph) const {
  301. // clear input parameter memory resource
  302. for (const auto &input_node : graph.inputs()) {
  303. MS_EXCEPTION_IF_NULL(input_node);
  304. AnfAlgo::SetOutputAddr(nullptr, 0, input_node.get());
  305. }
  306. // clear input value node memory resource
  307. for (const auto &value_node : graph.graph_value_nodes()) {
  308. MS_EXCEPTION_IF_NULL(value_node);
  309. AnfAlgo::SetOutputAddr(nullptr, 0, value_node.get());
  310. }
  311. for (const auto &cnode : graph.execution_order()) {
  312. MS_EXCEPTION_IF_NULL(cnode);
  313. // clear output memory resource
  314. size_t output_num = AnfAlgo::GetOutputTensorNum(cnode);
  315. for (size_t index = 0; index < output_num; ++index) {
  316. AnfAlgo::SetOutputAddr(nullptr, index, cnode.get());
  317. }
  318. // clear workspace memory resource
  319. auto kernel_mod = AnfAlgo::GetKernelMod(cnode);
  320. MS_EXCEPTION_IF_NULL(kernel_mod);
  321. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  322. for (size_t index = 0; index < workspace_lists.size(); ++index) {
  323. AnfAlgo::SetWorkspaceAddr(nullptr, index, cnode.get());
  324. }
  325. }
  326. }
  327. #ifdef ENABLE_DEBUGGER
  328. bool KernelRuntime::DumpDataEnabled() {
  329. // Returns true if e2e dump is enabled.
  330. auto &dump_json_parser = DumpJsonParser::GetInstance();
  331. return dump_json_parser.e2e_dump_enabled();
  332. }
  333. bool KernelRuntime::DumpDataEnabledIteration() {
  334. // Returns true if e2e dump is enabled and current iteration must be dumped.
  335. auto &dump_json_parser = DumpJsonParser::GetInstance();
  336. if (!dump_json_parser.e2e_dump_enabled()) {
  337. return false;
  338. }
  339. auto cur_iter = dump_json_parser.cur_dump_iter();
  340. if (dump_json_parser.IsDumpIter(cur_iter)) {
  341. return true;
  342. }
  343. return false;
  344. }
  345. #endif
  346. void KernelRuntime::AssignStaticMemory(const session::KernelGraph &graph) {
  347. AssignStaticMemoryInput(graph);
  348. AssignStaticMemoryValueNode(graph);
  349. AssignStaticMemoryOutput(graph);
  350. }
  351. void KernelRuntime::RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors,
  352. const session::KernelGraph &graph) {
  353. MS_EXCEPTION_IF_NULL(mem_manager_);
  354. if (input_tensors.size() != graph.inputs().size()) {
  355. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  356. << " should be equal to graph input parameter size " << graph.inputs().size();
  357. }
  358. for (size_t input_index = 0; input_index < graph.inputs().size(); ++input_index) {
  359. auto item = graph.inputs()[input_index];
  360. MS_EXCEPTION_IF_NULL(item);
  361. if (!item->isa<Parameter>()) {
  362. continue;
  363. }
  364. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  365. for (size_t index = 0; index < output_size; index++) {
  366. auto current_tensor = input_tensors[input_index];
  367. MS_EXCEPTION_IF_NULL(current_tensor);
  368. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  369. // Device address has already been created
  370. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  371. if (output_address->ptr_ == nullptr) {
  372. if (!mem_manager_->MallocMemFromMemPool(output_address, output_address->size())) {
  373. MS_LOG(EXCEPTION) << "Allocate memory failed, size:" << output_address->size();
  374. }
  375. }
  376. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  377. continue;
  378. }
  379. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  380. if (output_type_id == kTypeUnknown) {
  381. output_type_id = AnfAlgo::GetOutputInferDataType(item, index);
  382. }
  383. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  384. // Create a new device address
  385. auto device_address =
  386. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  387. MS_EXCEPTION_IF_NULL(device_address);
  388. MS_EXCEPTION_IF_NULL(mem_manager_);
  389. device_address->set_from_persistent_mem(true);
  390. auto ret = mem_manager_->MallocMemFromMemPool(device_address, tensor_size);
  391. if (!ret) {
  392. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  393. }
  394. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  395. }
  396. }
  397. }
  398. void KernelRuntime::RunOpAssignOutputMemory(const AnfNodePtr &kernel,
  399. const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
  400. bool is_gradient_out) {
  401. MS_EXCEPTION_IF_NULL(kernel);
  402. MS_EXCEPTION_IF_NULL(mem_manager_);
  403. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  404. MS_EXCEPTION_IF_NULL(kernel_mod);
  405. auto output_sizes = kernel_mod->GetOutputSizeList();
  406. if (output_sizes.empty()) {
  407. return;
  408. }
  409. // Use the device_address allocated in RunOpMallocPre.
  410. for (auto &iter : tensor_to_node) {
  411. auto device_address = iter.first->device_address();
  412. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(device_address), iter.second.second,
  413. iter.second.first.get());
  414. }
  415. for (size_t i = 0; i < output_sizes.size(); ++i) {
  416. if (AnfAlgo::OutputAddrExist(kernel, i, false)) {
  417. auto address = AnfAlgo::GetMutableOutputAddr(kernel, i, false);
  418. MS_EXCEPTION_IF_NULL(address);
  419. if (address->ptr() == nullptr) {
  420. MS_EXCEPTION_IF_NULL(mem_manager_);
  421. if (!mem_manager_->MallocMemFromMemPool(address, address->size())) {
  422. MS_LOG(EXCEPTION) << "Allocate memory failed, size:" << address->size();
  423. }
  424. }
  425. continue;
  426. }
  427. if (AnfAlgo::GetCNodeName(kernel) == kApplyMomentumOpName) {
  428. auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i);
  429. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  430. continue;
  431. }
  432. std::string output_format = AnfAlgo::GetOutputFormat(kernel, i);
  433. auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
  434. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {kernel, i});
  435. MS_EXCEPTION_IF_NULL(device_address);
  436. device_address->set_host_shape(trans::GetRuntimePaddingShape(kernel, i));
  437. if (is_gradient_out) {
  438. device_address->set_from_persistent_mem(true);
  439. }
  440. auto ret = mem_manager_->MallocMemFromMemPool(device_address, output_sizes[i]);
  441. if (!ret) {
  442. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << output_sizes[i];
  443. }
  444. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  445. }
  446. }
  447. void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) {
  448. MS_EXCEPTION_IF_NULL(kernel);
  449. MS_EXCEPTION_IF_NULL(mem_manager_);
  450. if (kernel->isa<CNode>()) {
  451. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  452. MS_EXCEPTION_IF_NULL(kernel_mod);
  453. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  454. for (size_t i = 0; i < workspace_lists.size(); ++i) {
  455. auto device_address = CreateDeviceAddress(nullptr, workspace_lists[i], "", kTypeUnknown);
  456. MS_EXCEPTION_IF_NULL(device_address);
  457. auto ret = mem_manager_->MallocMemFromMemPool(device_address, workspace_lists[i]);
  458. if (!ret) {
  459. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << workspace_lists[i];
  460. }
  461. AnfAlgo::SetWorkspaceAddr(device_address, i, kernel.get());
  462. }
  463. }
  464. }
  465. void KernelRuntime::RunOpAssignOutputNodeMemory(const ValuePtr &pre_output_value, const session::KernelGraph &graph) {
  466. if (pre_output_value == nullptr) {
  467. return;
  468. }
  469. std::vector<tensor::TensorPtr> pre_output_tensors;
  470. TensorValueToTensor(pre_output_value, &pre_output_tensors);
  471. auto output_nodes = graph.outputs();
  472. if (pre_output_tensors.size() != output_nodes.size()) {
  473. MS_LOG(EXCEPTION) << "The size of pre output tensors [" << pre_output_tensors.size()
  474. << "] is not equal to the size of output nodes of graph [" << output_nodes.size() << "]";
  475. }
  476. // share output address with pre output tensors
  477. for (size_t i = 0; i < output_nodes.size(); ++i) {
  478. auto output_node_with_index = AnfAlgo::VisitKernel(output_nodes[i], 0);
  479. auto output_node = output_node_with_index.first;
  480. MS_EXCEPTION_IF_NULL(output_node);
  481. if (!output_node->isa<CNode>()) {
  482. if (output_node->isa<Parameter>()) {
  483. auto param = output_node->cast<ParameterPtr>();
  484. if (param != nullptr && !param->has_default()) {
  485. MS_LOG(EXCEPTION) << "The output parameter should be a real parameter!";
  486. }
  487. }
  488. continue;
  489. }
  490. auto real_output_cnode = output_node->cast<CNodePtr>();
  491. MS_EXCEPTION_IF_NULL(real_output_cnode);
  492. MS_EXCEPTION_IF_NULL(pre_output_tensors[i]);
  493. if (pre_output_tensors[i]->device_address() == nullptr) {
  494. MS_LOG(INFO) << "The address of pre output tensor [" << i << "] is a nullptr!";
  495. continue;
  496. }
  497. if (opt::IsNopNode(real_output_cnode)) {
  498. if (real_output_cnode->inputs().size() < kMinInputSize) {
  499. MS_LOG(EXCEPTION) << "The input size of output node: " << real_output_cnode->DebugString()
  500. << " should be larger than one!";
  501. }
  502. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  503. output_node_with_index.second, real_output_cnode->input(1).get());
  504. } else {
  505. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  506. output_node_with_index.second, output_node_with_index.first.get());
  507. }
  508. }
  509. }
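  // Assign static memory for graph input parameters; parameters served by the PS embedding cache reuse the
  // cached hash-table address instead of newly allocated memory.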
  510. void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph &graph) {
  511. MS_EXCEPTION_IF_NULL(mem_manager_);
  512. auto graph_id = graph.graph_id();
  513. MS_LOG(INFO) << "AssignStaticMemoryInput start for graph " << graph_id;
  514. auto graph_inputs = GetGraphInputs(graph);
  515. auto graph_valid_input = graph.valid_inputs();
  516. graph_inputs.insert(graph_inputs.end(), graph.child_graph_result().begin(), graph.child_graph_result().end());
  517. std::vector<AnfNodePtr> need_alloc_nodes;
  518. auto add_need_alloc_nodes = [&need_alloc_nodes, graph_id, this](const AnfNodePtr &node) {
  519. MS_EXCEPTION_IF_NULL(node);
  520. if (!node->isa<Parameter>()) {
  521. return;
  522. }
  523. if (NodeOutputDeviceAddressExist(node, 0)) {
  524. const auto &address = AnfAlgo::GetOutputAddr(node, 0);
  525. MS_EXCEPTION_IF_NULL(address);
  526. if (address->GetPtr() != nullptr) {
  527. return;
  528. }
  529. }
  530. auto input_param = node->cast<ParameterPtr>();
  531. if (input_param != nullptr && !input_param->IsUsedByRealKernelInGraph(graph_id)) {
  532. return;
  533. }
  534. need_alloc_nodes.push_back(node);
  535. };
  536. for (size_t i = 0; i < graph_inputs.size(); ++i) {
  537. auto input_node = graph_inputs[i];
  538. MS_EXCEPTION_IF_NULL(input_node);
  539. if (i < graph_valid_input.size() && !graph_valid_input[i]) {
  540. continue;
  541. }
  542. if (AnfAlgo::CheckPrimitiveType(input_node, prim::kPrimMakeTuple)) {
  543. auto outs = AnfAlgo::GetAllOutput(input_node);
  544. for (auto &out : outs) {
  545. MS_EXCEPTION_IF_NULL(out);
  546. add_need_alloc_nodes(out);
  547. }
  548. }
  549. add_need_alloc_nodes(input_node);
  550. }
  551. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  552. bool ps_cache_check = false;
  553. #endif
  554. std::map<AnfNodePtr, AnfNodePtr> shadow_backend_node_map;
  555. GetShadowBackendNodeMap(graph, &shadow_backend_node_map);
  556. for (auto &item : need_alloc_nodes) {
  557. MS_EXCEPTION_IF_NULL(item);
  558. auto output_size = AnfAlgo::GetOutputTensorNum(item);
  559. for (size_t index = 0; index < output_size; index++) {
  560. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  561. // If a graph output is a weight that doesn't link to any cnode, its data type will be unknown
  562. if (output_type_id == kTypeUnknown) {
  563. MS_LOG(WARNING) << "It is not recommended to use an isolated weight parameter as a graph output";
  564. continue;
  565. }
  566. DeviceAddressPtr device_address = GetInternalDeviceAddress(graph, item);
  567. #if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
  568. const std::string &param_name = item->fullname_with_scope();
  569. if (ps::ps_cache_instance.IsHashTable(param_name)) {
  570. MS_LOG(INFO) << "Parameter(" << param_name << ")"
  571. << " enables the embeddingLookup cache in parameter server training mode.";
  572. // PS embeddingLookup cache check.
  573. if (!ps_cache_check) {
  574. CheckIfSupportPSEmbeddingCache(graph);
  575. ps_cache_check = true;
  576. }
  577. const auto &address = ps::ps_cache_instance.QueryHashTableAddr(param_name);
  578. MS_EXCEPTION_IF_NULL(address.addr);
  579. device_address = CreateDeviceAddress(address.addr, address.size, AnfAlgo::GetOutputFormat(item, index),
  580. output_type_id, {item, index});
  581. device_address->set_host_shape(trans::GetRuntimePaddingShape(item, index));
  582. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  583. continue;
  584. }
  585. #endif
  586. GetDeviceAddress(item, shadow_backend_node_map, index, graph.graph_id(), &device_address);
  587. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  588. }
  589. }
  590. MS_LOG(INFO) << "AssignStaticMemoryInput end";
  591. }
  592. void KernelRuntime::GetDeviceAddress(const AnfNodePtr &item,
  593. const std::map<AnfNodePtr, AnfNodePtr> shadow_backend_node_map, size_t index,
  594. uint32_t graph_id, DeviceAddressPtr *device_address) {
  595. AnfNodePtr shadow_node = nullptr;
  596. auto iter = shadow_backend_node_map.find(item);
  597. if (iter != shadow_backend_node_map.end()) {
  598. shadow_node = iter->second;
  599. }
  600. if (*device_address == nullptr && shadow_node != nullptr) {
  601. auto conj_device_address = AnfAlgo::GetMutableOutputAddr(shadow_node, index);
  602. if (conj_device_address != nullptr && conj_device_address->DeviceType() == DeviceAddressType::kAscend) {
  603. *device_address = conj_device_address;
  604. }
  605. } else if (*device_address == nullptr) {
  606. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  607. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  608. *device_address =
  609. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  610. }
  611. if (*device_address != nullptr && (*device_address)->GetPtr() == nullptr) {
  612. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  613. (*device_address)->set_host_shape(trans::GetRuntimePaddingShape(item, index));
  614. MS_LOG(INFO) << "Assign Static Memory for Input node, size:" << tensor_size
  615. << " node:" << item->fullname_with_scope() << " index: " << index;
  616. if (mem_manager_->MallocMem(kStaticMem, tensor_size, *device_address, graph_id) == nullptr) {
  617. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
  618. }
  619. }
  620. }
  621. void KernelRuntime::AssignStaticMemoryOutput(const session::KernelGraph &graph) {
  622. MS_LOG(INFO) << "AssignStaticMemoryOutput start for graph " << graph.graph_id();
  623. auto nodes = AnfAlgo::GetAllOutput(graph.output(), {prim::kPrimTupleGetItem});
  624. std::vector<session::KernelWithIndex> non_communication_op;
  625. // Assign communication op memory first.
  626. for (const auto &node : nodes) {
  627. // Assign an output address to nop nodes whose "skip_nop_op_addr" attribute is false.
  628. auto is_skip = !opt::IsNopNode(node) || AnfAlgo::IsNeedSkipNopOpAddr(node);
  629. auto kernel_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0, is_skip);
  630. MS_EXCEPTION_IF_NULL(kernel_with_index.first);
  631. if (!kernel_with_index.first->isa<CNode>() || !AnfUtils::IsRealKernel(kernel_with_index.first)) {
  632. continue;
  633. }
  634. if (AnfAlgo::IsCommunicationOp(kernel_with_index.first)) {
  635. AssignCommunicationNodeMem(kStaticMem, kernel_with_index.first);
  636. } else {
  637. non_communication_op.emplace_back(kernel_with_index);
  638. }
  639. }
  640. for (const auto &item_with_index : non_communication_op) {
  641. MS_EXCEPTION_IF_NULL(item_with_index.first);
  642. MS_LOG(DEBUG) << "AssignNodeOutputMem for " << item_with_index.first->fullname_with_scope();
  643. AssignNodeOutputMem(kStaticMem, item_with_index.first, SizeToInt(item_with_index.second));
  644. }
  645. MS_LOG(INFO) << "AssignStaticMemoryOutput end";
  646. }
  647. void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph &graph) {
  648. auto &kernels = graph.execution_order();
  649. for (auto &kernel : kernels) {
  650. MS_EXCEPTION_IF_NULL(kernel);
  651. auto output_num = AnfAlgo::GetOutputTensorNum(kernel);
  652. if (output_num == 0) {
  653. MS_LOG(DEBUG) << "This kernel has no output size.";
  654. continue;
  655. }
  656. for (size_t i = 0; i < output_num; ++i) {
  657. session::AnfWithOutIndex out_pair(kernel, i);
  658. if (graph.IsInRefOutputMap(out_pair)) {
  659. auto origin_pair = graph.GetRefCorrespondOutput(out_pair);
  660. MS_EXCEPTION_IF_NULL(origin_pair.first);
  661. auto origin_node_output_addr = AnfAlgo::GetMutableOutputAddr(origin_pair.first, origin_pair.second);
  662. MS_EXCEPTION_IF_NULL(origin_node_output_addr);
  663. auto cur_node_output_addr = AnfAlgo::GetMutableOutputAddr(kernel, i);
  664. if (origin_node_output_addr.get() != cur_node_output_addr.get()) {
  665. MS_LOG(DEBUG) << "REF address is not the same, ref node output needs an address update";
  666. MS_LOG(DEBUG) << "REF origin op is " << origin_pair.first->DebugString() << ", output index is "
  667. << origin_pair.second << ", cur op is " << kernel->DebugString() << ", out index is " << i;
  668. if (!cur_node_output_addr->host_shape().empty()) {
  669. origin_node_output_addr->set_host_shape(cur_node_output_addr->host_shape());
  670. }
  671. AnfAlgo::SetOutputAddr(origin_node_output_addr, i, kernel.get());
  672. }
  673. }
  674. }
  675. }
  676. }
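  // Assign contiguous input memory, contiguous output memory and workspace for a communication node.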
  677. void KernelRuntime::AssignCommunicationNodeMem(MemType type, const AnfNodePtr &node) {
  678. AssignCommunicationNodeInputMem(type, node);
  679. AssignCommunicationNodeOutputMem(type, node);
  680. AssignWorkSpaceMem(type, node);
  681. }
  682. void KernelRuntime::AssignCommunicationNodeOutputMem(MemType type, const AnfNodePtr &node) {
  683. MS_EXCEPTION_IF_NULL(node);
  684. MS_EXCEPTION_IF_NULL(mem_manager_);
  685. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  686. MS_EXCEPTION_IF_NULL(kernel_mod);
  687. auto output_sizes = kernel_mod->GetOutputSizeList();
  688. if (output_sizes.empty()) {
  689. MS_LOG(INFO) << "This kernel[" << node->DebugString() << "] has no output size.";
  690. return;
  691. }
  692. auto context_ptr = MsContext::GetInstance();
  693. MS_EXCEPTION_IF_NULL(context_ptr);
  694. size_t total_size = 0;
  695. size_t output_index = 0;
  696. std::vector<size_t> align_size_list;
  697. for (uint64_t mem_size : output_sizes) {
  698. if (AnfAlgo::OutputAddrExist(node, output_index++)) {
  699. MS_LOG(INFO) << "Communication op " << node->fullname_with_scope() << " has output device address";
  700. return;
  701. }
  702. if (context_ptr->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
  703. mem_size = MemoryManager::GetCommonAlignSize(mem_size);
  704. }
  705. total_size += mem_size;
  706. align_size_list.emplace_back(mem_size);
  707. }
  708. if (align_size_list.empty()) {
  709. return;
  710. }
  711. if (type == kSomasReuseDynamicMem) {
  712. bool not_reuse = KernelMemNotReuse(node);
  713. if (not_reuse) {
  714. type = kDynamicMem;
  715. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  716. }
  717. }
  718. uint8_t *output_ptr = nullptr;
  719. for (size_t j = 0; j < align_size_list.size(); ++j) {
  720. std::string output_format = AnfAlgo::GetOutputFormat(node, j);
  721. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, j);
  722. auto address = CreateDeviceAddress(nullptr, output_sizes[j], output_format, output_type, {node, j});
  723. MS_EXCEPTION_IF_NULL(address);
  724. if (output_ptr == nullptr) {
  725. output_ptr = mem_manager_->MallocOutputMem(node, 0, type, total_size, address, true);
  726. MS_EXCEPTION_IF_NULL(output_ptr);
  727. } else {
  728. address->set_ptr(output_ptr);
  729. }
  730. address->set_host_shape(trans::GetRuntimePaddingShape(node, j));
  731. AnfAlgo::SetOutputAddr(address, j, node.get());
  732. output_ptr += align_size_list[j];
  733. }
  734. }
  735. bool KernelRuntime::KernelMemNotReuse(const AnfNodePtr &node) {
  736. MS_EXCEPTION_IF_NULL(node);
  737. return false;
  738. }
  739. DeviceAddressPtr KernelRuntime::PreAssignCNodeMemory(const AnfNodePtr &anf_node, size_t index) const {
  740. MS_EXCEPTION_IF_NULL(anf_node);
  741. if (opt::IsNopNode(anf_node)) {
  742. auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, index);
  743. return PreAssignCNodeMemory(input_node_with_index.first, input_node_with_index.second);
  744. }
  745. auto output_size = AnfAlgo::GetOutputTensorMemSize(anf_node, index);
  746. std::string output_format = AnfAlgo::GetOutputFormat(anf_node, index);
  747. auto output_type = AnfAlgo::GetOutputDeviceDataType(anf_node, index);
  748. auto address = CreateDeviceAddress(nullptr, output_size, output_format, output_type, {anf_node, index});
  749. AnfAlgo::SetOutputAddr(address, index, anf_node.get());
  750. return address;
  751. }
  752. void KernelRuntime::AssignCommunicationNodeInputMem(MemType type, const AnfNodePtr &node) {
  753. auto context_ptr = MsContext::GetInstance();
  754. MS_EXCEPTION_IF_NULL(context_ptr);
  755. MS_EXCEPTION_IF_NULL(node);
  756. MS_EXCEPTION_IF_NULL(mem_manager_);
  757. size_t total_size = 0;
  758. std::vector<std::pair<DeviceAddressPtr, size_t>> addr_size;
  759. size_t input_num = AnfAlgo::GetInputTensorNum(node);
  760. for (size_t i = 0; i < input_num; ++i) {
  761. auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(node, i, true);
  762. auto input_node = input_node_with_index.first;
  763. MS_EXCEPTION_IF_NULL(input_node);
  764. if (AnfAlgo::OutputAddrExist(input_node, input_node_with_index.second)) {
  765. MS_LOG(INFO) << "Communication op " << input_node->fullname_with_scope() << " has input device address";
  766. return;
  767. }
  768. DeviceAddressPtr address = nullptr;
  769. address = PreAssignCNodeMemory(input_node, input_node_with_index.second);
  770. MS_EXCEPTION_IF_NULL(address);
  771. auto mem_size = MemoryManager::GetCommonAlignSize(address->size());
  772. total_size += mem_size;
  773. addr_size.emplace_back(address, mem_size);
  774. }
  775. if (addr_size.empty()) {
  776. return;
  777. }
  778. if (type == kSomasReuseDynamicMem) {
  779. bool not_reuse = KernelMemNotReuse(node);
  780. if (not_reuse) {
  781. type = kDynamicMem;
  782. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s input.";
  783. }
  784. }
  785. auto cnode = node->cast<CNodePtr>();
  786. MS_EXCEPTION_IF_NULL(cnode);
  787. if (cnode->inputs().size() < kMinInputSize) {
  788. // A communication node's inputs should contain the primitive itself and at least one real input
  789. MS_LOG(ERROR) << "No inputs for " << cnode->fullname_with_scope();
  790. return;
  791. }
  792. auto first_input_node = cnode->input(1);
  793. auto prenode_index = AnfAlgo::VisitKernelWithReturnType(first_input_node, 0, true);
  794. uint8_t *input_ptr = mem_manager_->MallocOutputMem(prenode_index.first, prenode_index.second, type, total_size,
  795. addr_size[0].first, true);
  796. for (const auto &iter : addr_size) {
  797. MS_EXCEPTION_IF_NULL(iter.first);
  798. iter.first->set_ptr(input_ptr);
  799. input_ptr += iter.second;
  800. }
  801. }
  802. void KernelRuntime::AssignNodeOutputMem(MemType type, const AnfNodePtr &node, int index) {
  803. MS_EXCEPTION_IF_NULL(node);
  804. MS_EXCEPTION_IF_NULL(mem_manager_);
  805. if (type == kSomasReuseDynamicMem) {
  806. bool not_reuse = KernelMemNotReuse(node);
  807. if (not_reuse) {
  808. type = kDynamicMem;
  809. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  810. }
  811. }
  812. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  813. MS_EXCEPTION_IF_NULL(kernel_mod);
  814. auto output_sizes = kernel_mod->GetOutputSizeList();
  815. if (output_sizes.empty()) {
  816. return;
  817. }
  818. for (size_t i = 0; i < output_sizes.size(); ++i) {
  819. if ((kGetAllOuts != index) && (SizeToInt(i) != index)) {
  820. continue;
  821. }
  822. if (NodeOutputDeviceAddressExist(node, i)) {
  823. MS_LOG(DEBUG) << "Already malloc index:" << i;
  824. continue;
  825. }
  826. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope() << " output memory size:" << output_sizes[i];
  827. if (type == kStaticMem) {
  828. MS_LOG(INFO) << "Assign Static Memory for Output node, size:" << output_sizes[i]
  829. << " node:" << node->fullname_with_scope();
  830. }
  831. std::string output_format = AnfAlgo::GetOutputFormat(node, i);
  832. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, i);
  833. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {node, i});
  834. MS_EXCEPTION_IF_NULL(device_address);
  835. uint8_t *ptr = mem_manager_->MallocOutputMem(node, i, type, output_sizes[i], device_address, false);
  836. MS_EXCEPTION_IF_NULL(ptr);
  837. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  838. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  839. }
  840. }
  841. DeviceAddressPtr KernelRuntime::AssignExtraStaticMem(const TensorPtr &tensor, const AnfNodePtr &node, size_t index) {
  842. MS_EXCEPTION_IF_NULL(node);
  843. MS_EXCEPTION_IF_NULL(mem_manager_);
  844. auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  845. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope()
  846. << " Assign Static Memory for Output node, size:" << tensor_address->size();
  847. auto device_address = CreateDeviceAddress(nullptr, tensor_address->size(), tensor_address->format(),
  848. tensor_address->type_id(), {node, index});
  849. MS_EXCEPTION_IF_NULL(device_address);
  850. uint8_t *ptr = mem_manager_->MallocOutputMem(node, index, kStaticMem, tensor_address->size(), device_address, false);
  851. MS_EXCEPTION_IF_NULL(ptr);
  852. return device_address;
  853. }
  854. void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const ValuePtr &node_value,
  855. size_t output_idx) {
  856. MS_EXCEPTION_IF_NULL(value_node);
  857. MS_EXCEPTION_IF_NULL(node_value);
  858. MS_EXCEPTION_IF_NULL(mem_manager_);
  859. auto ms_context = MsContext::GetInstance();
  860. MS_EXCEPTION_IF_NULL(ms_context);
  861. std::vector<tensor::TensorPtr> tensors;
  862. TensorValueToTensor(node_value, &tensors);
  863. // Graph id should be passed to record static memory if profiling is enabled.
  864. auto kernel_info = dynamic_cast<device::KernelInfo *>(value_node->kernel_info());
  865. MS_EXCEPTION_IF_NULL(kernel_info);
  866. uint32_t graph_id = kernel_info->graph_id();
  867. for (const auto &tensor : tensors) {
  868. if (tensor == nullptr) {
  869. MS_LOG(WARNING) << "Tensor is null";
  870. return;
  871. }
  872. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  873. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  874. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address()), output_idx++,
  875. value_node.get());
  876. continue;
  877. }
  878. size_t tensor_size = LongToSize(tensor->data().nbytes());
  879. auto node_size = AnfAlgo::GetOutputTensorMemSize(value_node, output_idx);
  880. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(value_node, output_idx);
  881. if (output_type_id == kTypeUnknown) {
  882. output_type_id = AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  883. }
  884. auto output_format = AnfAlgo::GetOutputFormat(value_node, output_idx);
  885. DeviceAddressPtr address =
  886. CreateDeviceAddress(nullptr, node_size, output_format, output_type_id, {value_node, output_idx});
  887. MS_EXCEPTION_IF_NULL(address);
  888. address->set_host_shape(trans::GetRuntimePaddingShape(value_node, output_idx));
  889. address->set_from_persistent_mem(true);
  890. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
  891. !mem_manager_->MallocMemFromMemPool(address, node_size)) {
  892. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << node_size;
  893. } else {
  894. MS_LOG(INFO) << "Assign Static Memory for Value node, size:" << node_size
  895. << " node:" << value_node->fullname_with_scope();
  896. if (mem_manager_->MallocMem(kStaticMem, node_size, address, graph_id) == nullptr) {
  897. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << node_size;
  898. }
  899. }
  900. AnfAlgo::SetOutputAddr(address, output_idx, value_node.get());
  901. if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(),
  902. tensor->data_c(), tensor->device_info().host_format_)) {
  903. MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail! " << value_node->DebugString()
  904. << " node format is " << AnfAlgo::GetOutputFormat(value_node, output_idx)
  905. << " node dtype is " << AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  906. }
  907. }
  908. }
  909. void KernelRuntime::AssignStaticMemoryValueNode(const session::KernelGraph &graph) {
  910. MS_EXCEPTION_IF_NULL(mem_manager_);
  911. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode start for graph " << graph.graph_id();
  912. auto ms_context = MsContext::GetInstance();
  913. MS_EXCEPTION_IF_NULL(ms_context);
  914. // order the value nodes
  915. std::map<std::string, ValueNodePtr> value_nodes_map;
  916. for (auto &node : graph.graph_value_nodes()) {
  917. MS_EXCEPTION_IF_NULL(node);
  918. value_nodes_map[node->fullname_with_scope()] = node;
  919. }
  920. for (auto &item : value_nodes_map) {
  921. auto value_node = item.second;
  922. MS_EXCEPTION_IF_NULL(value_node);
  923. if (NodeOutputDeviceAddressExist(value_node, 0)) {
  924. MS_LOG(DEBUG) << "value_node[" << value_node->DebugString() << "] address already exist";
  925. auto device_address = AnfAlgo::GetMutableOutputAddr(value_node, 0);
  926. if (device_address->ptr_ == nullptr) {
  927. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
  928. if (!mem_manager_->MallocMemFromMemPool(device_address, device_address->size_)) {
  929. MS_LOG(EXCEPTION) << "MallocMemFromMemPool failed";
  930. }
  931. } else {
  932. if (mem_manager_->MallocMem(kStaticMem, device_address->size_, device_address, graph.graph_id()) == nullptr) {
  933. MS_LOG(EXCEPTION) << "MallocStaticMem failed";
  934. }
  935. }
  936. }
  937. continue;
  938. }
  939. auto &node_value = value_node->value();
  940. MS_EXCEPTION_IF_NULL(node_value);
  941. MS_LOG(DEBUG) << "Malloc memory for " << value_node->fullname_with_scope();
  942. if (node_value->isa<Tensor>() || node_value->isa<ValueTuple>()) {
  943. AssignValueNodeTensor(value_node, node_value, 0);
  944. } else if (node_value->isa<StringImm>()) {
  945. const bool use_mem_from_memory_pool = ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) ||
  946. ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
  947. auto address = CreateDeviceAddressForStringValue(node_value, use_mem_from_memory_pool, graph.graph_id());
  948. MS_EXCEPTION_IF_NULL(address);
  949. address->set_from_persistent_mem(true);
  950. AnfAlgo::SetOutputAddr(address, 0, value_node.get());
  951. }
  952. }
  953. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode end";
  954. }
  955. DeviceAddressPtr KernelRuntime::CreateDeviceAddressForStringValue(const ValuePtr &value, bool use_mem_pool,
  956. uint32_t graph_id) {
  957. auto value_string = GetValue<std::string>(value);
  958. size_t tensor_size = value_string.size();
  959. DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeUInt8);
  960. MS_EXCEPTION_IF_NULL(address);
  961. address->set_from_persistent_mem(true);
  962. auto ms_context = MsContext::GetInstance();
  963. MS_EXCEPTION_IF_NULL(ms_context);
  964. if (use_mem_pool && !mem_manager_->MallocMemFromMemPool(address, tensor_size)) {
  965. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  966. } else {
  967. MS_LOG(INFO) << "Assign Static Memory for string Value node, size:" << tensor_size;
  968. if (mem_manager_->MallocMem(kStaticMem, tensor_size, address, graph_id) == nullptr) {
  969. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
  970. }
  971. }
  972. ShapeVector shape = {1, SizeToLong(tensor_size)};
  973. if (!address->SyncHostToDevice(shape, tensor_size, kNumberTypeUInt8, value_string.data())) {
  974. MS_LOG(EXCEPTION) << "kValueNode SyncHostToDevice fail!";
  975. }
  976. return address;
  977. }
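  // Assign dynamic (or SOMAS-reused) memory to all kernels; communication nodes are handled first so their
  // inputs and outputs stay contiguous.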
  978. void KernelRuntime::AssignDynamicMemory(const session::KernelGraph &graph) {
  979. MS_EXCEPTION_IF_NULL(mem_manager_);
  980. auto context_ptr = MsContext::GetInstance();
  981. MS_EXCEPTION_IF_NULL(context_ptr);
  982. bool is_enable_mem_reuse = EnvConfigParser::GetInstance().GetSysMemreuse();
  983. auto mem_type = kDynamicMem;
  984. auto &dump_json_parser = DumpJsonParser::GetInstance();
  985. if (dump_json_parser.e2e_dump_enabled() && dump_json_parser.dump_mode() == 0) {
  986. mindspore::EnvConfigParser::GetInstance().SetSysMemreuse(false);
  987. is_enable_mem_reuse = false;
  988. MS_LOG(INFO) << "Disable Memory Reuse when e2e dump is enabled and dump mode is set to dump all kernels";
  989. }
  990. if (is_enable_mem_reuse) {
  991. MS_LOG(INFO) << "Memory Reuse is enabled...";
  992. mem_manager_->MallocSomasDynamicMem(graph);
  993. mem_type = kSomasReuseDynamicMem;
  994. } else {
  995. MS_LOG(INFO) << "Memory Reuse is disabled...";
  996. }
  997. auto &execution_nodes = graph.execution_order();
  998. std::vector<CNodePtr> compute_nodes;
  999. // communication nodes first
  1000. for (auto &node : execution_nodes) {
  1001. if (AnfAlgo::IsCommunicationOp(node)) {
  1002. // skip if the memory is already allocated
  1003. AssignCommunicationNodeMem(mem_type, node);
  1004. } else {
  1005. compute_nodes.emplace_back(node);
  1006. }
  1007. }
  1008. // then compute nodes
  1009. for (auto &node : compute_nodes) {
  1010. AssignNodeOutputMem(mem_type, node, kGetAllOuts);
  1011. AssignWorkSpaceMem(mem_type, node);
  1012. }
  1013. }
void KernelRuntime::AssignWorkSpaceMem(MemType type, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(mem_manager_);
  auto kernel_mod = AnfAlgo::GetKernelMod(node);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  size_t index = 0;
  for (auto &size : kernel_mod->GetWorkspaceSizeList()) {
    if (AnfAlgo::WorkspaceAddrExist(node, index)) {
      MS_LOG(INFO) << "Op " << node->fullname_with_scope() << " has workspace device address";
      return;
    }
    auto ptr = mem_manager_->MallocWorkSpaceMem(node, index, type, size);
    AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get());
    index++;
  }
}

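// Collect the input, output and workspace device addresses of a kernel into a KernelLaunchInfo structure.
// Atomic address-clean kernels are handled separately because their inputs are the outputs/workspaces of the
// kernel they clean.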
void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel,
                                  KernelLaunchInfo *kernel_launch_info) {
  MS_EXCEPTION_IF_NULL(kernel);
  MS_EXCEPTION_IF_NULL(kernel_launch_info);
  auto cnode = kernel->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (AnfAlgo::GetCNodeName(cnode) == kAtomicAddrCleanOpName) {
    return GenAddrCleanLaunchArgs(cnode, &(kernel_launch_info->inputs_));
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto skip_nop_node = (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode);
  size_t input_num = AnfAlgo::GetInputTensorNum(kernel);
  for (size_t i = 0; i < input_num; ++i) {
    if (AnfAlgo::IsNoneInput(kernel, i)) {
      continue;
    }
    auto real_input = AnfAlgo::GetRealInputIndex(kernel, i);
    auto device_address = AnfAlgo::GetPrevNodeOutputAddr(kernel, real_input, skip_nop_node);
    MS_EXCEPTION_IF_NULL(device_address);
    kernel::AddressPtr input = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(input);
    input->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(input->addr);
    input->size = device_address->size_;
    kernel_launch_info->inputs_.emplace_back(input);
  }
  for (size_t i = 0; i < kernel_mod.GetOutputSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetOutputAddr(kernel, i, skip_nop_node);
    kernel::AddressPtr output = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(output);
    output->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(output->addr);
    output->size = device_address->size_;
    kernel_launch_info->outputs_.emplace_back(output);
  }
  for (size_t i = 0; i < kernel_mod.GetWorkspaceSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
    kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(workspace);
    workspace->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(workspace->addr);
    workspace->size = device_address->size_;
    kernel_launch_info->workspaces_.emplace_back(workspace);
  }
}

bool KernelRuntime::UseMemScheduler() {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (!context_ptr->get_param<bool>(MS_CTX_ENABLE_MEM_SCHEDULER)) {
    return false;
  }
  // Do not use the MemScheduler when running single ops
  return (!context_ptr->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
          (context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode));
}

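// Create pre/post device events around every communication kernel so that the compute stream and the
// communication stream stay synchronized: the communication kernel waits for the compute stream before starting,
// and the nearest downstream compute consumer (or the communication kernel itself) waits for it to finish.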
void KernelRuntime::GenKernelEvents(const session::KernelGraph &graph) {
  auto &kernels = graph.execution_order();
  if (kernels.empty() || graph_kernel_events_map_.find(graph.graph_id()) != graph_kernel_events_map_.end()) {
    return;
  }
  auto kernel_events = std::pair<std::map<AnfNodePtr, std::vector<std::function<void()>>>,
                                 std::map<AnfNodePtr, std::vector<std::function<void()>>>>();
  auto &kernel_pre_run_events = kernel_events.first;
  auto &kernel_post_run_events = kernel_events.second;
  for (size_t i = 0; i < kernels.size(); ++i) {
    auto &kernel = kernels[i];
    if (!AnfAlgo::IsCommunicationOp(kernel)) {
      continue;
    }
    auto pre_event = CreateDeviceEvent();
    auto post_event = CreateDeviceEvent();
    MS_EXCEPTION_IF_NULL(pre_event);
    MS_EXCEPTION_IF_NULL(post_event);
    pre_event->set_wait_stream(communication_stream_);
    pre_event->set_record_stream(stream_);
    post_event->set_wait_stream(stream_);
    post_event->set_record_stream(communication_stream_);
    kernel_pre_run_events[kernel].emplace_back([pre_event]() {
      pre_event->RecordEvent();
      pre_event->WaitEvent();
    });
    kernel_post_run_events[kernel].emplace_back([post_event]() { post_event->RecordEvent(); });
    bool found_nearest_child = false;
    for (size_t j = i + 1; j < kernels.size(); ++j) {
      auto &child = kernels[j];
      MS_EXCEPTION_IF_NULL(child);
      if (AnfAlgo::IsCommunicationOp(child)) {
        continue;
      }
      auto input_size = child->inputs().size() - 1;
      for (size_t k = 0; k < input_size; ++k) {
        auto kernel_index = AnfAlgo::VisitKernelWithReturnType(AnfAlgo::GetInputNode(child, k), 0, true);
        if (kernel_index.first == kernel) {
          found_nearest_child = true;
          break;
        }
      }
      if (found_nearest_child) {
        kernel_pre_run_events[child].emplace_back([post_event]() { post_event->WaitEvent(); });
        break;
      }
    }
    if (!found_nearest_child) {
      kernel_post_run_events[kernel].emplace_back([post_event]() { post_event->WaitEvent(); });
    }
  }
  graph_kernel_events_map_[graph.graph_id()] = std::move(kernel_events);
}

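// Build the input list of an AtomicAddrClean kernel from the outputs and workspaces (marked by the
// kAttrAtomicOutputIndexs / kAttrAtomicWorkspaceIndexs attributes) of the kernel it cleans. When a MemScheduler
// is in use, addresses are obtained or allocated through it instead of being read directly.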
void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs,
                                           const std::shared_ptr<MemScheduler> &mem_scheduler) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(kernel_inputs);
  if (cnode->inputs().size() != kAtomicCleanInputSize) {
    MS_LOG(EXCEPTION) << "The input size of the Atomic Addr Clean node is not equal to 2.";
  }
  MS_EXCEPTION_IF_NULL(cnode->inputs()[1]);
  auto pre_node = (cnode->inputs()[1])->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(pre_node);
  // set clean output address
  if (AnfAlgo::HasNodeAttr(kAttrAtomicOutputIndexs, pre_node)) {
#if defined(__APPLE__)
    auto clean_output_indexes = AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicOutputIndexs);
#else
    auto clean_output_indexes = AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicOutputIndexs);
#endif
    for (auto index : clean_output_indexes) {
      auto device_address = AnfAlgo::GetOutputAddr(pre_node, index);
      kernel::AddressPtr input = std::make_shared<kernel::Address>();
      MS_EXCEPTION_IF_NULL(input);
      if (mem_scheduler != nullptr) {
        GetOrMallocAddress(mem_scheduler, device_address, input);
      } else {
        input->addr = device_address->ptr_;
        MS_EXCEPTION_IF_NULL(input->addr);
      }
      input->size = device_address->size_;
      kernel_inputs->emplace_back(input);
    }
    MS_LOG(DEBUG) << "AtomicAddClean clean output size:" << clean_output_indexes.size();
  }
  // set clean workspace address
  if (AnfAlgo::HasNodeAttr(kAttrAtomicWorkspaceIndexs, pre_node)) {
#if defined(__APPLE__)
    auto clean_workspaces_indexes = AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicWorkspaceIndexs);
#else
    auto clean_workspaces_indexes = AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicWorkspaceIndexs);
#endif
    for (const auto &index : clean_workspaces_indexes) {
      auto device_address = AnfAlgo::GetWorkspaceAddr(pre_node, index);
      kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
      MS_EXCEPTION_IF_NULL(workspace);
      if (mem_scheduler != nullptr) {
        GetOrMallocAddress(mem_scheduler, device_address, workspace);
      } else {
        workspace->addr = device_address->ptr_;
        MS_EXCEPTION_IF_NULL(workspace->addr);
      }
      workspace->size = device_address->size_;
      kernel_inputs->emplace_back(workspace);
    }
  }
}

void KernelRuntime::LaunchKernelEvent(const std::map<AnfNodePtr, std::vector<std::function<void()>>> &kernel_events,
                                      const AnfNodePtr &node) const {
  if (kernel_events.find(node) == kernel_events.end()) {
    return;
  }
  for (auto &event : kernel_events.at(node)) {
    event();
  }
}

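// Launch a kernel with device time events recorded before and after it, and log the elapsed device time.
// Used in PyNative mode when per-op profiling is enabled.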
bool KernelRuntime::LaunchKernelWithPynativeProfiling(kernel::KernelMod *kernel_mod, const std::string &op_name,
                                                      const KernelLaunchInfo &kernel_launch_info, void *stream) {
  MS_EXCEPTION_IF_NULL(kernel_mod);
  MS_EXCEPTION_IF_NULL(stream);
  float cost_time = 0;
  auto start = CreateDeviceTimeEvent();
  auto end = CreateDeviceTimeEvent();
  MS_EXCEPTION_IF_NULL(start);
  MS_EXCEPTION_IF_NULL(end);
  start->set_record_stream(stream);
  end->set_record_stream(stream);
  start->RecordEvent();
  bool ret = kernel_mod->Launch(kernel_launch_info, stream);
  if (!ret) {
    MS_LOG(EXCEPTION) << "Launch kernel failed, kernel name is: " << op_name;
  }
  end->RecordEvent();
  start->SyncEvent();
  end->SyncEvent();
  start->ElapsedTime(&cost_time, end.get());
  MS_LOG(DEBUG) << "Launch kernel:" << op_name << " cost:" << cost_time / kBasicTimeTransferUnit;
  return ret;
}

void KernelRuntime::DebugStreamSync(const CNodePtr &kernel) {
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto enable_sync_run = ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_SYNCHRONIZE);
  if (enable_sync_run) {
    if (!SyncStream()) {
      MS_LOG(EXCEPTION) << "Op " << kernel->fullname_with_scope() << " run failed!";
    }
  }
}

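// Fill kernel_addr with the device pointer of device_address; if the address is not bound to device memory yet,
// ask the MemScheduler to allocate (or look up) the memory for it.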
void KernelRuntime::GetOrMallocAddress(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                       const DeviceAddress *device_address, const kernel::AddressPtr &kernel_addr) {
  if (device_address->ptr_ != nullptr) {
    kernel_addr->addr = device_address->ptr_;
  } else {
    kernel_addr->addr = mem_scheduler->GetOrMalloc(device_address, device_address->size_);
  }
}

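// Resolve the launch addresses of a kernel through the MemScheduler: inputs, outputs and workspaces are fetched
// (or allocated) on demand, and parameters written by update kernels are promoted to high-priority memory.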
void KernelRuntime::AssignKernelAddress(const std::shared_ptr<MemScheduler> &mem_scheduler, const AnfNodePtr &kernel,
                                        KernelLaunchInfo *kernel_launch_info) {
  MS_EXCEPTION_IF_NULL(kernel);
  MS_EXCEPTION_IF_NULL(kernel_launch_info);
  auto cnode = kernel->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (AnfAlgo::GetCNodeName(cnode) == kAtomicAddrCleanOpName) {
    return GenAddrCleanLaunchArgs(cnode, &(kernel_launch_info->inputs_), mem_scheduler);
  }
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  size_t input_num = AnfAlgo::GetInputTensorNum(kernel);
  const auto update_parameter = AnfAlgo::IsUpdateParameterKernel(cnode);
  for (size_t j = 0; j < input_num; ++j) {
    auto real_input = AnfAlgo::GetRealInputIndex(kernel, j);
    auto kernel_with_index = AnfAlgo::GetPrevNodeOutput(kernel, real_input, true);
    auto index = kernel_with_index.second;
    auto &input_node = kernel_with_index.first;
    auto device_address = AnfAlgo::GetOutputAddr(input_node, index, true);
    MS_EXCEPTION_IF_NULL(device_address);
    kernel::AddressPtr input = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, input);
    input->size = device_address->size_;
    kernel_launch_info->inputs_.emplace_back(input);
    if (update_parameter && input_node->isa<Parameter>()) {
      auto param = input_node->cast<ParameterPtr>();
      auto abstract = param->abstract();
      MS_EXCEPTION_IF_NULL(abstract);
      if (abstract->isa<abstract::AbstractRef>()) {
        mem_scheduler->UpdateHighPriorityMem(device_address);
      }
    }
  }
  for (size_t j = 0; j < kernel_mod->GetOutputSizeList().size(); ++j) {
    auto device_address = AnfAlgo::GetOutputAddr(kernel, j, true);
    kernel::AddressPtr output = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, output);
    output->size = device_address->size_;
    kernel_launch_info->outputs_.emplace_back(output);
  }
  for (size_t i = 0; i < kernel_mod->GetWorkspaceSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
    kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, workspace);
    workspace->size = device_address->size_;
    kernel_launch_info->workspaces_.emplace_back(workspace);
  }
}

void KernelRuntime::SyncNodeOutputTensors(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                          const session::KernelGraph &graph, const AnfNodePtr &kernel) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  MS_EXCEPTION_IF_NULL(kernel);
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  for (size_t input_idx = 0; input_idx < kernel_mod->GetInputSizeList().size(); ++input_idx) {
    const auto input_node_index = AnfAlgo::GetPrevNodeOutput(kernel, input_idx, true);
    if (input_node_index.first != nullptr && input_node_index.first->isa<Parameter>()) {
      SyncNodeOutputTensor(mem_scheduler, input_node_index, graph);
    }
  }
  for (size_t output_idx = 0; output_idx < kernel_mod->GetOutputSizeList().size(); ++output_idx) {
    SyncNodeOutputTensor(mem_scheduler, std::make_pair(kernel, output_idx), graph);
  }
}

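// Synchronize the host tensor bound to a node output with its device memory. The device pointer may currently be
// unset (managed by the MemScheduler), so it is temporarily restored for the device-to-host copy and then reset.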
void KernelRuntime::SyncNodeOutputTensor(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                         const KernelWithIndex &node_output_index, const session::KernelGraph &graph) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  if (node_output_index.first == nullptr) {
    return;
  }
  auto device_address = AnfAlgo::GetMutableOutputAddr(node_output_index, true);
  auto tensor = graph.GetNodeOutputTensor(node_output_index);
  if (tensor == nullptr) {
    return;
  }
  if (device_address == nullptr) {
    tensor->data_sync(false);
    tensor->set_device_address(nullptr);
    tensor->set_sync_status(kNeedSyncHostToDevice);
    return;
  }
  if (!SyncStream()) {
    MS_LOG(EXCEPTION) << "SyncStream failed";
  }
  auto origin_ptr = device_address->ptr_;
  if (device_address->ptr_ == nullptr) {
    device_address->ptr_ = mem_scheduler->GetOrMalloc(device_address.get(), device_address->size_);
  }
  tensor->set_device_address(device_address);
  tensor->data_sync(false);
  tensor->set_device_address(nullptr);
  device_address->ptr_ = origin_ptr;
  tensor->set_sync_status(kNeedSyncHostToDevice);
}

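// Register the graph input tensors with the MemScheduler before a run: decide whether each input needs a
// host-to-device copy, perform the copy when device memory is already available, otherwise mark the address as
// needing initialization, and give weights / updated parameters high memory priority.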
void KernelRuntime::InitGraphInputTensors(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                          const session::KernelGraph &graph) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  auto &input_nodes = graph.input_nodes();
  auto &input_tensors = graph.input_tensors();
  if (input_tensors.size() != input_nodes.size()) {
    MS_LOG_EXCEPTION << "Invalid input tensor size:" << input_tensors.size() << " vs node size:" << input_nodes.size();
  }
  mem_scheduler->ClearMemNeedInit();
  for (size_t i = 0; i < input_tensors.size(); ++i) {
    auto input_node = input_nodes[i];
    if (!input_node->isa<Parameter>() || !AnfAlgo::OutputAddrExist(input_node, 0)) {
      continue;
    }
    auto device_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
    auto tensor = input_tensors[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
    const auto tensor_size = LongToSize(tensor->data().nbytes());
    bool need_sync = false;
    if (tensor->NeedSyncHostToDevice()) {
      need_sync = true;
    } else if (tensor_address != device_address) {
      tensor->data_sync(false);
      need_sync = true;
    }
    if (mem_scheduler->HasDeviceMem(device_address.get())) {
      device_address->set_ptr(nullptr);
    }
    if (need_sync) {
      const auto &shape = trans::GetRuntimePaddingShape(input_node, 0);
      if (device_address->GetPtr() != nullptr) {
        device_address->SyncHostToDevice(shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                         tensor->data_c(), tensor->device_info().host_format_);
      } else {
        mem_scheduler->AddMemNeedInit(device_address.get());
      }
    }
    MemPriority priority = kMemPriorityLow;
    const auto &parameter = input_node->cast<ParameterPtr>();
    if (AnfAlgo::IsParameterWeight(parameter) || graph.IsUpdatedParameter(parameter)) {
      priority = kMemPriorityHigh;
    }
    mem_scheduler->Init(device_address.get(), tensor->data_c(), tensor_size, priority);
    tensor->set_sync_status(kNoNeedSync);
  }
}

void KernelRuntime::AssignCommunicationMem(const session::KernelGraph &graph) {
  for (const auto &kernel : graph.execution_order()) {
    if (!AnfAlgo::IsCommunicationOp(kernel)) {
      continue;
    }
    AssignCommunicationInputFromMemoryPool(kernel);
    AssignCommunicationOutputFromMemoryPool(kernel);
  }
}

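// Launch a single kernel. When a MemScheduler is supplied, device memory is prepared by PreCompute and
// AssignKernelAddress and released or swapped by PostCompute; in mock mode the kernel body is not executed and
// only the memory plan is exercised. Without a scheduler, cached addresses from the kernel mod are reused or
// regenerated via GenLaunchArgs.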
bool KernelRuntime::LaunchKernel(const session::KernelGraph &graph, const AnfNodePtr &kernel,
                                 const std::shared_ptr<MemScheduler> &mem_scheduler, bool mock) {
  MS_EXCEPTION_IF_NULL(kernel);
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  KernelLaunchInfo kernel_launch_info;
  auto stream = kernel_mod->GetStream();
  if (stream == nullptr) {
    if (AnfAlgo::IsCommunicationOp(kernel)) {
      stream = communication_stream_;
    } else {
      stream = stream_;
    }
  }
  bool ret = true;
  if (mem_scheduler != nullptr) {
    ret = mem_scheduler->PreCompute(stream);
    if (!ret) {
      return ret;
    }
    AssignKernelAddress(mem_scheduler, kernel, &kernel_launch_info);
    auto cnode = kernel->cast<CNodePtr>();
    if (mock && AnfAlgo::HasNodeAttr(kAttrOffload, cnode) && AnfAlgo::GetNodeAttr<bool>(cnode, kAttrOffload)) {
      for (size_t i = 0; i < kernel_mod->GetOutputSizeList().size(); ++i) {
        auto device_address = AnfAlgo::GetOutputAddr(kernel, i, true);
        mem_scheduler->SetOffload(device_address);
      }
    }
  } else if (!kernel_mod->GetInputsAddr().empty() || !kernel_mod->GetOutputsAddr().empty()) {
    kernel_launch_info.inputs_ = kernel_mod->GetInputsAddr();
    kernel_launch_info.outputs_ = kernel_mod->GetOutputsAddr();
    kernel_launch_info.workspaces_ = kernel_mod->GetWorkSpacesAddr();
  } else {
    GenLaunchArgs(*kernel_mod, kernel, &kernel_launch_info);
  }
  if (!mock) {
    if (pynative_mode_profiling_flag_) {
      ret = LaunchKernelWithPynativeProfiling(kernel_mod, kernel->fullname_with_scope(), kernel_launch_info, stream);
    } else {
      ret = kernel_mod->Launch(kernel_launch_info, stream);
    }
    if (!ret) {
      return ret;
    }
  }
  if (mem_scheduler != nullptr) {
    if (!mock) {
      SyncNodeOutputTensors(mem_scheduler, graph, kernel);
    }
    ret = mem_scheduler->PostCompute(stream);
  }
  return ret;
}

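// Launch all kernels of the graph in execution order. Handles the optional MemScheduler, dynamic-shape kernels
// (infer shape, update args, execute, post execute), nop transpose kernels that only forward addresses, and the
// per-kernel pre/post run events produced by GenKernelEvents.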
bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph, bool mock) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  std::shared_ptr<MemScheduler> mem_scheduler = nullptr;
  if (UseMemScheduler()) {
    mem_scheduler = mem_scheduler_manager_.GetOrCreateMemScheduler(graph.graph_id());
    MS_EXCEPTION_IF_NULL(mem_scheduler);
    mem_scheduler->Reset();
    mem_scheduler->Update();
    InitGraphInputTensors(mem_scheduler, graph);
  }
  const auto &kernels = graph.execution_order();
  std::vector<DynamicKernelPtr> dynamic_kernel_list;
  auto iter = graph_dynamic_kernel_map_.find(graph.graph_id());
  if (iter != graph_dynamic_kernel_map_.end()) {
    dynamic_kernel_list = iter->second;
  }
  if (!dynamic_kernel_list.empty() && dynamic_kernel_list.size() != kernels.size()) {
    MS_LOG(EXCEPTION) << "The size of dynamic kernels " << dynamic_kernel_list.size()
                      << " should be equal to the size of kernels " << kernels.size();
  }
  std::map<AnfNodePtr, std::vector<std::function<void()>>> kernel_pre_run_events;
  std::map<AnfNodePtr, std::vector<std::function<void()>>> kernel_post_run_events;
  auto events_iter = graph_kernel_events_map_.find(graph.graph_id());
  if (events_iter != graph_kernel_events_map_.end()) {
    kernel_pre_run_events = events_iter->second.first;
    kernel_post_run_events = events_iter->second.second;
  }
  for (size_t i = 0; i < kernels.size(); ++i) {
    LaunchKernelEvent(kernel_pre_run_events, kernels[i]);
    if (!dynamic_kernel_list.empty() && dynamic_kernel_list[i] != nullptr &&
        dynamic_kernel_list[i]->is_dynamic_shape()) {
      dynamic_kernel_list[i]->InferShape();
      dynamic_kernel_list[i]->UpdateArgs();
      dynamic_kernel_list[i]->Execute();
      if (!SyncStream()) {
        MS_LOG(ERROR) << "SyncStream failed";
        return false;
      }
      dynamic_kernel_list[i]->PostExecute();
    } else {
      auto &kernel = kernels[i];
      MS_EXCEPTION_IF_NULL(kernel);
      // Skip transpose kernel with "nop_op" attr which is not hidden or removed in PyNative infer scenario. Transpose
      // kernel, which is not supposed to be executed, is generated in TransDataSplit to support specific Transdata.
      // And hard code here should be removed after new Transdata programme is implemented in the foreseeable future.
      if (AnfAlgo::HasNodeAttr(kAttrNopOp, kernel)) {
        for (size_t idx = 0; idx < AnfAlgo::GetOutputTensorNum(kernel); idx += 1) {
          auto real_input = AnfAlgo::GetRealInputIndex(kernel, idx);
          auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, real_input);
          AnfAlgo::SetOutputAddr(device_address, idx, kernel.get());
        }
        continue;
      }
      auto ret = LaunchKernel(graph, kernel, mem_scheduler, mock);
      if (!ret) {
        MS_LOG(ERROR) << "Launch kernel failed.";
        return false;
      }
      KernelLaunchProfiling(kernel->fullname_with_scope());
      DebugStreamSync(kernel);
    }
    LaunchKernelEvent(kernel_post_run_events, kernels[i]);
  }
  if (UseMemScheduler() && !mock) {
    SyncParameter(graph, mem_scheduler);
  }
  return true;
}

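// After a run with the MemScheduler, re-attach device memory to weight / updated parameters so their host tensors
// can be synchronized from device later, and mark parameters that were updated on device.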
void KernelRuntime::SyncParameter(const session::KernelGraph &graph,
                                  const std::shared_ptr<MemScheduler> &mem_scheduler) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  auto &input_nodes = graph.input_nodes();
  auto &input_tensors = graph.input_tensors();
  if (input_tensors.size() != input_nodes.size()) {
    MS_LOG_EXCEPTION << "Invalid input tensor size:" << input_tensors.size() << " vs node size:" << input_nodes.size();
  }
  for (size_t i = 0; i < input_tensors.size(); ++i) {
    auto input_node = input_nodes[i];
    if (!input_node->isa<Parameter>() || !AnfAlgo::OutputAddrExist(input_node, 0)) {
      continue;
    }
    auto device_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
    MS_EXCEPTION_IF_NULL(device_address);
    auto parameter = input_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(parameter);
    if (!AnfAlgo::IsParameterWeight(parameter) && !graph.IsUpdatedParameter(parameter)) {
      continue;
    }
    auto tensor = input_tensors[i];
    MS_EXCEPTION_IF_NULL(tensor);
    if (mem_scheduler->HasDeviceMem(device_address.get())) {
      auto device_ptr = mem_scheduler->GetOrMalloc(device_address.get(), device_address->size(), kMemPriorityHigh);
      device_address->set_ptr(device_ptr);
      tensor->set_device_address(device_address);
      tensor->set_sync_status(kNeedSyncDeviceToHost);
    }
    if (graph.IsUpdatedParameter(parameter)) {
      tensor->SetIsUpdateByDevice();
    }
  }
}

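// Prepare the MemScheduler for a graph before the real run: set the memory handler and total step count, replay
// the graph once in mock mode to record memory events if needed, then optimize the memory plan. Raises an
// exception if the graph cannot run within the memory limit.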
void KernelRuntime::UseMemSchedulerIfNeeded(const session::KernelGraph &graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (!UseMemScheduler()) {
    return;
  }
  auto mem_scheduler = mem_scheduler_manager_.GetOrCreateMemScheduler(graph.graph_id());
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  if (mem_scheduler->optimized()) {
    return;
  }
  mem_scheduler->SetMemHandler(mem_manager_);
  mem_scheduler->SetTotalStep(graph.execution_order().size());
  if (mem_scheduler->need_record_event()) {
    (void)LaunchKernelMod(graph, true);
    mem_scheduler->set_need_record_event(false);
  }
  auto ret = mem_scheduler->Optimize();
  if (!ret) {
    MS_LOG_EXCEPTION << "Can't run graph " << graph.graph_id() << " due to the memory limit.";
  }
}

bool KernelRuntime::LaunchKernels(const session::KernelGraph &graph) {
  UseMemSchedulerIfNeeded(graph);
  if (!LaunchKernelMod(graph)) {
    MS_LOG(ERROR) << "LaunchKernelMod failed!";
    return false;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kGraphMode) {
    if (!SyncStream()) {
      MS_LOG(ERROR) << "SyncStream failed";
      return false;
    }
  }
  return true;
}

void KernelRuntime::ClearGraphRuntimeResource(uint32_t graph_id) {
  MS_LOG(INFO) << "Clear graph:" << graph_id << " runtime resource";
}

#if ((defined ENABLE_CPU) && (!defined _WIN32))
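// Find the first GatherV2/SparseGatherV2 kernel whose parameter is a PS cache hash table, and return the node
// that produces its input indices together with its cache size. This node is used as the reference when checking
// that all embedding-cache lookups share the same indices source and cache size.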
void KernelRuntime::GetFirstPSEmbeddingCache(const session::KernelGraph &graph,
                                             AnfNodePtr *const first_cache_input_index,
                                             size_t *const first_cache_size) {
  for (const auto &kernel : graph.execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    auto param_name = input_param.first->fullname_with_scope();
    if (!ps::ps_cache_instance.IsHashTable(param_name)) {
      continue;
    }
    auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
    while (input_index.first->isa<CNode>() && (AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode =
      AnfAlgo::IsGraphKernel(input_index.first) ? AnfAlgo::GetOutputOfGraphkernel(input_index) : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (!cnode->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "The input index of the embeddingLookup should be a CNode, but got "
                        << cnode->fullname_with_scope();
    }
    auto input_index_node_name = AnfAlgo::GetCNodeName(cnode);
    if (input_index_node_name != kGetNextOpName) {
      bool full_batch = parallel::ParallelContext::GetInstance()->full_batch();
      if ((!full_batch && (input_index_node_name != kUniqueOpName)) ||
          (full_batch && (input_index_node_name != kMinimumOpName))) {
        MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope()
                      << ") cache is from " << cnode->fullname_with_scope();
        MS_LOG(EXCEPTION) << "The embeddingLookup whose input index isn't from dataset doesn't support cache in "
                             "parameter server training mode.";
      }
    }
    *first_cache_input_index = cnode;
    *first_cache_size = size;
    MS_LOG(INFO) << "The input index of the first embeddingLookup cache is from " << cnode->fullname_with_scope()
                 << ", the cache size is " << size;
    return;
  }
}

void KernelRuntime::CheckSparsePSEmbeddingCache(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  auto pre_node = AnfAlgo::GetPrevNodeOutput(node, 1, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    MS_LOG(EXCEPTION) << "The input_indices of kernel[SparseGatherV2] must be unique in parameter server cache mode";
  }
  pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (AnfAlgo::GetCNodeName(pre_node.first) == kCastOpName)) {
    pre_node = AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (AnfAlgo::GetCNodeName(pre_node.first) != kGetNextOpName)) {
    MS_LOG(EXCEPTION) << "The input indices of kernel[Unique] must be produced from dataset directly and the indices "
                         "value can not be changed before delivering to kernel[Unique] in parameter server cache mode.";
  }
}

void KernelRuntime::CheckIfSupportPSEmbeddingCache(const session::KernelGraph &graph) {
  AnfNodePtr first_cache_input_index = nullptr;
  size_t first_cache_size = 0;
  GetFirstPSEmbeddingCache(graph, &first_cache_input_index, &first_cache_size);
  MS_EXCEPTION_IF_NULL(first_cache_input_index);
  for (const auto &kernel : graph.execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    if (!input_param.first->isa<Parameter>()) {
      continue;
    }
    auto param_name = input_param.first->fullname_with_scope();
    if (ps::ps_cache_instance.IsHashTable(param_name) && (kernel_name == kSparseGatherV2OpName)) {
      CheckSparsePSEmbeddingCache(kernel);
    }
    while (input_index.first->isa<CNode>() && (AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode =
      AnfAlgo::IsGraphKernel(input_index.first) ? AnfAlgo::GetOutputOfGraphkernel(input_index) : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (cnode == first_cache_input_index) {
      if (!ps::ps_cache_instance.IsHashTable(param_name)) {
        MS_LOG(ERROR) << "The embeddingLookup(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
        MS_LOG(EXCEPTION) << "All the embeddingLookups whose input indices are from dataset must enable cache at the "
                             "same time when one of them enables cache in parameter server training mode.";
      }
      auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
      if (size != first_cache_size) {
        MS_LOG(ERROR) << "The cache size(" << size << ") of embeddingLookup(" << kernel->fullname_with_scope()
                      << ") is not the same as other embeddingLookup cache size(" << first_cache_size << ").";
        MS_LOG(EXCEPTION) << "The cache sizes of embeddingLookups are not the same in parameter server training mode.";
      }
    } else if (ps::ps_cache_instance.IsHashTable(param_name)) {
      MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope() << ") cache is from "
                    << cnode->fullname_with_scope();
      MS_LOG(EXCEPTION) << "The embeddingLookup whose input index isn't from dataset doesn't support cache in "
                           "parameter server training mode.";
    } else if (cnode->isa<CNode>() && (AnfAlgo::GetCNodeName(cnode) == kGetNextOpName)) {
      MS_LOG(ERROR) << "The EmbeddingLookup kernel(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
      MS_LOG(EXCEPTION) << "All EmbeddingLookup kernels whose input indices are from dataset must enable cache at "
                           "the same time and parameter 'sparse' must be equal to the value of 'enable_sparse' in "
                           "context setting in parameter server training mode.";
    }
  }
}
#endif

}  // namespace device
}  // namespace mindspore