
kernel_runtime.cc 81 kB

  1. /**
  2. * Copyright 2019-2022 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "runtime/device/kernel_runtime.h"
  17. #include <functional>
  18. #include <utility>
  19. #include <vector>
  20. #include <set>
  21. #include "backend/common/optimizer/helper.h"
  22. #include "backend/common/session/anf_runtime_algorithm.h"
  23. #include "include/common/utils/anfalgo.h"
  24. #include "backend/common/session/kernel_graph.h"
  25. #include "runtime/device/ms_device_shape_transfer.h"
  26. #include "runtime/pynative/op_runtime_info.h"
  27. #include "debug/data_dump/dump_json_parser.h"
  28. #include "frontend/operator/ops.h"
  29. #include "ir/value.h"
  30. #include "utils/ms_context.h"
  31. #include "utils/ms_utils.h"
  32. #include "utils/shape_utils.h"
  33. #include "include/common/utils/utils.h"
  34. #include "include/common/utils/parallel_context.h"
  35. #include "include/common/debug/env_config_parser.h"
  36. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  37. #include "ps/ps_cache/ps_cache_manager.h"
  38. #endif
  39. using mindspore::kernel::Address;
  40. using mindspore::kernel::AddressPtr;
  41. namespace mindspore {
  42. namespace device {
  43. constexpr size_t kAtomicCleanInputSize = 2;
  44. namespace {
  45. std::vector<AnfNodePtr> GetGraphInputs(const session::KernelGraph &graph) {
  46. auto graph_inputs = graph.inputs();
  47. std::vector<AnfNodePtr> result(graph_inputs.begin(), graph_inputs.end());
  48. std::set<AnfNodePtr> inputs_set(graph_inputs.begin(), graph_inputs.end());
  49. auto kernels = graph.execution_order();
  50. for (auto &kernel : kernels) {
  51. MS_EXCEPTION_IF_NULL(kernel);
  52. auto input_num = common::AnfAlgo::GetInputTensorNum(kernel);
  53. for (size_t i = 0; i < input_num; ++i) {
  54. auto input_node = kernel->input(i + 1);
  55. auto input_real_node = common::AnfAlgo::VisitKernelWithReturnType(input_node, 0).first;
  56. MS_EXCEPTION_IF_NULL(input_real_node);
  57. if (input_real_node->isa<Parameter>() && inputs_set.find(input_real_node) == inputs_set.end()) {
  58. (void)inputs_set.insert(input_real_node);
  59. (void)result.emplace_back(input_real_node);
  60. }
  61. }
  62. }
  63. return result;
  64. }
  65. } // namespace
  66. constexpr size_t kMinInputSize = 2;
  67. KernelRuntime::~KernelRuntime() {
  68. stream_ = nullptr;
  69. independent_stream_ = nullptr;
  70. communication_stream_ = nullptr;
  71. }
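// Serializes access to shared runtime state through a function-local static mutex; the returned guard must be
// kept alive by the caller for the whole critical section (returning std::lock_guard relies on C++17 copy elision).
// Illustrative use: auto lock = runtime->LockRuntime(); ... touch shared runtime state ...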
  72. std::lock_guard<std::mutex> KernelRuntime::LockRuntime() {
  73. static std::mutex mutex;
  74. return std::lock_guard<std::mutex>(mutex);
  75. }
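// Default hooks: Load succeeds as a no-op and LoadData reports that nothing was loaded; device-specific
// runtimes override these as needed.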
  76. bool KernelRuntime::Load(const session::KernelGraph &, bool) {
  77. MS_LOG(INFO) << "Call default load.";
  78. return true;
  79. }
  80. bool KernelRuntime::LoadData(const session::KernelGraph &) {
  81. MS_LOG(INFO) << "Call default load data.";
  82. return false;
  83. }
  84. bool KernelRuntime::NodeOutputDeviceAddressExist(const AnfNodePtr &kernel, size_t index) {
  85. MS_EXCEPTION_IF_NULL(kernel);
  86. if (AnfAlgo::OutputAddrExist(kernel, index)) {
  87. const auto &address = AnfAlgo::GetOutputAddr(kernel, index);
  88. MS_EXCEPTION_IF_NULL(address);
  89. return address->DeviceType() == GetTargetDeviceAddressType();
  90. }
  91. return false;
  92. }
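// Whole-graph memory assignment. With the memory scheduler enabled, only value-node and communication memory is
// prepared and node addresses are reset; otherwise static and dynamic memory are assigned directly.
// Ref-node outputs are rebound at the end.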
  93. void KernelRuntime::AssignMemory(const session::KernelGraph &graph) {
  94. auto context_ptr = MsContext::GetInstance();
  95. MS_EXCEPTION_IF_NULL(context_ptr);
  96. if (UseMemScheduler()) {
  97. AssignStaticMemoryValueNode(graph);
  98. ResetNodeAddress(graph);
  99. AssignCommunicationMem(graph);
  100. } else {
  101. MS_EXCEPTION_IF_NULL(mem_manager_);
  102. mem_manager_->ResetDynamicMemory();
  103. AssignStaticMemory(graph);
  104. AssignDynamicMemory(graph);
  105. }
  106. UpdateRefNodeOutputMem(graph);
  107. }
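// Gathers, for every input of a communication node, its device address and common-aligned size, accumulating the
// total so the inputs can be allocated contiguously.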
  108. void KernelRuntime::GetCommunicationInputInfo(const AnfNodePtr &node, size_t *total_size,
  109. DeviceAddressPtrList *address_list,
  110. std::vector<size_t> *align_size_list) const {
  111. MS_EXCEPTION_IF_NULL(node);
  112. MS_EXCEPTION_IF_NULL(total_size);
  113. MS_EXCEPTION_IF_NULL(address_list);
  114. MS_EXCEPTION_IF_NULL(align_size_list);
  115. size_t input_num = common::AnfAlgo::GetInputTensorNum(node);
  116. for (size_t i = 0; i < input_num; ++i) {
  117. auto input_node_with_index = common::AnfAlgo::GetPrevNodeOutput(node, i, true);
  118. auto input_node = input_node_with_index.first;
  119. MS_EXCEPTION_IF_NULL(input_node);
  120. DeviceAddressPtr address = nullptr;
  121. if (AnfAlgo::OutputAddrExist(input_node, input_node_with_index.second)) {
  122. address = AnfAlgo::GetMutableOutputAddr(input_node, input_node_with_index.second);
  123. } else {
  124. address = PreAssignCNodeMemory(input_node, input_node_with_index.second);
  125. }
  126. MS_EXCEPTION_IF_NULL(address);
  127. auto align_size = MemoryManager::GetCommonAlignSize(address->size());
  128. *total_size += align_size;
  129. address_list->emplace_back(address);
  130. align_size_list->emplace_back(align_size);
  131. }
  132. }
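// Allocates one contiguous block from the memory pool covering all inputs of a communication op, using the sizes
// gathered above.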
  133. void KernelRuntime::AssignCommunicationInputFromMemoryPool(const AnfNodePtr &node) const {
  134. if (!common::AnfAlgo::IsCommunicationOp(node)) {
  135. return;
  136. }
  137. MS_EXCEPTION_IF_NULL(node);
  138. MS_EXCEPTION_IF_NULL(mem_manager_);
  139. size_t total_size = 0;
  140. DeviceAddressPtrList address_list;
  141. std::vector<size_t> align_size_list;
  142. GetCommunicationInputInfo(node, &total_size, &address_list, &align_size_list);
  143. if (align_size_list.empty()) {
  144. MS_LOG(WARNING) << "No inputs for " << node->fullname_with_scope();
  145. return;
  146. }
  147. if (!mem_manager_->MallocContinuousMemFromMemPool(address_list, total_size, align_size_list)) {
  148. MS_LOG(EXCEPTION) << "Allocate continuous memory failed, total_size:" << total_size;
  149. }
  150. }
  151. void KernelRuntime::GetCommunicationOutputInfo(const AnfNodePtr &node, size_t *total_size,
  152. DeviceAddressPtrList *address_list,
  153. std::vector<size_t> *align_size_list) const {
  154. MS_EXCEPTION_IF_NULL(node);
  155. MS_EXCEPTION_IF_NULL(total_size);
  156. MS_EXCEPTION_IF_NULL(align_size_list);
  157. MS_EXCEPTION_IF_NULL(address_list);
  158. const auto kernel_mod = AnfAlgo::GetKernelMod(node);
  159. MS_EXCEPTION_IF_NULL(kernel_mod);
  160. const auto output_size_list = kernel_mod->GetOutputSizeList();
  161. for (size_t i = 0; i < output_size_list.size(); ++i) {
  162. DeviceAddressPtr address = nullptr;
  163. if (AnfAlgo::OutputAddrExist(node, i)) {
  164. address = AnfAlgo::GetMutableOutputAddr(node, i);
  165. } else {
  166. const std::string output_format = AnfAlgo::GetOutputFormat(node, i);
  167. const auto output_type = AnfAlgo::GetOutputDeviceDataType(node, i);
  168. const auto tensor_size = AnfAlgo::GetOutputTensorMemSize(node, i);
  169. address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type, {node, i});
  170. AnfAlgo::SetOutputAddr(address, i, node.get());
  171. }
  172. MS_EXCEPTION_IF_NULL(address);
  173. auto align_size = MemoryManager::GetCommonAlignSize(address->size());
  174. *total_size += align_size;
  175. align_size_list->emplace_back(align_size);
  176. address_list->emplace_back(address);
  177. }
  178. }
  179. void KernelRuntime::AssignCommunicationOutputFromMemoryPool(const AnfNodePtr &node) const {
  180. if (!common::AnfAlgo::IsCommunicationOp(node)) {
  181. return;
  182. }
  183. MS_EXCEPTION_IF_NULL(node);
  184. MS_EXCEPTION_IF_NULL(mem_manager_);
  185. size_t total_size = 0;
  186. std::vector<size_t> align_size_list;
  187. std::vector<DeviceAddressPtr> address_list;
  188. GetCommunicationOutputInfo(node, &total_size, &address_list, &align_size_list);
  189. if (align_size_list.empty()) {
  190. MS_LOG(WARNING) << "No output for " << node->fullname_with_scope();
  191. return;
  192. }
  193. if (!mem_manager_->MallocContinuousMemFromMemPool(address_list, total_size, align_size_list)) {
  194. MS_LOG(EXCEPTION) << "Allocate continuous memory failed, total_size:" << total_size;
  195. }
  196. }
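// Single-op (PyNative) pre-allocation: creates pointer-less DeviceAddress objects for every kernel output and
// graph input; the real device pointers are attached after kernel build.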
  197. void KernelRuntime::RunOpMallocPre(const session::KernelGraph &graph,
  198. const std::vector<tensor::TensorPtr> &input_tensors) {
  199. const auto &nodes = graph.execution_order();
  200. // Malloc for Node output
  201. for (const auto &node : nodes) {
  202. auto output_num = common::AnfAlgo::GetOutputTensorNum(node);
  203. for (size_t i = 0; i < output_num; ++i) {
  204. MS_EXCEPTION_IF_NULL(node);
  205. auto runtime_info = node->user_data<runtime::OpRuntimeInfo>();
  206. MS_EXCEPTION_IF_NULL(runtime_info);
  207. auto const &output_format = runtime_info->output_format(i);
  208. auto output_type = runtime_info->output_type(i);
  209. auto tensor_size = runtime_info->output_tensor_size(i);
  210. // Create DeviceAddress without ptr.
  211. // Get real device ptr after KernelBuild finish.
  212. auto device_address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type);
  213. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  214. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  215. }
  216. }
  217. // Malloc for graph input
  218. if (input_tensors.size() != graph.inputs().size()) {
  219. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  220. << " should be equal to graph input parameter size " << graph.inputs().size();
  221. }
  222. for (size_t input_index = 0; input_index < graph.inputs().size(); ++input_index) {
  223. auto item = graph.inputs()[input_index];
  224. MS_EXCEPTION_IF_NULL(item);
  225. if (!item->isa<Parameter>()) {
  226. continue;
  227. }
  228. auto output_size = common::AnfAlgo::GetOutputTensorNum(item);
  229. for (size_t index = 0; index < output_size; index++) {
  230. auto current_tensor = input_tensors[input_index];
  231. MS_EXCEPTION_IF_NULL(current_tensor);
  232. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  233. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  234. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  235. continue;
  236. }
  237. auto op_runtime_info = item->user_data<runtime::OpRuntimeInfo>();
  238. MS_EXCEPTION_IF_NULL(op_runtime_info);
  239. TypeId output_type_id = op_runtime_info->output_type(index);
  240. auto output_tensor_size = op_runtime_info->output_tensor_size(index);
  241. auto output_format = op_runtime_info->output_format(index);
  242. auto device_address =
  243. CreateDeviceAddress(nullptr, output_tensor_size, output_format, output_type_id, {item, index});
  244. device_address->set_from_persistent_mem(current_tensor->is_parameter());
  245. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  246. current_tensor->set_device_address(device_address);
  247. current_tensor->set_sync_status(kNeedSyncHostToDevice);
  248. }
  249. }
  250. }
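// Recreates empty (pointer-less) device addresses for every kernel's inputs, outputs and workspaces; used on the
// memory-scheduler path so that real memory can be bound later.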
  251. void KernelRuntime::ResetNodeAddress(const session::KernelGraph &kernel_graph) {
  252. auto kernels = kernel_graph.execution_order();
  253. for (auto &kernel : kernels) {
  254. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  255. MS_EXCEPTION_IF_NULL(kernel_mod);
  256. size_t input_num = common::AnfAlgo::GetInputTensorNum(kernel);
  257. for (size_t j = 0; j < input_num; ++j) {
  258. auto input_index = AnfAlgo::GetRealInputIndex(kernel, j);
  259. KernelWithIndex kernel_with_index = common::AnfAlgo::GetPrevNodeOutput(kernel, input_index, true);
  260. auto index = kernel_with_index.second;
  261. auto &input_node = kernel_with_index.first;
  262. if (NodeOutputDeviceAddressExist(input_node, index)) {
  263. continue;
  264. }
  265. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(input_node, index);
  266. if (output_type_id == kTypeUnknown) {
  267. MS_LOG(WARNING) << "It is not suggested to use an isolated weight parameter as the output of a graph";
  268. continue;
  269. }
  270. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(input_node, index);
  271. auto device_address = CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(input_node, index),
  272. output_type_id, {input_node, index});
  273. AnfAlgo::SetOutputAddr(device_address, index, input_node.get());
  274. }
  275. auto output_sizes = kernel_mod->GetOutputSizeList();
  276. for (size_t i = 0; i < output_sizes.size(); ++i) {
  277. auto output_format = AnfAlgo::GetOutputFormat(kernel, i);
  278. auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
  279. AnfAlgo::SetOutputAddr(CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type), i,
  280. kernel.get());
  281. }
  282. auto workspace_sizes = kernel_mod->GetWorkspaceSizeList();
  283. for (size_t i = 0; i < workspace_sizes.size(); ++i) {
  284. AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(nullptr, workspace_sizes[i], kOpFormat_DEFAULT, kNumberTypeFloat32),
  285. i, kernel.get());
  286. }
  287. }
  288. }
  289. void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors,
  290. const session::KernelGraph &graph, bool is_gradient_out,
  291. const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  292. MS_EXCEPTION_IF_NULL(mem_manager_);
  293. mem_manager_->ResetDynamicMemory();
  294. for (const auto &node : graph.execution_order()) {
  295. AssignCommunicationOutputFromMemoryPool(node);
  296. AssignCommunicationInputFromMemoryPool(node);
  297. }
  298. RunOpAssignInputMemory(input_tensors, graph);
  299. AssignStaticMemoryValueNode(graph);
  300. for (const auto &node : graph.execution_order()) {
  301. RunOpAssignOutputMemory(node, tensor_to_node, is_gradient_out);
  302. RunOpAssignWorkSpaceMemory(node);
  303. }
  304. UpdateRefNodeOutputMem(graph);
  305. }
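// Releases single-op memory by detaching the output addresses of parameters and value nodes and the
// output/workspace addresses of every kernel.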
  306. void KernelRuntime::RunOpClearMemory(const session::KernelGraph &graph) const {
  307. // clear input parameter memory resource
  308. for (const auto &input_node : graph.inputs()) {
  309. MS_EXCEPTION_IF_NULL(input_node);
  310. AnfAlgo::SetOutputAddr(nullptr, 0, input_node.get());
  311. }
  312. // clear input value node memory resource
  313. for (const auto &value_node : graph.graph_value_nodes()) {
  314. MS_EXCEPTION_IF_NULL(value_node);
  315. AnfAlgo::SetOutputAddr(nullptr, 0, value_node.get());
  316. }
  317. for (const auto &cnode : graph.execution_order()) {
  318. MS_EXCEPTION_IF_NULL(cnode);
  319. // clear output memory resource
  320. size_t output_num = common::AnfAlgo::GetOutputTensorNum(cnode);
  321. for (size_t index = 0; index < output_num; ++index) {
  322. AnfAlgo::SetOutputAddr(nullptr, index, cnode.get());
  323. }
  324. // clear workspace memory resource
  325. auto kernel_mod = AnfAlgo::GetKernelMod(cnode);
  326. MS_EXCEPTION_IF_NULL(kernel_mod);
  327. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  328. for (size_t index = 0; index < workspace_lists.size(); ++index) {
  329. AnfAlgo::SetWorkspaceAddr(nullptr, index, cnode.get());
  330. }
  331. }
  332. }
  333. #ifdef ENABLE_DEBUGGER
  334. bool KernelRuntime::DumpDataEnabled() {
  335. // Returns true if e2e dump is enabled.
  336. auto &dump_json_parser = DumpJsonParser::GetInstance();
  337. return dump_json_parser.e2e_dump_enabled();
  338. }
  339. bool KernelRuntime::DumpDataEnabledIteration() {
  340. // Returns true if e2e dump is enabled and current iteration must be dumped.
  341. auto &dump_json_parser = DumpJsonParser::GetInstance();
  342. if (!dump_json_parser.e2e_dump_enabled()) {
  343. return false;
  344. }
  345. auto cur_iter = dump_json_parser.cur_dump_iter();
  346. if (dump_json_parser.IsDumpIter(cur_iter)) {
  347. return true;
  348. }
  349. return false;
  350. }
  351. #endif
  352. void KernelRuntime::AssignStaticMemory(const session::KernelGraph &graph) {
  353. AssignStaticMemoryInput(graph);
  354. AssignStaticMemoryValueNode(graph);
  355. AssignStaticMemoryOutput(graph);
  356. }
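// Binds each input tensor's existing device address to the matching graph parameter when the device types agree,
// otherwise creates a persistent-memory address and allocates it from the memory pool.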
  357. void KernelRuntime::RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors,
  358. const session::KernelGraph &graph) {
  359. MS_EXCEPTION_IF_NULL(mem_manager_);
  360. if (input_tensors.size() != graph.inputs().size()) {
  361. MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size()
  362. << " should be equal to graph input parameter size " << graph.inputs().size();
  363. }
  364. for (size_t input_index = 0; input_index < graph.inputs().size(); ++input_index) {
  365. auto item = graph.inputs()[input_index];
  366. MS_EXCEPTION_IF_NULL(item);
  367. if (!item->isa<Parameter>()) {
  368. continue;
  369. }
  370. auto output_size = common::AnfAlgo::GetOutputTensorNum(item);
  371. for (size_t index = 0; index < output_size; index++) {
  372. auto current_tensor = input_tensors[input_index];
  373. MS_EXCEPTION_IF_NULL(current_tensor);
  374. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(current_tensor->device_address());
  375. // Device address has already been created
  376. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  377. if (output_address->ptr_ == nullptr) {
  378. if (!mem_manager_->MallocMemFromMemPool(output_address, output_address->size())) {
  379. MS_LOG(EXCEPTION) << "Allocate memory failed, size:" << output_address->size();
  380. }
  381. }
  382. AnfAlgo::SetOutputAddr(output_address, index, item.get());
  383. continue;
  384. }
  385. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  386. if (output_type_id == kTypeUnknown) {
  387. output_type_id = common::AnfAlgo::GetOutputInferDataType(item, index);
  388. }
  389. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  390. // Create a new device address
  391. auto device_address =
  392. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  393. MS_EXCEPTION_IF_NULL(device_address);
  394. MS_EXCEPTION_IF_NULL(mem_manager_);
  395. device_address->set_from_persistent_mem(true);
  396. auto ret = mem_manager_->MallocMemFromMemPool(device_address, tensor_size);
  397. if (!ret) {
  398. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  399. }
  400. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  401. }
  402. }
  403. }
  404. void KernelRuntime::RunOpAssignOutputMemory(const AnfNodePtr &kernel,
  405. const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
  406. bool is_gradient_out) {
  407. MS_EXCEPTION_IF_NULL(kernel);
  408. MS_EXCEPTION_IF_NULL(mem_manager_);
  409. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  410. MS_EXCEPTION_IF_NULL(kernel_mod);
  411. auto output_sizes = kernel_mod->GetOutputSizeList();
  412. if (output_sizes.empty()) {
  413. return;
  414. }
  415. // Use device_address Allocated in RunOpMallocPre.
  416. for (auto &iter : tensor_to_node) {
  417. auto device_address = iter.first->device_address();
  418. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(device_address), iter.second.second,
  419. iter.second.first.get());
  420. }
  421. for (size_t i = 0; i < output_sizes.size(); ++i) {
  422. if (AnfAlgo::OutputAddrExist(kernel, i, false)) {
  423. auto address = AnfAlgo::GetMutableOutputAddr(kernel, i, false);
  424. MS_EXCEPTION_IF_NULL(address);
  425. if (address->ptr() == nullptr) {
  426. MS_EXCEPTION_IF_NULL(mem_manager_);
  427. if (!mem_manager_->MallocMemFromMemPool(address, address->size())) {
  428. MS_LOG(EXCEPTION) << "Allocate memory failed, size:" << address->size();
  429. }
  430. }
  431. continue;
  432. }
  433. if (common::AnfAlgo::GetCNodeName(kernel) == kApplyMomentumOpName) {
  434. auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i);
  435. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  436. continue;
  437. }
  438. std::string output_format = AnfAlgo::GetOutputFormat(kernel, i);
  439. auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i);
  440. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {kernel, i});
  441. MS_EXCEPTION_IF_NULL(device_address);
  442. device_address->set_host_shape(trans::GetRuntimePaddingShape(kernel, i));
  443. if (is_gradient_out) {
  444. device_address->set_from_persistent_mem(true);
  445. }
  446. auto ret = mem_manager_->MallocMemFromMemPool(device_address, output_sizes[i]);
  447. if (!ret) {
  448. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << output_sizes[i];
  449. }
  450. AnfAlgo::SetOutputAddr(device_address, i, kernel.get());
  451. }
  452. }
  453. void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) {
  454. MS_EXCEPTION_IF_NULL(kernel);
  455. MS_EXCEPTION_IF_NULL(mem_manager_);
  456. if (kernel->isa<CNode>()) {
  457. auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  458. MS_EXCEPTION_IF_NULL(kernel_mod);
  459. auto workspace_lists = kernel_mod->GetWorkspaceSizeList();
  460. for (size_t i = 0; i < workspace_lists.size(); ++i) {
  461. auto device_address = CreateDeviceAddress(nullptr, workspace_lists[i], "", kTypeUnknown);
  462. MS_EXCEPTION_IF_NULL(device_address);
  463. auto ret = mem_manager_->MallocMemFromMemPool(device_address, workspace_lists[i]);
  464. if (!ret) {
  465. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << workspace_lists[i];
  466. }
  467. AnfAlgo::SetWorkspaceAddr(device_address, i, kernel.get());
  468. }
  469. }
  470. }
  471. void KernelRuntime::RunOpAssignOutputNodeMemory(const ValuePtr &pre_output_value, const session::KernelGraph &graph) {
  472. if (pre_output_value == nullptr) {
  473. return;
  474. }
  475. std::vector<tensor::TensorPtr> pre_output_tensors;
  476. TensorValueToTensor(pre_output_value, &pre_output_tensors);
  477. auto output_nodes = graph.outputs();
  478. if (pre_output_tensors.size() != output_nodes.size()) {
  479. MS_LOG(EXCEPTION) << "The size of pre output tensors [" << pre_output_tensors.size()
  480. << "] is not equal to the size of output nodes of graph [" << output_nodes.size() << "]";
  481. }
  482. // share output address with pre output tensors
  483. for (size_t i = 0; i < output_nodes.size(); ++i) {
  484. auto output_node_with_index = common::AnfAlgo::VisitKernel(output_nodes[i], 0);
  485. auto output_node = output_node_with_index.first;
  486. MS_EXCEPTION_IF_NULL(output_node);
  487. if (!output_node->isa<CNode>()) {
  488. if (output_node->isa<Parameter>()) {
  489. auto param = output_node->cast<ParameterPtr>();
  490. if (param != nullptr && !param->has_default()) {
  491. MS_LOG(EXCEPTION) << "The output parameter should be real parameter!";
  492. }
  493. }
  494. continue;
  495. }
  496. auto real_output_cnode = output_node->cast<CNodePtr>();
  497. MS_EXCEPTION_IF_NULL(real_output_cnode);
  498. MS_EXCEPTION_IF_NULL(pre_output_tensors[i]);
  499. if (pre_output_tensors[i]->device_address() == nullptr) {
  500. MS_LOG(INFO) << "The address of pre output tensor [" << i << "] is a nullptr!";
  501. continue;
  502. }
  503. if (common::AnfAlgo::IsNopNode(real_output_cnode)) {
  504. if (real_output_cnode->inputs().size() < kMinInputSize) {
  505. MS_LOG(EXCEPTION) << "The input size of output node: " << real_output_cnode->DebugString()
  506. << " should large than one!";
  507. }
  508. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  509. output_node_with_index.second, real_output_cnode->input(1).get());
  510. } else {
  511. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(pre_output_tensors[i]->device_address()),
  512. output_node_with_index.second, output_node_with_index.first.get());
  513. }
  514. }
  515. }
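// Assigns static memory to graph input parameters. Parameters that already hold a device pointer or are not used
// by any real kernel in this graph are skipped; PS embedding-cache hash tables reuse the cache's device memory.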
  516. void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph &graph) {
  517. MS_EXCEPTION_IF_NULL(mem_manager_);
  518. auto graph_id = graph.graph_id();
  519. MS_LOG(INFO) << "AssignStaticMemoryInput start for graph " << graph_id;
  520. auto graph_inputs = GetGraphInputs(graph);
  521. auto graph_valid_input = graph.valid_inputs();
  522. graph_inputs.insert(graph_inputs.end(), graph.child_graph_result().begin(), graph.child_graph_result().end());
  523. std::vector<AnfNodePtr> need_alloc_nodes;
  524. auto add_need_alloc_nodes = [&need_alloc_nodes, graph_id, this](const AnfNodePtr &node) {
  525. MS_EXCEPTION_IF_NULL(node);
  526. if (!node->isa<Parameter>()) {
  527. return;
  528. }
  529. if (NodeOutputDeviceAddressExist(node, 0)) {
  530. const auto &address = AnfAlgo::GetOutputAddr(node, 0);
  531. MS_EXCEPTION_IF_NULL(address);
  532. if (address->GetPtr() != nullptr) {
  533. return;
  534. }
  535. }
  536. auto input_param = node->cast<ParameterPtr>();
  537. if (input_param != nullptr && !input_param->IsUsedByRealKernelInGraph(graph_id)) {
  538. return;
  539. }
  540. need_alloc_nodes.push_back(node);
  541. };
  542. for (size_t i = 0; i < graph_inputs.size(); ++i) {
  543. auto input_node = graph_inputs[i];
  544. MS_EXCEPTION_IF_NULL(input_node);
  545. if (i < graph_valid_input.size() && !graph_valid_input[i]) {
  546. continue;
  547. }
  548. if (common::AnfAlgo::CheckPrimitiveType(input_node, prim::kPrimMakeTuple)) {
  549. auto outs = common::AnfAlgo::GetAllOutput(input_node);
  550. for (auto &out : outs) {
  551. MS_EXCEPTION_IF_NULL(out);
  552. add_need_alloc_nodes(out);
  553. }
  554. }
  555. add_need_alloc_nodes(input_node);
  556. }
  557. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  558. bool ps_cache_check = false;
  559. #endif
  560. std::map<AnfNodePtr, AnfNodePtr> shadow_backend_node_map;
  561. GetShadowBackendNodeMap(graph, &shadow_backend_node_map);
  562. for (auto &item : need_alloc_nodes) {
  563. MS_EXCEPTION_IF_NULL(item);
  564. auto output_size = common::AnfAlgo::GetOutputTensorNum(item);
  565. for (size_t index = 0; index < output_size; index++) {
  566. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  567. // If a graph output is a weight that is not linked to any cnode, its data type will be unknown
  568. if (output_type_id == kTypeUnknown) {
  569. MS_LOG(WARNING) << "It is not suggested to use an isolated weight parameter as the output of a graph";
  570. continue;
  571. }
  572. DeviceAddressPtr device_address = GetInternalDeviceAddress(graph, item);
  573. #if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
  574. const std::string &param_name = item->fullname_with_scope();
  575. if (ps::ps_cache_instance.IsHashTable(param_name)) {
  576. MS_LOG(INFO) << "Parameter(" << param_name << ")"
  577. << " enables the embeddingLookup cache in parameter server training mode.";
  578. // PS embeddingLookup cache check.
  579. if (!ps_cache_check) {
  580. CheckIfSupportPSEmbeddingCache(graph);
  581. ps_cache_check = true;
  582. }
  583. const auto &address = ps::ps_cache_instance.QueryHashTableAddr(param_name);
  584. MS_EXCEPTION_IF_NULL(address.addr);
  585. device_address = CreateDeviceAddress(address.addr, address.size, AnfAlgo::GetOutputFormat(item, index),
  586. output_type_id, {item, index});
  587. device_address->set_host_shape(trans::GetRuntimePaddingShape(item, index));
  588. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  589. continue;
  590. }
  591. #endif
  592. GetDeviceAddress(item, shadow_backend_node_map, index, graph.graph_id(), &device_address);
  593. AnfAlgo::SetOutputAddr(device_address, index, item.get());
  594. }
  595. }
  596. MS_LOG(INFO) << "AssignStaticMemoryInput end";
  597. }
  598. void KernelRuntime::GetDeviceAddress(const AnfNodePtr &item,
  599. const std::map<AnfNodePtr, AnfNodePtr> shadow_backend_node_map, size_t index,
  600. uint32_t graph_id, DeviceAddressPtr *device_address) {
  601. AnfNodePtr shadow_node = nullptr;
  602. auto iter = shadow_backend_node_map.find(item);
  603. if (iter != shadow_backend_node_map.end()) {
  604. shadow_node = iter->second;
  605. }
  606. if (*device_address == nullptr && shadow_node != nullptr) {
  607. auto conj_device_address = AnfAlgo::GetMutableOutputAddr(shadow_node, index);
  608. if (conj_device_address != nullptr && conj_device_address->DeviceType() == DeviceAddressType::kAscend) {
  609. *device_address = conj_device_address;
  610. }
  611. } else if (*device_address == nullptr) {
  612. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  613. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
  614. *device_address =
  615. CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {item, index});
  616. }
  617. if (*device_address != nullptr && (*device_address)->GetPtr() == nullptr) {
  618. auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);
  619. (*device_address)->set_host_shape(trans::GetRuntimePaddingShape(item, index));
  620. MS_LOG(INFO) << "Assign Static Memory for Input node, size:" << tensor_size
  621. << " node:" << item->fullname_with_scope() << " index: " << index;
  622. if (mem_manager_->MallocMem(kStaticMem, tensor_size, *device_address, graph_id) == nullptr) {
  623. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
  624. }
  625. }
  626. }
  627. void KernelRuntime::AssignStaticMemoryOutput(const session::KernelGraph &graph) {
  628. MS_LOG(INFO) << "AssignStaticMemoryOutput start for graph " << graph.graph_id();
  629. auto nodes = common::AnfAlgo::GetAllOutput(graph.output(), {prim::kPrimTupleGetItem});
  630. std::vector<session::KernelWithIndex> non_communication_op;
  631. // Assign communication op memory first.
  632. for (const auto &node : nodes) {
  633. // Assign an output address to nop nodes whose "skip_nop_op_addr" attribute is false.
  634. auto is_skip = !common::AnfAlgo::IsNopNode(node) || common::AnfAlgo::IsNeedSkipNopOpAddr(node);
  635. auto kernel_with_index = common::AnfAlgo::VisitKernelWithReturnType(node, 0, is_skip);
  636. MS_EXCEPTION_IF_NULL(kernel_with_index.first);
  637. if (!kernel_with_index.first->isa<CNode>() || !AnfUtils::IsRealKernel(kernel_with_index.first)) {
  638. continue;
  639. }
  640. if (common::AnfAlgo::IsCommunicationOp(kernel_with_index.first)) {
  641. AssignCommunicationNodeMem(kStaticMem, kernel_with_index.first);
  642. } else {
  643. non_communication_op.emplace_back(kernel_with_index);
  644. }
  645. }
  646. for (const auto &item_with_index : non_communication_op) {
  647. MS_EXCEPTION_IF_NULL(item_with_index.first);
  648. MS_LOG(DEBUG) << "AssignNodeOutputMem for " << item_with_index.first->fullname_with_scope();
  649. AssignNodeOutputMem(kStaticMem, item_with_index.first, SizeToInt(item_with_index.second));
  650. }
  651. MS_LOG(INFO) << "AssignStaticMemoryOutput end";
  652. }
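// For outputs recorded in the graph's ref-output map, the kernel's output address is replaced with the origin
// node's address so both refer to the same device memory (the host shape is propagated when available).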
  653. void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph &graph) {
  654. auto &kernels = graph.execution_order();
  655. for (auto &kernel : kernels) {
  656. MS_EXCEPTION_IF_NULL(kernel);
  657. auto output_num = common::AnfAlgo::GetOutputTensorNum(kernel);
  658. if (output_num == 0) {
  659. MS_LOG(DEBUG) << "This kernel has no output size.";
  660. continue;
  661. }
  662. for (size_t i = 0; i < output_num; ++i) {
  663. session::AnfWithOutIndex out_pair(kernel, i);
  664. if (graph.IsInRefOutputMap(out_pair)) {
  665. auto origin_pair = graph.GetRefCorrespondOutput(out_pair);
  666. MS_EXCEPTION_IF_NULL(origin_pair.first);
  667. auto origin_node_output_addr = AnfAlgo::GetMutableOutputAddr(origin_pair.first, origin_pair.second);
  668. MS_EXCEPTION_IF_NULL(origin_node_output_addr);
  669. auto cur_node_output_addr = AnfAlgo::GetMutableOutputAddr(kernel, i);
  670. if (origin_node_output_addr.get() != cur_node_output_addr.get()) {
  671. MS_LOG(DEBUG) << "REF address is not same, ref node output need address update";
  672. MS_LOG(DEBUG) << "REF origin op is " << origin_pair.first->DebugString() << ", output index is "
  673. << origin_pair.second << ", cur op is " << kernel->DebugString() << ", out index is " << i;
  674. if (!cur_node_output_addr->host_shape().empty()) {
  675. origin_node_output_addr->set_host_shape(cur_node_output_addr->host_shape());
  676. }
  677. AnfAlgo::SetOutputAddr(origin_node_output_addr, i, kernel.get());
  678. }
  679. }
  680. }
  681. }
  682. }
  683. void KernelRuntime::AssignCommunicationNodeMem(MemType type, const AnfNodePtr &node) {
  684. AssignCommunicationNodeInputMem(type, node);
  685. AssignCommunicationNodeOutputMem(type, node);
  686. AssignWorkSpaceMem(type, node);
  687. }
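// Outputs of a communication node share one contiguous allocation and each output address is an offset into it;
// e.g. with aligned sizes [a0, a1, a2] the buffer holds the outputs back to back and total_size = a0 + a1 + a2
// (sizes are aligned only when HCCL is enabled).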
  688. void KernelRuntime::AssignCommunicationNodeOutputMem(MemType type, const AnfNodePtr &node) {
  689. MS_EXCEPTION_IF_NULL(node);
  690. MS_EXCEPTION_IF_NULL(mem_manager_);
  691. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  692. MS_EXCEPTION_IF_NULL(kernel_mod);
  693. auto output_sizes = kernel_mod->GetOutputSizeList();
  694. if (output_sizes.empty()) {
  695. MS_LOG(INFO) << "This kernel[" << node->DebugString() << "] has no output size.";
  696. return;
  697. }
  698. auto context_ptr = MsContext::GetInstance();
  699. MS_EXCEPTION_IF_NULL(context_ptr);
  700. size_t total_size = 0;
  701. size_t output_index = 0;
  702. std::vector<size_t> align_size_list;
  703. for (uint64_t mem_size : output_sizes) {
  704. if (AnfAlgo::OutputAddrExist(node, output_index++)) {
  705. MS_LOG(INFO) << "Communication op " << node->fullname_with_scope() << " has output device address";
  706. return;
  707. }
  708. if (context_ptr->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
  709. mem_size = MemoryManager::GetCommonAlignSize(mem_size);
  710. }
  711. total_size += mem_size;
  712. align_size_list.emplace_back(mem_size);
  713. }
  714. if (align_size_list.empty()) {
  715. return;
  716. }
  717. if (type == kSomasReuseDynamicMem) {
  718. bool not_reuse = KernelMemNotReuse(node);
  719. if (not_reuse) {
  720. type = kDynamicMem;
  721. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  722. }
  723. }
  724. uint8_t *output_ptr = nullptr;
  725. for (size_t j = 0; j < align_size_list.size(); ++j) {
  726. std::string output_format = AnfAlgo::GetOutputFormat(node, j);
  727. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, j);
  728. auto address = CreateDeviceAddress(nullptr, output_sizes[j], output_format, output_type, {node, j});
  729. MS_EXCEPTION_IF_NULL(address);
  730. if (output_ptr == nullptr) {
  731. output_ptr = mem_manager_->MallocOutputMem(node, 0, type, total_size, address, true);
  732. MS_EXCEPTION_IF_NULL(output_ptr);
  733. } else {
  734. address->set_ptr(output_ptr);
  735. }
  736. address->set_host_shape(trans::GetRuntimePaddingShape(node, j));
  737. AnfAlgo::SetOutputAddr(address, j, node.get());
  738. output_ptr += align_size_list[j];
  739. }
  740. }
  741. bool KernelRuntime::KernelMemNotReuse(const AnfNodePtr &node) {
  742. MS_EXCEPTION_IF_NULL(node);
  743. return false;
  744. }
  745. DeviceAddressPtr KernelRuntime::PreAssignCNodeMemory(const AnfNodePtr &anf_node, size_t index) const {
  746. MS_EXCEPTION_IF_NULL(anf_node);
  747. if (common::AnfAlgo::IsNopNode(anf_node)) {
  748. auto input_node_with_index = common::AnfAlgo::GetPrevNodeOutput(anf_node, index);
  749. return PreAssignCNodeMemory(input_node_with_index.first, input_node_with_index.second);
  750. }
  751. auto output_size = AnfAlgo::GetOutputTensorMemSize(anf_node, index);
  752. std::string output_format = AnfAlgo::GetOutputFormat(anf_node, index);
  753. auto output_type = AnfAlgo::GetOutputDeviceDataType(anf_node, index);
  754. auto address = CreateDeviceAddress(nullptr, output_size, output_format, output_type, {anf_node, index});
  755. AnfAlgo::SetOutputAddr(address, index, anf_node.get());
  756. return address;
  757. }
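// Inputs of a communication node are likewise made contiguous: each input's output address is pre-created via
// PreAssignCNodeMemory, then all of them are carved out of a single allocation at consecutive offsets.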
  758. void KernelRuntime::AssignCommunicationNodeInputMem(MemType type, const AnfNodePtr &node) {
  759. auto context_ptr = MsContext::GetInstance();
  760. MS_EXCEPTION_IF_NULL(context_ptr);
  761. MS_EXCEPTION_IF_NULL(node);
  762. MS_EXCEPTION_IF_NULL(mem_manager_);
  763. size_t total_size = 0;
  764. std::vector<std::pair<DeviceAddressPtr, size_t>> addr_size;
  765. size_t input_num = common::AnfAlgo::GetInputTensorNum(node);
  766. for (size_t i = 0; i < input_num; ++i) {
  767. auto input_node_with_index = common::AnfAlgo::GetPrevNodeOutput(node, i, true);
  768. auto input_node = input_node_with_index.first;
  769. MS_EXCEPTION_IF_NULL(input_node);
  770. if (AnfAlgo::OutputAddrExist(input_node, input_node_with_index.second)) {
  771. MS_LOG(INFO) << "Communication op " << input_node->fullname_with_scope() << " has input device address";
  772. return;
  773. }
  774. DeviceAddressPtr address = nullptr;
  775. address = PreAssignCNodeMemory(input_node, input_node_with_index.second);
  776. MS_EXCEPTION_IF_NULL(address);
  777. auto mem_size = MemoryManager::GetCommonAlignSize(address->size());
  778. total_size += mem_size;
  779. addr_size.emplace_back(address, mem_size);
  780. }
  781. if (addr_size.empty()) {
  782. return;
  783. }
  784. if (type == kSomasReuseDynamicMem) {
  785. bool not_reuse = KernelMemNotReuse(node);
  786. if (not_reuse) {
  787. type = kDynamicMem;
  788. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s input.";
  789. }
  790. }
  791. auto cnode = node->cast<CNodePtr>();
  792. MS_EXCEPTION_IF_NULL(cnode);
  793. if (cnode->inputs().size() < kMinInputSize) {
  794. // A communication node's inputs should contain the node itself and at least one input
  795. MS_LOG(ERROR) << "No inputs for " << cnode->fullname_with_scope();
  796. return;
  797. }
  798. auto first_input_node = cnode->input(1);
  799. auto prenode_index = common::AnfAlgo::VisitKernelWithReturnType(first_input_node, 0, true);
  800. uint8_t *input_ptr = mem_manager_->MallocOutputMem(prenode_index.first, prenode_index.second, type, total_size,
  801. addr_size[0].first, true);
  802. for (const auto &iter : addr_size) {
  803. MS_EXCEPTION_IF_NULL(iter.first);
  804. iter.first->set_ptr(input_ptr);
  805. input_ptr += iter.second;
  806. }
  807. }
  808. void KernelRuntime::AssignNodeOutputMem(MemType type, const AnfNodePtr &node, int index) {
  809. MS_EXCEPTION_IF_NULL(node);
  810. MS_EXCEPTION_IF_NULL(mem_manager_);
  811. if (type == kSomasReuseDynamicMem) {
  812. bool not_reuse = KernelMemNotReuse(node);
  813. if (not_reuse) {
  814. type = kDynamicMem;
  815. MS_LOG(INFO) << "Disable Memory Reuse for " << node->fullname_with_scope() << "'s output.";
  816. }
  817. }
  818. auto kernel_mod = AnfAlgo::GetKernelMod(node);
  819. MS_EXCEPTION_IF_NULL(kernel_mod);
  820. auto output_sizes = kernel_mod->GetOutputSizeList();
  821. if (output_sizes.empty()) {
  822. return;
  823. }
  824. for (size_t i = 0; i < output_sizes.size(); ++i) {
  825. if ((kGetAllOuts != index) && (SizeToInt(i) != index)) {
  826. continue;
  827. }
  828. if (NodeOutputDeviceAddressExist(node, i)) {
  829. MS_LOG(DEBUG) << "Already malloc index:" << i;
  830. continue;
  831. }
  832. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope() << " output memory size:" << output_sizes[i];
  833. if (type == kStaticMem) {
  834. MS_LOG(INFO) << "Assign Static Memory for Output node, size:" << output_sizes[i]
  835. << " node:" << node->fullname_with_scope();
  836. }
  837. std::string output_format = AnfAlgo::GetOutputFormat(node, i);
  838. auto output_type = AnfAlgo::GetOutputDeviceDataType(node, i);
  839. auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type, {node, i});
  840. MS_EXCEPTION_IF_NULL(device_address);
  841. uint8_t *ptr = mem_manager_->MallocOutputMem(node, i, type, output_sizes[i], device_address, false);
  842. MS_EXCEPTION_IF_NULL(ptr);
  843. device_address->set_host_shape(trans::GetRuntimePaddingShape(node, i));
  844. AnfAlgo::SetOutputAddr(device_address, i, node.get());
  845. }
  846. }
  847. DeviceAddressPtr KernelRuntime::AssignExtraStaticMem(const TensorPtr &tensor, const AnfNodePtr &node, size_t index) {
  848. MS_EXCEPTION_IF_NULL(node);
  849. MS_EXCEPTION_IF_NULL(mem_manager_);
  850. auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  851. MS_LOG(DEBUG) << "Assign Node:" << node->fullname_with_scope()
  852. << "Assign Static Memory for Output node, size:" << tensor_address->size();
  853. auto device_address = CreateDeviceAddress(nullptr, tensor_address->size(), tensor_address->format(),
  854. tensor_address->type_id(), {node, index});
  855. MS_EXCEPTION_IF_NULL(device_address);
  856. uint8_t *ptr = mem_manager_->MallocOutputMem(node, index, kStaticMem, tensor_address->size(), device_address, false);
  857. MS_EXCEPTION_IF_NULL(ptr);
  858. return device_address;
  859. }
  860. void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const ValuePtr &node_value,
  861. size_t output_idx) {
  862. MS_EXCEPTION_IF_NULL(value_node);
  863. MS_EXCEPTION_IF_NULL(node_value);
  864. MS_EXCEPTION_IF_NULL(mem_manager_);
  865. auto ms_context = MsContext::GetInstance();
  866. MS_EXCEPTION_IF_NULL(ms_context);
  867. std::vector<tensor::TensorPtr> tensors;
  868. TensorValueToTensor(node_value, &tensors);
  869. // Graph id should be passed to record static memory if profiling is enabled.
  870. auto kernel_info = dynamic_cast<device::KernelInfo *>(value_node->kernel_info());
  871. MS_EXCEPTION_IF_NULL(kernel_info);
  872. uint32_t graph_id = kernel_info->graph_id();
  873. for (const auto &tensor : tensors) {
  874. if (tensor == nullptr) {
  875. MS_LOG(WARNING) << "Tensor is null";
  876. return;
  877. }
  878. auto output_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  879. if (output_address != nullptr && output_address->DeviceType() == GetTargetDeviceAddressType()) {
  880. AnfAlgo::SetOutputAddr(std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address()), output_idx++,
  881. value_node.get());
  882. continue;
  883. }
  884. size_t tensor_size = LongToSize(tensor->data().nbytes());
  885. auto node_size = AnfAlgo::GetOutputTensorMemSize(value_node, output_idx);
  886. TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(value_node, output_idx);
  887. if (output_type_id == kTypeUnknown) {
  888. output_type_id = common::AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  889. }
  890. auto output_format = AnfAlgo::GetOutputFormat(value_node, output_idx);
  891. DeviceAddressPtr address =
  892. CreateDeviceAddress(nullptr, node_size, output_format, output_type_id, {value_node, output_idx});
  893. address->set_host_shape(trans::GetRuntimePaddingShape(value_node, output_idx));
  894. address->set_from_persistent_mem(true);
  895. MS_EXCEPTION_IF_NULL(address);
  896. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
  897. !mem_manager_->MallocMemFromMemPool(address, node_size)) {
  898. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << node_size;
  899. } else {
  900. MS_LOG(INFO) << "Assign Static Memory for Value node, size:" << node_size
  901. << " node:" << value_node->fullname_with_scope();
  902. if (mem_manager_->MallocMem(kStaticMem, node_size, address, graph_id) == nullptr) {
  903. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << node_size;
  904. }
  905. }
  906. AnfAlgo::SetOutputAddr(address, output_idx, value_node.get());
  907. if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(),
  908. tensor->data_c(), tensor->device_info().host_format_)) {
  909. MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail!" << value_node->DebugString()
  910. << "node format is" << AnfAlgo::GetOutputFormat(value_node, output_idx)
  911. << "node dtype is "
  912. << common::AnfAlgo::GetOutputInferDataType(value_node, output_idx);
  913. }
  914. }
  915. }
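// Assigns static memory to value nodes, iterating them by full name for a deterministic order; tensor and tuple
// values go through AssignValueNodeTensor, while string values get a dedicated uint8 buffer.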
  916. void KernelRuntime::AssignStaticMemoryValueNode(const session::KernelGraph &graph) {
  917. MS_EXCEPTION_IF_NULL(mem_manager_);
  918. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode start for graph " << graph.graph_id();
  919. auto ms_context = MsContext::GetInstance();
  920. MS_EXCEPTION_IF_NULL(ms_context);
  921. // order the value nodes
  922. std::map<std::string, ValueNodePtr> value_nodes_map;
  923. for (auto &node : graph.graph_value_nodes()) {
  924. MS_EXCEPTION_IF_NULL(node);
  925. value_nodes_map[node->fullname_with_scope()] = node;
  926. }
  927. for (auto &item : value_nodes_map) {
  928. auto value_node = item.second;
  929. MS_EXCEPTION_IF_NULL(value_node);
  930. if (NodeOutputDeviceAddressExist(value_node, 0)) {
  931. MS_LOG(DEBUG) << "value_node[" << value_node->DebugString() << "] address already exist";
  932. auto device_address = AnfAlgo::GetMutableOutputAddr(value_node, 0);
  933. if (device_address->ptr_ == nullptr) {
  934. if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
  935. if (!mem_manager_->MallocMemFromMemPool(device_address, device_address->size_)) {
  936. MS_LOG(EXCEPTION) << "MallocMemFromMemPool failed";
  937. }
  938. } else {
  939. if (mem_manager_->MallocMem(kStaticMem, device_address->size_, device_address, graph.graph_id()) == nullptr) {
  940. MS_LOG(EXCEPTION) << "MallocStaticMem failed";
  941. }
  942. }
  943. }
  944. continue;
  945. }
  946. auto &node_value = value_node->value();
  947. MS_EXCEPTION_IF_NULL(node_value);
  948. MS_LOG(DEBUG) << "Malloc memory for " << value_node->fullname_with_scope();
  949. if (node_value->isa<Tensor>() || node_value->isa<ValueTuple>()) {
  950. AssignValueNodeTensor(value_node, node_value, 0);
  951. } else if (node_value->isa<StringImm>()) {
  952. const bool use_mem_from_memory_pool = ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) ||
  953. ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
  954. auto address = CreateDeviceAddressForStringValue(node_value, use_mem_from_memory_pool, graph.graph_id());
  955. MS_EXCEPTION_IF_NULL(address);
  956. address->set_from_persistent_mem(true);
  957. AnfAlgo::SetOutputAddr(address, 0, value_node.get());
  958. }
  959. }
  960. MS_LOG(DEBUG) << "AssignStaticMemoryValueNode end";
  961. }
  962. DeviceAddressPtr KernelRuntime::CreateDeviceAddressForStringValue(const ValuePtr &value, bool use_mem_pool,
  963. uint32_t graph_id) {
  964. auto value_string = GetValue<std::string>(value);
  965. size_t tensor_size = value_string.size();
  966. DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeUInt8);
  967. MS_EXCEPTION_IF_NULL(address);
  968. address->set_from_persistent_mem(true);
  969. auto ms_context = MsContext::GetInstance();
  970. MS_EXCEPTION_IF_NULL(ms_context);
  971. if (use_mem_pool && !mem_manager_->MallocMemFromMemPool(address, tensor_size)) {
  972. MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;
  973. } else {
  974. MS_LOG(INFO) << "Assign Static Memory for string Value node, size:" << tensor_size;
  975. if (mem_manager_->MallocMem(kStaticMem, tensor_size, address, graph_id) == nullptr) {
  976. MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
  977. }
  978. }
  979. ShapeVector shape = {1, SizeToLong(tensor_size)};
  980. if (!address->SyncHostToDevice(shape, tensor_size, kNumberTypeUInt8, value_string.data(), "DefaultFormat")) {
  981. MS_LOG(EXCEPTION) << "kValueNode SyncHostToDevice fail!";
  982. }
  983. return address;
  984. }
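// Assigns dynamic memory for the whole graph: SOMAS-based reuse is used when memory reuse is enabled (full e2e
// dump disables it), and communication nodes are assigned before compute nodes.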
  985. void KernelRuntime::AssignDynamicMemory(const session::KernelGraph &graph) {
  986. MS_EXCEPTION_IF_NULL(mem_manager_);
  987. auto context_ptr = MsContext::GetInstance();
  988. MS_EXCEPTION_IF_NULL(context_ptr);
  989. bool is_enable_mem_reuse = EnvConfigParser::GetInstance().GetSysMemreuse();
  990. auto mem_type = kDynamicMem;
  991. auto &dump_json_parser = DumpJsonParser::GetInstance();
  992. if (dump_json_parser.e2e_dump_enabled() && dump_json_parser.dump_mode() == 0) {
  993. mindspore::EnvConfigParser::GetInstance().SetSysMemreuse(false);
  994. is_enable_mem_reuse = false;
  995. MS_LOG(INFO) << "Disable memory reuse when e2e dump is enabled and dump mode is set to dump all kernels";
  996. }
  997. if (is_enable_mem_reuse) {
  998. MS_LOG(INFO) << "Memory reuse is enabled.";
  999. mem_manager_->MallocSomasDynamicMem(graph);
  1000. mem_type = kSomasReuseDynamicMem;
  1001. } else {
  1002. MS_LOG(INFO) << "Memory Reuse is disable...";
  1003. }
  1004. auto &execution_nodes = graph.execution_order();
  1005. std::vector<CNodePtr> compute_nodes;
  1006. // communication nodes first
  1007. for (auto &node : execution_nodes) {
  1008. if (common::AnfAlgo::IsCommunicationOp(node)) {
  1009. // skip if the memory is already allocated
  1010. AssignCommunicationNodeMem(mem_type, node);
  1011. } else {
  1012. compute_nodes.emplace_back(node);
  1013. }
  1014. }
  1015. // then compute nodes
  1016. for (auto &node : compute_nodes) {
  1017. AssignNodeOutputMem(mem_type, node, kGetAllOuts);
  1018. AssignWorkSpaceMem(mem_type, node);
  1019. }
  1020. }
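// Allocate workspace memory of the given type for every workspace slot reported by the kernel mod,
// returning early if the node already has a workspace device address.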
void KernelRuntime::AssignWorkSpaceMem(MemType type, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(mem_manager_);
  auto kernel_mod = AnfAlgo::GetKernelMod(node);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  size_t index = 0;
  for (auto &size : kernel_mod->GetWorkspaceSizeList()) {
    if (AnfAlgo::WorkspaceAddrExist(node, index)) {
      MS_LOG(INFO) << "Op " << node->fullname_with_scope() << " has workspace device address";
      return;
    }
    auto ptr = mem_manager_->MallocWorkSpaceMem(node, index, type, size);
    AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get());
    index++;
  }
}
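// Collect the input, output and workspace device addresses of a kernel into a KernelLaunchInfo;
// atomic-clean nodes are delegated to GenAddrCleanLaunchArgs instead.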
void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel,
                                  KernelLaunchInfo *kernel_launch_info) {
  MS_EXCEPTION_IF_NULL(kernel);
  MS_EXCEPTION_IF_NULL(kernel_launch_info);
  auto cnode = kernel->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (common::AnfAlgo::GetCNodeName(cnode) == kAtomicAddrCleanOpName) {
    return GenAddrCleanLaunchArgs(cnode, &(kernel_launch_info->inputs_));
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto skip_nop_node = (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode);
  size_t input_num = common::AnfAlgo::GetInputTensorNum(kernel);
  for (size_t i = 0; i < input_num; ++i) {
    if (common::AnfAlgo::IsNoneInput(kernel, i)) {
      continue;
    }
    auto real_input = AnfAlgo::GetRealInputIndex(kernel, i);
    auto device_address = AnfAlgo::GetPrevNodeOutputAddr(kernel, real_input, skip_nop_node);
    MS_EXCEPTION_IF_NULL(device_address);
    kernel::AddressPtr input = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(input);
    input->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(input->addr);
    input->size = device_address->size_;
    kernel_launch_info->inputs_.emplace_back(input);
  }
  for (size_t i = 0; i < kernel_mod.GetOutputSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetOutputAddr(kernel, i, skip_nop_node);
    kernel::AddressPtr output = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(output);
    output->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(output->addr);
    output->size = device_address->size_;
    kernel_launch_info->outputs_.emplace_back(output);
  }
  for (size_t i = 0; i < kernel_mod.GetWorkspaceSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
    kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
    MS_EXCEPTION_IF_NULL(workspace);
    workspace->addr = device_address->ptr_;
    MS_EXCEPTION_IF_NULL(workspace->addr);
    workspace->size = device_address->size_;
    kernel_launch_info->workspaces_.emplace_back(workspace);
  }
}
bool KernelRuntime::UseMemScheduler() {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (!context_ptr->get_param<bool>(MS_CTX_ENABLE_MEM_SCHEDULER)) {
    return false;
  }
  // Do not use the MemScheduler when running a single op.
  return (!context_ptr->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER) &&
          (context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode));
}
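// Build per-kernel pre/post run events for communication ops so the compute stream and the communication
// stream stay synchronized: a pre event is recorded on the compute stream and waited on by the
// communication stream, and a post event recorded on the communication stream is waited on by the nearest
// downstream compute kernel, or in the communication kernel's own post-run events when no child is found.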
void KernelRuntime::GenKernelEvents(const session::KernelGraph &graph) {
  auto &kernels = graph.execution_order();
  if (kernels.empty() || graph_kernel_events_map_.find(graph.graph_id()) != graph_kernel_events_map_.end()) {
    return;
  }
  auto kernel_events = std::pair<std::map<AnfNodePtr, std::vector<std::function<void()>>>,
                                 std::map<AnfNodePtr, std::vector<std::function<void()>>>>();
  auto &kernel_pre_run_events = kernel_events.first;
  auto &kernel_post_run_events = kernel_events.second;
  for (size_t i = 0; i < kernels.size(); ++i) {
    auto &kernel = kernels[i];
    if (!common::AnfAlgo::IsCommunicationOp(kernel)) {
      continue;
    }
    auto pre_event = CreateDeviceEvent();
    auto post_event = CreateDeviceEvent();
    MS_EXCEPTION_IF_NULL(pre_event);
    MS_EXCEPTION_IF_NULL(post_event);
    pre_event->set_wait_stream(communication_stream_);
    pre_event->set_record_stream(stream_);
    post_event->set_wait_stream(stream_);
    post_event->set_record_stream(communication_stream_);
    kernel_pre_run_events[kernel].emplace_back([pre_event]() {
      pre_event->RecordEvent();
      pre_event->WaitEvent();
    });
    kernel_post_run_events[kernel].emplace_back([post_event]() { post_event->RecordEvent(); });
    bool found_nearest_child = false;
    for (size_t j = i + 1; j < kernels.size(); ++j) {
      auto &child = kernels[j];
      MS_EXCEPTION_IF_NULL(child);
      if (common::AnfAlgo::IsCommunicationOp(child)) {
        continue;
      }
      auto input_size = child->inputs().size() - 1;
      for (size_t k = 0; k < input_size; ++k) {
        auto kernel_index =
          common::AnfAlgo::VisitKernelWithReturnType(common::AnfAlgo::GetInputNode(child, k), 0, true);
        if (kernel_index.first == kernel) {
          found_nearest_child = true;
          break;
        }
      }
      if (found_nearest_child) {
        kernel_pre_run_events[child].emplace_back([post_event]() { post_event->WaitEvent(); });
        break;
      }
    }
    if (!found_nearest_child) {
      kernel_post_run_events[kernel].emplace_back([post_event]() { post_event->WaitEvent(); });
    }
  }
  graph_kernel_events_map_[graph.graph_id()] = std::move(kernel_events);
}
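// Build the input list of an AtomicAddrClean kernel from the output and workspace addresses of the node
// it cleans; when a MemScheduler is supplied, addresses are obtained (or allocated) through it.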
void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs,
                                           const std::shared_ptr<MemScheduler> &mem_scheduler) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(kernel_inputs);
  if (cnode->inputs().size() != kAtomicCleanInputSize) {
    MS_LOG(EXCEPTION) << "Atomic addr clean node input size is not equal to 2.";
  }
  MS_EXCEPTION_IF_NULL(cnode->inputs()[1]);
  auto pre_node = (cnode->inputs()[1])->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(pre_node);
  // set clean output address
  if (common::AnfAlgo::HasNodeAttr(kAttrAtomicOutputIndexs, pre_node)) {
#if defined(__APPLE__)
    auto clean_output_indexes = common::AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicOutputIndexs);
#else
    auto clean_output_indexes = common::AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicOutputIndexs);
#endif
    for (auto index : clean_output_indexes) {
      auto device_address = AnfAlgo::GetOutputAddr(pre_node, index);
      kernel::AddressPtr input = std::make_shared<kernel::Address>();
      MS_EXCEPTION_IF_NULL(input);
      if (mem_scheduler != nullptr) {
        GetOrMallocAddress(mem_scheduler, device_address, input);
      } else {
        input->addr = device_address->ptr_;
        MS_EXCEPTION_IF_NULL(input->addr);
      }
      input->size = device_address->size_;
      kernel_inputs->emplace_back(input);
    }
    MS_LOG(DEBUG) << "AtomicAddClean clean output size:" << clean_output_indexes.size();
  }
  // set clean workspace address
  if (common::AnfAlgo::HasNodeAttr(kAttrAtomicWorkspaceIndexs, pre_node)) {
#if defined(__APPLE__)
    auto clean_workspaces_indexes =
      common::AnfAlgo::GetNodeAttr<std::vector<int>>(pre_node, kAttrAtomicWorkspaceIndexs);
#else
    auto clean_workspaces_indexes =
      common::AnfAlgo::GetNodeAttr<std::vector<size_t>>(pre_node, kAttrAtomicWorkspaceIndexs);
#endif
    for (const auto &index : clean_workspaces_indexes) {
      auto device_address = AnfAlgo::GetWorkspaceAddr(pre_node, index);
      kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
      MS_EXCEPTION_IF_NULL(workspace);
      if (mem_scheduler != nullptr) {
        GetOrMallocAddress(mem_scheduler, device_address, workspace);
      } else {
        workspace->addr = device_address->ptr_;
        MS_EXCEPTION_IF_NULL(workspace->addr);
      }
      workspace->size = device_address->size_;
      kernel_inputs->emplace_back(workspace);
    }
  }
}
void KernelRuntime::LaunchKernelEvent(const std::map<AnfNodePtr, std::vector<std::function<void()>>> &kernel_events,
                                      const AnfNodePtr &node) const {
  if (kernel_events.find(node) == kernel_events.end()) {
    return;
  }
  for (auto &event : kernel_events.at(node)) {
    event();
  }
}
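// Launch a kernel wrapped between two device time events and log the elapsed device time; used when
// PyNative profiling is enabled.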
bool KernelRuntime::LaunchKernelWithPynativeProfiling(kernel::KernelMod *kernel_mod, const std::string &op_name,
                                                      const KernelLaunchInfo &kernel_launch_info, void *stream) {
  MS_EXCEPTION_IF_NULL(kernel_mod);
  MS_EXCEPTION_IF_NULL(stream);
  float cost_time = 0;
  auto start = CreateDeviceTimeEvent();
  auto end = CreateDeviceTimeEvent();
  MS_EXCEPTION_IF_NULL(start);
  MS_EXCEPTION_IF_NULL(end);
  start->set_record_stream(stream);
  end->set_record_stream(stream);
  start->RecordEvent();
  bool ret = kernel_mod->LaunchKernel(kernel_launch_info, stream);
  if (!ret) {
    MS_LOG(EXCEPTION) << "Launch kernel failed, kernel name is: " << op_name;
  }
  end->RecordEvent();
  start->SyncEvent();
  end->SyncEvent();
  start->ElapsedTime(&cost_time, end.get());
  MS_LOG(DEBUG) << "Launch kernel:" << op_name << " cost:" << cost_time / kBasicTimeTransferUnit;
  return ret;
}
void KernelRuntime::DebugStreamSync(const CNodePtr &kernel) {
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto enable_sync_run = ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_SYNCHRONIZE);
  if (enable_sync_run) {
    if (!SyncStream()) {
      MS_LOG(EXCEPTION) << "Op " << kernel->fullname_with_scope() << " run failed!";
    }
  }
}
void KernelRuntime::GetOrMallocAddress(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                       const DeviceAddress *device_address, const kernel::AddressPtr &kernel_addr) {
  if (device_address->ptr_ != nullptr) {
    kernel_addr->addr = device_address->ptr_;
  } else {
    kernel_addr->addr = mem_scheduler->GetOrMalloc(device_address, device_address->size_);
  }
}
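// Fill the launch info of a kernel with addresses obtained through the MemScheduler; ref-parameter inputs
// of parameter-update kernels are registered with the scheduler as high-priority memory.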
void KernelRuntime::AssignKernelAddress(const std::shared_ptr<MemScheduler> &mem_scheduler, const AnfNodePtr &kernel,
                                        KernelLaunchInfo *kernel_launch_info) {
  MS_EXCEPTION_IF_NULL(kernel);
  MS_EXCEPTION_IF_NULL(kernel_launch_info);
  auto cnode = kernel->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (common::AnfAlgo::GetCNodeName(cnode) == kAtomicAddrCleanOpName) {
    return GenAddrCleanLaunchArgs(cnode, &(kernel_launch_info->inputs_), mem_scheduler);
  }
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  size_t input_num = common::AnfAlgo::GetInputTensorNum(kernel);
  const auto update_parameter = common::AnfAlgo::IsUpdateParameterKernel(cnode);
  for (size_t j = 0; j < input_num; ++j) {
    auto real_input = AnfAlgo::GetRealInputIndex(kernel, j);
    auto kernel_with_index = common::AnfAlgo::GetPrevNodeOutput(kernel, real_input, true);
    auto index = kernel_with_index.second;
    auto &input_node = kernel_with_index.first;
    auto device_address = AnfAlgo::GetOutputAddr(input_node, index, true);
    MS_EXCEPTION_IF_NULL(device_address);
    kernel::AddressPtr input = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, input);
    input->size = device_address->size_;
    kernel_launch_info->inputs_.emplace_back(input);
    if (update_parameter && input_node->isa<Parameter>()) {
      auto param = input_node->cast<ParameterPtr>();
      auto abstract = param->abstract();
      MS_EXCEPTION_IF_NULL(abstract);
      if (abstract->isa<abstract::AbstractRef>()) {
        mem_scheduler->UpdateHighPriorityMem(device_address);
      }
    }
  }
  for (size_t j = 0; j < kernel_mod->GetOutputSizeList().size(); ++j) {
    auto device_address = AnfAlgo::GetOutputAddr(kernel, j, true);
    kernel::AddressPtr output = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, output);
    output->size = device_address->size_;
    kernel_launch_info->outputs_.emplace_back(output);
  }
  for (size_t i = 0; i < kernel_mod->GetWorkspaceSizeList().size(); ++i) {
    auto device_address = AnfAlgo::GetWorkspaceAddr(kernel, i);
    kernel::AddressPtr workspace = std::make_shared<kernel::Address>();
    GetOrMallocAddress(mem_scheduler, device_address, workspace);
    workspace->size = device_address->size_;
    kernel_launch_info->workspaces_.emplace_back(workspace);
  }
}
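// Sync the host tensors bound to a kernel's parameter inputs and to its outputs back from device after
// the kernel has run under the MemScheduler.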
void KernelRuntime::SyncNodeOutputTensors(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                          const session::KernelGraph &graph, const AnfNodePtr &kernel) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  MS_EXCEPTION_IF_NULL(kernel);
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  for (size_t input_idx = 0; input_idx < kernel_mod->GetInputSizeList().size(); ++input_idx) {
    const auto input_node_index = common::AnfAlgo::GetPrevNodeOutput(kernel, input_idx, true);
    if (input_node_index.first != nullptr && input_node_index.first->isa<Parameter>()) {
      SyncNodeOutputTensor(mem_scheduler, input_node_index, graph);
    }
  }
  for (size_t output_idx = 0; output_idx < kernel_mod->GetOutputSizeList().size(); ++output_idx) {
    SyncNodeOutputTensor(mem_scheduler, std::make_pair(kernel, output_idx), graph);
  }
}
void KernelRuntime::SyncNodeOutputTensor(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                         const KernelWithIndex &node_output_index, const session::KernelGraph &graph) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  if (node_output_index.first == nullptr) {
    return;
  }
  auto device_address = AnfAlgo::GetMutableOutputAddr(node_output_index, true);
  auto tensor = graph.GetNodeOutputTensor(node_output_index);
  if (tensor == nullptr) {
    return;
  }
  if (device_address == nullptr) {
    tensor->data_sync(false);
    tensor->set_device_address(nullptr);
    tensor->set_sync_status(kNeedSyncHostToDevice);
    return;
  }
  if (!SyncStream()) {
    MS_LOG(EXCEPTION) << "SyncStream failed";
  }
  auto origin_ptr = device_address->ptr_;
  if (device_address->ptr_ == nullptr) {
    device_address->ptr_ = mem_scheduler->GetOrMalloc(device_address.get(), device_address->size_);
  }
  tensor->set_device_address(device_address);
  tensor->data_sync(false);
  tensor->set_device_address(nullptr);
  device_address->ptr_ = origin_ptr;
  tensor->set_sync_status(kNeedSyncHostToDevice);
}
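// Register every graph input tensor with the MemScheduler: decide whether host data still has to be
// copied to device, and give weights and updated parameters high memory priority.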
void KernelRuntime::InitGraphInputTensors(const std::shared_ptr<MemScheduler> &mem_scheduler,
                                          const session::KernelGraph &graph) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  auto &input_nodes = graph.input_nodes();
  auto &input_tensors = graph.input_tensors();
  if (input_tensors.size() != input_nodes.size()) {
    MS_LOG_EXCEPTION << "Invalid input tensor size:" << input_tensors.size() << " vs node size:" << input_nodes.size();
  }
  mem_scheduler->ClearMemNeedInit();
  for (size_t i = 0; i < input_tensors.size(); ++i) {
    auto input_node = input_nodes[i];
    if (!input_node->isa<Parameter>() || !AnfAlgo::OutputAddrExist(input_node, 0)) {
      continue;
    }
    auto device_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
    auto tensor = input_tensors[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
    const auto tensor_size = LongToSize(tensor->data().nbytes());
    bool need_sync = false;
    if (tensor->NeedSyncHostToDevice()) {
      need_sync = true;
    } else if (tensor_address != device_address) {
      tensor->data_sync(false);
      need_sync = true;
    }
    if (mem_scheduler->HasDeviceMem(device_address.get())) {
      device_address->set_ptr(nullptr);
    }
    if (need_sync) {
      const auto &shape = trans::GetRuntimePaddingShape(input_node, 0);
      if (device_address->GetPtr() != nullptr) {
        device_address->SyncHostToDevice(shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                         tensor->data_c(), tensor->device_info().host_format_);
      } else {
        mem_scheduler->AddMemNeedInit(device_address.get());
      }
    }
    MemPriority priority = kMemPriorityLow;
    const auto &parameter = input_node->cast<ParameterPtr>();
    if (common::AnfAlgo::IsParameterWeight(parameter) || graph.IsUpdatedParameter(parameter)) {
      priority = kMemPriorityHigh;
    }
    mem_scheduler->Init(device_address.get(), tensor->data_c(), tensor_size, priority);
    tensor->set_sync_status(kNoNeedSync);
  }
}
void KernelRuntime::AssignCommunicationMem(const session::KernelGraph &graph) {
  for (const auto &kernel : graph.execution_order()) {
    if (!common::AnfAlgo::IsCommunicationOp(kernel)) {
      continue;
    }
    AssignCommunicationInputFromMemoryPool(kernel);
    AssignCommunicationOutputFromMemoryPool(kernel);
  }
}
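// Launch a single kernel: resolve its stream, let the MemScheduler run its pre-compute step and assign
// addresses when one is in use (otherwise reuse addresses cached in the kernel mod or generate launch
// args on the fly), launch unless this is a mock run, then sync output tensors and run the scheduler's
// post-compute step.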
bool KernelRuntime::LaunchKernel(const session::KernelGraph &graph, const AnfNodePtr &kernel,
                                 const std::shared_ptr<MemScheduler> &mem_scheduler, bool mock) {
  MS_EXCEPTION_IF_NULL(kernel);
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  KernelLaunchInfo kernel_launch_info;
  auto stream = kernel_mod->stream();
  if (stream == nullptr) {
    if (common::AnfAlgo::IsCommunicationOp(kernel)) {
      stream = communication_stream_;
    } else {
      stream = stream_;
    }
  }
  bool ret = true;
  if (mem_scheduler != nullptr) {
    ret = mem_scheduler->PreCompute(stream);
    if (!ret) {
      return ret;
    }
    AssignKernelAddress(mem_scheduler, kernel, &kernel_launch_info);
    auto cnode = kernel->cast<CNodePtr>();
    if (mock && common::AnfAlgo::HasNodeAttr(kAttrOffload, cnode) &&
        common::AnfAlgo::GetNodeAttr<bool>(cnode, kAttrOffload)) {
      for (size_t i = 0; i < kernel_mod->GetOutputSizeList().size(); ++i) {
        auto device_address = AnfAlgo::GetOutputAddr(kernel, i, true);
        mem_scheduler->SetOffload(device_address);
      }
    }
  } else if (!kernel_mod->GetInputsAddr().empty() || !kernel_mod->GetOutputsAddr().empty()) {
    kernel_launch_info.inputs_ = kernel_mod->GetInputsAddr();
    kernel_launch_info.outputs_ = kernel_mod->GetOutputsAddr();
    kernel_launch_info.workspaces_ = kernel_mod->GetWorkSpacesAddr();
  } else {
    GenLaunchArgs(*kernel_mod, kernel, &kernel_launch_info);
  }
  if (!mock) {
    if (pynative_mode_profiling_flag_) {
      ret = LaunchKernelWithPynativeProfiling(kernel_mod, kernel->fullname_with_scope(), kernel_launch_info, stream);
    } else {
      ret = kernel_mod->LaunchKernel(kernel_launch_info, stream);
    }
    if (!ret) {
      return ret;
    }
  }
  if (mem_scheduler != nullptr) {
    if (!mock) {
      SyncNodeOutputTensors(mem_scheduler, graph, kernel);
    }
    ret = mem_scheduler->PostCompute(stream);
  }
  return ret;
}
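// Launch every kernel of the graph in execution order. Dynamic-shape kernels go through the
// InferShape/UpdateArgs/Execute path, nop transpose kernels are skipped by forwarding their input
// addresses, and the pre/post run events generated by GenKernelEvents are fired around each kernel.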
bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph, bool mock) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  std::shared_ptr<MemScheduler> mem_scheduler = nullptr;
  if (UseMemScheduler()) {
    mem_scheduler = mem_scheduler_manager_.GetOrCreateMemScheduler(graph.graph_id());
    MS_EXCEPTION_IF_NULL(mem_scheduler);
    mem_scheduler->Reset();
    mem_scheduler->Update();
    InitGraphInputTensors(mem_scheduler, graph);
  }
  const auto &kernels = graph.execution_order();
  std::vector<DynamicKernelPtr> dynamic_kernel_list;
  auto iter = graph_dynamic_kernel_map_.find(graph.graph_id());
  if (iter != graph_dynamic_kernel_map_.end()) {
    dynamic_kernel_list = iter->second;
  }
  if (!dynamic_kernel_list.empty() && dynamic_kernel_list.size() != kernels.size()) {
    MS_LOG(EXCEPTION) << "The size of dynamic kernels " << dynamic_kernel_list.size()
                      << " should be equal to the size of kernels " << kernels.size();
  }
  std::map<AnfNodePtr, std::vector<std::function<void()>>> kernel_pre_run_events;
  std::map<AnfNodePtr, std::vector<std::function<void()>>> kernel_post_run_events;
  auto events_iter = graph_kernel_events_map_.find(graph.graph_id());
  if (events_iter != graph_kernel_events_map_.end()) {
    kernel_pre_run_events = events_iter->second.first;
    kernel_post_run_events = events_iter->second.second;
  }
  for (size_t i = 0; i < kernels.size(); ++i) {
    LaunchKernelEvent(kernel_pre_run_events, kernels[i]);
    if (!dynamic_kernel_list.empty() && dynamic_kernel_list[i] != nullptr &&
        dynamic_kernel_list[i]->is_dynamic_shape()) {
      dynamic_kernel_list[i]->InferShape();
      dynamic_kernel_list[i]->UpdateArgs();
      dynamic_kernel_list[i]->Execute();
      if (!SyncStream()) {
        MS_LOG(ERROR) << "SyncStream failed";
        return false;
      }
      dynamic_kernel_list[i]->PostExecute();
    } else {
      auto &kernel = kernels[i];
      MS_EXCEPTION_IF_NULL(kernel);
      // Skip transpose kernels with the "nop_op" attr that are not hidden or removed in the PyNative infer
      // scenario. Such transpose kernels are generated in TransDataSplit to support specific TransData cases
      // and are not supposed to be executed; this hard-coded check should be removed once the new TransData
      // scheme is implemented.
      if (common::AnfAlgo::HasNodeAttr(kAttrNopOp, kernel)) {
        for (size_t idx = 0; idx < common::AnfAlgo::GetOutputTensorNum(kernel); idx += 1) {
          auto real_input = AnfAlgo::GetRealInputIndex(kernel, idx);
          auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, real_input);
          AnfAlgo::SetOutputAddr(device_address, idx, kernel.get());
        }
        continue;
      }
      auto ret = LaunchKernel(graph, kernel, mem_scheduler, mock);
      if (!ret) {
        MS_LOG(ERROR) << "Launch kernel failed.";
        return false;
      }
      KernelLaunchProfiling(kernel->fullname_with_scope());
      DebugStreamSync(kernel);
    }
    LaunchKernelEvent(kernel_post_run_events, kernels[i]);
  }
  if (UseMemScheduler() && !mock) {
    SyncParameter(graph, mem_scheduler);
  }
  return true;
}
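// After a real (non-mock) run under the MemScheduler, re-bind weight and updated-parameter tensors to
// their device addresses so that their data is synced back from device when it is read later.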
void KernelRuntime::SyncParameter(const session::KernelGraph &graph,
                                  const std::shared_ptr<MemScheduler> &mem_scheduler) {
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  auto &input_nodes = graph.input_nodes();
  auto &input_tensors = graph.input_tensors();
  if (input_tensors.size() != input_nodes.size()) {
    MS_LOG_EXCEPTION << "Invalid input tensor size:" << input_tensors.size() << " vs node size:" << input_nodes.size();
  }
  for (size_t i = 0; i < input_tensors.size(); ++i) {
    auto input_node = input_nodes[i];
    if (!input_node->isa<Parameter>() || !AnfAlgo::OutputAddrExist(input_node, 0)) {
      continue;
    }
    auto device_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
    MS_EXCEPTION_IF_NULL(device_address);
    auto parameter = input_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(parameter);
    if (!common::AnfAlgo::IsParameterWeight(parameter) && !graph.IsUpdatedParameter(parameter)) {
      continue;
    }
    auto tensor = input_tensors[i];
    MS_EXCEPTION_IF_NULL(tensor);
    if (mem_scheduler->HasDeviceMem(device_address.get())) {
      auto device_ptr = mem_scheduler->GetOrMalloc(device_address.get(), device_address->size(), kMemPriorityHigh);
      device_address->set_ptr(device_ptr);
      tensor->set_device_address(device_address);
      tensor->set_sync_status(kNeedSyncDeviceToHost);
    }
    if (graph.IsUpdatedParameter(parameter)) {
      tensor->SetIsUpdateByDevice();
    }
  }
}
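// When the MemScheduler is enabled, prepare it for the graph: set the memory handler and total step
// count, run one mock pass to record memory events if needed, then optimize the schedule; an
// unsuccessful optimization means the graph cannot run within the memory limit.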
void KernelRuntime::UseMemSchedulerIfNeeded(const session::KernelGraph &graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (!UseMemScheduler()) {
    return;
  }
  auto mem_scheduler = mem_scheduler_manager_.GetOrCreateMemScheduler(graph.graph_id());
  MS_EXCEPTION_IF_NULL(mem_scheduler);
  if (mem_scheduler->optimized()) {
    return;
  }
  mem_scheduler->SetMemHandler(mem_manager_);
  mem_scheduler->SetTotalStep(graph.execution_order().size());
  if (mem_scheduler->need_record_event()) {
    (void)LaunchKernelMod(graph, true);
    mem_scheduler->set_need_record_event(false);
  }
  auto ret = mem_scheduler->Optimize();
  if (!ret) {
    MS_LOG_EXCEPTION << "Can't run graph " << graph.graph_id() << " due to the memory limit.";
  }
}
bool KernelRuntime::LaunchKernels(const session::KernelGraph &graph) {
  UseMemSchedulerIfNeeded(graph);
  if (!LaunchKernelMod(graph)) {
    MS_LOG(ERROR) << "LaunchKernelMod failed!";
    return false;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kGraphMode) {
    if (!SyncStream()) {
      MS_LOG(ERROR) << "SyncStream failed";
      return false;
    }
  }
  return true;
}
void KernelRuntime::ClearGraphRuntimeResource(uint32_t graph_id) {
  MS_LOG(INFO) << "Clear graph:" << graph_id << " runtime resource";
}
#if ((defined ENABLE_CPU) && (!defined _WIN32))
namespace {
// Finalize the ps cache module before throwing an exception.
void FinalizePsCache(const std::string &exception) {
  ps::ps_cache_instance.Finalize();
  MS_LOG(EXCEPTION) << exception;
}
}  // namespace
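// Find the first GatherV2/SparseGatherV2 kernel whose weight parameter is a ps-cache hash table, check
// that its input index comes from an allowed source (GetNext, or Unique/Minimum depending on full_batch),
// and return that index node together with the configured cache size.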
void KernelRuntime::GetFirstPSEmbeddingCache(const session::KernelGraph &graph,
                                             AnfNodePtr *const first_cache_input_index,
                                             size_t *const first_cache_size) {
  for (const auto &kernel : graph.execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = common::AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = common::AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = common::AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    auto param_name = input_param.first->fullname_with_scope();
    if (!ps::ps_cache_instance.IsHashTable(param_name)) {
      continue;
    }
    auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
    while (input_index.first->isa<CNode>() && (common::AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = common::AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode = common::AnfAlgo::IsGraphKernel(input_index.first)
                   ? common::AnfAlgo::GetOutputOfGraphkernel(input_index)
                   : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (!cnode->isa<CNode>()) {
      FinalizePsCache("The input index of the embeddingLookup should be a CNode, but got " +
                      cnode->fullname_with_scope());
    }
    auto input_index_node_name = common::AnfAlgo::GetCNodeName(cnode);
    if (input_index_node_name != kGetNextOpName) {
      bool full_batch = parallel::ParallelContext::GetInstance()->full_batch();
      if ((!full_batch && (input_index_node_name != kUniqueOpName)) ||
          (full_batch && (input_index_node_name != kMinimumOpName))) {
        MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope()
                      << ") cache is from " << cnode->fullname_with_scope();
        FinalizePsCache(
          "The embeddingLookup whose input index isn't from the dataset doesn't support cache in parameter server "
          "training mode.");
      }
    }
    *first_cache_input_index = cnode;
    *first_cache_size = size;
    MS_LOG(INFO) << "The input index of the first embeddingLookup cache is from " << cnode->fullname_with_scope()
                 << ", the cache size is " << size;
    return;
  }
}
void KernelRuntime::CheckSparsePSEmbeddingCache(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  auto pre_node = common::AnfAlgo::GetPrevNodeOutput(node, 1, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (common::AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    pre_node = common::AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (common::AnfAlgo::GetCNodeName(pre_node.first) != kUniqueOpName)) {
    FinalizePsCache("The input_indices of kernel[SparseGatherV2] must be unique in parameter server cache mode");
  }
  pre_node = common::AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
  MS_EXCEPTION_IF_NULL(pre_node.first);
  while (pre_node.first->isa<CNode>() && (common::AnfAlgo::GetCNodeName(pre_node.first) == kCastOpName)) {
    pre_node = common::AnfAlgo::GetPrevNodeOutput(pre_node.first, 0, true);
    MS_EXCEPTION_IF_NULL(pre_node.first);
  }
  if (!(pre_node.first->isa<CNode>()) || (common::AnfAlgo::GetCNodeName(pre_node.first) != kGetNextOpName)) {
    FinalizePsCache(
      "The input indices of kernel[Unique] must be produced from dataset directly and the indices value can not be "
      "changed before delivering to kernel[Unique] in parameter server cache mode.");
  }
}
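// Verify that every embeddingLookup fed from the dataset is consistent with the first cached one: all of
// them must enable cache, use the same cache size, and take their input index from the same source node;
// otherwise the ps cache is finalized and an exception is raised.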
void KernelRuntime::CheckIfSupportPSEmbeddingCache(const session::KernelGraph &graph) {
  AnfNodePtr first_cache_input_index = nullptr;
  size_t first_cache_size = 0;
  GetFirstPSEmbeddingCache(graph, &first_cache_input_index, &first_cache_size);
  MS_EXCEPTION_IF_NULL(first_cache_input_index);
  for (const auto &kernel : graph.execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel);
    auto kernel_name = common::AnfAlgo::GetCNodeName(kernel);
    if (kernel_name != kGatherV2OpName && kernel_name != kSparseGatherV2OpName) {
      continue;
    }
    auto input_param = common::AnfAlgo::GetPrevNodeOutput(kernel, 0, true);
    auto input_index = common::AnfAlgo::GetPrevNodeOutput(kernel, 1, true);
    MS_EXCEPTION_IF_NULL(input_param.first);
    MS_EXCEPTION_IF_NULL(input_index.first);
    if (!input_param.first->isa<Parameter>()) {
      continue;
    }
    auto param_name = input_param.first->fullname_with_scope();
    if (ps::ps_cache_instance.IsHashTable(param_name) && (kernel_name == kSparseGatherV2OpName)) {
      CheckSparsePSEmbeddingCache(kernel);
    }
    while (input_index.first->isa<CNode>() && (common::AnfAlgo::GetCNodeName(input_index.first) == kCastOpName)) {
      input_index = common::AnfAlgo::GetPrevNodeOutput(input_index.first, 0, true);
      MS_EXCEPTION_IF_NULL(input_index.first);
    }
    auto cnode = common::AnfAlgo::IsGraphKernel(input_index.first)
                   ? common::AnfAlgo::GetOutputOfGraphkernel(input_index)
                   : input_index.first;
    MS_EXCEPTION_IF_NULL(cnode);
    if (cnode == first_cache_input_index) {
      if (!ps::ps_cache_instance.IsHashTable(param_name)) {
        MS_LOG(ERROR) << "The embeddingLookup(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
        FinalizePsCache(
          "All the embeddingLookups whose input indices are from dataset must enable cache at the same time when one "
          "of them enables cache in parameter server training mode.");
      }
      auto size = ps::ps_cache_instance.QueryHashTableSize(param_name);
      if (size != first_cache_size) {
        MS_LOG(ERROR) << "The cache size(" << size << ") of embeddingLookup(" << kernel->fullname_with_scope()
                      << ") is not the same as other embeddingLookup cache size(" << first_cache_size << ").";
        FinalizePsCache("The cache sizes of embeddingLookups are not the same in parameter server training mode.");
      }
    } else if (ps::ps_cache_instance.IsHashTable(param_name)) {
      MS_LOG(ERROR) << "The input index of the embeddingLookup(" << kernel->fullname_with_scope()
                    << ") cache is from " << cnode->fullname_with_scope();
      FinalizePsCache(
        "The embeddingLookup whose input index isn't from dataset doesn't support cache in parameter server training "
        "mode.");
    } else if (cnode->isa<CNode>() && (common::AnfAlgo::GetCNodeName(cnode) == kGetNextOpName)) {
      MS_LOG(ERROR) << "The EmbeddingLookup kernel(" << kernel->fullname_with_scope() << ") doesn't enable cache.";
      FinalizePsCache(
        "All EmbeddingLookup kernels whose input indices are from dataset must enable cache at "
        "the same time and parameter 'sparse' must be equal to the value of 'enable_sparse' in "
        "context setting in parameter server training mode.");
    }
  }
}
#endif
}  // namespace device
}  // namespace mindspore