You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

common_utils.cc 19 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include "kernel/common_utils.h"
#include <fstream>
#include <iostream>
#include <map>
#include <numeric>
#include <unordered_map>
#include "nlohmann/json.hpp"
#include "session/anf_runtime_algorithm.h"
#include "common/utils.h"
  24. namespace mindspore {
  25. namespace kernel {
  26. const std::unordered_map<std::string, TypeId> type_id_maps = {
  27. {"float", TypeId::kNumberTypeFloat32}, {"float16", TypeId::kNumberTypeFloat16},
  28. {"float32", TypeId::kNumberTypeFloat32}, {"float64", TypeId::kNumberTypeFloat64},
  29. {"int", TypeId::kNumberTypeInt}, {"int8", TypeId::kNumberTypeInt8},
  30. {"int16", TypeId::kNumberTypeInt16}, {"int32", TypeId::kNumberTypeInt32},
  31. {"int64", TypeId::kNumberTypeInt64}, {"uint", TypeId::kNumberTypeUInt},
  32. {"uint8", TypeId::kNumberTypeUInt8}, {"uint16", TypeId::kNumberTypeUInt16},
  33. {"uint32", TypeId::kNumberTypeUInt32}, {"uint64", TypeId::kNumberTypeUInt64},
  34. {"bool", TypeId::kNumberTypeBool},
  35. };
  36. const std::map<TypeId, std::string> type_id_str_map = {
  37. {TypeId::kNumberTypeFloat32, "float32"}, {TypeId::kNumberTypeFloat16, "float16"},
  38. {TypeId::kNumberTypeFloat, "float"}, {TypeId::kNumberTypeFloat64, "float64"},
  39. {TypeId::kNumberTypeInt, "int"}, {TypeId::kNumberTypeInt8, "int8"},
  40. {TypeId::kNumberTypeInt16, "int16"}, {TypeId::kNumberTypeInt32, "int32"},
  41. {TypeId::kNumberTypeInt64, "int64"}, {TypeId::kNumberTypeUInt, "uint"},
  42. {TypeId::kNumberTypeUInt8, "uint8"}, {TypeId::kNumberTypeUInt16, "uint16"},
  43. {TypeId::kNumberTypeUInt32, "uint32"}, {TypeId::kNumberTypeUInt64, "uint64"},
  44. {TypeId::kNumberTypeBool, "bool"},
  45. };
  46. const std::map<std::string, std::string> DATATYPE_STRING_MAP{
  47. {"Float32", "float32"}, {"Float16", "float16"}, {"Int8", "int8"}, {"Int16", "int16"},
  48. {"UInt16", "uint16"}, {"UInt8", "uint8"}, {"Int32", "int32"}, {"UInt32", "uint32"},
  49. {"Int64", "int64"}, {"UInt64", "uint64"}, {"Bool_", "bool"}, {"Float64", "double"},
  50. };
  51. const std::unordered_map<std::string, std::string> dtype_shortdtype_map_ = {
  52. {"float16", "f16"}, {"float32", "f32"}, {"float64", "f64"}, {"int8", "i8"}, {"int16", "i16"}, {"int32", "i32"},
  53. {"int64", "i64"}, {"uint8", "u8"}, {"uint16", "u16"}, {"uint32", "u32"}, {"uint64", "u64"}, {"bool", "bool"},
  54. };
  55. const std::unordered_map<std::string, size_t> dtype_nbyte_map = {
  56. {"float16", sizeof(float) / 2}, {"float32", sizeof(float)}, {"float64", sizeof(float) * 2},
  57. {"int8", sizeof(int) / 4}, {"int16", sizeof(int) / 2}, {"int32", sizeof(int)},
  58. {"int64", sizeof(int) * 2}, {"uint8", sizeof(int) / 4}, {"uint16", sizeof(int) / 2},
  59. {"uint32", sizeof(int)}, {"uint64", sizeof(int) * 2}, {"bool", sizeof(char)},
  60. };
  61. const std::unordered_map<std::string, FusionType> fusion_type_maps = {
  62. {"CONVLUTION", FusionType::CONVLUTION}, {"ELEMWISE", FusionType::ELEMWISE}, {"COMMREDUCE", FusionType::COMMREDUCE},
  63. {"SEGMENT", FusionType::SEGMENT}, {"OPAQUE", FusionType::OPAQUE},
  64. };
  65. bool IsAtomicNode(const CNodePtr &kernel_node) {
  66. MS_EXCEPTION_IF_NULL(kernel_node);
  67. auto kernel_mod = AnfAlgo::GetKernelMod(kernel_node);
  68. MS_EXCEPTION_IF_NULL(kernel_mod);
  69. auto parameters_indexs = kernel_mod->GenParameters();
  70. if (parameters_indexs.empty()) {
  71. return false;
  72. }
  73. auto atomic_flag = false;
  74. size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
  75. size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  76. auto workspace_size_list = kernel_mod->GetWorkspaceSizeList();
  77. size_t workspace_num = kernel_mod->GetWorkspaceSizeList().size();
  78. if (input_num + workspace_num + output_num > parameters_indexs.size()) {
  79. size_t lossNum = (input_num + workspace_num + output_num) - parameters_indexs.size();
  80. for (size_t i = 0; i < lossNum; i++) {
  81. parameters_indexs.push_back(0);
  82. }
  83. }
  84. std::vector<int> clean_output_indexs;
  85. // in parameters data sort as input->workspace->output
  86. size_t index = 0;
  87. while (index < output_num) {
  88. if (parameters_indexs[input_num + workspace_num + index] == 1) {
  89. atomic_flag = true;
  90. clean_output_indexs.push_back(SizeToInt(index));
  91. }
  92. index++;
  93. }
  94. if (atomic_flag) {
  95. AnfAlgo::SetNodeAttr(kAttrAutomicOutputIndexs, MakeValue(clean_output_indexs), kernel_node);
  96. }
  97. for (size_t i = 0; i < workspace_num; ++i) {
  98. if (parameters_indexs[input_num + i] == 1) {
  99. atomic_flag = true;
  100. AnfAlgo::SetNodeAttr(kAttrAutomicWorkspaceSize,
  101. MakeValue(std::accumulate(workspace_size_list.begin(), workspace_size_list.end(), 0)),
  102. kernel_node);
  103. break;
  104. }
  105. }
  106. return atomic_flag;
  107. }
  108. void KernelMeta::Initialize() {
  109. kernel_meta_path_ = std::string(kGpuKernelMeta) + "_" + std::to_string(getpid()) + "/";
  110. // remove old kernel cache
  111. RemoveKernelCache();
  112. #if defined(_WIN32) || defined(_WIN64)
  113. auto ret = mkdir(kernel_meta_path_.c_str());
  114. #else
  115. auto ret = mkdir(kernel_meta_path_.c_str(), S_IRWXG | S_IRWXU);
  116. #endif
  117. if (ret != 0) {
  118. MS_LOG(INFO) << "kernel dir [" << kernel_meta_path_ << "], will be created later";
  119. }
  120. initialized_ = true;
  121. }
  122. void KernelMeta::RemoveKernelCache() {
  123. DIR *dir = opendir(kernel_meta_path_.c_str());
  124. if (dir == nullptr) {
  125. return;
  126. }
  127. struct dirent *entry;
  128. while ((entry = readdir(dir)) != nullptr) {
  129. std::string kernel_file = entry->d_name;
  130. std::string kernel_file_realpath = kernel_meta_path_ + kernel_file;
  131. (void)remove(kernel_file_realpath.c_str());
  132. }
  133. (void)closedir(dir);
  134. (void)rmdir(kernel_meta_path_.c_str());
  135. }
  136. std::string KernelMeta::Search(const std::string &kernel_name) const {
  137. if (!initialized_) {
  138. return "";
  139. }
  140. auto iter = kernel_meta_map_.find(kernel_name);
  141. if (iter == kernel_meta_map_.end()) {
  142. return "";
  143. } else {
  144. return iter->second;
  145. }
  146. }
  147. bool KernelMeta::Insert(const std::string &kernel_name, const std::string &kernel_json) {
  148. if (!initialized_) {
  149. return false;
  150. }
  151. kernel_meta_map_[kernel_name] = kernel_json;
  152. return true;
  153. }
  154. bool CheckCache(const std::string &kernel_name) {
  155. // check cache.
  156. KernelMeta *bin_map = KernelMeta::GetInstance();
  157. if (bin_map == nullptr) {
  158. MS_LOG(DEBUG) << "kernel cache is invalid.";
  159. return false;
  160. }
  161. std::string kernel_json = bin_map->Search(kernel_name);
  162. bool ret = (!kernel_json.empty());
  163. if (ret) {
  164. MS_LOG(INFO) << "Kernel name:" << kernel_name << " has registed.";
  165. } else {
  166. MS_LOG(INFO) << "Kernel name:" << kernel_name << " will been registed.";
  167. }
  168. return ret;
  169. }
  170. KernelPackPtr SearchCache(const std::string &kernel_name, const std::string &processor) {
  171. // search cache.
  172. KernelMeta *bin_map = KernelMeta::GetInstance();
  173. if (bin_map == nullptr) {
  174. MS_LOG(DEBUG) << "kernel cache is invalid.";
  175. return nullptr;
  176. }
  177. std::string kernel_json = bin_map->Search(kernel_name);
  178. if (!kernel_json.empty()) {
  179. KernelPackPtr kernel_pack = std::make_shared<KernelPack>();
  180. // just a tmp solution.
  181. if (!kernel_pack->ReadFromJsonFile(kernel_json, processor)) {
  182. MS_LOG(DEBUG) << "Read cache json and bin file failed[" << kernel_json << "].";
  183. return nullptr;
  184. } else {
  185. return kernel_pack;
  186. }
  187. } else {
  188. MS_LOG(INFO) << "cache kernel not found[" << kernel_name << "].";
  189. return nullptr;
  190. }
  191. }
  192. KernelPackPtr InsertCache(const std::string &kernel_name, const std::string &processor) {
  193. MS_LOG(INFO) << "kernel name:" << kernel_name << ", processr:" << processor;
  194. KernelMeta *bin_map = KernelMeta::GetInstance();
  195. std::string kernel_json;
  196. if (processor == kProcessorAiCore || processor == kProcessorAiCpu) {
  197. kernel_json = kCceKernelMeta;
  198. } else {
  199. kernel_json = bin_map->GetKernelMetaPath();
  200. }
  201. (void)kernel_json.append(kernel_name).append(kJsonSuffix);
  202. KernelPackPtr kernel_pack = std::make_shared<KernelPack>();
  203. if (!kernel_pack->ReadFromJsonFile(kernel_json, processor)) {
  204. MS_LOG(DEBUG) << "Read json and bin file failed[" << kernel_json << "].";
  205. return nullptr;
  206. }
  207. if (bin_map == nullptr) {
  208. MS_LOG(DEBUG) << "kernel cache is invalid.";
  209. return nullptr;
  210. }
  211. if (bin_map->Insert(kernel_name, kernel_json)) {
  212. MS_LOG(INFO) << "Insert to cache success[" << kernel_json << "], kernelname[" << kernel_name << "].";
  213. }
  214. return kernel_pack;
  215. }
  216. TypeId DtypeToTypeId(const std::string &dtypes) {
  217. auto iter = type_id_maps.find(dtypes);
  218. if (iter != type_id_maps.end()) {
  219. return iter->second;
  220. } else {
  221. MS_EXCEPTION(ArgumentError) << "Illegal input device dtype:" << dtypes;
  222. }
  223. }
  224. std::string Dtype2String(const std::string &dtypes) {
  225. auto iter = DATATYPE_STRING_MAP.find(dtypes);
  226. if (iter == DATATYPE_STRING_MAP.end()) {
  227. MS_EXCEPTION(ArgumentError) << "Illegal input dtype:" << dtypes;
  228. }
  229. return iter->second;
  230. }
  231. std::string TypeId2String(TypeId type_id) {
  232. auto iter = type_id_str_map.find(type_id);
  233. if (iter == type_id_str_map.end()) {
  234. MS_EXCEPTION(ArgumentError) << "Illegal input dtype." << TypeIdLabel(type_id);
  235. }
  236. return iter->second;
  237. }
  238. std::string Dtype2ShortType(const std::string &dtypes) {
  239. auto iter = dtype_shortdtype_map_.find(dtypes);
  240. if (iter != dtype_shortdtype_map_.end()) {
  241. return iter->second;
  242. } else {
  243. MS_EXCEPTION(ArgumentError) << "Illegal input dtype:" << dtypes;
  244. }
  245. }
  246. size_t GetDtypeNbyte(const std::string &dtypes) {
  247. auto iter = dtype_nbyte_map.find(dtypes);
  248. if (iter != dtype_nbyte_map.end()) {
  249. return iter->second;
  250. } else {
  251. MS_EXCEPTION(ArgumentError) << "Illegal input dtype:" << dtypes;
  252. }
  253. }
  254. bool SetInputKernelBuilderInfo(const std::vector<std::shared_ptr<OpIOInfo>> &inputs, size_t real_input_num,
  255. size_t builder_idex, const std::vector<int> &dyn_input_sizes,
  256. const std::shared_ptr<KernelBuildInfo::KernelBuildInfoBuilder> &builder) {
  257. MS_EXCEPTION_IF_NULL(builder);
  258. std::vector<TypeId> inputs_device_type;
  259. std::vector<std::string> inputs_format;
  260. size_t dyn_input_idx = 0;
  261. size_t kernel_info_index = 0;
  262. MS_EXCEPTION_IF_NULL(inputs[0]);
  263. size_t kernel_info_cnt = inputs[0]->dtypes().size();
  264. for (const auto &input : inputs) {
  265. MS_EXCEPTION_IF_NULL(input);
  266. std::string param_type = input->param_type();
  267. std::vector<std::string> dtypes = input->dtypes();
  268. std::vector<std::string> formats = input->formats();
  269. if (dtypes.size() != kernel_info_cnt || formats.size() != kernel_info_cnt) {
  270. MS_LOG(DEBUG) << "Set input kernel builder info, dtyps size != formats size.";
  271. return false;
  272. }
  273. if (param_type == "dynamic") {
  274. if (dyn_input_sizes.empty()) {
  275. MS_LOG(DEBUG) << "Set input kernel builder info, dyn_input_sizes's size is 0 when param_type is dynamic";
  276. return false;
  277. }
  278. for (int t = 0; t < dyn_input_sizes[dyn_input_idx]; t++) {
  279. kernel_info_index++;
  280. auto type_id = DtypeToTypeId(dtypes[builder_idex]);
  281. inputs_device_type.push_back(type_id);
  282. inputs_format.push_back(formats[builder_idex]);
  283. }
  284. dyn_input_idx++;
  285. } else if (param_type == "required") {
  286. kernel_info_index++;
  287. auto type_id = DtypeToTypeId(dtypes[builder_idex]);
  288. inputs_device_type.push_back(type_id);
  289. inputs_format.push_back(formats[builder_idex]);
  290. } else {
  291. if (kernel_info_index < real_input_num) {
  292. MS_LOG(INFO) << "Set input kernel builder info, input type is optional, input index is :" << kernel_info_index;
  293. kernel_info_index++;
  294. auto type_id = DtypeToTypeId(dtypes[builder_idex]);
  295. inputs_device_type.push_back(type_id);
  296. inputs_format.push_back(formats[builder_idex]);
  297. }
  298. }
  299. }
  300. builder->SetInputsDeviceType(inputs_device_type);
  301. builder->SetInputsFormat(inputs_format);
  302. return true;
  303. }
  304. bool SetOutputKernelBuilderInfo(const std::vector<std::shared_ptr<OpIOInfo>> &outputs, size_t builder_idex,
  305. const size_t &real_output_num,
  306. const std::shared_ptr<KernelBuildInfo::KernelBuildInfoBuilder> &builder) {
  307. // not now but in the next we need to support dynamic output case
  308. MS_EXCEPTION_IF_NULL(builder);
  309. size_t output_idx = 0;
  310. std::vector<TypeId> outputs_device_type;
  311. std::vector<std::string> outputs_format;
  312. MS_EXCEPTION_IF_NULL(outputs[0]);
  313. size_t kernel_info_cnt = outputs[0]->dtypes().size();
  314. for (const auto &output : outputs) {
  315. MS_EXCEPTION_IF_NULL(output);
  316. if (output_idx >= real_output_num) {
  317. MS_LOG(DEBUG) << "real_output_num:" << real_output_num << ", output_idx:" << output_idx << " is out of limit!";
  318. continue;
  319. }
  320. size_t output_num = 0;
  321. if (output->param_type() == "dynamic") {
  322. if (outputs.size() > 1) {
  323. MS_EXCEPTION(ArgumentError) << "Dynamic output is unsupported multi output!";
  324. }
  325. output_num = real_output_num;
  326. } else if (output->param_type() == "required") {
  327. output_num = 1;
  328. } else {
  329. if (output_idx < real_output_num) {
  330. MS_LOG(INFO) << "Set output kernel builder info, output type is optional, output index is :" << output_idx;
  331. output_num = 1;
  332. }
  333. }
  334. for (size_t i = 0; i < output_num; i++) {
  335. std::vector<std::string> dtypes = output->dtypes();
  336. std::vector<std::string> formats = output->formats();
  337. if (dtypes.size() != kernel_info_cnt || formats.size() != kernel_info_cnt) {
  338. MS_LOG(DEBUG) << "Set output kernel builder info, dtyps size != formats size.";
  339. return false;
  340. }
  341. auto type_id = DtypeToTypeId(dtypes[builder_idex]);
  342. outputs_device_type.push_back(type_id);
  343. outputs_format.push_back(formats[builder_idex]);
  344. output_idx++;
  345. }
  346. }
  347. builder->SetOutputsFormat(outputs_format);
  348. builder->SetOutputsDeviceType(outputs_device_type);
  349. return true;
  350. }
  351. void SetKernelBuildInfo(const std::shared_ptr<KernelBuildInfo::KernelBuildInfoBuilder> &builder, Processor processor,
  352. const std::shared_ptr<const OpInfo> &op_info_ptr) {
  353. MS_EXCEPTION_IF_NULL(builder);
  354. MS_EXCEPTION_IF_NULL(op_info_ptr);
  355. auto imply_type = op_info_ptr->imply_type();
  356. builder->SetProcessor(processor);
  357. std::string fusion_type = op_info_ptr->fusion_type();
  358. auto iter = fusion_type_maps.find(fusion_type);
  359. if (iter != fusion_type_maps.end()) {
  360. builder->SetFusionType(iter->second);
  361. } else {
  362. if (imply_type == kAKG) {
  363. MS_EXCEPTION(NotExistsError) << "Illegal fusion type from dsl register:" << fusion_type;
  364. }
  365. }
  366. if (imply_type == kAKG) {
  367. builder->SetKernelType(AUTO_DIFF_KERNEL);
  368. } else if (imply_type == kAICPU) {
  369. builder->SetKernelType(AICPU_KERNEL);
  370. } else {
  371. builder->SetKernelType(TBE_KERNEL);
  372. }
  373. }
  374. bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptr<const OpInfo> &op_info_ptr, Processor processor,
  375. std::vector<std::shared_ptr<KernelBuildInfo>> *const kernel_info_list) {
  376. MS_EXCEPTION_IF_NULL(kernel_node);
  377. MS_EXCEPTION_IF_NULL(kernel_info_list);
  378. size_t real_input_num = AnfAlgo::GetInputTensorNum(kernel_node);
  379. size_t real_output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  380. std::vector<std::shared_ptr<OpIOInfo>> inputs = op_info_ptr->inputs_ptr();
  381. std::vector<std::shared_ptr<OpIOInfo>> outputs = op_info_ptr->outputs_ptr();
  382. std::vector<int> dyn_input_sizes;
  383. auto primitive = AnfAlgo::GetCNodePrimitive(kernel_node);
  384. MS_EXCEPTION_IF_NULL(primitive);
  385. if (primitive->GetAttr("dyn_input_sizes") != nullptr) {
  386. dyn_input_sizes = GetValue<std::vector<int>>(primitive->GetAttr("dyn_input_sizes"));
  387. }
  388. if (inputs.size() > 0) {
  389. MS_EXCEPTION_IF_NULL(inputs[0]);
  390. size_t kernel_info_cnt = inputs[0]->dtypes().size();
  391. for (size_t j = 0; j < kernel_info_cnt; j++) {
  392. auto builder = std::make_shared<KernelBuildInfo::KernelBuildInfoBuilder>();
  393. MS_EXCEPTION_IF_NULL(builder);
  394. SetKernelBuildInfo(builder, processor, op_info_ptr);
  395. if (!SetInputKernelBuilderInfo(inputs, real_input_num, j, dyn_input_sizes, builder)) {
  396. MS_LOG(DEBUG) << "Parse kernel metadata, set inputs kernel builder info failed.";
  397. return false;
  398. }
  399. if (outputs.size() > 0) {
  400. if (!SetOutputKernelBuilderInfo(outputs, j, real_output_num, builder)) {
  401. MS_LOG(DEBUG) << "Parse kernel metadata, set outputs kernel builder info failed.";
  402. return false;
  403. }
  404. }
  405. kernel_info_list->push_back(builder->Build());
  406. }
  407. } else if (outputs.size() > 0) {
  408. MS_EXCEPTION_IF_NULL(outputs[0]);
  409. size_t kernel_info_cnt = outputs[0]->dtypes().size();
  410. for (size_t j = 0; j < kernel_info_cnt; j++) {
  411. auto builder = std::make_shared<KernelBuildInfo::KernelBuildInfoBuilder>();
  412. MS_EXCEPTION_IF_NULL(builder);
  413. SetKernelBuildInfo(builder, processor, op_info_ptr);
  414. if (!SetOutputKernelBuilderInfo(outputs, j, real_output_num, builder)) {
  415. MS_LOG(DEBUG) << "Parse kernel metadata, set outputs kernel builder info failed.";
  416. return false;
  417. }
  418. kernel_info_list->push_back(builder->Build());
  419. }
  420. } else {
  421. if (processor == AICPU) {
  422. auto builder = std::make_shared<KernelBuildInfo::KernelBuildInfoBuilder>();
  423. MS_EXCEPTION_IF_NULL(builder);
  424. SetKernelBuildInfo(builder, processor, op_info_ptr);
  425. kernel_info_list->push_back(builder->Build());
  426. }
  427. }
  428. return true;
  429. }
  430. void SaveJsonInfo(const std::string &json_name, const std::string &info) {
  431. char real_path[PATH_MAX] = {0};
  432. std::string path = kCceKernelMeta + json_name + kInfoSuffix;
  433. if (path.size() > PATH_MAX) {
  434. MS_LOG(DEBUG) << "file path " << path << " is too long.";
  435. return;
  436. }
  437. std::ofstream filewrite;
  438. filewrite.open(path);
  439. if (!filewrite.is_open()) {
  440. return;
  441. }
  442. filewrite << info << std::endl;
  443. filewrite.close();
  444. #if defined(_WIN32) || defined(_WIN64)
  445. if (nullptr == _fullpath(real_path, path.c_str(), PATH_MAX)) {
  446. MS_LOG(DEBUG) << "dir " << path << " does not exit.";
  447. return;
  448. }
  449. #else
  450. if (nullptr == realpath(path.c_str(), real_path)) {
  451. MS_LOG(DEBUG) << "dir " << path << " does not exit.";
  452. return;
  453. }
  454. #endif
  455. MS_LOG(INFO) << "real path is :" << real_path;
  456. if (chmod(real_path, S_IRUSR) == -1) {
  457. MS_LOG(DEBUG) << "modify file:" << real_path << " to read only fail.";
  458. }
  459. }
  460. std::string GetProcessor(const AnfNodePtr &anf_node) {
  461. MS_EXCEPTION_IF_NULL(anf_node);
  462. std::string device;
  463. switch (AnfAlgo::GetProcessor(anf_node)) {
  464. case Processor::AICORE:
  465. device = kProcessorAiCore;
  466. break;
  467. case Processor::AICPU:
  468. device = kProcessorAiCpu;
  469. break;
  470. case Processor::CUDA:
  471. device = kProcessorCuda;
  472. break;
  473. default:
  474. MS_LOG(DEBUG) << "Unknown processor type.";
  475. break;
  476. }
  477. return device;
  478. }
  479. } // namespace kernel
  480. } // namespace mindspore