You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

common_utils.h 5.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_
#include <dirent.h>
#include <algorithm>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <nlohmann/json.hpp>
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/oplib/opinfo.h"
#include "backend/kernel_compiler/kernel_build_info.h"
  31. namespace mindspore {
  32. namespace kernel {
// On-disk cache directory for compiled Ascend (CCE) kernels, relative to the working dir.
constexpr auto kCceKernelMeta = "./kernel_meta/";
// On-disk cache directory for compiled CUDA kernels.
constexpr auto kGpuKernelMeta = "./cuda_meta";
// Processor-type identifiers used to tag kernels and cache entries (see support_devices).
constexpr auto kProcessorAiCore = "aicore";
constexpr auto kProcessorAiCpu = "aicpu";
constexpr auto kProcessorCuda = "cuda";
constexpr auto kProcessorUnknown = "unknown";
// File-name suffixes of the artifacts handled by the kernel cache.
constexpr auto kJsonSuffix = ".json";
constexpr auto kInfoSuffix = ".info";
// Compilation timeout for auto-diff kernels; presumably seconds — TODO confirm unit at use site.
constexpr unsigned int AUTODIFF_COMPILE_OVERTIME = 600;
// Op-attribute key for the data-format argument.
constexpr auto kArgDataformat = "data_format";
// Device backends for which cached kernels may be searched.
const std::vector<std::string> support_devices = {"aicore", "aicpu", "cuda"};
  44. struct KernelMetaInfo {
  45. uintptr_t func_stub_;
  46. uint32_t block_dim_;
  47. };
  48. using KernelMetaPtr = std::shared_ptr<KernelMetaInfo>;
  49. class KernelMeta {
  50. public:
  51. KernelMeta() = default;
  52. void Initialize(int pid);
  53. void RemoveKernelCache();
  54. std::string Search(const std::string &kernel_name) const;
  55. bool Insert(const std::string &kernel_name, const std::string &kernel_json);
  56. std::string kernel_meta_path() const { return kernel_meta_path_; }
  57. bool initialized() const { return initialized_; }
  58. static KernelMeta *GetInstance() {
  59. static KernelMeta kernel_meta;
  60. return &kernel_meta;
  61. }
  62. ~KernelMeta() = default;
  63. private:
  64. bool initialized_ = false;
  65. std::string kernel_meta_path_;
  66. std::unordered_map<std::string, std::string> kernel_meta_map_;
  67. };
// ---- Free-function declarations; definitions live in the corresponding .cc (not
// ---- visible here), so the per-function notes below are name-level and should be
// ---- confirmed against the implementations.

// Kernel-cache queries: |processor| is one of the support_devices strings.
bool CheckCache(const std::string &kernel_name);
KernelPackPtr SearchCache(const std::string &kernel_name, const std::string &processor);
KernelPackPtr InsertCache(const std::string &kernel_name, const std::string &processor);

// Dtype-string <-> TypeId conversion helpers.
TypeId DtypeToTypeId(const std::string &dtypes);
std::string Dtype2ShortType(const std::string &dtypes);
std::string TypeId2String(TypeId type_id, bool unknown_as_default = false);
size_t GetDtypeNbyte(const std::string &dtypes);

// Builds candidate KernelBuildInfo entries for |kernel_node| from op metadata;
// results are appended to |kernel_info_list|. Returns success.
bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptr<const OpInfo> &op_info_ptr, Processor processor,
                   std::vector<std::shared_ptr<KernelBuildInfo>> *const kernel_info_list);

// Writes |info| to a json file named |json_name| under |base_path|.
void SaveJsonInfo(const std::string &json_name, const std::string &info, const std::string &base_path = kCceKernelMeta);

// Processor lookup overloads.
// NOTE(review): the second overload uses unqualified `string` (vs std::string
// elsewhere) — it relies on a using-declaration pulled in by an included header.
std::string GetProcessor(const AnfNodePtr &anf_node);
Processor GetProcessor(const string &processor);

bool IsSameShape(const std::vector<size_t> &shape_a, const std::vector<size_t> &shape_b);
// Sign of a float; presumably -1/0/+1 — confirm against the definition.
int Sign(float x);

// Graph-inspection helpers: map kernel nodes to their input/output anchors.
std::pair<AnfNodePtr, size_t> GetKernelInput(const AnfNodePtr &anf_node, size_t index);
std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(const std::vector<AnfNodePtr> &node_list,
                                                                            const std::vector<AnfNodePtr> &input_list);
std::vector<std::pair<AnfNodePtr, size_t>> GetOutputIndex(const std::vector<AnfNodePtr> &node_list,
                                                          const std::vector<AnfNodePtr> &input_list,
                                                          const std::vector<AnfNodePtr> &output_list);
// Collects kernel nodes of |func_graph| into |node_list| (and optionally the
// graph's input/output node lists in the three-argument overload).
void GetValidKernelNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *node_list);
void GetValidKernelNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *node_list,
                         std::vector<AnfNodePtr> *input_list, std::vector<AnfNodePtr> *output_list);
void GetFuncGraphOutputNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *output_list);
// Serializes a constant input tensor of |anf_node| into |node_json|; returns
// whether a value was extracted — confirm exact json schema in the .cc.
bool GetInputTensorValue(const AnfNodePtr &anf_node, size_t input_idx, nlohmann::json *const node_json);
void GetGraphRealOutput(const FuncGraphPtr &func_graph, std::vector<std::pair<AnfNodePtr, size_t>> *node_list);
bool IsWeightBoundary(const AnfNodePtr &node);
// Reads the reduction-axis attribute from |cnode|.
std::vector<int> GetReduceAttrAxis(const CNodePtr &cnode);
// Returns one of the kProcessor* strings for |anf_node|.
std::string GetProcessorStr(const AnfNodePtr &anf_node);
  97. template <typename T>
  98. inline std::string Vector2Str(const std::vector<T> &inputs) {
  99. if (!inputs.empty()) {
  100. std::ostringstream oss;
  101. (void)std::copy(inputs.begin(), inputs.end() - 1, std::ostream_iterator<T>(oss, ", "));
  102. oss << inputs.back();
  103. return oss.str();
  104. }
  105. return "";
  106. }
  107. } // namespace kernel
  108. } // namespace mindspore
  109. #endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_