You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they may include dashes ('-') and can be up to 35 characters long.

common_utils.h 6.9 kB

4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_
  17. #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_
#include <dirent.h>
#include <algorithm>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <nlohmann/json.hpp>
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/oplib/opinfo.h"
#include "backend/kernel_compiler/kernel_build_info.h"
namespace mindspore {
namespace kernel {
// Output directory for Ascend (CCE) compiled-kernel artifacts.
constexpr auto kCceKernelMeta = "./kernel_meta/";
// Output directory for GPU (CUDA) compiled-kernel artifacts.
constexpr auto kGpuKernelMeta = "./cuda_meta";
// Processor-name strings used to tag/lookup kernels by target device.
constexpr auto kProcessorAiCore = "aicore";
constexpr auto kProcessorAiCpu = "aicpu";
constexpr auto kProcessorCuda = "cuda";
constexpr auto kProcessorUnknown = "unknown";
// File-name suffixes of the kernel-cache artifacts (see Search/Insert below).
constexpr auto kJsonSuffix = ".json";
constexpr auto kInfoSuffix = ".info";
// Compile timeout, presumably in seconds — TODO(review): confirm unit at the use site.
constexpr unsigned int AUTODIFF_COMPILE_OVERTIME = 600;
constexpr auto kArgDataformat = "data_format";
// NOTE(review): a non-inline `const std::vector` in a header creates one copy
// per translation unit (dynamic init each time); consider a function-local
// static or (C++17) `inline` variable.
const std::vector<std::string> support_devices = {"aicore", "aicpu", "cuda"};
// Runtime info for a loaded kernel binary.
struct KernelMetaInfo {
  uintptr_t func_stub_;  // presumably the address of the kernel function stub — confirm at the use site
  uint32_t block_dim_;   // presumably the block dimension used at kernel launch — confirm at the use site
};
using KernelMetaPtr = std::shared_ptr<KernelMetaInfo>;
// Process-wide cache mapping a kernel name to its kernel-json artifact path.
// Backed by kernel_meta_map_; the on-disk location is kernel_meta_path_.
class KernelMeta {
 public:
  KernelMeta() = default;
  // Prepares the cache (sets kernel_meta_path_ / initialized_); defined in the .cc file.
  void Initialize();
  // Returns the cached json for kernel_name; empty-string semantics defined in the .cc file.
  std::string Search(const std::string &kernel_name) const;
  // Records kernel_name -> kernel_json in the cache; returns success.
  bool Insert(const std::string &kernel_name, const std::string &kernel_json);
  std::string kernel_meta_path() const { return kernel_meta_path_; }
  bool initialized() const { return initialized_; }
  // Meyers singleton: function-local static, initialization is thread-safe since C++11.
  static KernelMeta *GetInstance() {
    static KernelMeta kernel_meta;
    return &kernel_meta;
  }
  ~KernelMeta() = default;

 private:
  bool initialized_ = false;
  std::string kernel_meta_path_;
  std::unordered_map<std::string, std::string> kernel_meta_map_;
};
// --- Kernel-cache helpers (definitions in common_utils.cc) ---
bool CheckCache(const std::string &kernel_name);
KernelPackPtr SearchCache(const std::string &kernel_name, const std::string &processor);
KernelPackPtr InsertCache(const std::string &kernel_name, const std::string &processor);
// --- Dtype/string conversion helpers ---
TypeId DtypeToTypeId(const std::string &dtypes);
std::string Dtype2ShortType(const std::string &dtypes);
std::string TypeId2String(TypeId type_id, bool unknown_as_default = false);
size_t GetDtypeNbyte(const std::string &dtypes);
// Computes total byte size of `shape` with element type `type_ptr` into *size_i; returns success.
bool GetShapeSize(const std::vector<size_t> &shape, const TypePtr &type_ptr, int64_t *size_i);
// Builds candidate KernelBuildInfo entries for kernel_node from op_info_ptr; appends to *kernel_info_list.
bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptr<const OpInfo> &op_info_ptr, Processor processor,
                   std::vector<std::shared_ptr<KernelBuildInfo>> *const kernel_info_list);
// Writes `info` for `json_name` under base_path (defaults to the CCE kernel-meta dir).
void SaveJsonInfo(const std::string &json_name, const std::string &info, const std::string &base_path = kCceKernelMeta);
// Overloads: node -> processor string, and processor string -> Processor enum.
std::string GetProcessor(const AnfNodePtr &anf_node);
Processor GetProcessor(const string &processor);
bool IsSameShape(const std::vector<size_t> &shape_a, const std::vector<size_t> &shape_b);
// Sign of x: declaration only — exact return convention for 0 defined in the .cc file.
int Sign(float x);
// Graph-traversal helpers: resolve kernel inputs/outputs to (node, index) pairs.
std::pair<AnfNodePtr, size_t> GetKernelInput(const AnfNodePtr &anf_node, size_t index);
std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(const std::vector<AnfNodePtr> &node_list,
                                                                            const std::vector<AnfNodePtr> &input_list);
std::vector<std::pair<AnfNodePtr, size_t>> GetOutputIndex(const std::vector<AnfNodePtr> &node_list,
                                                          const std::vector<AnfNodePtr> &input_list,
                                                          const std::vector<AnfNodePtr> &output_list);
void GetValidKernelNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *node_list);
void GetValidKernelNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *node_list,
                         std::vector<AnfNodePtr> *input_list, std::vector<AnfNodePtr> *output_list);
void GetFuncGraphOutputNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *output_list);
// Serializes the constant value of input input_idx into *node_json; returns success.
bool GetInputTensorValue(const AnfNodePtr &anf_node, size_t input_idx, nlohmann::json *const node_json);
void GetGraphRealOutput(const FuncGraphPtr &func_graph, std::vector<std::pair<AnfNodePtr, size_t>> *node_list);
bool IsWeightBoundary(const AnfNodePtr &node);
// Reads the reduction-axis attribute from cnode.
std::vector<int64_t> GetReduceAttrAxis(const CNodePtr &cnode);
std::string GetProcessorStr(const AnfNodePtr &anf_node);
Processor GetProcessorFromContext();
std::string GetStrProcessorFromContext();
// --- Resize/interpolation helpers ---
float Scaling(size_t in_size, size_t out_size, bool align_corners);
float ScaleGrid(const int x, const float scale);
FusionType GetFusionTypeByName(const std::string &name);
std::string GetFusionNameByType(const kernel::FusionType &type);
// Precomputed per-output-pixel interpolation data: the two source indices
// that bracket the sample point and the blend weight between them.
struct CachedInterpolation {
  size_t lower;  // index of the lower bracketing source element
  size_t upper;  // index of the upper bracketing source element
  float lerp;    // fractional weight toward `upper`
};
// Fills `interpolation` (presumably out_size + 1 entries — confirm in the .cc
// definition) with bracketing indices/weights for resizing in_size -> out_size.
void ComputeInterpolationWeights(const size_t out_size, const size_t in_size, const float scale,
                                 CachedInterpolation *interpolation);
  110. template <typename T>
  111. inline std::string Vector2Str(const std::vector<T> &inputs) {
  112. if (!inputs.empty()) {
  113. std::ostringstream oss;
  114. (void)std::copy(inputs.begin(), inputs.end() - 1, std::ostream_iterator<T>(oss, ", "));
  115. oss << inputs.back();
  116. return oss.str();
  117. }
  118. return "";
  119. }
  120. template <typename T>
  121. inline T ComputeLerp(T top_left, T top_right, T bottom_left, T bottom_right, T x_lerp, T y_lerp) {
  122. T top = top_left + (top_right - top_left) * x_lerp;
  123. T bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
  124. return top + (bottom - top) * y_lerp;
  125. }
// Widens an unsigned size_t shape into an int64_t shape in *long_shape.
void CastShapeSizeToLong(const std::vector<size_t> &shape, std::vector<int64_t> *long_shape);
// Validates a slice (start/stop/step per dimension) against input_shape;
// failure behavior (throw vs. log) is defined in the .cc file.
void CheckSliceValid(const std::vector<int64_t> &start, const std::vector<int64_t> &stop,
                     const std::vector<int64_t> &step, const std::vector<int64_t> &input_shape);
// Flat-buffer offset of the slice origin, given per-dimension strides in dim_offset.
size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &stop,
                 const std::vector<int64_t> &dim_offset);
// Per-dimension element strides (row-major, presumably) for input_shape.
std::vector<int64_t> CalDimOffset(const std::vector<int64_t> &input_shape);
// Number of elements to copy for one contiguous run of the slice.
size_t GetCopySize(const std::vector<int64_t> &dim_offset, const std::vector<int64_t> &start,
                   const std::vector<int64_t> &stop);
// Size in bytes of a single element of TypeId t.
size_t UnitSizeInBytes(const mindspore::TypeId &t);
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_COMMON_UTILS_H_