
transformer_utils.cc 7.5 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "transformer_utils.h"

#include "external/ge/ge_api_types.h"
#include "framework/common/debug/ge_log.h"
#include "graph/utils/type_utils.h"

namespace ge {
bool NodeShapeTransUtils::CatchFormatAndShape() {
  inputs_ = op_desc_->GetAllInputName();
  outputs_ = op_desc_->GetAllOutputName();

  // Record the current format and dtype of every input whose format differs from its
  // origin format, then temporarily switch the tensor back to origin format and shape.
  for (auto &ele : inputs_) {
    auto tensor_desc_input = op_desc_->MutableInputDesc(ele.first);
    if (tensor_desc_input == nullptr) {
      continue;
    }
    auto format = tensor_desc_input->GetFormat();
    auto ori_format = tensor_desc_input->GetOriginFormat();
    if (format == ori_format) {
      GELOGD("Node is %s, input tensor name is %s. Ori format %s and format %s are the same, no need to catch format & shape!",
             op_desc_->GetName().c_str(), ele.first.c_str(), TypeUtils::FormatToSerialString(ori_format).c_str(),
             TypeUtils::FormatToSerialString(format).c_str());
      continue;
    }
    map_format_in_.insert(std::pair<std::string, Format>(ele.first, format));
    map_ori_format_in_.insert(std::pair<std::string, Format>(ele.first, ori_format));
    map_dtype_in_.insert(std::pair<std::string, DataType>(ele.first, tensor_desc_input->GetDataType()));
    tensor_desc_input->SetFormat(ori_format);
    tensor_desc_input->SetShape(tensor_desc_input->GetOriginShape());
  }

  // Do the same for outputs; only the format is switched back here because the output
  // shape will be produced by shape inference afterwards.
  for (auto &ele : outputs_) {
    auto tensor_desc_output = op_desc_->MutableOutputDesc(ele.first);
    if (tensor_desc_output == nullptr) {
      continue;
    }
    auto format = tensor_desc_output->GetFormat();
    auto ori_format = tensor_desc_output->GetOriginFormat();
    if (format == ori_format) {
      GELOGD("Node is %s, output tensor name is %s. Ori format %s and format %s are the same, no need to catch format & shape!",
             op_desc_->GetName().c_str(), ele.first.c_str(), TypeUtils::FormatToSerialString(ori_format).c_str(),
             TypeUtils::FormatToSerialString(format).c_str());
      continue;
    }
    map_format_out_.insert(std::pair<std::string, Format>(ele.first, format));
    map_ori_format_out_.insert(std::pair<std::string, Format>(ele.first, ori_format));
    map_dtype_out_.insert(std::pair<std::string, DataType>(ele.first, tensor_desc_output->GetDataType()));
    tensor_desc_output->SetFormat(ori_format);
  }
  return true;
}
bool NodeShapeTransUtils::UpdateFormatAndShape() {
  for (auto &ele : inputs_) {
    auto tensor_desc_input = op_desc_->MutableInputDesc(ele.first);
    if (tensor_desc_input == nullptr) {
      continue;
    }
    // If no saved info is found, the format and origin format were already the same
    // when they were caught, so only the origin fields need to be synchronized.
    if (map_format_in_.find(ele.first) == map_format_in_.end()) {
      GELOGD("Node is [%s], input tensor name [%s] was not caught. Skip the update for it!",
             op_desc_->GetName().c_str(), ele.first.c_str());
      tensor_desc_input->SetOriginFormat(tensor_desc_input->GetFormat());
      tensor_desc_input->SetOriginShape(tensor_desc_input->GetShape());
      continue;
    }
    auto ori_format = tensor_desc_input->GetFormat();
    auto ori_shape = tensor_desc_input->GetShape();
    auto curr_format = map_format_in_[ele.first];
    if (ori_format == curr_format) {
      continue;
    }
    std::unique_ptr<common::transformer::ShapeTransferAccordingToFormat> shape_transfer(
        new (std::nothrow) common::transformer::ShapeTransferAccordingToFormat());
    if (shape_transfer == nullptr) {
      GELOGE(GRAPH_FAILED, "Memory alloc failed");
      return false;
    }
    // Transform the origin shape into the shape expected by the recorded format, then
    // restore the recorded format on the input tensor.
    std::vector<int64_t> ori_shape_dims = ori_shape.GetDims();
    std::vector<int64_t> out_dims;
    ge::DataType dtype = map_dtype_in_[ele.first];
    common::transformer::ShapeAndFormat shape_and_format_info{
        ori_shape_dims, out_dims, ori_format, curr_format, dtype, common::transformer::EN_IMPL_CUSTOM_TBE};
    shape_transfer->GetShapeAccordingToFormat(shape_and_format_info);
    tensor_desc_input->SetFormat(curr_format);
    tensor_desc_input->SetShape(GeShape(out_dims));
  }

  for (auto &ele : outputs_) {
    auto tensor_desc_output = op_desc_->MutableOutputDesc(ele.first);
    if (tensor_desc_output == nullptr) {
      continue;
    }
    // If no saved info is found, the format and origin format were already the same
    // when they were caught, so only the origin fields need to be synchronized.
    if (map_ori_format_out_.find(ele.first) == map_ori_format_out_.end()) {
      GELOGD("Node is [%s], output tensor name [%s] was not caught. Skip the update for it!",
             op_desc_->GetName().c_str(), ele.first.c_str());
      tensor_desc_output->SetOriginFormat(tensor_desc_output->GetFormat());
      tensor_desc_output->SetOriginShape(tensor_desc_output->GetShape());
      continue;
    }
    auto ori_shape = tensor_desc_output->GetShape();
    auto curr_format = tensor_desc_output->GetFormat();
    if (curr_format != map_ori_format_out_[ele.first]) {
      GELOGE(GRAPH_FAILED, "Node is %s, out tensor name is %s. Format %s and recorded origin format %s are not the same",
             op_desc_->GetName().c_str(), ele.first.c_str(), TypeUtils::FormatToSerialString(curr_format).c_str(),
             TypeUtils::FormatToSerialString(map_ori_format_out_[ele.first]).c_str());
      return false;
    }
    tensor_desc_output->SetOriginShape(ori_shape);
    auto saved_format = map_format_out_[ele.first];
    if (curr_format == saved_format) {
      GELOGD("Node is %s, out tensor name is %s. Ori format %s and recorded format %s are the same, no need to transfer",
             op_desc_->GetName().c_str(), ele.first.c_str(), TypeUtils::FormatToSerialString(curr_format).c_str(),
             TypeUtils::FormatToSerialString(saved_format).c_str());
      continue;
    }
    tensor_desc_output->SetFormat(saved_format);
    std::unique_ptr<common::transformer::ShapeTransferAccordingToFormat> shape_transfer(
        new (std::nothrow) common::transformer::ShapeTransferAccordingToFormat());
    if (shape_transfer == nullptr) {
      GELOGE(GRAPH_FAILED, "Memory alloc failed");
      return false;
    }
    // Transform the inferred origin shape into the shape expected by the recorded format.
    std::vector<int64_t> ori_shape_dims = ori_shape.GetDims();
    std::vector<int64_t> out_dims;
    ge::DataType dtype = tensor_desc_output->GetDataType();
    common::transformer::ShapeAndFormat shape_and_format_info{
        ori_shape_dims, out_dims, curr_format, saved_format, dtype, common::transformer::EN_IMPL_CUSTOM_TBE};
    shape_transfer->GetShapeAccordingToFormat(shape_and_format_info);
    tensor_desc_output->SetShape(GeShape(out_dims));
    GELOGD("Node is %s, out tensor name is %s. Update format and shape success, ori format: %s, format: %s",
           op_desc_->GetName().c_str(), ele.first.c_str(), TypeUtils::FormatToSerialString(curr_format).c_str(),
           TypeUtils::FormatToSerialString(saved_format).c_str());
  }
  GELOGD("Node is %s. Update format and shape success", op_desc_->GetName().c_str());
  return true;
}
}  // namespace ge
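
As a rough illustration of how the two functions above are meant to be combined, here is a minimal usage sketch. It rests on assumptions: the NodeShapeTransUtils constructor taking an OpDescPtr and the surrounding shape-inference step are inferred from this file, not shown in it.

// Hypothetical usage sketch (assumed constructor and call sequence, not part of this file):
ge::NodeShapeTransUtils trans_utils(op_desc);   // op_desc: a ge::OpDescPtr for the node (assumed)
if (!trans_utils.CatchFormatAndShape()) {       // record formats, switch tensors to origin format/shape
  return ge::GRAPH_FAILED;
}
// ... run the operator's shape inference here, which works on origin shapes (assumed external call) ...
if (!trans_utils.UpdateFormatAndShape()) {      // transform inferred shapes back to the recorded formats
  return ge::GRAPH_FAILED;
}

For intuition on what GetShapeAccordingToFormat produces: if an output was recorded with origin format NCHW and stored format NC1HWC0, the transformer is expected to split the channel axis into C1 x C0 blocks, so an origin shape of {1, 32, 224, 224} with a float16 dtype (C0 = 16) would become {1, 2, 224, 224, 16}. The exact C0 value depends on the data type and is decided inside the transformer, so treat these numbers as an assumption rather than a guarantee.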

The Graph Engine (GE) module is a submodule of MindSpore implemented in C++. It sits between the front-end module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training and inference, GE is invoked automatically and is transparent to the user.