helper.h
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_

#include <vector>
#include <memory>
#include <utility>
#include <string>
#include <set>
#include <unordered_set>

#include "ir/func_graph.h"
#include "backend/session/kernel_graph.h"
#include "utils/ms_utils.h"
#include "backend/optimizer/common/pattern_engine.h"
namespace mindspore {
namespace opt {
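// The k*InputTensorNum / k*OutputNum constants below record the expected numbers of data (tensor)
// inputs and outputs for specific ops; optimizer passes use them when validating CNode arity.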
constexpr size_t kTransOpInputTensorNum = 1;
constexpr size_t kCastInputTensorNum = 1;
constexpr size_t kDependInputTensorNum = 2;
constexpr size_t kReluInputTensorNum = 1;
constexpr size_t kReluGradInputTensorNum = 2;
constexpr size_t kAddInputTensorNum = 2;
constexpr size_t kTupleGetItemInputTensorNum = 2;
constexpr size_t kConvInputTensorNum = 2;
constexpr size_t kRealDivInputTensorNum = 2;
constexpr size_t kSqrtInputTensorNum = 1;
constexpr size_t kMatMulInputTensorNum = 2;
constexpr size_t kMulInputTensorNum = 2;
constexpr size_t kSubInputTensorNum = 2;
constexpr size_t kAssignSubInputTensorNum = 2;
constexpr size_t kDropoutInputTensorNum = 1;
constexpr size_t kAssignInputTensorNum = 2;
constexpr size_t kConvBn1OutputNum = 3;
constexpr size_t kBn2ReluOutputNum = 4;
constexpr size_t kBnInputTensorNum = 5;
constexpr size_t kBnOutputNum = 5;
constexpr size_t kBN1OutputNum = 2;
constexpr size_t kBN2OutputNum = 3;
constexpr size_t kBN3OutputNum = 1;
constexpr size_t kBNGradInputTensorNum = 5;
constexpr size_t kBNGradOutputNum = 3;
constexpr size_t kBNGrad1OutputNum = 3;
constexpr size_t kBNGrad2OutputNum = 5;
constexpr size_t kBNGrad3OutputNum = 1;
constexpr size_t kBNTrainingReduceOutputNum = 2;
constexpr size_t kBNTrainingUpdateOutputNum = 5;
constexpr size_t kBNTrainingUpdateV2OutputNum = 3;
constexpr size_t kBNTrainingUpdateV3OutputNum = 5;
constexpr size_t kBNTrainingUpdateGradOutputNum = 2;
constexpr size_t kSingleOutputNum = 1;
constexpr size_t kSumNodeInputTensorNum = 1;
constexpr size_t kSquareNodeInputTensorNum = 1;
constexpr size_t kSquareSumv2OutputNum = 2;
constexpr size_t kMinimumInputTensorNum = 2;
constexpr size_t kLambNextMVWithDecayInputNum = 7;
constexpr size_t kLambNextMVWithDecayConstantMulInputNum = 5;
constexpr size_t kLambNextMVWithDecayOutputNum = 4;
constexpr size_t kLambNextMVWithDecayV1OutputNum = 4;
constexpr size_t kLambNextRightOutputNum = 2;
constexpr size_t kLambUpdateWithLrV2InputNum = 8;
constexpr size_t kLambNextMVRuleInputNum = 14;
constexpr size_t kLambNextMVRuleOutputNum = 4;
constexpr size_t kBackendReshapeInputTensorNum = 1;
constexpr size_t kBackendTransposeInputTensorNum = 1;
constexpr size_t kAdamApplyOneWithDecayOutputNum = 3;
constexpr size_t kLayerNormBetaGammaBackpropInputTensorNum = 4;
constexpr size_t kLayerNormBetaGammaBackpropOutputNum = 2;
constexpr size_t kLayerNormGradInputTensorNum = 5;
constexpr size_t kAdamApplyOneOutputNum = 3;
constexpr size_t kApplyMomentumInputTensorNum = 5;
constexpr size_t kBiasAddInputTensorNum = 2;
constexpr size_t kTopkInputTensorNum = 2;
constexpr size_t kLarsV2InputTensorNum = 4;
constexpr size_t kFusedMulApplyMomentumOutputNum = 2;
constexpr size_t kSplitInputTensorNum = 1;
constexpr size_t kGatherV2DynInputTensorNum = 3;
constexpr size_t kUnsortedSegmentSumInputTensorNum = 2;
constexpr size_t kSoftmaxCrossEntropyWithLogitsOutputNum = 2;
constexpr size_t kSparseSoftmaxCrossEntropyWithLogitsInputTensorNum = 2;
constexpr size_t kOneHotOutputNum = 1;
constexpr size_t kOneHotInputTensorNum = 4;
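// Positional indices into FusedBatchNorm inputs/outputs and ConvBn1 outputs.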
enum FusedBatchNormInput {
  kX = 1,
  kVariance = 5,
};
enum FusedBatchNormOutput {
  kY = 0,
  kRunningMean,
  kRunningVariance,
  kSaveMean,
  kSaveInvVariance,
};
enum ConvBn1Output {
  kData = 0,
  kVarPart,
  kMean,
};
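// Graph-manipulation helpers shared by backend optimizer passes are declared below. As a rough,
// illustrative sketch (not part of this header), a fusion pass would typically validate arity and
// expand a multi-output node along these lines:
//   CheckCNodeInputSize(bn_cnode, kBnInputTensorNum);
//   std::vector<AnfNodePtr> bn_outputs;
//   CreateMultipleOutputsOfAnfNode(graph, bn_cnode, kBnOutputNum, &bn_outputs);
// and then rewire individual consumers with CreatTupleGetItemNode.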
std::vector<int64_t> Convert2Int(const std::vector<size_t> &v);
std::vector<int64_t> Convert2Long(const std::vector<size_t> &v);
// Check whether `node` depends on any of `nodes`.
bool IsDepend(const FuncGraph &graph, const AnfNodePtr &node, const std::vector<AnfNodePtr> &nodes);
bool UnVisited(const BaseRef &n);
bool Visited(const BaseRef &n);
// Check whether the input node is a CNode and whether its input size matches; return the CNodePtr on success.
CNodePtr CheckAnfNodeIfCNodeAndInputSize(const AnfNodePtr &node, size_t input_size);
void CheckCNodeInputSize(const CNodePtr &cnode, size_t input_tensor_num);
bool HasSymmetricalKernelInfo(const AnfNodePtr &node_x, const AnfNodePtr &node_y);
const AnfNodePtr EliminateDependTransop(const FuncGraphPtr &func_graph, const AnfNodePtr &node);
void CreateOutputsOfConvBn1(const FuncGraphPtr &func_graph, const CNodePtr &conv_cnode, const CNodePtr &bn_cnode,
                            std::vector<AnfNodePtr> *conv_bn1_outputs);
void CreateOutputsOfFusedBn2(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &fused_bn1_outputs,
                             const CNodePtr &bn_node, std::vector<AnfNodePtr> *fused_bn2_outputs);
void CreateOutputsOfFusedBn3(const FuncGraphPtr &graph, const AnfNodePtr &data_input,
                             const std::vector<AnfNodePtr> &fused_bn1_outputs,
                             const std::vector<AnfNodePtr> &fused_bn2_outputs, const CNodePtr &bn_node,
                             std::vector<AnfNodePtr> *fused_bn3_outputs);
void CreateMultipleOutputsOfAnfNode(const FuncGraphPtr &kernel_graph, const AnfNodePtr &anf_node_ptr, size_t output_num,
                                    std::vector<AnfNodePtr> *outputs);
tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr,
                                             size_t data_length);
tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple);
bool IsAllNopNode(const session::KernelGraph *const graph);
bool IsNopNode(const AnfNodePtr &node);
void HideNopNode(session::KernelGraph *const graph);
void RemoveNopNode(session::KernelGraph *const graph);
CNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx);
bool IsUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node);
std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
                                                                             const AnfNodePtr &node);
size_t GetRealNodeNum(const FuncGraphPtr &graph, const AnfNodePtr &node);
std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOutputIdx(const FuncGraphPtr &graph,
                                                                                        const AnfNodePtr &node,
                                                                                        size_t output_index);
bool IsNotRealUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node);
void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set<size_t> &input_attrs);
bool AnfEqual(const BaseRef &a, const BaseRef &b);
bool CNodeTypeEqual(const BaseRef &a, const BaseRef &b);
AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars,
                      bool multigraph = false);
// Check whether var_node is bound to the same node in both equivs.
bool IsSameNode(const EquivPtr &equiv1, const EquivPtr &equiv2, const VarPtr &var_node);
// Get the anf_node bound to var_node in equiv.
AnfNodePtr GetAnfNodeByVar(const EquivPtr &equiv, const VarPtr &var_node);
// Compare TupleGetItem indices; return true if n1's index is less than n2's index.
bool CompareTupleGetitem(const AnfNodePtr &n1, const AnfNodePtr &n2);
// Get a bool attribute from a cnode.
bool GetBoolAttr(const AnfNodePtr &node, const std::string &attr_name);
// Check whether the node's data type is in the supported data type set.
bool CheckSupportDataType(const AnfNodePtr &node, const std::set<TypeId> &supported_data_type_set);
// Create a new value node for a func graph, not a kernel graph.
ValueNodePtr MakeValueNode(const ValueNodePtr &value_node);
// Transfer a depend or control_depend edge from the old node to the new node.
void TransferDepend(const CNodePtr &old_node, const FuncGraphPtr &graph, const CNodePtr &new_node);
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_