/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INLINE_H_
#define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INLINE_H_

#include <vector>
#include <utility>
#include <algorithm>
#include <functional>
#include <iterator>
#include <numeric>

#include "utils/hash_map.h"
#include "frontend/optimizer/irpass.h"
#include "include/common/utils/parallel_context.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/optimizer/anf_visitor.h"
#include "ir/func_graph.h"
#include "ir/func_graph_cloner.h"
#include "ir/tensor.h"
#include "frontend/operator/ops.h"
#include "abstract/abstract_value.h"
#include "include/common/utils/utils.h"

namespace mindspore {
namespace opt {
namespace irpass {
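// Replaces a reference to a func graph G whose body only forwards its own
// parameters to an inner callee f, i.e. G(xs) = f(xs), with f itself (a form
// of eta reduction). Only applies when f is a Primitive or a parentless
// FuncGraph.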
class ReplaceApplicator : public AnfVisitor {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    if (!IsValueNode<FuncGraph>(node)) {
      return nullptr;
    }

    auto fg = GetValueNode<FuncGraphPtr>(node);
    if (fg->has_flag(FUNC_GRAPH_FLAG_DEFER_INLINE) || fg->stub() || *(fg->switch_input()) ||
        *(fg->switch_layer_input())) {
      return nullptr;
    }
    // Defer inlining in the case of pipeline.
    auto stage_num = parallel::ParallelContext::GetInstance()->pipeline_stage_split_num();
    if (fg->stage() != -1 && stage_num > 1) {
      return nullptr;
    }
    // Defer inlining to get the output nodes of the recomputed cell whose output is non-recomputed.
    if (fg->has_flag(FUNC_GRAPH_OUTPUT_NO_RECOMPUTE)) {
      return nullptr;
    }

    auto out = fg->output();
    MS_EXCEPTION_IF_NULL(out);
    if (!out->isa<CNode>()) {
      return nullptr;
    }

    auto &inputs = out->cast<CNodePtr>()->inputs();
    auto params = fg->parameters();
    // Exclude the first element of inputs, which is fn.
    auto input_size = inputs.size();
    auto param_size = params.size();
    if ((input_size == 1 && param_size == 0) ||
        (input_size > 1 && (input_size - 1) == param_size &&
         std::equal(inputs.begin() + 1, inputs.end(), params.begin()))) {
      auto inner = inputs[0];
      if (IsValueNode<Primitive>(inner) ||
          (IsValueNode<FuncGraph>(inner) && GetValueNode<FuncGraphPtr>(inner)->parent() == nullptr)) {
        return inner;
      }
    }
    return nullptr;
  }
};
class InlinerBase;
using CriterionFuncType = std::function<bool(InlinerBase *, const FuncGraphPtr &, const AnfNodePtr &)>;

bool IsUniqueUse(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &);
bool IsTrivial(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &);
bool IsInside(InlinerBase *, const FuncGraphPtr &, const AnfNodePtr &node);
bool IsCore(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &);
bool IsDirectParentCall(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &node);
bool IsNotRecursive(InlinerBase *inliner, const FuncGraphPtr &fg, const AnfNodePtr &);

bool IsForceInline(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &) {
  return fg->has_flag(FUNC_GRAPH_FLAG_FORCE_INLINE);
}

// {G, Xs}
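// Matches a call CNode whose first input G is a ValueNode<FuncGraph> and whose
// remaining inputs Xs are the call arguments; G is inlined into its caller when
// one of the criterion groups passed to the constructor accepts it.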
class InlinerBase : public AnfVisitor {
 public:
  explicit InlinerBase(const std::vector<std::vector<CriterionFuncType>> &criterions, bool use_move = true)
      : use_move_(use_move), criterions_(criterions) {}
  ~InlinerBase() override = default;

  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    auto cnode = dyn_cast<CNode>(node);
    if (cnode == nullptr || cnode->size() < 1) {
      return nullptr;
    }

    auto &inputs = cnode->inputs();
    // G
    auto fg = GetValueNode<FuncGraphPtr>(inputs[0]);
    if (fg == nullptr || fg->has_flag(FUNC_GRAPH_FLAG_DEFER_INLINE) || fg->stub()) {
      return nullptr;
    }
    // Defer inlining in the case of pipeline.
    auto stage_num = parallel::ParallelContext::GetInstance()->pipeline_stage_split_num();
    if (fg->stage() != -1 && stage_num > 1) {
      return nullptr;
    }
    // Defer inlining to get the output nodes of the recomputed cell whose output is non-recomputed.
    if (fg->has_flag(FUNC_GRAPH_OUTPUT_NO_RECOMPUTE)) {
      return nullptr;
    }

    Reset();

    // 'criterions_': {criterion_group_1: {criterion1, criterion2, ...}, criterion_group_2: {...}, ...}
    // A criterion group is satisfied only if all of its criteria hold (AND);
    // the node matches if any criterion group in 'criterions_' is satisfied (OR).
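    // For example, with criterions_ = {{IsUniqueUse, IsNotRecursive}, {IsForceInline}},
    // a graph would be inlined if it is uniquely used AND not recursive, OR if it
    // carries the force-inline flag.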
    bool is_match = ApplyCriterions(node, fg);
    if (!is_match) {
      return nullptr;
    }

    std::vector<AnfNodePtr> args;
    (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(args));
    // Compare sizes to avoid the case where the function has a default value after grad,
    // for which, after renormalization, the default value becomes an input.
    if (fg->parameters().size() != args.size()) {
      return nullptr;
    }

    if (IsForceInline(this, fg, node)) {
      if (IsUniqueUse(nullptr, fg, nullptr)) {
        return InlineMove(node, fg, args, inputs);
      }
      return InlineClone(fg, node->func_graph(), args, inputs[0]->scope());
    }

    if (IsUniqueUse(nullptr, fg, nullptr)) {
      // For a uniquely used fg, including non-after blocks and after blocks not matched above,
      // move the whole set of fg nodes.
      auto ret_node = InlineForUniqueUse(node, fg, args, inputs);
      if (ret_node != nullptr) {
        return ret_node;
      }
    } else {
      // Don't expand a multiply used after-block in the middle, except the last one.
      if (GraphHasBranch(fg)) {
        return nullptr;
      }
      // Check whether the parameters changed for the first branch calling encountered.
      if (fg->has_flag(FUNC_GRAPH_FLAG_AFTER_BLOCK)) {
        auto param_simplified_caller = SimplifyAfterParameter(fg, node, args);
        if (param_simplified_caller != nullptr) {
          return param_simplified_caller;
        }
      }
    }
    // Otherwise, just clone the fg that is not uniquely used.
    MS_LOG(DEBUG) << "Run InlineClone in inline pass, subgraph number may increase.";
    return InlineClone(fg, node->func_graph(), args, inputs[0]->scope());
  }

  AnfNodePtr InlineMove(const AnfNodePtr &node, const FuncGraphPtr &fg, const std::vector<AnfNodePtr> &args,
                        const std::vector<AnfNodePtr> &inputs) {
    auto mng = fg->manager();
    MS_EXCEPTION_IF_NULL(mng);
    ReplaceParams(mng, args, fg);
    auto out_node = fg->output();
    mng->MoveAllCNodeDropGraph(fg, node->func_graph(), inputs[0]->scope());
    return out_node;
  }

  AnfNodePtr InlineForUniqueUse(const AnfNodePtr &node, const FuncGraphPtr &fg, const std::vector<AnfNodePtr> &args,
                                const std::vector<AnfNodePtr> &inputs) {
    if (use_move_) {
      return InlineMove(node, fg, args, inputs);
    }

    // The other branch calling the last after-block.
    if (fg->has_flag(FUNC_GRAPH_FLAG_AFTER_BLOCK)) {
      // Check whether the parameters changed.
      auto param_simplified_caller = SimplifyAfterParameter(fg, node, args);
      if (param_simplified_caller != nullptr) {
        return param_simplified_caller;
      }
    }
    return nullptr;
  }

  bool ApplyCriterions(const AnfNodePtr &node, const FuncGraphPtr &fg) {
    bool is_match = false;
    for (auto &criterions : criterions_) {  // Each 'criterion group' in criterions_.
      is_match = true;
      for (auto &criterion : criterions) {  // Each criterion in a 'criterion group'.
        if (!criterion(this, fg, node)) {
          is_match = false;
          break;
        }
      }
      if (is_match) {
        break;
      }
    }
    return is_match;
  }
  void ReplaceParams(const FuncGraphManagerPtr &mng, const std::vector<AnfNodePtr> &new_params,
                     const FuncGraphPtr &fg) {
    auto params = fg->parameters();
    auto old_size = params.size();
    if (old_size != new_params.size()) {
      MS_LOG(EXCEPTION) << "Parameter size not match." << old_size << " new " << new_params.size()
                        << fg->output()->DebugString(10);
    }
    for (size_t i = 0; i < old_size; i++) {
      (void)mng->Replace(params[i], new_params[i]);
    }
  }

  bool IsRecursive(const FuncGraphPtr &fg) {
    if (!is_checked_) {
      is_checked_ = true;
      is_recursive_ = fg->recursive();
    }
    return is_recursive_;
  }

  void Reset() {
    is_checked_ = false;
    is_recursive_ = false;
  }

  // For an after-block that contains a branch call, delete the parameters that are not used.
  // In most cases they are a `Module` or another constant input.
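  // For example, a call fg(m, x) where fg never uses its first parameter is
  // rewritten into a call new_fg(x), where new_fg is a clone of fg that keeps
  // only the used parameter; the unused argument is dropped at the call site.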
  AnfNodePtr SimplifyAfterParameter(const FuncGraphPtr &fg, const AnfNodePtr &node,
                                    const std::vector<AnfNodePtr> &args) {
    auto &fg_params = fg->parameters();
    std::vector<int64_t> used_param_index;
    auto mng = fg->manager();
    MS_EXCEPTION_IF_NULL(mng);
    bool should_simplify = false;
    for (size_t i = 0; i < fg_params.size(); i++) {
      if (mng->node_users()[fg_params[i]].size() != 0) {
        used_param_index.emplace_back(i);
      } else {
        MS_LOG(DEBUG) << "Not used parameter " << fg_params[i]->DebugString() << " for calling " << fg->ToString();
        should_simplify = true;
      }
    }
    if (!should_simplify) {
      return nullptr;
    }
    MS_LOG(DEBUG) << "Parameter not used found for graph :" << fg->ToString();

    // Clone a new graph and ignore the unused parameters.
    auto new_fg = TransformableClone(fg);
    auto &new_fg_params = new_fg->parameters();
    std::vector<AnfNodePtr> new_params;
    std::transform(used_param_index.begin(), used_param_index.end(), std::back_inserter(new_params),
                   [&new_fg_params](size_t i) { return new_fg_params[i]; });
    new_fg->set_parameters(new_params);

    std::vector<AnfNodePtr> node_inputs;
    node_inputs.push_back(NewValueNode(new_fg));
    std::transform(used_param_index.begin(), used_param_index.end(), std::back_inserter(node_inputs),
                   [&args](size_t i) { return args[i]; });
    return node->func_graph()->NewCNode(node_inputs);
  }

  bool CheckSwitchBranchAbstract(const AbstractBasePtr &branch_abstract) {
    if (branch_abstract != nullptr && branch_abstract->isa<abstract::AbstractError>()) {
      auto branch_abstract_value = branch_abstract->GetValueTrack();
      MS_EXCEPTION_IF_NULL(branch_abstract_value);
      auto branch_abstract_value_string_imm = branch_abstract_value->cast<StringImmPtr>();
      if (branch_abstract_value_string_imm != nullptr) {
        auto branch_abstract_value_string_imm_value = branch_abstract_value_string_imm->value();
        return branch_abstract_value_string_imm_value == kDeadNodeName ||
               branch_abstract_value_string_imm_value == kPolyNodeName;
      }
    }
    return false;
  }

  bool CheckSwitchInputs(const std::vector<AnfNodePtr> &sw_inputs) {
    auto true_branch_abstract = sw_inputs[kSwitchTrueBranchIndex]->abstract();
    auto false_branch_abstract = sw_inputs[kSwitchFalseBranchIndex]->abstract();
    // When a branch has a dead node or a poly node, do not perform inlining.
    if (CheckSwitchBranchAbstract(true_branch_abstract) || CheckSwitchBranchAbstract(false_branch_abstract)) {
      return true;
    }
    return !sw_inputs[1]->isa<ValueNode>() || IsValueNode<tensor::Tensor>(sw_inputs[1]);
  }

  // This is a best-effort algorithm to find a graph which may generate a branch call.
  // It does not handle high-order function calls; a branch made through a high-order call may still be inlined.
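  // Results are memoized per graph in graph_branch_cache_; graphs reached via
  // direct graph calls and Partial applications are analyzed recursively.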
  bool GraphHasBranch(const FuncGraphPtr &fg) {
    if (graph_branch_cache_.find(fg) != graph_branch_cache_.end()) {
      return graph_branch_cache_[fg];
    }
    bool has_branch = false;
    auto nodes = fg->nodes();
    for (auto &item : nodes) {
      if (IsPrimitiveCNode(item, prim::kPrimSwitch)) {
        auto sw_inputs = item->cast<CNodePtr>()->inputs();
        if (sw_inputs.size() != 4) {
          MS_LOG(EXCEPTION) << "switch inputs should be 4";
        }
        if (CheckSwitchInputs(sw_inputs)) {
          has_branch = true;
          break;
        }
      } else if (IsCNodeGraph(item)) {
        auto cinputs = item->cast<CNodePtr>()->inputs();
        if (cinputs.size() < 1) {
          MS_LOG(EXCEPTION) << "graph call inputs should be greater than 1";
        }
        FuncGraphPtr call_fg = GetValueNode<FuncGraphPtr>(cinputs[0]);
        bool call_fg_has_branch = GraphHasBranch(call_fg);
        if (call_fg_has_branch) {
          has_branch = true;
          break;
        }
      } else if (IsPrimitiveCNode(item, prim::kPrimPartial)) {
        auto cinputs = item->cast<CNodePtr>()->inputs();
        if (cinputs.size() < 2) {
          MS_LOG(EXCEPTION) << "partial call inputs should be greater than 2";
        }
        FuncGraphPtr call_fg = GetValueNode<FuncGraphPtr>(cinputs[1]);
        if (call_fg == nullptr) {
          continue;
        }
        bool call_fg_has_branch = GraphHasBranch(call_fg);
        if (call_fg_has_branch) {
          has_branch = true;
          break;
        }
      }
    }
    graph_branch_cache_[fg] = has_branch;
    return has_branch;
  }

 private:
  bool is_checked_{false}, is_recursive_{false};
  bool use_move_;
  std::vector<std::vector<CriterionFuncType>> criterions_;
  mindspore::HashMap<FuncGraphPtr, bool> graph_branch_cache_;
};
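
// A graph is uniquely used when the reference counts over all call sites
// recorded in func_graph_cnodes_index() sum to exactly 1.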
bool IsUniqueUse(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &) {
  const auto &users = fg->func_graph_cnodes_index();
  int64_t n_use = std::accumulate(
    users.begin(), users.end(), 0,
    [](int64_t sum, const std::pair<const CNodeIndexPairPtr, int64_t> &item) { return sum + item.second; });
  return n_use == 1;
}

bool IsTrivial(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &) {
  auto n_cnode = fg->nodes().size() - fg->parameters().size();
  // Not counting parameters, fg contains at least the return CNode;
  // it is trivial if there is at most one other node.
  return n_cnode <= 2;
}

bool IsInside(InlinerBase *, const FuncGraphPtr &, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node->func_graph());
  return node->func_graph()->has_flag("inline_inside");
}

bool IsCore(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &) { return fg->has_flag("core"); }

bool IsDirectParentCall(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &node) {
  bool unique_use = IsUniqueUse(nullptr, fg, nullptr);
  bool is_recursive = fg->recursive();
  if (fg->parent() != nullptr && is_recursive) {
    if (fg->parent() == node->func_graph() && unique_use) {
      return true;
    }
  }
  return false;
}

bool IsNotRecursive(InlinerBase *inliner, const FuncGraphPtr &fg, const AnfNodePtr &) {
  return !inliner->IsRecursive(fg);
}
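
// Inliner applies the general criterion groups: a callee is inlined when it is
// trivial, marked "inline_inside" or "core", not recursive, or a uniquely used
// direct parent call. DirectInliner below handles only force-inline and direct
// parent calls.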
class Inliner : public InlinerBase {
 public:
  explicit Inliner(bool use_move = true)
      : InlinerBase(
          // Supports AND conditions within one criterion group, e.g. {IsUniqueUse, IsNotRecursive}.
          {
            {IsTrivial},
            {IsInside},
            {IsCore},
            {IsNotRecursive},
            {IsDirectParentCall},
          },
          use_move) {}
  ~Inliner() override = default;
};

class DirectInliner : public InlinerBase {
 public:
  explicit DirectInliner(bool use_move = true)
      : InlinerBase(
          // Supports AND conditions within one criterion group, e.g. {IsUniqueUse, IsNotRecursive}.
          {
            {IsForceInline},
            {IsDirectParentCall},
          },
          use_move) {}
  ~DirectInliner() override = default;
};
}  // namespace irpass
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INLINE_H_