You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

step_parallel_utils.cc 5.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149
  1. /**
  2. * Copyright 2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "frontend/parallel/step_parallel_utils.h"
  17. #include <inttypes.h>
  18. #include <sys/time.h>
  19. #include <algorithm>
  20. #include <map>
  21. #include <set>
  22. #include <string>
  23. #include <unordered_map>
  24. #include <utility>
  25. #include "base/core_ops.h"
  26. #include "frontend/operator/ops.h"
  27. #include "frontend/optimizer/optimizer.h"
  28. #include "frontend/parallel/context.h"
  29. #include "frontend/parallel/device_manager.h"
  30. #include "frontend/parallel/graph_util/generate_graph.h"
  31. #include "frontend/parallel/graph_util/graph_info.h"
  32. #include "frontend/parallel/graph_util/node_info.h"
  33. #include "frontend/parallel/node_check.h"
  34. #include "ir/param_info.h"
  35. #include "ir/tensor.h"
  36. #include "utils/trace_base.h"
  37. #include "utils/comm_manager.h"
  38. #include "utils/ms_context.h"
  39. #include "utils/symbolic.h"
  40. #include "mindspore/core/utils/parallel_node_check.h"
  41. namespace mindspore {
  42. namespace parallel {
  43. bool IsSomePrimitive(const CNodePtr &cnode, const std::string &name) {
  44. if (!cnode) {
  45. return false;
  46. }
  47. ValueNodePtr anf_node = cnode->input(0)->cast<ValueNodePtr>();
  48. MS_EXCEPTION_IF_NULL(anf_node);
  49. PrimitivePtr prim = anf_node->value()->cast<PrimitivePtr>();
  50. return (prim->name() == name);
  51. }
  52. bool IsParallelCareNode(const CNodePtr &cnode) {
  53. MS_EXCEPTION_IF_NULL(cnode);
  54. ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  55. if (prim_node == nullptr) {
  56. return false;
  57. }
  58. PrimitivePtr prim = prim_node->value()->cast<PrimitivePtr>();
  59. if (prim == nullptr) {
  60. return false;
  61. }
  62. if (IsInParallelBlackList(prim)) {
  63. MS_LOG(DEBUG) << "Parallel don't care node: " << prim->name();
  64. return false;
  65. }
  66. // get_next is not in the forward graph, we need mark the get_next as the forward node
  67. if (prim->name() == GET_NEXT || prim->name() == VIRTUAL_OUTPUT) {
  68. return true;
  69. }
  70. if ((prim->name() == CAST) && !cnode->has_user_data<OperatorInfo>()) {
  71. return false;
  72. }
  73. return cnode->in_forward_flag();
  74. }
  75. Shapes GetValueListShape(const AnfNodePtr &node) {
  76. Shapes shapes;
  77. std::vector<ValuePtr> inputs_seq;
  78. if (IsValueNode<ValueList>(node)) {
  79. inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
  80. } else if (IsValueNode<ValueTuple>(node)) {
  81. inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
  82. } else {
  83. MS_LOG(EXCEPTION) << "node is eigther ValueList or ValueTuple";
  84. }
  85. for (auto &ele : inputs_seq) {
  86. auto tensor = ele->cast<tensor::TensorPtr>();
  87. MS_EXCEPTION_IF_NULL(tensor);
  88. auto one_shape = tensor->shape();
  89. shapes.push_back(one_shape);
  90. }
  91. return shapes;
  92. }
  93. Shapes GetNodeShape(const AnfNodePtr &node) {
  94. MS_EXCEPTION_IF_NULL(node);
  95. Shapes shapes;
  96. if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
  97. return GetValueListShape(node);
  98. }
  99. BaseShapePtr base_shape_ptr = node->Shape();
  100. if (node->isa<CNode>()) {
  101. auto cnode = node->cast<CNodePtr>();
  102. if (IsValueNode<Primitive>(cnode->input(0))) {
  103. PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  104. MS_EXCEPTION_IF_NULL(prim);
  105. if (prim->name() == MAKEREF) {
  106. AnfNodePtr ref_node = cnode->input(1);
  107. auto func_graph = cnode->func_graph();
  108. MS_EXCEPTION_IF_NULL(ref_node);
  109. MS_EXCEPTION_IF_NULL(func_graph);
  110. return GetRefKeyNodeShape(ref_node, func_graph);
  111. }
  112. }
  113. if (cnode->input(0)->isa<CNode>()) {
  114. if (cnode->inputs().size() < 2) {
  115. MS_LOG(EXCEPTION) << "GetNodeShape: " << node->ToString() << " size is smaller than 2";
  116. }
  117. base_shape_ptr = cnode->input(1)->Shape();
  118. }
  119. }
  120. if (base_shape_ptr == nullptr) {
  121. MS_LOG(EXCEPTION) << "GetNodeShape: " << node->ToString() << " shape_ptr is nullptr, full name is "
  122. << node->fullname_with_scope();
  123. }
  124. auto tuple_shape_ptr = dyn_cast<abstract::SequeueShape>(base_shape_ptr);
  125. if (tuple_shape_ptr != nullptr) {
  126. auto tuple_shape = tuple_shape_ptr->shape();
  127. for (auto &shape : tuple_shape) {
  128. auto each_shape = dyn_cast<abstract::Shape>(shape);
  129. MS_EXCEPTION_IF_NULL(each_shape);
  130. shapes.push_back(each_shape->shape());
  131. }
  132. } else {
  133. auto shape_ptr = dyn_cast<abstract::Shape>(base_shape_ptr);
  134. MS_EXCEPTION_IF_NULL(shape_ptr);
  135. shapes.push_back(shape_ptr->shape());
  136. }
  137. return shapes;
  138. }
  139. } // namespace parallel
  140. } // namespace mindspore