Browse Source

!5229 [AutoParallel]Fix CodeDex

Merge pull request !5229 from lichen/fix_code_index
tags/v1.0.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
3725062582
4 changed files with 3 additions and 5 deletions
  1. +0
    -1
      mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc
  2. +0
    -2
      mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
  3. +3
    -1
      mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc
  4. +0
    -1
      mindspore/ccsrc/frontend/parallel/step_parallel.cc

+ 0
- 1
mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc View File

@@ -26,7 +26,6 @@

namespace mindspore {
namespace parallel {

// Compute redistributed cost
double CostRedis(const Graph::NodeType &node,
const std::vector<std::pair<std::string, StrategyRec>> &node_name_to_strategy,


+ 0
- 2
mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc View File

@@ -746,7 +746,6 @@ Strategys CheckBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops,

size_t first_tensor_dim = ops[iter_ops]->inputs_tensor_info()[0].shape().size();
size_t second_tensor_dim = ops[iter_ops]->inputs_tensor_info()[1].shape().size();

// Do Broadcasting in the second tensor.
if (second_tensor_dim < first_tensor_dim) {
bool braoadcast_first_tensor = false;
@@ -964,7 +963,6 @@ void GenerateEliminatedOperatorStrategyBackward(const std::vector<std::shared_pt
auto iter_ops = no_stra_op_list->at(iter_list - 1);
Strategys stra;
Dimensions s = CopyOutgoingOperatorInputStrategy(ops, input_tensor_names, iter_ops);

if (s.size() != 0 && ops[iter_ops]->type() == SQUEEZE) {
s = ModifyStrategyIfSqueezeOutgoing(ops, iter_ops, s);
}


+ 3
- 1
mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc View File

@@ -204,7 +204,9 @@ Status PartitionForAllDevices(const size_t num_device, const double device_memor

// Compute iter times
int iter_times = static_cast<int>(log2(num_device));

if (iter_times > 10) {
MS_LOG(EXCEPTION) << "ERROR: Number of iter_times can't be larger than 10.";
}
// N-cuts loop
for (int loop = 0; loop < iter_times; loop++) {
// Sort by weights


+ 0
- 1
mindspore/ccsrc/frontend/parallel/step_parallel.cc View File

@@ -341,7 +341,6 @@ void Redistribution(const std::pair<AnfNodePtr, int> &node_pair, const OperatorI
TensorInfo tensorinfo_out = next_distribute_operator->inputs_tensor_info()[IntToSize(index - 1)];
TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
TensorLayout tensorlayout_in = GetTensorInLayout(middle_node, middle_prim, distribute_operator);

if (tensor_redistribution.Init(tensorlayout_in, tensorlayout_out, dev_list) == FAILED) {
MS_LOG(ERROR) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim : " << next_prim_name;
MS_LOG(ERROR) << "Redistribution: middle_node " << middle_node->ToString() << " next_node "


Loading…
Cancel
Save