!349 fix coding style check warning for auto parallel

Merge pull request !349 from chentingting/fix_coding_style_check_warning
Tag: v0.2.0-alpha
Merged by mindspore-ci-bot 5 years ago
Commit: 2961c6bc59
38 changed files with 15 additions and 59 deletions
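
The changes fall into two groups. Most of the deleted lines are blank lines that followed `namespace parallel {` or preceded the closing `} // namespace parallel`, which the style check evidently flags. The remaining hunks rewrite vector fill-construction (`Shape s(n, 1); s[k] = v;`) into explicit `emplace_back`/`insert` calls whose returned iterators are discarded with a `(void)` cast. Runnable sketches of these rewrite patterns follow the activation_info.cc, prelu_info.cc, and tensor_layout.cc hunks below.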
 1. +0 -1   mindspore/ccsrc/parallel/device_manager.cc
 2. +0 -1   mindspore/ccsrc/parallel/device_matrix.cc
 3. +0 -1   mindspore/ccsrc/parallel/dynamic_creator.h
 4. +2 -1   mindspore/ccsrc/parallel/ops_info/activation_info.cc
 5. +0 -1   mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc
 6. +2 -1   mindspore/ccsrc/parallel/ops_info/loss_info.cc
 7. +0 -1   mindspore/ccsrc/parallel/ops_info/operator_info.cc
 8. +4 -2   mindspore/ccsrc/parallel/ops_info/prelu_info.cc
 9. +3 -2   mindspore/ccsrc/parallel/ops_info/reshape_info.cc
10. +3 -3   mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc
11. +0 -1   mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h
12. +0 -1   mindspore/ccsrc/parallel/status.h
13. +0 -2   mindspore/ccsrc/parallel/step_auto_parallel.cc
14. +0 -1   mindspore/ccsrc/parallel/step_auto_parallel.h
15. +0 -2   mindspore/ccsrc/parallel/step_parallel.cc
16. +0 -1   mindspore/ccsrc/parallel/strategy.h
17. +0 -1   mindspore/ccsrc/parallel/tensor_layout/arrangement.cc
18. +0 -2   mindspore/ccsrc/parallel/tensor_layout/arrangement.h
19. +0 -1   mindspore/ccsrc/parallel/tensor_layout/array.cc
20. +0 -2   mindspore/ccsrc/parallel/tensor_layout/array.h
21. +0 -1   mindspore/ccsrc/parallel/tensor_layout/construct_operator.h
22. +0 -2   mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc
23. +0 -2   mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h
24. +0 -1   mindspore/ccsrc/parallel/tensor_layout/map.cc
25. +0 -1   mindspore/ccsrc/parallel/tensor_layout/map.h
26. +0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc
27. +0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h
28. +0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc
29. +0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h
30. +0 -1   mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc
31. +0 -2   mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h
32. +0 -2   mindspore/ccsrc/parallel/tensor_layout/shape_util.cc
33. +0 -2   mindspore/ccsrc/parallel/tensor_layout/shape_util.h
34. +0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_info.h
35. +1 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc
36. +0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h
37. +0 -1   mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc
38. +0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h

+0 -1   mindspore/ccsrc/parallel/device_manager.cc

@@ -370,6 +370,5 @@ void DeviceManager::Clear() {
   stage_devices_.clear();
   gm_.Clear();
 }
-
 } // namespace parallel
 } // namespace mindspore

+0 -1   mindspore/ccsrc/parallel/device_matrix.cc

@@ -29,7 +29,6 @@

 namespace mindspore {
 namespace parallel {
-
 DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape)
     : rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) {
   if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) {


+0 -1   mindspore/ccsrc/parallel/dynamic_creator.h

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 #define REGISTER(className) \
   OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
     return std::make_shared<className>(name, in, out, attrs); \


+2 -1   mindspore/ccsrc/parallel/ops_info/activation_info.cc

@@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
   }

   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   for (auto& element : axis_) {
     int32_t axis_index = element;
     if (element < 0) {

+0 -1   mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {


+2 -1   mindspore/ccsrc/parallel/ops_info/loss_info.cc

@@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;

-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   input0_split[IntToSize(axis_index)] = 0;
   Shapes splittable_inputs = {input0_split, input0_split};
   std::vector<StrategyPtr> sp_vector;


+0 -1   mindspore/ccsrc/parallel/ops_info/operator_info.cc

@@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra
 double OperatorInfo::GetForwardMemoryCostFromCNode() {
   return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0);
 }
-
 } // namespace parallel
 } // namespace mindspore

+4 -2   mindspore/ccsrc/parallel/ops_info/prelu_info.cc

@@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
-  input0_split[1] = 0;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  input0_split.emplace_back(0);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1);
   Shape input1_split(inputs_shape_[1].size(), 0);
   Shapes splittable_inputs = {input0_split, input1_split};
   std::vector<StrategyPtr> sp_vector;
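
The PReLU rewrite is the one place where the replacement is not a straight fill: it builds the split vector element by element, marking dim 1 as non-splittable (PReLU's weight is per-channel) before filling the remaining dims with 1. A worked sketch of the result, under the same `Shape = std::vector<int32_t>` assumption and a hypothetical 4-D input:

// Worked example, not part of the patch. Assumes Shape = std::vector<int32_t>.
#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  const size_t input_dims = 4;  // stand-in for inputs_shape_[0].size(), e.g. NCHW

  Shape input0_split;
  input0_split.emplace_back(1);  // dim 0 stays splittable
  input0_split.emplace_back(0);  // dim 1 (channel) is not
  (void)input0_split.insert(input0_split.end(), input_dims - 2, 1);

  // Same contents as the removed form:
  //   Shape old_form(input_dims, 1); old_form[1] = 0;
  assert((input0_split == Shape{1, 0, 1, 1}));
  return 0;
}

The reshape_info.cc and virtual_dataset_info.cc hunks below follow the same head-then-fill shape, with different head values.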


+3 -2   mindspore/ccsrc/parallel/ops_info/reshape_info.cc

@@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 0);
-  input0_split[0] = 1;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0);
   Shapes splittable_inputs = {input0_split};
   std::vector<StrategyPtr> sp_vector;
   if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {


+3 -3   mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {
@@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) {
   StrategyPtr sp;
   std::vector<Dimensions> strategy;
   for (auto& shape : inputs_shape_) {
-    Shape temp(shape.size(), 1);
-    temp[0] = SizeToInt(total_dev_num);
+    Shape temp;
+    temp.emplace_back(SizeToInt(total_dev_num));
+    (void)temp.insert(temp.end(), shape.size() - 1, 1);
     strategy.push_back(temp);
   }
   sp = std::make_shared<Strategy>(stage_id, strategy);


+0 -1   mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h

@@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo {
   Status GetAttrs() override;
   Status InferAsLossDivisor() override;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/status.h

@@ -21,7 +21,6 @@

 namespace mindspore {
 namespace parallel {
-
 enum Status {
   SUCCESS = 0,
   FAILED,


+0 -2   mindspore/ccsrc/parallel/step_auto_parallel.cc

@@ -477,7 +477,6 @@ Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const F
   bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                        (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                        (current_op_ptr->name().find(prim->name()) == std::string::npos);
-
   if (is_find_wrong) {
     MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                       << " does not match the Prim: " << prim->name();
@@ -937,7 +936,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const

   graph = EliminateGraph(graph, eli_list, index_list);
   size_t num_device = g_device_manager->DeviceNum();
-
   if (PartitionForAllDevices(num_device, graph) == SUCCESS) {
     MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
   } else {


+0 -1   mindspore/ccsrc/parallel/step_auto_parallel.h

@@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const

 std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                           std::vector<std::vector<std::string>> input_tensor_names);
-
 } // namespace parallel
 } // namespace mindspore
 #endif  // PARALLEL_STEP_AUTO_PARALLEL_H_

+0 -2   mindspore/ccsrc/parallel/step_parallel.cc

@@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph == nullptr) {
     return FindLossCNode(root);
   } else {
@@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph != nullptr) {
     forward_graph = func_graph;
   }


+0 -1   mindspore/ccsrc/parallel/strategy.h

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 #define MIN_SLICE_NUM 1

 using Dimensions = std::vector<int32_t>;


+0 -1   mindspore/ccsrc/parallel/tensor_layout/arrangement.cc

@@ -26,7 +26,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status Arrangement::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {


+0 -2   mindspore/ccsrc/parallel/tensor_layout/arrangement.h

@@ -28,7 +28,6 @@

 namespace mindspore {
 namespace parallel {
-
 class Arrangement : public Array {
  public:
   Arrangement() : size_(1) {}
@@ -53,7 +52,6 @@ class Arrangement : public Array {
   void ComputeSize();
   int32_t size_;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/tensor_layout/array.cc

@@ -21,7 +21,6 @@

 namespace mindspore {
 namespace parallel {
-
 std::string Array::ToString() const {
   std::ostringstream buffer;
   buffer << "[ ";


+0 -2   mindspore/ccsrc/parallel/tensor_layout/array.h

@@ -26,7 +26,6 @@

 namespace mindspore {
 namespace parallel {
-
 class Array {
  public:
   Array() = default;
@@ -43,7 +42,6 @@ class Array {
  protected:
   std::vector<int32_t> array_;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/tensor_layout/construct_operator.h

@@ -52,7 -52,6 @@ class ConstructOperator {
   Shape dev_matrix_shape_;
   Status CreateGroupByDim(size_t axis, std::vector<Group>* group);
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -2   mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc

@@ -20,7 +20,6 @@

 namespace mindspore {
 namespace parallel {
-
 std::string LayoutTransfer::ToString() const {
   std::ostringstream buffer;
   buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString());
@@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_
   Status status = CheckValidTransfer();
   return status;
 }
-
 } // namespace parallel
 } // namespace mindspore

+0 -2   mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h

@@ -23,7 +23,6 @@

 namespace mindspore {
 namespace parallel {
-
 class LayoutTransfer {
  public:
   LayoutTransfer() = default;
@@ -43,7 +42,6 @@ class LayoutTransfer {
  private:
   virtual Status CheckValidTransfer() = 0;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/tensor_layout/map.cc

@@ -26,7 +26,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status Map::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {


+0 -1   mindspore/ccsrc/parallel/tensor_layout/map.h

@@ -46,7 +46,6 @@ class Map : public Array {
  private:
   bool IsValidMap();
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc

@@ -21,7 +21,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; }

 /*
@@ -66,6 +65,5 @@ std::shared_ptr<ReshapeLayoutTransfer> RedistributionLayoutTransfer::UnifyDevice
   }
   return unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape();
 }
-
 } // namespace parallel
 } // namespace mindspore

+0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h

@@ -24,7 +24,6 @@

 namespace mindspore {
 namespace parallel {
-
 class RedistributionLayoutTransfer : public LayoutTransfer {
  public:
   RedistributionLayoutTransfer() = default;
@@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer {
   Status CheckValidTransfer() override;
   std::shared_ptr<ReshapeLayoutTransfer> UnifyDeviceArrangement() const;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc

@@ -22,7 +22,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map,
                                          RankList dev_list) {
   in_tensor_map_ = tensor_layout.tensor_map();
@@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) {
   }
   return Status::SUCCESS;
 }
-
 } // namespace parallel
 } // namespace mindspore

+0 -2   mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h

@@ -28,7 +28,6 @@
 #include "utils/convert_utils.h"
 namespace mindspore {
 namespace parallel {
-
 using DeviceArrangement = std::vector<int32_t>;
 using TensorMap = std::vector<int32_t>;
 using TensorShape = std::vector<int32_t>;
@@ -69,7 +68,6 @@ class RedistributionOperatorInfer {
   RankList dev_list_;
   bool construct_op_flag_;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc

@@ -20,7 +20,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status ReshapeLayoutTransfer::CheckValidTransfer() {
   if (!IsSameDeviceArrangement()) {
     return Status::FAILED;


+0 -2   mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h

@@ -23,7 +23,6 @@

 namespace mindspore {
 namespace parallel {
-
 class ReshapeLayoutTransfer : public LayoutTransfer {
  public:
   ReshapeLayoutTransfer() = default;
@@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer {
   bool FromTensorShapeCanBeExpandByTo() const;
   bool ToTensorShapeCanBeExpandByFrom() const;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -2   mindspore/ccsrc/parallel/tensor_layout/shape_util.cc

@@ -21,7 +21,6 @@

 namespace mindspore {
 namespace parallel {
-
 /*
  * example:
  * shape = [2, 8, 32]
@@ -260,6 +259,5 @@ Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& e
   }
   return status;
 }
-
 } // namespace parallel
 } // namespace mindspore

+0 -2   mindspore/ccsrc/parallel/tensor_layout/shape_util.h

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 /*
  * compute the accumulating product of all the values in shape from left to right,
  * the accumulating results are saved in shape_accum from left to right
@@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector<int64_t>& in_accum_reverse,
  * out = [2, 4, 2, 4, 8]
  */
 Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& expand, std::vector<int32_t>* out);
-
 } // namespace parallel
 } // namespace mindspore



+0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_info.h

@@ -28,7 +28,6 @@

 namespace mindspore {
 namespace parallel {
-
 using Shapes = std::vector<Shape>;

 class TensorInfo {
@@ -55,7 +54,6 @@ class TensorInfo {
   // reduce method's reduce dim
   std::vector<int32_t> reduce_dim_;
 };
-
 } // namespace parallel
 } // namespace mindspore



+1 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc

@@ -27,7 +27,6 @@

 namespace mindspore {
 namespace parallel {
-
 std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); }

 std::string TensorLayout::StandardToString() const {
@@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) {
     MS_LOG(ERROR) << "Index is out of the size of the tensor map!";
     return Status::FAILED;
   }
-  Shape shape = tensor_map_.array();
+  auto shape = tensor_map_.array();
   shape[index] = value;
   if (tensor_map_.Init(shape) == Status::FAILED) {
     MS_LOG(ERROR) << "Update tensor map failed!";

+0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h

@@ -30,7 +30,6 @@

 namespace mindspore {
 namespace parallel {
-
 class TensorLayout {
  public:
   TensorLayout() = default;
@@ -94,7 +93,6 @@ class TensorLayout {
   Map tensor_map_;
   Arrangement tensor_shape_;
 };
-
 } // namespace parallel
 } // namespace mindspore



+0 -1   mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc

@@ -24,7 +24,6 @@

 namespace mindspore {
 namespace parallel {
-
 Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) {
   from_origin_ = from;
   to_origin_ = to;


+0 -2   mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h

@@ -33,7 +33,6 @@

 namespace mindspore {
 namespace parallel {
-
 class TensorRedistribution {
  public:
   explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false)
@@ -83,7 +82,6 @@ class TensorRedistribution {
   bool construct_op_flag_;
   bool keep_reshape_;
 };
-
 } // namespace parallel
 } // namespace mindspore


