Merge pull request !349 from chentingting/fix_coding_style_check_warning
@@ -370,6 +370,5 @@ void DeviceManager::Clear() {
   stage_devices_.clear();
   gm_.Clear();
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -29,7 +29,6 @@
 namespace mindspore {
 namespace parallel {
-
 DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape)
     : rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) {
   if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) {
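Note: the constructor guard above uses std::any_of to verify that the calling rank appears in its own device list. A minimal standalone sketch of the same membership test, assuming RankList aliases std::vector<int32_t> as elsewhere in this diff:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

using RankList = std::vector<int32_t>;  // assumption: mirrors the alias used by DeviceMatrix

int main() {
  RankList dev_list = {0, 1, 2, 3};
  int32_t rank = 5;
  // Same check as the constructor: is `rank` a member of dev_list?
  bool in_list = std::any_of(dev_list.begin(), dev_list.end(), [rank](int32_t a) { return a == rank; });
  if (!in_list) {
    std::cout << "rank " << rank << " is not in its device list" << std::endl;
  }
  return 0;
}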
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define REGISTER(className) \
   OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
     return std::make_shared<className>(name, in, out, attrs); \
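Note: REGISTER(className) stamps out one factory function per operator class. A hedged, self-contained sketch of what an expansion such as REGISTER(SoftmaxInfo) would produce; the aliases and the toy SoftmaxInfo class below are simplified stand-ins for the real types in the surrounding headers, and only the two macro lines visible in this hunk are reproduced:

#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct OperatorInfo {
  virtual ~OperatorInfo() = default;
};
using OperatorInfoPtr = std::shared_ptr<OperatorInfo>;
using Shape = std::vector<int32_t>;
using Shapes = std::vector<Shape>;
using PrimitiveAttrs = std::vector<std::pair<std::string, std::string>>;  // simplified stand-in

struct SoftmaxInfo : OperatorInfo {
  SoftmaxInfo(std::string, Shapes, Shapes, PrimitiveAttrs&) {}
};

#define REGISTER(className)                                                                                  \
  OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
    return std::make_shared<className>(name, in, out, attrs);                                                \
  }

REGISTER(SoftmaxInfo)  // expands to a factory named objectCreatorSoftmaxInfo

int main() {
  Shapes in, out;
  PrimitiveAttrs attrs;
  OperatorInfoPtr op = objectCreatorSoftmaxInfo("Softmax", in, out, attrs);
  return op != nullptr ? 0 : 1;
}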
@@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   for (auto& element : axis_) {
     int32_t axis_index = element;
     if (element < 0) {
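Note: the new form is behaviorally identical to the fill constructor it replaces; the (void) cast only discards the iterator that insert returns, to satisfy the style checker. A minimal equivalence check, assuming Shape aliases std::vector<int32_t> as in the surrounding code:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;  // assumption: the alias used throughout these files

int main() {
  size_t dims = 4;  // stand-in for inputs_shape_[0].size()

  Shape a(dims, 1);  // old form: fill constructor

  Shape b;  // new form: default-construct, then insert dims copies of 1
  (void)b.insert(b.begin(), dims, 1);

  assert(a == b);  // both hold [1, 1, 1, 1]
  return 0;
}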
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {
@@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) {
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
+  Shape input0_split;
+  (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
   input0_split[IntToSize(axis_index)] = 0;
   Shapes splittable_inputs = {input0_split, input0_split};
   std::vector<StrategyPtr> sp_vector;
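Note: here input0_split acts as a per-dimension splittable mask (1 = the dimension may be sharded, 0 = it must stay whole), with the loss axis pinned to 0 after the fill. A short sketch of the resulting mask; the IntToSize helper below is a hypothetical stand-in for the real checked conversion:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using Shape = std::vector<int32_t>;

// Hypothetical stand-in for the real IntToSize checked conversion.
size_t IntToSize(int32_t v) { return static_cast<size_t>(v); }

int main() {
  size_t rank = 4;         // stand-in for inputs_shape_[0].size()
  int32_t axis_index = 1;  // the axis the loss is computed along

  Shape input0_split;
  (void)input0_split.insert(input0_split.begin(), rank, 1);
  input0_split[IntToSize(axis_index)] = 0;  // the reduced axis must not be split

  for (int32_t v : input0_split) {
    std::cout << v << ' ';  // prints: 1 0 1 1
  }
  std::cout << std::endl;
  return 0;
}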
@@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra
 double OperatorInfo::GetForwardMemoryCostFromCNode() {
   return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0);
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 1);
-  input0_split[1] = 0;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  input0_split.emplace_back(0);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1);
   Shape input1_split(inputs_shape_[1].size(), 0);
   Shapes splittable_inputs = {input0_split, input1_split};
   std::vector<StrategyPtr> sp_vector;
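Note: both the removed and the added lines build the same mask, [1, 0, 1, ..., 1]: dimension 1 is the channel dimension carrying the PReLU weight and must not be split. A minimal sketch checking that the two constructions agree, again assuming Shape is std::vector<int32_t>:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  size_t rank = 4;  // stand-in for inputs_shape_[0].size(), e.g. an NCHW input

  // Old form: fill with 1s, then zero out the channel dimension.
  Shape old_mask(rank, 1);
  old_mask[1] = 0;

  // New form: emplace the first two entries, then append rank - 2 ones.
  Shape new_mask;
  new_mask.emplace_back(1);
  new_mask.emplace_back(0);
  (void)new_mask.insert(new_mask.end(), rank - 2, 1);

  assert(old_mask == new_mask);  // both are [1, 0, 1, 1]
  return 0;
}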
@@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) {
     return FAILED;
   }
   is_auto_parallel_ = true;
-  Shape input0_split(inputs_shape_[0].size(), 0);
-  input0_split[0] = 1;
+  Shape input0_split;
+  input0_split.emplace_back(1);
+  (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0);
   Shapes splittable_inputs = {input0_split};
   std::vector<StrategyPtr> sp_vector;
   if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) {
   if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
     if (is_auto_parallel_) {
@@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) {
   StrategyPtr sp;
   std::vector<Dimensions> strategy;
   for (auto& shape : inputs_shape_) {
-    Shape temp(shape.size(), 1);
-    temp[0] = SizeToInt(total_dev_num);
+    Shape temp;
+    temp.emplace_back(SizeToInt(total_dev_num));
+    (void)temp.insert(temp.end(), shape.size() - 1, 1);
     strategy.push_back(temp);
   }
   sp = std::make_shared<Strategy>(stage_id, strategy);
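Note: this loop assigns every dataset input the same data-parallel strategy: split the leading (batch) dimension across all devices and leave the rest whole. A self-contained sketch with hypothetical shapes and device count:

#include <cstdint>
#include <iostream>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  int32_t total_dev_num = 8;  // hypothetical device count
  std::vector<Shape> inputs_shape = {{32, 3, 224, 224}, {32, 10}};  // hypothetical inputs

  std::vector<Shape> strategy;
  for (auto& shape : inputs_shape) {
    Shape temp;
    temp.emplace_back(total_dev_num);                    // batch dimension split over all devices
    (void)temp.insert(temp.end(), shape.size() - 1, 1);  // remaining dimensions unsplit
    strategy.push_back(temp);
  }

  // Prints "8 1 1 1" and "8 1": every input is batch-split the same way.
  for (auto& s : strategy) {
    for (int32_t v : s) std::cout << v << ' ';
    std::cout << std::endl;
  }
  return 0;
}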
@@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo {
   Status GetAttrs() override;
   Status InferAsLossDivisor() override;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 enum Status {
   SUCCESS = 0,
   FAILED,
@@ -477,7 +477,6 @@ Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const F
   bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                        (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                        (current_op_ptr->name().find(prim->name()) == std::string::npos);
-
   if (is_find_wrong) {
     MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                       << " does not match the Prim: " << prim->name();
@@ -937,7 +936,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
   graph = EliminateGraph(graph, eli_list, index_list);
   size_t num_device = g_device_manager->DeviceNum();
-
   if (PartitionForAllDevices(num_device, graph) == SUCCESS) {
     MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
   } else {
@@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
 std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                           std::vector<std::vector<std::string>> input_tensor_names);
-
 } // namespace parallel
 } // namespace mindspore
 #endif // PARALLEL_STEP_AUTO_PARALLEL_H_
@@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph == nullptr) {
     return FindLossCNode(root);
   } else {
@@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) {
   MS_EXCEPTION_IF_NULL(root_return_node);
   const auto& all_nodes = root->nodes();
   FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
-
   if (func_graph != nullptr) {
     forward_graph = func_graph;
   }
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 #define MIN_SLICE_NUM 1
 using Dimensions = std::vector<int32_t>;
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Arrangement::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Arrangement : public Array {
  public:
   Arrangement() : size_(1) {}
@@ -53,7 +52,6 @@ class Arrangement : public Array {
   void ComputeSize();
   int32_t size_;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string Array::ToString() const {
   std::ostringstream buffer;
   buffer << "[ ";
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 class Array {
  public:
   Array() = default;
@@ -43,7 +42,6 @@ class Array {
  protected:
   std::vector<int32_t> array_;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -52,7 +52,6 @@ class ConstructOperator {
   Shape dev_matrix_shape_;
   Status CreateGroupByDim(size_t axis, std::vector<Group>* group);
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string LayoutTransfer::ToString() const {
   std::ostringstream buffer;
   buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString());
@@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_
   Status status = CheckValidTransfer();
   return status;
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class LayoutTransfer {
  public:
   LayoutTransfer() = default;
@@ -43,7 +42,6 @@ class LayoutTransfer {
  private:
   virtual Status CheckValidTransfer() = 0;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -26,7 +26,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status Map::Init(const std::vector<int32_t>& array) {
   Status status = Array::Init(array);
   if (status != Status::SUCCESS) {
@@ -46,7 +46,6 @@ class Map : public Array {
  private:
   bool IsValidMap();
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; }
 /*
@@ -66,6 +65,5 @@ std::shared_ptr<ReshapeLayoutTransfer> RedistributionLayoutTransfer::UnifyDevice
   }
   return unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape();
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 class RedistributionLayoutTransfer : public LayoutTransfer {
  public:
   RedistributionLayoutTransfer() = default;
@@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer {
   Status CheckValidTransfer() override;
   std::shared_ptr<ReshapeLayoutTransfer> UnifyDeviceArrangement() const;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -22,7 +22,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map,
                                          RankList dev_list) {
   in_tensor_map_ = tensor_layout.tensor_map();
@@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) {
   }
   return Status::SUCCESS;
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -28,7 +28,6 @@
 #include "utils/convert_utils.h"
 namespace mindspore {
 namespace parallel {
-
 using DeviceArrangement = std::vector<int32_t>;
 using TensorMap = std::vector<int32_t>;
 using TensorShape = std::vector<int32_t>;
@@ -69,7 +68,6 @@ class RedistributionOperatorInfer {
   RankList dev_list_;
   bool construct_op_flag_;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -20,7 +20,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status ReshapeLayoutTransfer::CheckValidTransfer() {
   if (!IsSameDeviceArrangement()) {
     return Status::FAILED;
@@ -23,7 +23,6 @@
 namespace mindspore {
 namespace parallel {
-
 class ReshapeLayoutTransfer : public LayoutTransfer {
  public:
   ReshapeLayoutTransfer() = default;
@@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer {
   bool FromTensorShapeCanBeExpandByTo() const;
   bool ToTensorShapeCanBeExpandByFrom() const;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * example:
  * shape = [2, 8, 32]
@@ -260,6 +259,5 @@ Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& e
   }
   return status;
 }
-
 } // namespace parallel
 } // namespace mindspore
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 /*
  * compute the accumulating product of all the values in shape from left to right,
  * the accumulating results are saved in shape_accum from left to right
@@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector<int64_t>& in_accum_reverse,
  * out = [2, 4, 2, 4, 8]
  */
 Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& expand, std::vector<int32_t>* out);
-
 } // namespace parallel
 } // namespace mindspore
@@ -28,7 +28,6 @@
 namespace mindspore {
 namespace parallel {
-
 using Shapes = std::vector<Shape>;
 class TensorInfo {
@@ -55,7 +54,6 @@ class TensorInfo {
   // reduce method's reduce dim
   std::vector<int32_t> reduce_dim_;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -27,7 +27,6 @@
 namespace mindspore {
 namespace parallel {
-
 std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); }
 std::string TensorLayout::StandardToString() const {
@@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) {
     MS_LOG(ERROR) << "Index is out of the size of the tensor map!";
     return Status::FAILED;
   }
-  Shape shape = tensor_map_.array();
+  auto shape = tensor_map_.array();
   shape[index] = value;
   if (tensor_map_.Init(shape) == Status::FAILED) {
     MS_LOG(ERROR) << "Update tensor map failed!";
@@ -30,7 +30,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorLayout {
  public:
   TensorLayout() = default;
@@ -94,7 +93,6 @@ class TensorLayout {
   Map tensor_map_;
   Arrangement tensor_shape_;
 };
-
 } // namespace parallel
 } // namespace mindspore
@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace parallel {
-
 Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) {
   from_origin_ = from;
   to_origin_ = to;
@@ -33,7 +33,6 @@
 namespace mindspore {
 namespace parallel {
-
 class TensorRedistribution {
 public:
  explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false)
@@ -83,7 +82,6 @@ class TensorRedistribution {
  bool construct_op_flag_;
  bool keep_reshape_;
 };
-
 } // namespace parallel
 } // namespace mindspore