@@ -172,6 +172,8 @@ using TransposeCost = ActivationCost;
 using TransposeCostPtr = std::shared_ptr<TransposeCost>;
 using StridedSliceCost = ActivationCost;
 using StridedSliceCostPtr = std::shared_ptr<StridedSliceCost>;
+using SliceCost = ActivationCost;
+using SliceCostPtr = std::shared_ptr<SliceCost>;
 using SplitCost = ActivationCost;
 using SplitCostPtr = std::shared_ptr<SplitCost>;
@@ -184,6 +184,7 @@ REGISTER(EmbeddingLookupInfo);
 REGISTER(TileInfo);
 REGISTER(BroadcastToInfo);
 REGISTER(StridedSliceInfo);
+REGISTER(SliceInfo);
 REGISTER(DropoutInfo);
 REGISTER(PackInfo);
 REGISTER(ConcatInfo);
@@ -40,6 +40,7 @@
 #include "frontend/parallel/ops_info/gather_v2_p_info.h"
 #include "frontend/parallel/ops_info/tile_info.h"
 #include "frontend/parallel/ops_info/strided_slice_info.h"
+#include "frontend/parallel/ops_info/slice_info.h"
 #include "frontend/parallel/ops_info/concat_info.h"
 #include "frontend/parallel/ops_info/split_info.h"
 #include "frontend/parallel/ops_info/tensordot_info.h"
@@ -29,6 +29,9 @@ constexpr int64_t NO_SPLIT_STRATEGY = 1;
 constexpr int64_t SPLIT_FLAG = 1;
 constexpr int64_t NO_SPLIT_FLAG = 0;
 constexpr size_t MATMUL_ATTRS_SIZE = 2;
+constexpr size_t SLICE_BEGIN_INDEX = 1;
+constexpr size_t SLICE_SIZE_INDEX = 2;
+constexpr size_t SLICE_INPUTS_SIZE = 3;
 constexpr size_t STRIDED_SLICE_ATTRS_SIZE = 5;
 constexpr size_t STRIDED_SLICE_INPUTS_SIZE = 4;
 constexpr size_t STRIDED_SLICE_BEGIN_INDEX = 1;
@@ -98,6 +101,7 @@ constexpr char ELLIPSIS_MASK[] = "ellipsis_mask";
 constexpr char NEW_AXIS_MASK[] = "new_axis_mask";
 constexpr char SHRINK_AXIS_MASK[] = "shrink_axis_mask";
 constexpr char BEGIN[] = "begin";
+constexpr char SIZE[] = "size";
 constexpr char END[] = "end";
 constexpr char STRIDES[] = "strides";
 constexpr char GROUP[] = "group";
@@ -241,6 +245,7 @@ constexpr char LOGICALNOT[] = "LogicalNot";
 constexpr char GATHERV2[] = "GatherV2";
 constexpr char SPARSE_GATHERV2[] = "SparseGatherV2";
 constexpr char STRIDEDSLICE[] = "StridedSlice";
+constexpr char SLICE[] = "Slice";
 constexpr char BROADCAST[] = "Broadcast";
 constexpr char BROADCAST_TO[] = "BroadcastTo";
 constexpr char SQRT[] = "Sqrt";
@@ -0,0 +1,284 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frontend/parallel/ops_info/slice_info.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "frontend/parallel/device_matrix.h"
+#include "frontend/parallel/strategy.h"
+#include "frontend/parallel/graph_util/generate_graph.h"
+#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
+#include "pipeline/jit/resource.h"
+
+namespace mindspore {
+namespace parallel {
+Status SliceInfo::GetInput(const ValuePtr &input_value, std::vector<int64_t> *input) {
+  MS_EXCEPTION_IF_NULL(input_value);
+  ValueTuplePtr value_tuple = input_value->cast<ValueTuplePtr>();
+  if (value_tuple == nullptr) {
+    MS_LOG(ERROR) << name_ << ": Input value must be ValueTuplePtr.";
+    return FAILED;
+  }
+
+  for (auto &element : value_tuple->value()) {
+    MS_EXCEPTION_IF_NULL(element);
+    if (element->isa<Int64Imm>()) {
+      int64_t value = element->cast<Int64ImmPtr>()->value();
+      input->push_back(value);
+    } else {
+      MS_LOG(ERROR) << name_ << ": The value must be int64";
+      return FAILED;
+    }
+  }
+  return SUCCESS;
+}
+
+Status SliceInfo::GetAttrs() {
+  if (input_value_.size() != SLICE_INPUTS_SIZE) {
+    MS_LOG(ERROR) << name_ << ": The size of input value must be " << SLICE_INPUTS_SIZE << ", but got "
+                  << input_value_.size();
+    return FAILED;
+  }
+
+  if ((GetInput(input_value_[SLICE_BEGIN_INDEX], &begin_) != SUCCESS) ||
+      (GetInput(input_value_[SLICE_SIZE_INDEX], &size_) != SUCCESS)) {
+    return FAILED;
+  }
+  return SUCCESS;
+}
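+
+// A dimension may be split only when the slice fully fetches it. Illustrative
+// (hypothetical) example: for an input of shape (128, 64, 32) with begin = (0, 0, 0)
+// and size = (64, 64, 32), dimension 0 is not fully fetched (64 < 128), so any
+// strategy that shards dimension 0 is rejected below.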
+Status SliceInfo::CheckStrategy(const StrategyPtr &strategy) {
+  MS_EXCEPTION_IF_NULL(strategy);
+  if (CheckStrategyValue(strategy, inputs_shape_) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Invalid strategy";
+    return FAILED;
+  }
+
+  std::vector<Dimensions> stra = strategy->GetInputDim();
+  if (stra.empty()) {
+    MS_LOG(ERROR) << name_ << ": The strategy is empty";
+    return FAILED;
+  }
+
+  Dimensions strategy_value = stra[0];
+  for (size_t i = 0; i < begin_.size(); ++i) {
+    bool no_fully_fetch = ((begin_[i] != 0) || (size_[i] < inputs_shape_[0][i]));
+    if (no_fully_fetch && (strategy_value[i] != 1)) {
+      MS_LOG(ERROR) << name_ << ": When a dimension is not fully fetched, it cannot be split for now";
+      return FAILED;
+    }
+  }
+  return SUCCESS;
+}
+
+Status SliceInfo::InferDevMatrixShape() {
+  MS_EXCEPTION_IF_NULL(strategy_);
+  std::vector<Dimensions> stra = strategy_->GetInputDim();
+  if (stra.empty()) {
+    MS_LOG(ERROR) << name_ << ": The strategy is empty";
+    return FAILED;
+  }
+
+  dev_matrix_shape_ = stra[0];
+  return SUCCESS;
+}
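+
+// The tensor map ties input dimension i to device-matrix axis (n - 1 - i), where n
+// is the input rank; e.g. a 3-D input gets the map (2, 1, 0). The output reuses the
+// same map because Slice preserves the input's dimension order.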
+Status SliceInfo::InferTensorMap() {
+  TensorMap tensor_map;
+  if (inputs_shape_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
+    return FAILED;
+  }
+
+  // Cannot use dev_matrix_shape_ in place of inputs_shape_[0], because the input may not be
+  // fully split across all devices.
+  int64_t size = SizeToLong(inputs_shape_[0].size());
+  for (int64_t i = 0; i < size; ++i) {
+    tensor_map.push_back(size - i - 1);
+  }
+
+  inputs_tensor_map_.push_back(tensor_map);
+  outputs_tensor_map_.push_back(tensor_map);
+  return SUCCESS;
+}
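+
+// Mirror operators are only created for the first input (the sliced tensor); the
+// begin and size inputs are constant value tuples, so their operator vectors are
+// left empty below.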
+Status SliceInfo::InferMirrorOps() {
+  mirror_ops_.clear();
+  if (inputs_tensor_map_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs tensor map is empty";
+    return FAILED;
+  }
+
+  Shape input_tensor_map = inputs_tensor_map_[0];
+  std::vector<Group> group;
+  if (CreateGroupByTensorMap(input_tensor_map, &group) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Create group for input failed.";
+    return FAILED;
+  }
+
+  if (group.empty()) {
+    MS_LOG(INFO) << name_ << ": The mirror group is empty.";
+    return SUCCESS;
+  }
+
+  OperatorVector input_op, begin_op, size_op;
+  input_op = CreateMirrorOps(group[0].name(), group[0].GetDevNum());
+  mirror_ops_.push_back(input_op);
+  mirror_ops_.push_back(begin_op);
+  mirror_ops_.push_back(size_op);
+  return SUCCESS;
+}
+
+Status SliceInfo::InferTensorInfo() {
+  if (inputs_shape_.empty() || outputs_shape_.empty() || inputs_tensor_map_.empty() || outputs_tensor_map_.empty()) {
+    MS_LOG(ERROR) << name_ << ": Invalid args";
+    return FAILED;
+  }
+
+  // infer tensor layout
+  TensorLayout input_layout, output_layout;
+  if (input_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], inputs_shape_[0]) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer input tensor layout failed.";
+    return FAILED;
+  }
+  if (output_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], outputs_shape_[0]) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer output tensor layout failed.";
+    return FAILED;
+  }
+
+  TensorInfo input_tensor_info(input_layout);
+  TensorInfo output_tensor_info(output_layout);
+  inputs_tensor_info_.push_back(input_tensor_info);
+  outputs_tensor_info_.push_back(output_tensor_info);
+  return SUCCESS;
+}
+
+// Note: if the batch dimension is not fully fetched, the batch strategy may not work.
+std::shared_ptr<Strategys> SliceInfo::GenerateBatchStrategies() {
+  split_flag_list_ = {true};
+  return GenerateBatchStrategiesBySplitFlag(inputs_shape_, split_flag_list_);
+}
+
+Status SliceInfo::SetCostUnderStrategy(const StrategyPtr &strategy) { return SetCostUnderStrategyBase(strategy); }
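+
+// Candidate strategies are generated from a per-dimension splittable mask: 1 when
+// the dimension is fully fetched (and may be split), 0 otherwise. Illustrative
+// (hypothetical) example: input shape (128, 64, 1) with begin = (0, 0, 0) and
+// size = (64, 64, 1) gives input_split = {0, 1, 1}.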
+Status SliceInfo::GenerateStrategies(int64_t stage_id) {
+  if (InferAttrs() != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Infer attrs failed";
+    return FAILED;
+  }
+  if (inputs_shape_.empty()) {
+    MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
+    return FAILED;
+  }
+
+  Shape input_split(inputs_shape_[0].size(), 1);
+  for (size_t i = 0; i < begin_.size(); ++i) {
+    bool no_fully_fetch = ((begin_[i] != 0) || (size_[i] < inputs_shape_[0][i]));
+    if (no_fully_fetch) {
+      input_split[i] = 0;
+    }
+  }
+  Shapes splittable_inputs = {input_split};
+
+  std::vector<StrategyPtr> sp_vector;
+  if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {
+    return FAILED;
+  }
+
+  size_t success = 0;
+  for (auto &sp : sp_vector) {
+    if (SetCostUnderStrategy(sp) == SUCCESS) {
+      success++;
+      MS_LOG(INFO) << name_ << ": Successfully generated " << success << " strategies.";
+      PrintStrategy(sp);
+    }
+  }
+  return SUCCESS;
+}
+
+Status SliceInfo::Init(const StrategyPtr &strategy) {
+  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Init failed.";
+    return FAILED;
+  }
+  MS_LOG(INFO) << name_ << ": Init success.";
+  return SUCCESS;
+}
+
+Status SliceInfo::InitForCostModel(const StrategyPtr &strategy) {
+  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": Init for cost model failed.";
+    return FAILED;
+  }
+  MS_LOG(INFO) << name_ << ": Init for cost model success.";
+  return SUCCESS;
+}
+
+ReplaceGraphPtr SliceInfo::replace_graph(const CNodePtr &cnode) {
+  auto input_strategy = strategy_->GetInputDim().at(0);
+  if (std::any_of(input_strategy.begin(), input_strategy.end(), [](const int64_t &shard) { return shard > 1; })) {
+    if (ComputeReplaceGraph(cnode) != SUCCESS) {
+      MS_LOG(EXCEPTION) << name_ << ": ComputeReplaceGraph failed.";
+    }
+  }
+  return replace_graph_;
+}
+
+AnfNodePtr CreateValueTupleAndNodePtr(const std::vector<int64_t> &value_tuple) {
+  auto value_ptr = MakeValue(value_tuple)->cast<ValueTuplePtr>();
+  auto value_node = NewValueNode(value_ptr);
+  return value_node->cast<AnfNodePtr>();
+}
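+
+// Rewrites Slice with per-device size values, taken from the input's slice_shape()
+// on sharded dimensions. Illustrative (hypothetical) example: size_ = (128, 64, 32)
+// under strategy (1, 4, 2) yields a per-device size of (128, 16, 16).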
+Status SliceInfo::ComputeReplaceGraph(const CNodePtr &cnode) {
+  GenerateGraph gen_g = GenerateGraph();
+  if (gen_g.Init(cnode) != SUCCESS) {
+    MS_LOG(ERROR) << "GenerateGraph Init failed";
+    return FAILED;
+  }
+
+  Dimensions input_stra = strategy_->GetInputDim().at(0);
+  std::vector<int64_t> sliced_size_shape_int;
+  Shape input_slice_shape = inputs_tensor_info_[0].slice_shape();
+  for (size_t i = 0; i < size_.size(); i++) {
+    if (input_stra[i] == 1) {
+      sliced_size_shape_int.push_back(size_[i]);
+    } else {
+      sliced_size_shape_int.push_back(input_slice_shape[i]);
+    }
+  }
+
+  auto new_begin = CreateValueTupleAndNodePtr(begin_);
+  auto new_size = CreateValueTupleAndNodePtr(sliced_size_shape_int);
+  auto slice = gen_g.PushBack({gen_g.NewOpInst(SLICE), gen_g.virtual_input_node(), new_begin, new_size});
+
+  std::vector<std::pair<AnfNodePtr, int64_t>> input_nodes = {std::make_pair(slice, 1)};
+  replace_graph_ = std::make_shared<std::pair<std::vector<std::pair<AnfNodePtr, int64_t>>, AnfNodePtr>>(
+    std::make_pair(input_nodes, slice));
+  return SUCCESS;
+}
+}  // namespace parallel
+}  // namespace mindspore
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SLICE_INFO_H_
+#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SLICE_INFO_H_
+
+#include <string>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "ir/value.h"
+#include "frontend/parallel/auto_parallel/operator_costmodel.h"
+#include "frontend/parallel/ops_info/operator_info.h"
+#include "frontend/parallel/strategy.h"
+
+namespace mindspore {
+namespace parallel {
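+// Parallel operator information for the Slice primitive. Its inputs are
+// (input_x, begin, size); only dimensions that the slice fully fetches may be sharded.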
+class SliceInfo : public OperatorInfo {
+ public:
+  SliceInfo(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape,
+            const PrimitiveAttrs &attrs)
+      : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared<SliceCost>(false)),
+        slice_axis_(-1) {}
+  ~SliceInfo() override = default;
+
+  Status Init(const StrategyPtr &strategy) override;
+  Status InitForCostModel(const StrategyPtr &strategy) override;
+  Status GenerateStrategies(int64_t) override;
+  Status SetCostUnderStrategy(const StrategyPtr &) override;
+  std::shared_ptr<Strategys> GenerateBatchStrategies() override;
+
+ protected:
+  Status GetAttrs() override;
+  Status CheckStrategy(const StrategyPtr &strategy) override;
+  Status InferMirrorOps() override;
+  Status InferForwardCommunication() override { return SUCCESS; }
+  Status InferTensorInfo() override;
+  Status InferDevMatrixShape() override;
+  Status InferTensorMap() override;
+  ReplaceGraphPtr replace_graph(const CNodePtr &cnode) override;
+
+ private:
+  Status GetInput(const ValuePtr &input_value, std::vector<int64_t> *input);
+  Status ComputeReplaceGraph(const CNodePtr &cnode);
+
+  std::vector<int64_t> begin_;
+  std::vector<int64_t> size_;
+  int64_t slice_axis_;
+};
+
+using SliceInfoPtr = std::shared_ptr<SliceInfo>;
+}  // namespace parallel
+}  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_SLICE_INFO_H_
@@ -59,7 +59,7 @@ Status GetInput(const ValuePtr &input_value, std::vector<int64_t> *input) {
       int64_t value = element->cast<Int64ImmPtr>()->value();
       input->push_back(value);
     } else {
-      MS_LOG(ERROR) << "The value must be int32";
+      MS_LOG(ERROR) << "The value must be int64";
       return FAILED;
     }
   }
@@ -317,7 +317,7 @@ bool IsSplittableOperator(const std::string &op_name) {
     EXPM1, LOG1P, SIN, SINH, TAN, RSQRT, INV, RECIPROCAL, ROUND, FLOOR, SIGN, ERF, ERFC, ZEROSLIKE, ONESLIKE,
     BESSELI0E, BESSELI1E, FLOORMOD, ASSIGN, ASSIGN_ADD, ATAN2, DIVNONAN, LOGICALAND, LOGICALOR, ELU, RELU6, RELUV2,
     SOFTPLUS, SOFTSIGN, GREATEREQUAL, LESSEQUAL, LESS, APPROXIMATEEQUAL, MOD, UNIQUE, UNSORTED_SEGMENT_SUM,
-    UNSORTED_SEGMENT_MIN, REPEAT_ELEMENTS, TENSOR_DOT, RANGE, UNIFORM_CANDIDATE_SAMPLER};
+    UNSORTED_SEGMENT_MIN, REPEAT_ELEMENTS, TENSOR_DOT, RANGE, UNIFORM_CANDIDATE_SAMPLER, SLICE};
   // clang-format on
   auto iter = splittable_op.find(op_name);
@@ -0,0 +1,135 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import pytest
+
+import mindspore as ms
+from mindspore import context, Tensor, Parameter
+from mindspore.common.api import _executor
+from mindspore.nn import Cell, TrainOneStepCell, Momentum
+from mindspore.ops import operations as P
+
+
+class Net(Cell):
+    def __init__(self, weight, w2, begin, size, strategy1=None, strategy2=None, is_parameter=True):
+        super().__init__()
+        self.mul = P.Mul().shard(strategy1)
+        self.slice = P.Slice().shard(strategy2)
+        if is_parameter:
+            self.weight = Parameter(weight, "w1")
+        else:
+            self.weight = weight
+        self.mul2 = P.Mul()
+        self.weight2 = Parameter(w2, "w2")
+        self.begin = begin
+        self.size = size
+
+    def construct(self, x, b):
+        out = self.slice(self.weight, self.begin, self.size)
+        out = self.mul(x, out)
+        out = self.mul2(out, self.weight2)
+        return out
+
+
+class Net2(Cell):
+    def __init__(self, weight2, begin, size, strategy1=None, strategy2=None):
+        super().__init__()
+        self.mul = P.Mul().shard(strategy1)
+        self.slice = P.Slice().shard(strategy2)
+        self.weight2 = Parameter(weight2, "w2")
+        self.begin = begin
+        self.size = size
+
+    def construct(self, x, b):
+        out = self.mul(x, self.weight2)
+        out = self.slice(out, self.begin, self.size)
+        return out
+
+
+_x = Tensor(np.ones([128, 64, 1]), dtype=ms.float32)
+_w1 = Tensor(np.ones([256, 64, 32]), dtype=ms.float32)
+_w2 = Tensor(np.ones([128, 64, 1]), dtype=ms.float32)
+_b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
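+
+# Illustrative shape note (derived from the tensors above): slicing _w1 (256, 64, 32)
+# with begin (0, 0, 0) and size (128, 64, 32) yields a (128, 64, 32) tensor, which
+# broadcasts with _x (128, 64, 1) in Mul.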
+
+
+def compile_net(net):
+    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+    train_net = TrainOneStepCell(net, optimizer)
+    train_net.set_auto_parallel()
+    train_net.set_train()
+    _executor.compile(train_net, _x, _b)
+    context.reset_auto_parallel_context()
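+
+
+# The shard() strategies below follow MindSpore's convention: one tuple per operator
+# input, one split count per input dimension. For example, strategy2 = ((1, 4, 2),)
+# shards the single sliced input 4-way on dim 1 and 2-way on dim 2 across 8 devices.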
+def test_slice_no_fully_fetch_split_error():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((2, 2, 2), (2, 2, 2))
+    strategy2 = ((2, 2, 2),)
+    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), strategy1, strategy2, is_parameter=True)
+    with pytest.raises(RuntimeError):
+        compile_net(net)
+
+
+def test_slice_parameter():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 4, 1), (1, 4, 2))
+    strategy2 = ((1, 4, 2),)
+    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), strategy1, strategy2)
+    compile_net(net)
+
+
+def test_slice_tensor():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 4, 1), (1, 4, 2))
+    strategy2 = ((1, 4, 2),)
+    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), strategy1, strategy2, is_parameter=False)
+    compile_net(net)
+
+
+def test_slice_parameter_no_full_split():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 4, 1), (1, 4, 2))
+    strategy2 = ((1, 2, 2),)
+    net = Net(_w1, _w2, (0, 0, 0), (128, 64, 32), strategy1, strategy2, is_parameter=True)
+    compile_net(net)
+
+
+def test_slice_output():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 8, 1), (1, 8, 1))
+    strategy2 = ((1, 8, 1),)
+    net = Net2(_w2, (0, 0, 0), (64, 64, 1), strategy1, strategy2)
+    compile_net(net)
+
+
+def test_slice_output_no_full_split():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 8, 1), (1, 8, 1))
+    strategy2 = ((1, 4, 1),)
+    net = Net2(_w2, (0, 0, 0), (64, 64, 1), strategy1, strategy2)
+    compile_net(net)
+
+
+def test_slice_no_strategy():
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    strategy1 = ((1, 8, 1), (1, 8, 1))
+    strategy2 = None
+    net = Net2(_w2, (0, 0, 0), (128, 64, 1), strategy1, strategy2)
+    compile_net(net)
+
+
+def test_slice_auto_parallel():
+    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
+    net = Net2(_w2, (0, 0, 0), (32, 64, 1))
+    compile_net(net)