Browse Source

!15994 add split operator factory and registration

From: @zoloft
Reviewed-by: @zhang_xue_tong,@wangchengyuan
Signed-off-by: @zhang_xue_tong
pull/15994/MERGE
mindspore-ci-bot Gitee 5 years ago
parent
commit
862507ec16
10 changed files with 310 additions and 25 deletions
  1. +36
    -0
      mindspore/lite/src/parallel_config.h
  2. +9
    -0
      mindspore/lite/test/CMakeLists.txt
  3. +3
    -1
      mindspore/lite/tools/converter/CMakeLists.txt
  4. +10
    -2
      mindspore/lite/tools/converter/anf_transform.cc
  5. +5
    -5
      mindspore/lite/tools/optimizer/parallel/operator_info.cc
  6. +15
    -17
      mindspore/lite/tools/optimizer/parallel/operator_info.h
  7. +51
    -0
      mindspore/lite/tools/optimizer/parallel/operator_info_register.cc
  8. +68
    -0
      mindspore/lite/tools/optimizer/parallel/operator_info_register.h
  9. +46
    -0
      mindspore/lite/tools/optimizer/parallel/spliter.cc
  10. +67
    -0
      mindspore/lite/tools/optimizer/parallel/spliter.h

+ 36
- 0
mindspore/lite/src/parallel_config.h View File

@@ -0,0 +1,36 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_PARALLEL_CONFIG_H_
#define MINDSPORE_LITE_SRC_PARALLEL_CONFIG_H_

// Compile-time switches for the parallel executor and graph-splitting feature.
// All switches are simple 0/1 macros consumed elsewhere in the runtime.

// whether to enable parallel_executor or not
#define PARALLEL 1

// cut graph in case of split, concat, add, eltwise, or branches. Used in scheduler.cc
#define SUB_GRAPH 1

// whether to omit PReLU in converter or not
#define REMOVE_PRELU 1

// whether to enable count time and data printer in parallel_executor.cc or not
#define PROFILE 1

// typical bind mode for parallel_executor (disabled by default)
// #define BASE 6

// NOTE(review): undocumented switch — presumably controls whether cast ops are
// inserted around fp16 CPU subgraphs; confirm against scheduler/executor usage.
#define CPU16SUB_INSERT_CAST 0
#endif  // MINDSPORE_LITE_SRC_PARALLEL_CONFIG_H_

+ 9
- 0
mindspore/lite/test/CMakeLists.txt View File

@@ -279,7 +279,16 @@ if(ENABLE_CONVERTER)
${LITE_DIR}/tools/optimizer/graph/unify_format_pass.cc
${LITE_DIR}/tools/optimizer/graph/node_infershape.cc
${LITE_DIR}/tools/optimizer/graph/transpose_strategy.cc
${LITE_DIR}/tools/optimizer/fisson/eliminate_concat_split.cc
${LITE_DIR}/tools/optimizer/fisson/fisson_util.cc
${LITE_DIR}/tools/optimizer/fisson/iter_node_outputs.cc
${LITE_DIR}/tools/optimizer/fisson/node_out_shapes.cc
${LITE_DIR}/tools/optimizer/parallel/parallel_pass.cc
${LITE_DIR}/tools/optimizer/parallel/operator_info.cc
${LITE_DIR}/tools/optimizer/parallel/dynamic_creator.cc
${LITE_DIR}/tools/optimizer/parallel/split_strategy.cc
${LITE_DIR}/tools/optimizer/parallel/operator_info_register.cc
${LITE_DIR}/tools/optimizer/parallel/spliter.cc
${LITE_DIR}/tools/common/graph_util.cc
${LITE_DIR}/tools/common/tensor_util.cc
${LITE_DIR}/tools/common/node_util.cc


+ 3
- 1
mindspore/lite/tools/converter/CMakeLists.txt View File

@@ -67,6 +67,9 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
../optimizer/parallel/dynamic_creator.cc
../optimizer/parallel/operator_info.cc
../optimizer/parallel/parallel_pass.cc
../optimizer/parallel/split_strategy.cc
../optimizer/parallel/operator_info_register.cc
../optimizer/parallel/spliter.cc
../optimizer/graph/conv1d_inout_adjust_pass.cc
../optimizer/graph/weight_format_transform_pass.cc
../optimizer/graph/weight_format_hardcode_pass.cc
@@ -94,7 +97,6 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
../optimizer/graph/unify_format_pass.cc
../optimizer/graph/node_infershape.cc
../optimizer/graph/transpose_strategy.cc
../optimizer/parallel/split_strategy.cc
)

add_subdirectory(../anf_exporter anf_exporter)


+ 10
- 2
mindspore/lite/tools/converter/anf_transform.cc View File

@@ -67,6 +67,9 @@
#include "tools/converter/quantizer/quant_cast.h"
#include "tools/converter/quantizer/weight_quantizer.h"
#include "tools/optimizer/parallel/split_strategy.h"
#include "tools/optimizer/fisson/iter_node_outputs.h"
#include "tools/optimizer/fisson/node_out_shapes.h"
#include "tools/optimizer/parallel/parallel_pass.h"

using std::string;
namespace mindspore::lite {
@@ -137,12 +140,17 @@ int AnfTransform::RunParallelPass(const FuncGraphPtr &old_graph, const converter
}
auto parallel_pm = std::make_shared<opt::PassManager>("anf parallel pass manager", false);
// 2. preceding parallel pass
parallel_pm->AddPass(std::make_shared<opt::RemoveRedundantOpPass>());
parallel_pm->AddPass(std::make_shared<opt::IterNodeOutputs>());
parallel_pm->AddPass(std::make_shared<opt::NodeOutShapes>());
// 3. multi_conv parallel pass
parallel_pm->AddPass(std::make_shared<opt::RemoveRedundantOpPass>());
// 4. single conv parallel pass
parallel_pm->AddPass(std::make_shared<opt::RemoveRedundantOpPass>());
parallel_pm->AddPass(std::make_shared<opt::ParallelPass>(split_strategys, config->fmk));
optimizer->AddPassManager(parallel_pm);
if (optimizer->Optimize(old_graph) == nullptr) {
MS_LOG(ERROR) << "run const fold failed.";
return RET_ERROR;
}
MS_LOG(DEBUG) << "Run ParallelPass end";
return RET_OK;
}


+ 5
- 5
mindspore/lite/tools/optimizer/parallel/operator_info.cc View File

@@ -36,7 +36,7 @@ bool is_any_not_none(const std::vector<int64_t> &split) {
return std::any_of(split.begin(), split.end(), [](int64_t v) { return v != static_cast<int64_t>(NoSplit); });
}

lite::STATUS OperatorInfo::SetCNodeBackend() {
int OperatorInfo::SetCNodeBackend() {
for (size_t i = 0; i < strategy_.dev_num; ++i) {
lite::DeviceType dt_type;
std::string type = strategy_.dev_types[i];
@@ -57,7 +57,7 @@ lite::STATUS OperatorInfo::SetCNodeBackend() {
return lite::RET_OK;
}

lite::STATUS OperatorInfo::CheckStrategyValue() {
int OperatorInfo::CheckStrategyValue() {
auto strategy_size = strategy_.strategys.size();

for (size_t index = 0; index < strategy_size; ++index) {
@@ -77,8 +77,8 @@ lite::STATUS OperatorInfo::CheckStrategyValue() {
return lite::RET_OK;
}

lite::STATUS OperatorInfo::CreateMultipleOutputsOfAnfNode(const AnfNodePtr &node, size_t output_num,
std::vector<AnfNodePtr> *outputs) {
int OperatorInfo::CreateMultipleOutputsOfAnfNode(const AnfNodePtr &node, size_t output_num,
std::vector<AnfNodePtr> *outputs) {
MS_EXCEPTION_IF_NULL(node);
MS_EXCEPTION_IF_NULL(outputs);
AbstractBasePtrList ptr_list;
@@ -180,7 +180,7 @@ AnfNodePtr OperatorInfo::CreateReduceNode(const CNodePtr &orig_node, const std::
return addn_cnode;
}

lite::STATUS OperatorInfo::Init() {
int OperatorInfo::Init() {
if (GetAttrs() != lite::RET_OK) {
MS_LOG(ERROR) << name_ << ": Parse attrs failed.";
return lite::RET_ERROR;


+ 15
- 17
mindspore/lite/tools/optimizer/parallel/operator_info.h View File

@@ -22,7 +22,6 @@
#include <string>
#include <memory>
#include <unordered_map>

#include "tools/optimizer/parallel/split_strategy.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
@@ -49,24 +48,23 @@ using OperatorInfoPtr = std::shared_ptr<OperatorInfo>;

class OperatorInfo {
public:
OperatorInfo(std::string name, SplitStrategy strategy)
OperatorInfo(const std::string &name, const SplitStrategy &strategy)
: name_(std::move(name)),
strategy_(std::move(strategy)),
replace_op_(nullptr),
func_graph_(nullptr),
cnode_(nullptr) {}
virtual ~OperatorInfo() = default;
const std::string &name() const { return name_; }
const std::string name() const { return name_; }
void set_name(const std::string &name) { name_ = name; }
void set_func_graph(const FuncGraphPtr &func_graph) { func_graph_ = func_graph; }
void set_cnode(const CNodePtr &cnode) { cnode_ = cnode; }
void setFmk(const int32_t FmkType) { FmkType_ = FmkType; }
void setFmk(const int32_t fmk_type) { fmk_type_ = fmk_type; }
AnfNodePtr replace_op() { return replace_op_; }
lite::STATUS Init();
int Init();

protected:
lite::STATUS CreateMultipleOutputsOfAnfNode(const AnfNodePtr &node, size_t output_num,
std::vector<AnfNodePtr> *outputs);
int CreateMultipleOutputsOfAnfNode(const AnfNodePtr &node, size_t output_num, std::vector<AnfNodePtr> *outputs);
AnfNodePtr CreateOutputsOfSplit(const CNodePtr &orig_node, size_t input_index, std::vector<AnfNodePtr> *split_outputs,
size_t split_dim, size_t split_num, const std::vector<int64_t> &splits,
bool trans_format);
@@ -74,22 +72,22 @@ class OperatorInfo {
int32_t concat_dim, size_t input_nodes_num, bool trans_format);
AnfNodePtr CreateReduceNode(const CNodePtr &orig_node, const std::vector<AnfNodePtr> &input_nodes, int32_t reduce_dim,
size_t input_nodes_num, bool trans_format);
virtual lite::STATUS GetAttrs() = 0;
virtual lite::STATUS InferReplaceOp() = 0;
virtual lite::STATUS InferParallelCNodes() = 0;
virtual lite::STATUS CheckStrategy(const SplitStrategy &strategy) = 0;
virtual int GetAttrs() = 0;
virtual int InferReplaceOp() = 0;
virtual int InferParallelCNodes() = 0;
virtual int CheckStrategy(const SplitStrategy &strategy) = 0;

std::string name_;
SplitStrategy strategy_;
AnfNodePtr replace_op_;
AnfNodePtr replace_op_{nullptr};
std::vector<AnfNodePtr> parallel_output_nodes_;
FuncGraphPtr func_graph_;
CNodePtr cnode_;
int32_t FmkType_{};
FuncGraphPtr func_graph_{nullptr};
CNodePtr cnode_{nullptr};
int32_t fmk_type_{};

private:
lite::STATUS SetCNodeBackend();
lite::STATUS CheckStrategyValue();
int SetCNodeBackend();
int CheckStrategyValue();
};

bool is_any_none(const std::vector<int64_t> &split);


+ 51
- 0
mindspore/lite/tools/optimizer/parallel/operator_info_register.cc View File

@@ -0,0 +1,51 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "tools/optimizer/parallel/operator_info_register.h"
#include <utility>
namespace mindspore {
namespace opt {

// Accessor for the process-wide factory (function-local static singleton).
// NOTE(review): "GeInstance" looks like a typo for "GetInstance"; renaming
// would touch the header and all call sites, so it is preserved here.
OperatorInfoFactory *OperatorInfoFactory::GeInstance() {
  static OperatorInfoFactory instance;
  return &instance;
}

int OperatorInfoFactory::RegisterOperatorInfo(const std::string &name, const SplitStrategy &strategy,
const OperatorInfoCreatorFunc &creator_func) {
if (operator_info_map_.find(name) != operator_info_map_.end()) {
MS_LOG(ERROR) << "Operator already exist" << name;
return lite::RET_ERROR;
}
this->operator_info_map_.insert(std::pair<std::string, OperatorInfoCreatorFunc>(name, creator_func));
return lite::RET_OK;
}

// Looks up the creator function registered for `name`.
// `strategy` is accepted for interface symmetry but does not affect the lookup.
// Returns nullptr (after logging) when no creator has been registered.
OperatorInfoCreatorFunc OperatorInfoFactory::FindOperatorInfo(const std::string &name, const SplitStrategy &strategy) {
  auto iterator = this->operator_info_map_.find(name);
  if (iterator == this->operator_info_map_.end()) {
    MS_LOG(ERROR) << "operator_info is not registered: " << name;
    return nullptr;
  }
  return iterator->second;
}

// Registration helper: constructing an instance registers `creator_func` with
// the factory as a side effect. Used by the OPERATOR_INFO_REGISTER macro so
// registration happens during static initialization.
OperatorInfoRegister::OperatorInfoRegister(const std::string &name, const SplitStrategy &strategy,
                                           const OperatorInfoCreatorFunc &creator_func) {
  OperatorInfoFactory::GeInstance()->RegisterOperatorInfo(name, strategy, creator_func);
}
} // namespace opt
} // namespace mindspore

+ 68
- 0
mindspore/lite/tools/optimizer/parallel/operator_info_register.h View File

@@ -0,0 +1,68 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_TOOLS_OPERATOR_INFO_REGISTER_H
#define MINDSPORE_LITE_TOOLS_OPERATOR_INFO_REGISTER_H

#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "tools/optimizer/parallel/operator_info.h"

namespace mindspore {
namespace opt {
using OperatorInfoCreatorFunc =
std::function<std::unique_ptr<opt::OperatorInfo>(const std::string &name, const SplitStrategy &strategy)>;

// Process-wide registry mapping operator names to creator functions.
// Singleton; registration is driven by namespace-scope statics (see
// OPERATOR_INFO_REGISTER), which are assumed to run single-threaded —
// NOTE(review): confirm no concurrent registration/lookup.
class OperatorInfoFactory {
 public:
  // NOTE(review): "GeInstance" appears to be a typo for "GetInstance"; kept
  // as-is because renaming would touch every call site.
  static OperatorInfoFactory *GeInstance();

  OperatorInfoFactory(const OperatorInfoFactory &) = delete;
  OperatorInfoFactory &operator=(const OperatorInfoFactory &) = delete;

  // Registers `creator_func` under `name`; returns lite::RET_ERROR on a
  // duplicate name, lite::RET_OK otherwise.
  int RegisterOperatorInfo(const std::string &name, const SplitStrategy &strategy,
                           const OperatorInfoCreatorFunc &creator_func);

  // Returns the creator registered for `name`, or nullptr if none exists.
  OperatorInfoCreatorFunc FindOperatorInfo(const std::string &name, const SplitStrategy &strategy);

 private:
  OperatorInfoFactory() = default;
  // Non-virtual: this class is not a polymorphic base, and the destructor is
  // private anyway (the singleton is a function-local static).
  ~OperatorInfoFactory() = default;

  std::map<std::string, OperatorInfoCreatorFunc> operator_info_map_;
};

// Static-registration helper: constructing an instance registers an operator
// creator with OperatorInfoFactory. Instances are created as file-local
// statics by OPERATOR_INFO_REGISTER, so registration runs at static-init time.
class OperatorInfoRegister {
 public:
  OperatorInfoRegister() = delete;

  // Registers `creator_func` for `name` via OperatorInfoFactory::GeInstance().
  OperatorInfoRegister(const std::string &name, const SplitStrategy &strategy,
                       const OperatorInfoCreatorFunc &creator_func);

  ~OperatorInfoRegister() = default;
};

// Expands to a file-local static registrar. `name` is pasted into the variable
// identifier, so it must be a valid identifier fragment.
#define OPERATOR_INFO_REGISTER(name, strategy, creator_func) \
  static OperatorInfoRegister g_##name##Creator(name, strategy, creator_func);
} // namespace opt
} // namespace mindspore

#endif // MINDSPORE_LITE_TOOLS_OPERATOR_INFO_REGISTER_H

+ 46
- 0
mindspore/lite/tools/optimizer/parallel/spliter.cc View File

@@ -0,0 +1,46 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "tools/optimizer/parallel/spliter.h"
#include <algorithm>
namespace mindspore {
namespace opt {
// Accessor for the process-wide Spliter (function-local static singleton).
Spliter *Spliter::GetInstance() {
  static Spliter instance;
  return &instance;
}

// Records `candidate_output` as an output (consumer) of `input_node_name`.
// A candidate that is already recorded for this node is ignored, so each
// consumer appears at most once.
void Spliter::UpdateNodeOutputs(const std::string &input_node_name, const AnfNodePtr &candidate_output) {
  // Take a single reference instead of re-looking the key up three times;
  // operator[] creates an empty vector for a first-seen node, which preserves
  // the original "record unconditionally when the key is absent" behavior.
  auto &outputs = graph_node_outputs_[input_node_name];
  if (std::find(outputs.begin(), outputs.end(), candidate_output) != outputs.end()) {
    return;
  }
  outputs.push_back(candidate_output);
}

// Replaces the recorded input shapes of `node_name` with `input_shapes`.
void Spliter::UpdateNodeInputShapes(const std::string &node_name, const std::vector<ShapeVector> &input_shapes) {
  graph_node_input_shapes_[node_name] = input_shapes;
}

// Replaces the recorded output shapes of `node_name` with `output_shapes`.
void Spliter::UpdateNodeOutputShapes(const std::string &node_name, const std::vector<ShapeVector> &output_shapes) {
  graph_node_output_shapes_[node_name] = output_shapes;
}

} // namespace opt
} // namespace mindspore

+ 67
- 0
mindspore/lite/tools/optimizer/parallel/spliter.h View File

@@ -0,0 +1,67 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_SPLITER_H
#define MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_SPLITER_H
#include <vector>
#include <string>
#include <unordered_map>
#include "schema/inner/model_generated.h"
#include "utils/utils.h"
#include "tools/optimizer/common/gllo_utils.h"
#include "tools/converter/converter_flags.h"
#include "include/context.h"
#include "include/lite_types.h"
namespace mindspore {
namespace opt {

// Singleton cache of graph topology information (per-node outputs and shapes)
// collected by the fisson/parallel optimizer passes.
// NOTE(review): not synchronized — assumed to be used from a single thread.
class Spliter {
 public:
  static Spliter *GetInstance();
  Spliter(const Spliter &) = delete;
  Spliter &operator=(const Spliter &) = delete;

  // Update current input node's outputs. If the candidate node has already
  // been recorded it is ignored; otherwise it is recorded.
  void UpdateNodeOutputs(const std::string &input_node_name, const AnfNodePtr &candidate_output);

  // Replace the recorded input shapes of `node_name`.
  void UpdateNodeInputShapes(const std::string &node_name, const std::vector<ShapeVector> &input_shapes);

  // Replace the recorded output shapes of `node_name`.
  void UpdateNodeOutputShapes(const std::string &node_name, const std::vector<ShapeVector> &output_shapes);

  // Accessors return const references: the previous by-value returns deep-
  // copied an entire unordered_map on every call.
  const std::unordered_map<std::string, std::vector<AnfNodePtr>> &graph_node_outputs() const {
    return graph_node_outputs_;
  }

  const std::unordered_map<std::string, std::vector<ShapeVector>> &graph_node_output_shapes() const {
    return graph_node_output_shapes_;
  }

  const std::unordered_map<std::string, std::vector<ShapeVector>> &graph_node_input_shapes() const {
    return graph_node_input_shapes_;
  }

 private:
  Spliter() = default;
  // Non-virtual: Spliter is not a base class, and the destructor is private.
  ~Spliter() = default;

 private:
  std::unordered_map<std::string, std::vector<AnfNodePtr>> graph_node_outputs_;
  std::unordered_map<std::string, std::vector<ShapeVector>> graph_node_output_shapes_;
  std::unordered_map<std::string, std::vector<ShapeVector>> graph_node_input_shapes_;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_SPLITER_H

Loading…
Cancel
Save