
fix static check on master

tags/v1.6.0
Yang Jiao, 4 years ago
commit bb62ae0777
27 changed files with 103 additions and 82 deletions
  1.  +1  -0   mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc
  2.  +4  -3   mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_json_generator.cc
  3.  +6  -6   mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc
  4.  +2  -2   mindspore/ccsrc/backend/kernel_compiler/akg/cpu/akg_cpu_kernel_mod.cc
  5.  +3  -3   mindspore/ccsrc/backend/kernel_compiler/akg/gpu/akg_gpu_kernel_mod.cc
  6.  +7  -6   mindspore/ccsrc/backend/kernel_compiler/gpu/custom/custom_aot_gpu_kernel.h
  7.  +6  -7   mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/callback_impl.cc
  8.  +2  -2   mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/fake_abstract_shape.cc
  9.  +1  -1   mindspore/ccsrc/backend/optimizer/graph_kernel/add_atomic_clean.cc
  10. +2  -4   mindspore/ccsrc/backend/optimizer/graph_kernel/add_stitch_atomic_clean_gpu.cc
  11. +7  -7   mindspore/ccsrc/backend/optimizer/graph_kernel/core/eliminate_redundant_output.cc
  12. +2  -2   mindspore/ccsrc/backend/optimizer/graph_kernel/core/graph_builder.cc
  13. +3  -3   mindspore/ccsrc/backend/optimizer/graph_kernel/core/shape_ops_splitter.cc
  14. +18 -14  mindspore/ccsrc/backend/optimizer/graph_kernel/core/update_state_formatter.cc
  15. +1  -1   mindspore/ccsrc/backend/optimizer/graph_kernel/depend_elimination.cc
  16. +0  -0   mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/standard_normal.cc (renamed)
  17. +8  -2   mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc
  18. +2  -2   mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_recompute.cc
  19. +2  -1   mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc
  20. +1  -1   mindspore/ccsrc/backend/optimizer/graph_kernel/insert_pad.cc
  21. +1  -0   mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h
  22. +3  -3   mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc
  23. +1  -1   mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h
  24. +3  -4   mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc
  25. +4  -0   mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.cc
  26. +9  -7   mindspore/ccsrc/backend/optimizer/graph_kernel/parallel_fusion.cc
  27. +4  -0   mindspore/ccsrc/backend/optimizer/graph_kernel/transform_op_optimizer.cc

+1 -0  mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc

@@ -565,6 +565,7 @@ void AkgKernelBuilder::LoadCache() {
 (void)bin_map->Insert(kernel_name, kernel_dir + kernel_json);
 }
 has_load = true;
+(void)closedir(dir);
 return;
 }
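
The added line fixes a handle leak flagged by the checker: the directory stream opened earlier with opendir was not released on this early-return path. A minimal standalone sketch of the corrected pattern (function and path names here are hypothetical, not from the commit):

#include <dirent.h>

// Hypothetical sketch: after a successful opendir, every exit path must
// run closedir; the (void) cast marks its return value as deliberately ignored.
void ScanKernelCache(const char *cache_dir) {
  DIR *dir = opendir(cache_dir);
  if (dir == nullptr) {
    return;  // nothing was opened, so nothing to close
  }
  while (dirent *entry = readdir(dir)) {
    (void)entry;  // process entry->d_name here
  }
  (void)closedir(dir);  // release the handle before returning
}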



+4 -3  mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_json_generator.cc

@@ -90,7 +90,8 @@ std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(cons
 accum_idx += LongToInt(dyn_input_sizes[dyn_i]);
 if (used_as_idx < accum_idx) {
 input_index.push_back(std::make_pair(
-anf_node, std::make_pair(dyn_i, IntToSize(used_as_idx - (accum_idx - dyn_input_sizes[dyn_i])))));
+anf_node,
+std::make_pair(dyn_i, IntToSize(used_as_idx - (accum_idx - LongToInt(dyn_input_sizes[dyn_i]))))));
 found = true;
 break;
 }
@@ -335,7 +336,7 @@ bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, con
 input_list.emplace_back(input_desc_json);
 real_input_index++;
 }
-inputs_json->emplace_back(input_list);
+(void)inputs_json->emplace_back(input_list);
 }
 return true;
 }
@@ -877,7 +878,7 @@ nlohmann::json AkgKernelJsonGenerator::CreateInputsJson(const std::vector<AnfNod
 input_shape.push_back(1);
 }
 input_desc_json[kJsonKeyShape] = input_shape;
-inputs_json.emplace_back(std::vector<nlohmann::json>{input_desc_json});
+(void)inputs_json.emplace_back(std::vector<nlohmann::json>{input_desc_json});
 }
 return inputs_json;
 }
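
Many edits in this commit follow one lint rule: a call whose result is deliberately unused gets an explicit (void) cast, so the checker does not flag a silently discarded return value. A minimal sketch of the idiom:

#include <vector>

int main() {
  std::vector<int> shapes;
  // Since C++17, emplace_back returns a reference to the inserted element;
  // casting to void documents that the result is intentionally unused.
  (void)shapes.emplace_back(1);
  return 0;
}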


+6 -6  mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc

@@ -73,12 +73,12 @@ bool AkgKernelMod::Launch(const std::vector<AddressPtr> &inputs, const std::vect
 // pack all addresses into a vector.
 std::vector<void *> runtime_args;
 (void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtime_args),
-[](const AddressPtr &input) -> void * { return input->addr; });
+[](const AddressPtr &input) { return input->addr; });
 (void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(runtime_args),
-[](const AddressPtr &output) -> void * { return output->addr; });
+[](const AddressPtr &output) { return output->addr; });
 if (!workspace.empty()) {
 (void)std::transform(std::begin(workspace), std::end(workspace), std::back_inserter(runtime_args),
-[](const AddressPtr &addr) -> void * { return addr->addr; });
+[](const AddressPtr &addr) { return addr->addr; });
 }

 rtL2Ctrl_t *l2ctrl = nullptr;
@@ -111,12 +111,12 @@ std::vector<TaskInfoPtr> AkgKernelMod::GenTask(const std::vector<AddressPtr> &in

 // pack all addresses into a vector.
 (void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(input_data_addrs),
-[](const AddressPtr &input) -> void * { return input->addr; });
+[](const AddressPtr &input) { return input->addr; });
 (void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(output_data_addrs),
-[](const AddressPtr &output) -> void * { return output->addr; });
+[](const AddressPtr &output) { return output->addr; });
 if (!workspace.empty()) {
 (void)std::transform(std::begin(workspace), std::end(workspace), std::back_inserter(workspace_addrs),
-[](const AddressPtr &workspace) -> void * { return workspace->addr; });
+[](const AddressPtr &workspace) { return workspace->addr; });
 }

 uint32_t block_dim = DEFAULT_BLOCK_DIM; // default blockdim equal to 1.
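
The hunks above drop the explicit "-> void *" trailing return type because it is redundant: each lambda's single return statement already deduces void *. A self-contained sketch of the equivalent call (Address and AddressPtr are stubs standing in for the real MindSpore types):

#include <algorithm>
#include <iterator>
#include <memory>
#include <vector>

struct Address { void *addr = nullptr; };
using AddressPtr = std::shared_ptr<Address>;  // stub of the real type

int main() {
  std::vector<AddressPtr> inputs;
  std::vector<void *> runtime_args;
  // input->addr is already void *, so the deduced return type matches the
  // old explicit "-> void *" annotation exactly.
  (void)std::transform(inputs.begin(), inputs.end(), std::back_inserter(runtime_args),
                       [](const AddressPtr &input) { return input->addr; });
  return 0;
}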


+2 -2  mindspore/ccsrc/backend/kernel_compiler/akg/cpu/akg_cpu_kernel_mod.cc

@@ -128,9 +128,9 @@ bool CpuKernelMod::Launch(const std::vector<AddressPtr> &inputs, const std::vect
 }
 std::vector<void *> runtimeargs;
 (void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtimeargs),
-[](const AddressPtr &input) -> void * { return input->addr; });
+[](const AddressPtr &input) { return input->addr; });
 (void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(runtimeargs),
-[](const AddressPtr &output) -> void * { return output->addr; });
+[](const AddressPtr &output) { return output->addr; });
 static AkgCallBack akg_callback;
 (void)runtimeargs.emplace_back(reinterpret_cast<void *>(&akg_callback));
 using AkgCpuKernelFunction = void (*)(void *);


+3 -3  mindspore/ccsrc/backend/kernel_compiler/akg/gpu/akg_gpu_kernel_mod.cc

@@ -120,12 +120,12 @@ bool GpuKernelMod::Launch(const std::vector<AddressPtr> &inputs, const std::vect
 }
 std::vector<void *> runtimeargs;
 (void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtimeargs),
-[](const AddressPtr &input) -> void * { return reinterpret_cast<void *>(&(input->addr)); });
+[](const AddressPtr &input) { return reinterpret_cast<void *>(&(input->addr)); });
 (void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(runtimeargs),
-[](const AddressPtr &output) -> void * { return reinterpret_cast<void *>(&(output->addr)); });
+[](const AddressPtr &output) { return reinterpret_cast<void *>(&(output->addr)); });
 if (!workspace.empty()) {
 (void)std::transform(std::begin(workspace), std::end(workspace), std::back_inserter(runtimeargs),
-[](const AddressPtr &addr) -> void * { return reinterpret_cast<void *>(&(addr->addr)); });
+[](const AddressPtr &addr) { return reinterpret_cast<void *>(&(addr->addr)); });
 }
 result = cuLaunchKernel(kernel_addr, thread_info[0], thread_info[1], thread_info[2], thread_info[3], thread_info[4],
 thread_info[5], 0, reinterpret_cast<CUstream>(stream_ptr),


+7 -6  mindspore/ccsrc/backend/kernel_compiler/gpu/custom/custom_aot_gpu_kernel.h

@@ -128,13 +128,13 @@ class CustomAOTGpuKernel : public GpuKernel {
 }
 for (size_t i = 0; i < num_input_; i++) {
-std::vector<size_t> in_shape = AnfAlgo::GetInputDeviceShape(kernel_node, i);
+auto in_shape = AnfAlgo::GetInputDeviceShape(kernel_node, i);
 std::vector<int64_t> in_shape_tmp;
 std::for_each(in_shape.begin(), in_shape.end(),
 [&in_shape_tmp](size_t c) { in_shape_tmp.push_back(SizeToLong(c)); });
-shape_list_.emplace_back(in_shape_tmp);
-ndims_.push_back(SizeToInt(in_shape_tmp.size()));
 type_list_.emplace_back(TypeIdToString(input_type_list[i], true));
+ndims_.push_back(SizeToInt(in_shape_tmp.size()));
+shape_list_.emplace_back(in_shape_tmp);
 }
 num_output_ = AnfAlgo::GetOutputTensorNum(kernel_node);
@@ -182,9 +182,6 @@ class CustomAOTGpuKernel : public GpuKernel {
 }
 private:
-std::vector<size_t> input_size_list_;
-std::vector<size_t> output_size_list_;
-std::vector<size_t> workspace_size_list_;
 std::vector<std::vector<int64_t>> shape_list_;
 std::vector<int> ndims_;
 std::vector<std::string> type_list_;
@@ -192,6 +189,10 @@ class CustomAOTGpuKernel : public GpuKernel {
 std::vector<int64_t *> shapes_;
 std::vector<const char *> type_pointer_list_;
+std::vector<size_t> input_size_list_;
+std::vector<size_t> output_size_list_;
+std::vector<size_t> workspace_size_list_;
 size_t num_input_;
 size_t num_output_;
 std::string file_path_;


+6 -7  mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/callback_impl.cc

@@ -126,19 +126,18 @@ void CallbackImpl::SetGraphKernelNodeKernelInfo(const AnfNodePtr &node) {
 }
 for (size_t i = 0; i < outputs.size(); ++i) {
 auto kernel_with_index = AnfAlgo::VisitKernel(outputs[i], 0);
-auto output_format = AnfAlgo::GetOutputFormat(kernel_with_index.first, kernel_with_index.second);
-auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel_with_index.first, kernel_with_index.second);
-graph_output_format.push_back(output_format);
-graph_output_type.push_back(output_type);
+graph_output_format.push_back(AnfAlgo::GetOutputFormat(kernel_with_index.first, kernel_with_index.second));
+graph_output_type.push_back(AnfAlgo::GetOutputDeviceDataType(kernel_with_index.first, kernel_with_index.second));
 }
 kernel::KernelBuildInfo::KernelBuildInfoBuilder graph_info_builder;
-graph_info_builder.SetProcessor(kernel::GetProcessorFromContext());
-graph_info_builder.SetKernelType(KernelType::AKG_KERNEL);
-graph_info_builder.SetFusionType(kernel::FusionType::OPAQUE);
 graph_info_builder.SetInputsFormat(graph_input_format);
 graph_info_builder.SetInputsDeviceType(graph_input_type);
 graph_info_builder.SetOutputsFormat(graph_output_format);
 graph_info_builder.SetOutputsDeviceType(graph_output_type);
+graph_info_builder.SetProcessor(kernel::GetProcessorFromContext());
+graph_info_builder.SetKernelType(KernelType::AKG_KERNEL);
+graph_info_builder.SetFusionType(kernel::FusionType::OPAQUE);

 auto graph_selected_info = graph_info_builder.Build();
 AnfAlgo::SetSelectKernelBuildInfo(graph_selected_info, node.get());
 }


+2 -2  mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/fake_abstract_shape.cc

@@ -48,7 +48,7 @@ class AbstractShapeCreator {
 private:
 static ShapeVector NchwAbstractShape(const ShapeVector &device_shape) { return device_shape; }
 static ShapeVector NhwcAbstractShape(const ShapeVector &device_shape) {
-constexpr size_t nhwc_size = 4;
+const size_t nhwc_size = 4;
 if (device_shape.size() != nhwc_size) {
 MS_LOG(EXCEPTION) << "Shape size of NHWC should be 4, but got " << device_shape.size();
 }
@@ -58,7 +58,7 @@
 if (device_shape.size() == 1 && (device_shape[0] == 1 || static_cast<size_t>(device_shape[0]) % kCubeSize == 0)) {
 return device_shape;
 }
-constexpr size_t nz_size = 4;
+const size_t nz_size = 4;
 if (device_shape.size() < nz_size) {
 MS_LOG(EXCEPTION) << "Shape size of FRACTAL_NZ should >= 4, but got " << device_shape.size();
 }
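
This file (like eliminate_redundant_output.cc and op_node.cc below) swaps constexpr locals for const, presumably to satisfy a checker rule; for an integral local initialized with a literal and used only in comparisons, the two forms are interchangeable:

#include <cstddef>

// Sketch: here const and constexpr both give a compile-time constant;
// the commit simply standardizes on const.
void CheckRank(std::size_t rank) {
  const std::size_t nhwc_size = 4;  // previously: constexpr size_t nhwc_size = 4;
  if (rank != nhwc_size) {
    // report the shape mismatch
  }
}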


+1 -1  mindspore/ccsrc/backend/optimizer/graph_kernel/add_atomic_clean.cc

@@ -526,7 +526,7 @@ std::vector<std::tuple<AnfNodePtr, int, AnfNodePtr>> AtomicCleanInsertter::FindO
 auto item_idx = GetValue<int64_t>(value_node->value());
 auto iter = real_indices_and_clean_node.find(LongToSize(item_idx));
 if (iter != real_indices_and_clean_node.end()) {
-getitem_user_nodes.emplace_back(node_index.first, iter->second);
+(void)getitem_user_nodes.emplace_back(node_index.first, iter->second);
 } else if (correct_index) {
 // Recorrect other getitem index.
 int64_t new_item_idx = CalNewIndex(item_idx, real_indices);


+2 -4  mindspore/ccsrc/backend/optimizer/graph_kernel/add_stitch_atomic_clean_gpu.cc

@@ -36,14 +36,12 @@ void StitchAtomicCleanInsertter::CorrectKernelBuildInfo(
 auto kernel_info = dynamic_cast<device::KernelInfo *>(composite_node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
 const auto &origin_kernel_build_info = kernel_info->GetMutableSelectKernelBuildInfo();
-auto origin_inputs_format = origin_kernel_build_info->GetAllInputFormats();
 auto origin_outputs_format = origin_kernel_build_info->GetAllOutputFormats();
-auto origin_inputs_type = origin_kernel_build_info->GetAllInputDeviceTypes();
 auto origin_outputs_type = origin_kernel_build_info->GetAllOutputDeviceTypes();
 auto origin_processor = origin_kernel_build_info->processor();

-std::vector<std::string> &new_inputs_format = origin_inputs_format;
-std::vector<TypeId> &new_inputs_type = origin_inputs_type;
+std::vector<std::string> new_inputs_format = origin_kernel_build_info->GetAllInputFormats();
+std::vector<TypeId> new_inputs_type = origin_kernel_build_info->GetAllInputDeviceTypes();
 std::vector<std::string> new_outputs_format;
 std::vector<TypeId> new_outputs_type;
 for (size_t i = 0; i < origin_outputs_format.size(); ++i) {


+7 -7  mindspore/ccsrc/backend/optimizer/graph_kernel/core/eliminate_redundant_output.cc

@@ -75,7 +75,7 @@ bool GetGraphKernelGetitemList(const FuncGraphManagerPtr &mng, const AnfNodePtr
 MS_LOG(EXCEPTION) << "Index of GetItem is out of range of MakeTuple. getitem node: " << getitem->DebugString();
 }
 if (merge_repeated_getitem && (*getitem_list)[idx] != nullptr) {
-mng->Replace(getitem, (*getitem_list)[idx]);
+(void)mng->Replace(getitem, (*getitem_list)[idx]);
 changed = true;
 } else {
 (*getitem_list)[idx] = getitem;
@@ -87,7 +87,7 @@ bool GetGraphKernelGetitemList(const FuncGraphManagerPtr &mng, const AnfNodePtr
 AnfNodePtrList FindGraphKernelsWithMultiOutput(const FuncGraphPtr &func_graph) {
 auto todos = TopoSort(func_graph->get_return());
 AnfNodePtrList result;
-std::copy_if(todos.begin(), todos.end(), std::back_inserter(result), [](const AnfNodePtr &node) {
+(void)std::copy_if(todos.begin(), todos.end(), std::back_inserter(result), [](const AnfNodePtr &node) {
 return AnfUtils::IsGraphKernel(node) && IsPrimitiveCNode(GetCNodeFuncGraph(node)->output(), prim::kPrimMakeTuple);
 });
 return result;
@@ -127,7 +127,7 @@ class UnifyRepeatedOutput : public opt::Pass {
 if (CheckRepeatedOutput(GetCNodeFuncGraph(node))) {
 changed = true;
 AnfNodePtrList getitem_list;
-GetGraphKernelGetitemList(mng, node, &getitem_list, false);
+(void)GetGraphKernelGetitemList(mng, node, &getitem_list, false);
 if (getitem_list.size() != index_map_.size()) {
 MS_LOG(EXCEPTION) << "getitem_list.size (" << getitem_list.size() << ") should be equal to index_map.size ("
 << index_map_.size() << ").";
@@ -242,7 +242,7 @@ AnfNodePtr EliminateHangingOutput::ReplaceMakeTuple(const AnfNodePtr &node, cons
 if (new_maketuple_inputs.size() == 1) {
 MS_LOG(EXCEPTION) << "Input of MakeTuple could not be empty";
 }
-constexpr size_t maketuple_one_input_size = 2;
+const size_t maketuple_one_input_size = 2;
 if (new_maketuple_inputs.size() == maketuple_one_input_size) {
 func_graph->set_output(new_maketuple_inputs.back());
 } else {
@@ -266,17 +266,17 @@ bool EliminateHangingOutput::Run(const FuncGraphPtr &func_graph) {
 bool changed = false;
 for (auto node : todos) {
 AnfNodePtrList getitems;
-GetGraphKernelGetitemList(mng, node, &getitems, false);
+(void)GetGraphKernelGetitemList(mng, node, &getitems, false);
 auto new_node = ReplaceMakeTuple(node, getitems);
 if (new_node != nullptr) {
 if (!IsPrimitiveCNode(GetCNodeFuncGraph(new_node)->output(), prim::kPrimMakeTuple)) {
 // only one output, remove the getitem.
 auto i = std::find_if(getitems.begin(), getitems.end(), [](const AnfNodePtr &node) { return node != nullptr; });
 if (i != getitems.end()) {
-mng->Replace(*i, new_node);
+(void)mng->Replace(*i, new_node);
 }
 } else {
-mng->Replace(node, new_node);
+(void)mng->Replace(node, new_node);
 }
 changed = true;
 }


+2 -2  mindspore/ccsrc/backend/optimizer/graph_kernel/core/graph_builder.cc

@@ -102,7 +102,7 @@ void EliminateMakeTuple(const FuncGraphPtr &fg) {
 fg->output()->set_abstract(std::make_shared<abstract::AbstractTuple>(abs_list));
 }

-bool ConvertNonscalarTensorToParameter(const FuncGraphPtr &fg, AnfNodePtrList *inputs_ptr) {
+bool ConvertNonscalarTensorToParameter(const FuncGraphPtr &fg, AnfNodePtrList *const inputs_ptr) {
 auto cnodes = fg->GetOrderedCnodes();
 std::set<AnfNodePtr> value_nodes;
 for (const auto &cnode : cnodes) {
@@ -189,7 +189,7 @@ void ReplaceNewFuseCNode(const FuncGraphPtr &func_graph, const AnfNodePtr &new_f
 }

 // remove parameter which is not used
-void EliminateRedundantParameters(const FuncGraphPtr &func_graph, AnfNodePtrList *inputs) {
+void EliminateRedundantParameters(const FuncGraphPtr &func_graph, AnfNodePtrList *const inputs) {
 MS_EXCEPTION_IF_NULL(inputs);
 const auto &ori_parameter = func_graph->parameters();
 auto todos = TopoSort(func_graph->get_return());
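
The *const added to these pointer parameters (and to several more functions below) is a top-level const: it promises the function body never reseats the pointer, while the pointee stays mutable and call sites are unaffected. A sketch:

#include <vector>

// "T *const p": p itself cannot be reassigned inside the body,
// but the object it points to can still be modified.
void AppendDefault(std::vector<int> *const inputs) {
  inputs->push_back(0);  // fine: the pointee is non-const
  // inputs = nullptr;   // would not compile: the pointer is const
}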


+3 -3  mindspore/ccsrc/backend/optimizer/graph_kernel/core/shape_ops_splitter.cc

@@ -46,7 +46,7 @@ AnfNodePtr CloneCNode(const AnfNodePtr &anf_node) {
 void SplitNode(const AnfNodePtr &node, const FuncGraphManagerPtr &mng) {
 const auto &index_set = mng->node_users()[node];
 std::map<AnfNodePtr, std::vector<int>> users_info;
-std::for_each(index_set.cbegin(), index_set.cend(), [&users_info](const std::pair<AnfNodePtr, int> &iter) {
+(void)std::for_each(index_set.cbegin(), index_set.cend(), [&users_info](const std::pair<AnfNodePtr, int> &iter) {
 users_info[iter.first].push_back(iter.second);
 });

@@ -70,8 +70,8 @@ void SplitNode(const AnfNodePtr &node, const FuncGraphManagerPtr &mng) {
 bool ShapeOpsSplitter::IsMultiUserShapeOps(const AnfNodePtr &node, const FuncGraphManagerPtr &mng) const {
 auto &users = mng->node_users();
 std::set<AnfNodePtr> user_set;
-std::transform(users[node].cbegin(), users[node].cend(), std::inserter(user_set, user_set.end()),
-[](const std::pair<AnfNodePtr, int> &iter) { return iter.first; });
+(void)std::transform(users[node].cbegin(), users[node].cend(), std::inserter(user_set, user_set.end()),
+[](const std::pair<AnfNodePtr, int> &iter) { return iter.first; });
 return user_set.size() > 1 && std::any_of(shape_ops_.begin(), shape_ops_.end(),
 [&node](const PrimitivePtr &prim) { return IsPrimitiveCNode(node, prim); });
 }


+18 -14  mindspore/ccsrc/backend/optimizer/graph_kernel/core/update_state_formatter.cc

@@ -29,8 +29,8 @@ namespace mindspore::graphkernel {
 AnfNodePtrList GetUpdateStateList(const FuncGraphPtr &func_graph) {
 auto todos = TopoSort(func_graph->get_return());
 AnfNodePtrList result;
-std::copy_if(todos.begin(), todos.end(), std::back_inserter(result),
-[](const AnfNodePtr &node) { return IsPrimitiveCNode(node, prim::kPrimUpdateState); });
+(void)std::copy_if(todos.begin(), todos.end(), std::back_inserter(result),
+[](const AnfNodePtr &node) { return IsPrimitiveCNode(node, prim::kPrimUpdateState); });
 return result;
 }

@@ -75,7 +75,7 @@ bool SpreadUpdateState::Run(const FuncGraphPtr &func_graph) {
 inputs = ExtendInputsOfUpdateState(inputs, func_graph);
 if (inputs.size() + kUpdateStateRealInput != cnode->size() || inputs[0] != cnode->input(kUpdateStateRealInput)) {
 AnfNodePtrList node_inputs = {cnode->input(kAnfPrimitiveIndex), cnode->input(kUpdateStateStateInput)};
-node_inputs.insert(node_inputs.end(), inputs.begin(), inputs.end());
+(void)node_inputs.insert(node_inputs.end(), inputs.begin(), inputs.end());
 // Create a new UpdateState
 auto new_node = func_graph->NewCNode(node_inputs);
 new_node->set_abstract(node->abstract());
@@ -97,9 +97,9 @@ bool ShrinkUpdateState::Run(const FuncGraphPtr &func_graph) {
 if (cnode->size() <= kUpdateStateRealInput + 1) continue;
 AnfNodePtrList mt_inputs = GkUtils::SpreadTuples(cnode->inputs(), kUpdateStateRealInput);
 AbstractBasePtrList abs_list;
-std::transform(mt_inputs.begin(), mt_inputs.end(), std::back_inserter(abs_list),
-[](const AnfNodePtr &inp) { return inp->abstract(); });
-mt_inputs.insert(mt_inputs.begin(), NewValueNode(prim::kPrimMakeTuple));
+(void)std::transform(mt_inputs.begin(), mt_inputs.end(), std::back_inserter(abs_list),
+[](const AnfNodePtr &inp) { return inp->abstract(); });
+(void)mt_inputs.insert(mt_inputs.begin(), NewValueNode(prim::kPrimMakeTuple));
 auto mt_node = func_graph->NewCNode(mt_inputs);
 mt_node->set_abstract(std::make_shared<abstract::AbstractTuple>(abs_list));
 Callback::Instance()->SetEmptyKernelInfo(mt_node);
@@ -120,7 +120,7 @@ bool ExtendOutputForUpdateState::Run(const FuncGraphPtr &func_graph) {
 MS_EXCEPTION_IF_NULL(mng);
 bool changed = false;
 for (const auto &node : todos) {
-GetGraphKernelGetitemList(mng, node, &getitems_, false);
+(void)GetGraphKernelGetitemList(mng, node, &getitems_, false);
 if (getitems_.empty()) continue;
 FindIndexesToUpdateState(mng);
 if (indexes_.empty()) continue;
@@ -133,8 +133,12 @@ bool ExtendOutputForUpdateState::Run(const FuncGraphPtr &func_graph) {
 }
 if (changed) {
 GkUtils::UpdateFuncGraphManager(mng, func_graph);
-std::make_shared<SpreadUpdateState>()->Run(func_graph);
-std::make_shared<EliminateHangingOutput>()->Run(func_graph);
+auto spread_update_state = std::make_shared<SpreadUpdateState>();
+MS_EXCEPTION_IF_NULL(spread_update_state);
+(void)spread_update_state->Run(func_graph);
+auto elim_hanging_output = std::make_shared<EliminateHangingOutput>();
+MS_EXCEPTION_IF_NULL(elim_hanging_output);
+(void)elim_hanging_output->Run(func_graph);
 }
 return changed;
 }
@@ -164,9 +168,9 @@ void ExtendOutputForUpdateState::FindIndexesToUpdateState(const FuncGraphManager
 void ExtendOutputForUpdateState::FilterIndexes(const FuncGraphPtr &func_graph) {
 auto output_node = func_graph->output()->cast<CNodePtr>();
 // do not process the side-effect nodes.
-indexes_.erase(std::remove_if(indexes_.begin(), indexes_.end(),
-[&output_node](size_t i) { return IsSideEffectNode(output_node->input(i + 1)); }),
-indexes_.end());
+(void)indexes_.erase(std::remove_if(indexes_.begin(), indexes_.end(),
+[&output_node](size_t i) { return IsSideEffectNode(output_node->input(i + 1)); }),
+indexes_.end());
 }

 std::vector<size_t> ExtendOutputForUpdateState::FindAllOutputs(const FuncGraphPtr &func_graph, size_t index) {
@@ -222,8 +226,8 @@ bool ExtendOutputForUpdateState::ProcessIndex(const FuncGraphPtr &func_graph, co
 // Create MakeTuple, even though the group size is 1, the following pass will spread the MakeTuple,
 // so it's unnecessary to set abstract for it.
 AnfNodePtrList mt_input = {NewValueNode(prim::kPrimMakeTuple)};
-std::transform(group.begin(), group.end(), std::back_inserter(mt_input),
-[this](size_t idx) { return getitems_[idx]; });
+(void)std::transform(group.begin(), group.end(), std::back_inserter(mt_input),
+[this](size_t idx) { return getitems_[idx]; });
 new_node = func_graph->NewCNode(mt_input)->cast<AnfNodePtr>();
 }
 auto mng = func_graph->manager();


+1 -1  mindspore/ccsrc/backend/optimizer/graph_kernel/depend_elimination.cc

@@ -35,7 +35,7 @@ bool DependElimination::Run(const FuncGraphPtr &func_graph) {
 continue;
 }
 if (inputs[kRealInputIndexInDepend] == inputs[kDependAttachNodeIndex]) {
-mng->Replace(node, inputs[kRealInputIndexInDepend]);
+(void)mng->Replace(node, inputs[kRealInputIndexInDepend]);
 MS_LOG(INFO) << "Depend node has been replaced by " << inputs[kRealInputIndexInDepend];
 }
 }


+0 -0  mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/standardnormal.cc → mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/standard_normal.cc (renamed, no content changes)


+8 -2  mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc

@@ -42,8 +42,14 @@ bool IsCNodePrimitveEqual(const CNodePtr &main, const CNodePtr &node, const std:
 auto node_attrs = node_primitive->attrs();
 std::vector<std::string> exclude_attrs{"IsFeatureMapOutput", "IsFeatureMapInputList", "pri_format"};
 for (auto &attr : exclude_attrs) {
-main_attrs.erase(attr);
-node_attrs.erase(attr);
+auto main_attrs_iter = main_attrs.find(attr);
+if (main_attrs_iter != main_attrs.end()) {
+(void)main_attrs.erase(main_attrs_iter);
+}
+auto node_attrs_iter = node_attrs.find(attr);
+if (node_attrs_iter != node_attrs.end()) {
+(void)node_attrs.erase(node_attrs_iter);
+}
 }
 return common::IsAttrsEqual(main_attrs, node_attrs);
 }
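
The rewritten loop replaces erase-by-key, whose returned count was being discarded, with a find followed by erase-by-iterator, which only touches keys that actually exist. A sketch of the pattern, using std::unordered_map in place of the real attrs map type:

#include <string>
#include <unordered_map>

// Sketch: look the key up first, erase through the iterator, and make the
// deliberately ignored return value explicit with (void).
void DropAttr(std::unordered_map<std::string, int> *attrs, const std::string &key) {
  auto iter = attrs->find(key);
  if (iter != attrs->end()) {
    (void)attrs->erase(iter);
  }
}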


+2 -2  mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_recompute.cc

@@ -67,7 +67,7 @@ using NextFunc = std::function<AnfNodePtrList(const AnfNodePtr &)>;
 using ProcessFunc = std::function<void(const AnfNodePtr &)>;

 void Dfs(const AnfNodePtr &current, const VisitFunc &visit_func, const NextFunc &next_func,
-const ProcessFunc &before_func, const ProcessFunc &after_func, std::set<AnfNodePtr> *visited) {
+const ProcessFunc &before_func, const ProcessFunc &after_func, std::set<AnfNodePtr> *const visited) {
 if (visited->count(current) > 0) {
 return;
 }
@@ -171,7 +171,7 @@ OrderedSet<AnfNodePtr> GetLongTermNodes(const AnfNodePtrList &nodes, const AnfNo
 * @param func_graph Graph.
 * @param inputs Real inputs for graph cnode.
 */
-void ElimRedundantInputsAndGraphParameters(const FuncGraphPtr &func_graph, AnfNodePtrList *inputs) {
+void ElimRedundantInputsAndGraphParameters(const FuncGraphPtr &func_graph, AnfNodePtrList *const inputs) {
 MS_EXCEPTION_IF_NULL(inputs);
 const auto &ori_parameter = func_graph->parameters();
 auto nodes = TopoSort(func_graph->get_return());


+2 -1  mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc

@@ -247,7 +247,8 @@ class Area {
 }

 // Make a return node for traitor nodes.
-void CreateReturnNode(const FuncGraphPtr &func_graph, mindspore::HashMap<AnfNodePtr, size_t> *tuple_node_index) {
+void CreateReturnNode(const FuncGraphPtr &func_graph,
+mindspore::HashMap<AnfNodePtr, size_t> *const tuple_node_index) {
 // If there's no traitor in the area, it means that this area is the last part
 // of the original FuncGraph, it already contains the original Return node.
 if (traitor_nodes_.empty()) {


+1 -1  mindspore/ccsrc/backend/optimizer/graph_kernel/insert_pad.cc

@@ -109,7 +109,7 @@ bool IsAkgMatMul(size_t K, size_t M, size_t N) {
 std::tuple<bool, bool, bool> NeedPad(const CNodePtr &matmul, vec *pad_shape_a, vec *pad_shape_b, vec *unpad_shape,
 vec *tail_shape_a, vec *tail_shape_b, vec *tail_shape_unpad) {
 auto mm_attrs = AnfAlgo::GetCNodePrimitive(matmul)->attrs();
-if (!mm_attrs.count("transpose_a") || !mm_attrs.count("transpose_b")) {
+if (mm_attrs.count("transpose_a") != 0 || mm_attrs.count("transpose_b") != 0) {
 MS_LOG(ERROR) << "attrs transpose_a and transpose_b need to be set";
 return std::tuple(false, false, false);
 }


+1 -0  mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h

@@ -61,6 +61,7 @@ using LiteGraphPtr = std::shared_ptr<LiteGraph>;
 class LiteGraph::GraphBuilder {
 public:
 explicit GraphBuilder(const std::string &name = "") { graph_ = std::make_shared<LiteGraph>(name); }
+~GraphBuilder() = default;
 // Create a parameter of graph
 NodePtr Parameter(const NodeBase &baseinfo) {
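
This commit also adds explicitly defaulted destructors here and in op_register.cc and transform_op_optimizer.cc, presumably to satisfy a rule that classes declare their destructor; "= default" keeps the compiler-generated behavior:

// Sketch: a user-declared but defaulted destructor behaves exactly like
// the implicitly generated one.
class BuilderSketch {
 public:
  BuilderSketch() = default;
  ~BuilderSketch() = default;
};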


+3 -3  mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc

@@ -77,11 +77,11 @@ void Node::ReplaceWith(const NodePtr &other_node) {
 }
 }
-void Node::RemoveUser(Node *user, size_t index) {
+void Node::RemoveUser(Node *const user, size_t index) {
 if (auto iter = users_.find(user); iter != users_.end()) {
-iter->second.erase(index);
+(void)iter->second.erase(index);
 if (iter->second.empty()) {
-users_.erase(iter);
+(void)users_.erase(iter);
 }
 }
 }


+1 -1  mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h

@@ -87,7 +87,7 @@ class Node : public NodeBase, public std::enable_shared_from_this<Node> {
 private:
 // the nodes' users are only maintained by AddInput/SetInput.
 void AddUser(Node *user, size_t index) { users_[user].insert(index); }
-void RemoveUser(Node *user, size_t index);
+void RemoveUser(Node *const user, size_t index);
 };
 class ConstTensorNode : public Node {


+3 -4  mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc

@@ -139,7 +139,6 @@ tensor::TensorPtr CalcByOperator(const NodePtrList &inputs, const std::string &o
 {"Sqrt", [](const std::vector<TM> &n) { return sqrt(n[0]); }},
 {"Rsqrt", [](const std::vector<TM> &n) { return TM(1) / sqrt(n[0]); }},
 };
-
 if (func_map.find(op) == func_map.end()) {
 return nullptr;
 }
@@ -406,9 +405,9 @@ DShape Conv2dOp::InferShape(const NodePtrList &inputs, const DAttrs &attrs) {
 GetValue<std::string>(attrs.find("format")->second) != kOpFormat_NHWC) {
 MS_LOG(EXCEPTION) << "check NHWC format failed";
 }
-constexpr auto axis_n = 0;
-constexpr auto axis_h = 1;
-constexpr auto axis_w = 2;
+const auto axis_n = 0;
+const auto axis_h = 1;
+const auto axis_w = 2;
 auto n = shape0[axis_n];
 auto h = shape0[axis_h];
 auto w = shape0[axis_w];


+4 -0  mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.cc

@@ -22,6 +22,10 @@ class OpRegister {
 public:
 OpRegister(const std::string &name, const CreatorFunc &func) { OpRegistry::Instance().Register(name, func); }
+~OpRegister() = default;
+ private:
+// for pclint-plus
+bool rev_{false};
 };
 #define JOIN(x, y) x##y


+9 -7  mindspore/ccsrc/backend/optimizer/graph_kernel/parallel_fusion.cc

@@ -40,7 +40,7 @@ bool IsOneOf(const AnfNodePtr &node, const std::vector<PrimitivePtr> &ops_prim)
 }

 void ProcessThroughPassCNode(const std::function<bool(const AnfNodePtr &)> &pass_fn,
-OrderedMap<AnfNodePtr, NodeRelation> *node_rels) {
+OrderedMap<AnfNodePtr, NodeRelation> *const node_rels) {
 std::set<AnfNodePtr> latter_to_be_erased;
 for (const auto &[node, node_rel] : (*node_rels)) {
 if (!pass_fn(node) || latter_to_be_erased.count(node) != 0) {
@@ -90,7 +90,7 @@ void ProcessThroughPassCNode(const std::function<bool(const AnfNodePtr &)> &pass
 }
 }

-void ProcessTailMakeTupleCNode(OrderedMap<AnfNodePtr, NodeRelation> *node_rels) {
+void ProcessTailMakeTupleCNode(OrderedMap<AnfNodePtr, NodeRelation> *const node_rels) {
 AnfNodePtrList latter_to_be_erased;
 for (auto &[node, node_rel] : (*node_rels)) {
 if (!IsPrimitiveCNode(node, prim::kPrimMakeTuple)) {
@@ -177,8 +177,8 @@ bool IsNoOutputsNode(const OrderedMap<AnfNodePtr, NodeRelation> &node_rels, cons
 return false;
 }

-void ProcessLocalStructure(OrderedMap<AnfNodePtr, NodeRelation> *node_rels, std::set<AnfNodePtr> *virtual_noout_nodes,
-std::set<AnfNodePtr> *ignore_noin_nodes) {
+void ProcessLocalStructure(OrderedMap<AnfNodePtr, NodeRelation> *node_rels,
+std::set<AnfNodePtr> *const virtual_noout_nodes, std::set<AnfNodePtr> *ignore_noin_nodes) {
 // 1. Local relation
 // Graph as following left part, relation D->B and D->E(D is a no input node)
 // will make B and E to be multiply inputs node.
@@ -302,7 +302,7 @@ bool Parallelizable(const AnfNodePtr &node) { return WhiteOpsFilter(node) && !Un
 std::vector<AnfNodePtrList> SearchFromNodes(const AnfNodePtrList &nodes,
 const std::function<bool(const AnfNodePtr &)> &filter_func,
 const OrderedMap<AnfNodePtr, NodeRelation> &node_rels, bool is_backward,
-std::set<AnfNodePtr> *seen) {
+std::set<AnfNodePtr> *const seen) {
 // Start from multi-inputs node, stop on seen node or multi-inputs or multi-outputs nodes.
 // For backward search, the other multi-inputs node can be contained in.
 // For forward search, the other multi-outputs node can be contained in.
@@ -343,7 +343,8 @@ std::vector<AnfNodePtrList> SearchFromNodes(const AnfNodePtrList &nodes,

 void SearchStreamFromMultiRelationNode(const AnfNodePtrList &multi_nodes,
 const OrderedMap<AnfNodePtr, NodeRelation> &node_rels, bool is_backward,
-std::vector<std::vector<AnfNodePtrList>> *groups, std::set<AnfNodePtr> *seen) {
+std::vector<std::vector<AnfNodePtrList>> *groups,
+std::set<AnfNodePtr> *const seen) {
 auto get_related_nodes = is_backward ? [](const NodeRelation &info) { return info.pres; }
 : [](const NodeRelation &info) { return info.nexts; };
 for (const auto &node : multi_nodes) {
@@ -366,7 +367,8 @@ void SearchStreamFromMultiRelationNode(const AnfNodePtrList &multi_nodes,

 void SearchStreamFromUnidirectionalNode(const AnfNodePtrList &ud_nodes,
 const OrderedMap<AnfNodePtr, NodeRelation> &node_rels, bool is_backward,
-std::vector<std::vector<AnfNodePtrList>> *groups, std::set<AnfNodePtr> *seen) {
+std::vector<std::vector<AnfNodePtrList>> *groups,
+std::set<AnfNodePtr> *const seen) {
 groups->push_back(SearchFromNodes(ud_nodes, Parallelizable, node_rels, is_backward, seen));

 // Erase empty groups.


+4 -0  mindspore/ccsrc/backend/optimizer/graph_kernel/transform_op_optimizer.cc

@@ -146,6 +146,7 @@ class MinCut {
 original_edges_(original_edges) {
 BuildGraph(original_nodes);
 }
+~MinCut() = default;

 void Run() {
 Dinic();
@@ -199,6 +200,7 @@ class TransformOp {
 public:
 explicit TransformOp(const NodePtr &node)
 : op_(node->As<PrimOp>()->op()), format_a_(node->input(0)->format), format_b_(node->format) {}
+~TransformOp() = default;
 bool IsTransformOp(const NodePtr &node) {
 if (node->NodeType() != NType::Primitive || node->As<PrimOp>()->op() != op_) {
 return false;
 }
@@ -266,6 +268,8 @@ bool IsFlexibleOp(const NodePtr &node) {
 class Mutator {
 public:
 explicit Mutator(const NodePtr &node) : op_checker_(node), basenode_(node), ori_node_(1) {}
+~Mutator() = default;
+
 bool Run() {
 VisitNode(basenode_);
 if (flexible_ops_.empty()) return false;

