Browse Source

[MSLITE] Codex clean.

feature/build-system-rewrite
wang_shaocong 4 years ago
parent
commit
57eb0933a9
8 changed files with 126 additions and 33 deletions
  1. +45
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_infer.c
  2. +31
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_infer.h
  3. +22
    -8
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c
  4. +3
    -1
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h
  5. +1
    -1
      mindspore/lite/src/inner_kernel.h
  6. +3
    -3
      mindspore/lite/src/weight_decoder.h
  7. +15
    -14
      mindspore/lite/tools/optimizer/graph/control_flow_pass.cc
  8. +6
    -6
      mindspore/lite/tools/optimizer/graph/control_flow_pass.h

+ 45
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_infer.c View File

@@ -0,0 +1,45 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/infer/bias_infer.h"
#include "nnacl/infer/infer_register.h"

/*
 * Shape inference for BiasAdd.
 *
 * inputs[0]: data tensor (rank >= 1), inputs[1]: bias tensor (must be 1-D).
 * outputs[0] copies inputs[0]'s data type, format and shape.
 * Returns NNACL_OK on success, NNACL_INFER_INVALID when shapes are not yet
 * known, NNACL_ERR on a rank/size mismatch.
 */
int BiasInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                   OpParameter *parameter) {
  // Exactly 2 inputs and 1 output, all non-null.
  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }
  SetDataTypeFormat(outputs[0], inputs[0]);
  if (!InferFlag(inputs, inputs_size)) {
    return NNACL_INFER_INVALID;
  }

  MS_CHECK_TRUE_RET(inputs[0]->shape_size_ >= 1, NNACL_ERR);
  MS_CHECK_TRUE_RET(inputs[1]->shape_size_ == 1, NNACL_ERR);
  // The bias length must match the channel dimension: the last axis by
  // default, axis 1 for channel-first layouts.
  size_t dim = inputs[0]->shape_size_ - 1;
  if (inputs[0]->format_ == Format_KCHW || inputs[0]->format_ == Format_NCHW) {
    dim = 1;
  }
  // Guard: a rank-1 input with a channel-first format would otherwise read
  // shape_[1] out of bounds.
  MS_CHECK_TRUE_RET(dim < inputs[0]->shape_size_, NNACL_ERR);
  if (inputs[0]->shape_[dim] != inputs[1]->shape_[0]) {
    return NNACL_ERR;
  }
  SetShapeTensor(outputs[0], inputs[0]);

  return NNACL_OK;
}

REG_INFER(BiasAdd, PrimType_BiasAdd, BiasInferShape)

+ 31
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_infer.h View File

@@ -0,0 +1,31 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_NNACL_BIAS_INFER_H
#define MINDSPORE_NNACL_BIAS_INFER_H

#include "nnacl/infer/common_infer.h"

#ifdef __cplusplus
extern "C" {
#endif

// Shape inference for BiasAdd: outputs[0] copies inputs[0]'s data type,
// format and shape. Expects two inputs (data, 1-D bias) and one output;
// the bias length must match inputs[0]'s channel dimension.
// Returns NNACL_OK, NNACL_INFER_INVALID (shape not yet known), or NNACL_ERR.
int BiasInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);

#ifdef __cplusplus
}
#endif
#endif  // MINDSPORE_NNACL_BIAS_INFER_H

+ 22
- 8
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
#include <stdlib.h>
#include <string.h>
#include "nnacl/infer/infer_register.h"
#include "backend/kernel_compiler/cpu/nnacl/op_base.h"
#include "nnacl/op_base.h"

#ifndef CONTROLFLOW_TENSORLIST_CLIP
int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, const vvector *tensor_shape) {
@@ -374,6 +374,21 @@ int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
return NNACL_OK;
}

/*
 * Shared shape inference for elementwise gradient kernels (AbsGrad, NegGrad,
 * PowerGrad, ...): outputs[0] mirrors inputs[0]'s data type, format and
 * shape, provided both inputs have the same rank.
 */
int CommonGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                         OpParameter *parameter) {
  // Grad kernels take exactly two inputs (dy and x); all pointers non-null.
  const int check_ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 2);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }
  // Propagate dtype/format even when the shape is not yet inferable.
  SetDataTypeFormat(outputs[0], inputs[0]);
  if (!InferFlag(inputs, inputs_size)) {
    return NNACL_INFER_INVALID;
  }
  MS_CHECK_TRUE_RET(inputs[0]->shape_size_ == inputs[1]->shape_size_, NNACL_ERR);
  SetShapeTensor(outputs[0], inputs[0]);
  return NNACL_OK;
}

int CommonInferShapeWithOneInput(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
size_t outputs_size, OpParameter *parameter) {
int ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 1);
@@ -460,12 +475,11 @@ bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
}

REG_INFER(Abs, PrimType_Abs, CommonInferShape)
REG_INFER(AbsGrad, PrimType_AbsGrad, CommonInferShape)
REG_INFER(AbsGrad, PrimType_AbsGrad, CommonGradInferShape)
REG_INFER(Activation, PrimType_Activation, CommonInferShape)
REG_INFER(ActivationGrad, PrimType_ActivationGrad, CommonInferShape)
REG_INFER(ActivationGrad, PrimType_ActivationGrad, CommonGradInferShape)
REG_INFER(BatchNorm, PrimType_BatchNorm, CommonInferShape)
REG_INFER(BinaryCrossEntropyGrad, PrimType_BinaryCrossEntropyGrad, CommonInferShape)
REG_INFER(BiasAdd, PrimType_BiasAdd, CommonInferShape)
REG_INFER(Ceil, PrimType_Ceil, CommonInferShape)
REG_INFER(Clip, PrimType_Clip, CommonInferShape)
REG_INFER(Cos, PrimType_Cos, CommonInferShape)
@@ -478,13 +492,13 @@ REG_INFER(Floor, PrimType_Floor, CommonInferShapeWithOneInput)
REG_INFER(IsFinite, PrimType_IsFinite, CommonInferShape)
REG_INFER(LeakyRelu, PrimType_LeakyRelu, CommonInferShape)
REG_INFER(Log, PrimType_Log, CommonInferShape)
REG_INFER(LogGrad, PrimType_LogGrad, CommonInferShape)
REG_INFER(LogGrad, PrimType_LogGrad, CommonGradInferShape)
REG_INFER(LogicalNot, PrimType_LogicalNot, CommonInferShape)
REG_INFER(LRN, PrimType_LRN, CommonInferShapeWithNHWC)
REG_INFER(L2Normalize, PrimType_L2NormalizeFusion, CommonInferShape)
REG_INFER(Neg, PrimType_Neg, CommonInferShape)
REG_INFER(NegGrad, PrimType_NegGrad, CommonInferShape)
REG_INFER(PowerGrad, PrimType_PowerGrad, CommonInferShape)
REG_INFER(NegGrad, PrimType_NegGrad, CommonGradInferShape)
REG_INFER(PowerGrad, PrimType_PowerGrad, CommonGradInferShape)
REG_INFER(PReLU, PrimType_PReLUFusion, CommonInferShape)
REG_INFER(Reciprocal, PrimType_Reciprocal, CommonInferShape)
REG_INFER(ReverseSequence, PrimType_ReverseSequence, CommonInferShape)


+ 3
- 1
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -204,6 +204,8 @@ int imax(int a, int b);

int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);
int CommonGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);
int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
const OpParameter *parameter);



+ 1
- 1
mindspore/lite/src/inner_kernel.h View File

@@ -78,7 +78,7 @@ class InnerKernel : public Kernel {

bool InferShapeDone() const {
if (std::any_of(in_tensors_.begin(), in_tensors_.end(),
[](lite::Tensor *input) { return input->data_type() == kObjectTypeTensorType; })) {
[](const lite::Tensor *input) { return input->data_type() == kObjectTypeTensorType; })) {
return false;
}
auto shape = out_tensors_.front()->shape();


+ 3
- 3
mindspore/lite/src/weight_decoder.h View File

@@ -57,7 +57,7 @@ STATUS UnIndexTensorData(const std::vector<int> &unique_values, const std::vecto
if (un_indexed_data.size() * sizeof(T) != dst_data_size) {
MS_LOG(ERROR) << "un idnexed data size: " << un_indexed_data.size() * sizeof(T)
<< " expected by tensor: " << dst_data_size;
return false;
return RET_ERROR;
}
memcpy(dst_data, un_indexed_data.data(), un_indexed_data.size() * sizeof(T));

@@ -102,12 +102,12 @@ STATUS UnSparseTensorData(const std::vector<int> &unique_values, const std::vect
if (un_sparsed_data.size() * sizeof(T) > dst_data_size) {
MS_LOG(ERROR) << "un-sparsed data size: " << un_sparsed_data.size() * sizeof(T)
<< " tensor size: " << dst_data_size;
return false;
return RET_ERROR;
} else if (un_sparsed_data.size() * sizeof(T) < dst_data_size &&
(un_sparsed_data.size() + (1 << coor_best_bit) - 1) * sizeof(T) < dst_data_size) {
MS_LOG(ERROR) << "un-sparsed data size: " << un_sparsed_data.size() * sizeof(T) << " tensor size: " << dst_data_size
<< " coor_best_bit: " << coor_best_bit;
return false;
return RET_ERROR;
}

for (; data_index < dst_data_size / sizeof(T); data_index++) {


+ 15
- 14
mindspore/lite/tools/optimizer/graph/control_flow_pass.cc View File

@@ -555,9 +555,9 @@ int ControlFlowPass::CreateIfPartialNodeExternalInputs(const CNodePtr &if_cnode,

int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &index,
std::vector<AnfNodePtr> *visited_nodes_used_by_after_fg,
const CNodePtr *if_cnode, const FuncGraphPtr *after_fg,
const CNodePtr &if_cnode, const FuncGraphPtr &after_fg,
CNodePtr *then_partial_cnode) {
auto then_vnode = (*if_cnode)->input(index);
auto then_vnode = if_cnode->input(index);
MS_ASSERT(then_vnode != nullptr);
auto then_fg = GetValueNode<std::shared_ptr<FuncGraph>>(then_vnode);
MS_CHECK_TRUE_MSG(then_fg != nullptr, RET_FAILED, "Get value as func_graph failed.");
@@ -566,7 +566,7 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
ValueNodePtr then_partial_anf_primitive = lite::GetPartialFusionPrim();
MS_CHECK_TRUE_MSG(then_partial_anf_primitive != nullptr, RET_FAILED, "GetPartialFusionPrim failed.");
std::vector<AnfNodePtr> then_partial_cnode_inputs{then_partial_anf_primitive, then_vnode};
if (CreateIfPartialNodeExternalInputs(*if_cnode, then_fg, &then_partial_cnode_inputs) != RET_SUCCESS) {
if (CreateIfPartialNodeExternalInputs(if_cnode, then_fg, &then_partial_cnode_inputs) != RET_SUCCESS) {
MS_LOG(ERROR) << "CreateIfPartialNodeExternalInputs failed.";
return RET_FAILED;
}
@@ -613,7 +613,7 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
// create after partial node
ValueNodePtr after_partial_anf_primitive = lite::GetPartialFusionPrim();
MS_CHECK_TRUE_MSG(after_partial_anf_primitive != nullptr, RET_FAILED, "GetPartialFusionPrim failed.");
auto after_value_node = NewValueNode(*after_fg);
auto after_value_node = NewValueNode(after_fg);
MS_CHECK_TRUE_MSG(after_value_node != nullptr, RET_FAILED, "NewValueNode failed.");
// make the right after partial input
std::vector<AnfNodePtr> after_partial_cnode_inputs{after_partial_anf_primitive, after_value_node};
@@ -635,7 +635,7 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
// insert partial node
auto after_partial_cnode = then_fg->NewCNode(after_partial_cnode_inputs);
MS_CHECK_TRUE_MSG(after_partial_cnode != nullptr, RET_FAILED, "NewCNode failed");
auto after_fg_name = (*after_fg)->get_attr("graph_name")->ToString();
auto after_fg_name = after_fg->get_attr("graph_name")->ToString();
after_partial_cnode->set_fullname_with_scope("partial_" + after_fg_name);

// insert call node
@@ -645,10 +645,10 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
call_node->set_fullname_with_scope("call_" + after_partial_cnode->fullname_with_scope());
then_fg->set_output(call_node);
to_process_q.push_back(then_fg);
ReplaceNode(*after_fg, visited_nodes_and_after_partial_inputs_replace_pairs);
ReplaceNode(after_fg, visited_nodes_and_after_partial_inputs_replace_pairs);

// check the inputs of after fg
auto after_fg_inputs_size = (*after_fg)->get_inputs().size();
auto after_fg_inputs_size = after_fg->get_inputs().size();
if (after_fg_inputs_size == after_partial_cnode_inputs.size() - kPartialFirstInputSize) {
return RET_SUCCESS;
}
@@ -657,24 +657,24 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
std::unordered_map<AnfNodePtr, AnfNodePtr> after_partial_after_fg_replace_pairs{};
for (size_t i = kPartialFirstInputSize; i < after_partial_cnode_inputs.size(); ++i) {
auto &input = after_partial_cnode_inputs[i];
auto new_parameter = (*after_fg)->add_parameter();
auto new_parameter = after_fg->add_parameter();
MS_CHECK_TRUE_MSG(new_parameter != nullptr, RET_FAILED, "add_parameter failed");
new_parameter->set_name(std::to_string(i - kPartialFirstInputSize) + "_" + input->fullname_with_scope());
new_parameter->set_abstract(input->abstract());
if (i < kPartialFirstInputSize + if_output_size) {
after_partial_after_fg_replace_pairs[*if_cnode] = new_parameter;
after_partial_after_fg_replace_pairs[if_cnode] = new_parameter;
} else {
after_partial_after_fg_replace_pairs[input] = new_parameter;
}
}
ReplaceNode(*after_fg, after_partial_after_fg_replace_pairs);
ReplaceNode(after_fg, after_partial_after_fg_replace_pairs);

return RET_SUCCESS;
}

int ControlFlowPass::CreateIfElsePartialNode(const FuncGraphPtr &main_fg,
std::vector<AnfNodePtr> *visited_nodes_used_by_after_fg,
const CNodePtr *if_cnode, const FuncGraphPtr *after_fg,
const CNodePtr &if_cnode, const FuncGraphPtr &after_fg,
CNodePtr *else_partial_cnode) {
return CreateIfPartialNode(main_fg, kIfElseIndex, visited_nodes_used_by_after_fg, if_cnode, after_fg,
else_partial_cnode);
@@ -682,7 +682,8 @@ int ControlFlowPass::CreateIfElsePartialNode(const FuncGraphPtr &main_fg,

int ControlFlowPass::CreateIfThenPartialNode(const FuncGraphPtr &main_fg,
std::vector<AnfNodePtr> *visited_nodes_used_by_after_fg,
CNodePtr *if_cnode, FuncGraphPtr *after_fg, CNodePtr *then_partial_cnode) {
const CNodePtr &if_cnode, const FuncGraphPtr &after_fg,
CNodePtr *then_partial_cnode) {
return CreateIfPartialNode(main_fg, kIfThenIndex, visited_nodes_used_by_after_fg, if_cnode, after_fg,
then_partial_cnode);
}
@@ -713,14 +714,14 @@ int ControlFlowPass::ProcessIfOp(const FuncGraphPtr &fg, const std::set<AnfNodeP
VisitedNodesUsedByAfterParts(visited_nodes, remain_nodes, &visited_nodes_used_by_after_fg);

CNodePtr then_partial_cnode = nullptr;
int ret = CreateIfThenPartialNode(fg, &visited_nodes_used_by_after_fg, &if_cnode, &after_fg, &then_partial_cnode);
int ret = CreateIfThenPartialNode(fg, &visited_nodes_used_by_after_fg, if_cnode, after_fg, &then_partial_cnode);
if (ret != RET_SUCCESS) {
MS_LOG(ERROR) << "if create then partial cnode failed, ret: " << ret;
return ret;
}

CNodePtr else_partial_cnode = nullptr;
ret = CreateIfElsePartialNode(fg, &visited_nodes_used_by_after_fg, &if_cnode, &after_fg, &else_partial_cnode);
ret = CreateIfElsePartialNode(fg, &visited_nodes_used_by_after_fg, if_cnode, after_fg, &else_partial_cnode);
if (ret != RET_SUCCESS) {
MS_LOG(ERROR) << "if create else partial cnode failed, ret: " << ret;
return ret;


+ 6
- 6
mindspore/lite/tools/optimizer/graph/control_flow_pass.h View File

@@ -64,14 +64,14 @@ class ControlFlowPass : public Pass {
int CreateIfPartialNodeExternalInputs(const CNodePtr &if_cnode, const FuncGraphPtr &partial_fg,
std::vector<AnfNodePtr> *then_partial_cnode_inputs);
int CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &index,
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, const CNodePtr *if_cnode,
const FuncGraphPtr *after_fg, CNodePtr *then_partial_cnode);
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, const CNodePtr &if_cnode,
const FuncGraphPtr &after_fg, CNodePtr *then_partial_cnode);
int CreateIfThenPartialNode(const FuncGraphPtr &main_fg,
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, CNodePtr *if_cnode,
FuncGraphPtr *after_fg, CNodePtr *then_partial_cnode);
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, const CNodePtr &if_cnode,
const FuncGraphPtr &after_fg, CNodePtr *then_partial_cnode);
int CreateIfElsePartialNode(const FuncGraphPtr &main_fg,
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, const CNodePtr *if_cnode,
const FuncGraphPtr *after_fg, CNodePtr *else_partial_cnode);
std::vector<AnfNodePtr> *fg_inputs_only_used_by_after_partial, const CNodePtr &if_cnode,
const FuncGraphPtr &after_fg, CNodePtr *else_partial_cnode);
int ProcessIfOp(const FuncGraphPtr &fg, const std::set<AnfNodePtr> &visited_nodes,
const std::vector<AnfNodePtr> &remain_nodes, const AnfNodePtr &if_node);



Loading…
Cancel
Save