
fix misra

pull/539/head
zhao-lupeng 3 years ago
parent commit 0380cf64ee
6 changed files with 17 additions and 15 deletions
  1. parser/common/op_def/constant_op.cc      (+2, -2)
  2. parser/common/op_def/ir_pb_converter.cc  (+2, -2)
  3. parser/common/op_def/ref_switch_op.cc    (+1, -1)
  4. parser/common/op_def/shape_n_op.cc       (+4, -4)
  5. parser/tensorflow/graph_functiondef.cc   (+2, -2)
  6. parser/tensorflow/graph_optimizer.cc     (+6, -4)
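
All six files make the same mechanical change: C-style casts on ge::DataType and related integer values are rewritten as static_cast, presumably to satisfy the MISRA C++ rule that forbids C-style casts (Rule 5-2-4 in MISRA C++:2008). A minimal, self-contained sketch of the pattern, using a hypothetical DataType enum and attribute store rather than the real ge:: APIs:

// Sketch only: DataType, SetAttr and g_attrs are stand-ins, not the actual
// ge:: types. It shows the enum <-> int64_t round trip that the commit now
// writes with static_cast instead of C-style casts.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

enum class DataType : std::int32_t { DT_FLOAT = 0, DT_INT32 = 3 };

static std::map<std::string, std::int64_t> g_attrs;  // stand-in attribute store

void SetAttr(const std::string &name, std::int64_t value) { g_attrs[name] = value; }

void SetDType(DataType t) {
  // Before: SetAttr("dtype", (int64_t)t);         // C-style cast, flagged by MISRA
  SetAttr("dtype", static_cast<std::int64_t>(t));  // explicit conversion
}

DataType GetDType() {
  // The read path likewise uses static_cast instead of (DataType)... .
  return static_cast<DataType>(g_attrs["dtype"]);
}

int main() {
  SetDType(DataType::DT_INT32);
  assert(GetDType() == DataType::DT_INT32);  // value round-trips unchanged
  return 0;
}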

parser/common/op_def/constant_op.cc  (+2, -2)

@@ -32,9 +32,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ConstantOperator &ConstantOpera
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ConstantOperator &ConstantOperator::DType(ge::DataType t) {
-  Attr(VAR_ATTR_DTYPE, (int64_t)t);
+  Attr(VAR_ATTR_DTYPE, static_cast<int64_t>(t));
   return *this;
 }
 
-ge::DataType ConstantOperator::GetDType() const { return (ge::DataType)GetIntAttr(VAR_ATTR_DTYPE); }
+ge::DataType ConstantOperator::GetDType() const { return static_cast<ge::DataType>(GetIntAttr(VAR_ATTR_DTYPE)); }
 }  // namespace ge

parser/common/op_def/ir_pb_converter.cc  (+2, -2)

@@ -32,7 +32,7 @@ static void ConvertList(const std::pair<std::string, OpAttribute> &op_attr_pair,
 
   vector<int64_t> v_i;
   for (int32_t i = 0; i < a_list.i_size(); i++) {
-    v_i.push_back((int64_t)a_list.i(i));
+    v_i.push_back(static_cast<int64_t>(a_list.i(i)));
   }
   if (v_i.size() > 0) {
     (void)ge::AttrUtils::SetListInt(op_def, op_attr_pair.first, v_i);
@@ -56,7 +56,7 @@ static void ConvertList(const std::pair<std::string, OpAttribute> &op_attr_pair,
   }
   vector<int32_t> v_u;
   for (int32_t i = 0; i < a_list.u_size(); i++) {
-    v_u.push_back((int32_t)a_list.u(i));
+    v_u.push_back(static_cast<int32_t>(a_list.u(i)));
   }
   if (v_u.size() > 0) {
     (void)ge::AttrUtils::SetListInt(op_def, op_attr_pair.first, v_u);
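
A note on the second ir_pb_converter.cc hunk: if the u() entries are unsigned and can exceed INT32_MAX, the conversion to int32_t remains a narrowing one; static_cast only makes the conversion explicit for MISRA, it does not add a range check, so the resulting values are the same as before this commit.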


parser/common/op_def/ref_switch_op.cc  (+1, -1)

@@ -23,7 +23,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY RefSwitchOperator::RefSwitchOpe
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY RefSwitchOperator::~RefSwitchOperator() {}
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY RefSwitchOperator &RefSwitchOperator::T(ge::DataType t) {
-  Attr("T", (int64_t)t);
+  Attr("T", static_cast<int64_t>(t));
   return *this;
 }
 }  // namespace ge AUTO GEN PLEASE DO NOT MODIFY IT

parser/common/op_def/shape_n_op.cc  (+4, -4)

@@ -32,20 +32,20 @@ FMK_FUNC_HOST_VISIBILITY ShapeNOperator &ShapeNOperator::N(int64_t n) {
 FMK_FUNC_HOST_VISIBILITY int64_t ShapeNOperator::GetN() const { return GetIntAttr(SHAPEN_ATTR_N); }
 
 FMK_FUNC_HOST_VISIBILITY ShapeNOperator &ShapeNOperator::InType(ge::DataType t) {
-  Attr(SHAPEN_ATTR_IN_TYPE, (int64_t)t);
+  Attr(SHAPEN_ATTR_IN_TYPE, static_cast<int64_t>(t));
   return *this;
 }
 
 FMK_FUNC_HOST_VISIBILITY ge::DataType ShapeNOperator::GetInType() const {
-  return (ge::DataType)GetIntAttr(SHAPEN_ATTR_IN_TYPE);
+  return static_cast<ge::DataType>(GetIntAttr(SHAPEN_ATTR_IN_TYPE));
 }
 
 FMK_FUNC_HOST_VISIBILITY ShapeNOperator &ShapeNOperator::OutType(ge::DataType t) {
-  Attr(SHAPEN_ATTR_OUT_TYPE, (int64_t)t);
+  Attr(SHAPEN_ATTR_OUT_TYPE, static_cast<int64_t>(t));
   return *this;
 }
 
 FMK_FUNC_HOST_VISIBILITY ge::DataType ShapeNOperator::GetOutType() const {
-  return (ge::DataType)GetIntAttr(SHAPEN_ATTR_OUT_TYPE);
+  return static_cast<ge::DataType>(GetIntAttr(SHAPEN_ATTR_OUT_TYPE));
 }
 }  // namespace ge

parser/tensorflow/graph_functiondef.cc  (+2, -2)

@@ -320,8 +320,8 @@ domi::Status GraphToFunctionDef::RecordArg(ge::ComputeGraphPtr graph, const vect
     return FAILED;
   }
 
-  (void)ge::AttrUtils::SetInt(op, "T", (int32_t)dtype);
-  (void)ge::AttrUtils::SetInt(op, "arg_index", (int32_t)index);
+  (void)ge::AttrUtils::SetInt(op, "T", static_cast<int32_t>(dtype));
+  (void)ge::AttrUtils::SetInt(op, "arg_index", static_cast<int32_t>(index));
   ge::NodePtr arg_node = graph->AddNode(op);
   GE_CHECK_NOTNULL(arg_node);
   bool node_exists = false;


parser/tensorflow/graph_optimizer.cc  (+6, -4)

@@ -388,7 +388,8 @@ Status ParserGraphOptimizer::RebuildOutputAnchors(vector<ge::OutDataAnchorPtr> &
     GE_CHK_BOOL_EXEC(fusion_op_desc->AddOutputDesc(src_out_desc) == ge::GRAPH_SUCCESS, return FAILED);
 
     ge::DataType data_type = src_out_desc.GetDataType();
-    const std::map<int32_t, int32_t>::const_iterator iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
+    const std::map<int32_t, int32_t>::const_iterator iter =
+        GE_TENSORFLOW_DATA_TYPE_MAP.find(static_cast<int32_t>(data_type));
     GE_IF_BOOL_EXEC(
         iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(),
         REPORT_INNER_ERROR("E19999", "datatype:%d of output:%d in node:%s:%s is not supported",
@@ -397,7 +398,7 @@ Status ParserGraphOptimizer::RebuildOutputAnchors(vector<ge::OutDataAnchorPtr> &
         return PARAM_INVALID);
 
     int32_t dtype = iter->second;
-    output_list.push_back((int64_t)dtype);
+    output_list.push_back(static_cast<int64_t>(dtype));
     GELOGI("FUNCDEF: output_list push_back %d.", dtype);
   }
   GE_IF_BOOL_EXEC(!output_list.empty(), (void)AttrUtils::SetListInt(fusion_op_desc, ge::T_OUT_DATATYPE, output_list));
@@ -424,7 +425,8 @@ Status ParserGraphOptimizer::RebuildInputAnchors(vector<ge::InDataAnchorPtr> &in
       return FAILED;
     }
     ge::DataType data_type = tensorDescPtr->GetDataType();
-    const std::map<int32_t, int32_t>::const_iterator iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
+    const std::map<int32_t, int32_t>::const_iterator iter =
+        GE_TENSORFLOW_DATA_TYPE_MAP.find(static_cast<int32_t>(data_type));
     GE_IF_BOOL_EXEC(
         iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(),
         REPORT_INNER_ERROR("E19999", "datatype:%d of input:%d in node:%s:%s is not supported",
@@ -433,7 +435,7 @@ Status ParserGraphOptimizer::RebuildInputAnchors(vector<ge::InDataAnchorPtr> &in
         return PARAM_INVALID);
 
     int32_t dtype = iter->second;
-    input_list.push_back((int64_t)dtype);
+    input_list.push_back(static_cast<int64_t>(dtype));
     GELOGI("FUNCDEF: input_list push_back %d.", dtype);
   }
   GE_IF_BOOL_EXEC(!input_list.empty(), (void)AttrUtils::SetListInt(fusion_op_desc, ge::T_IN_DATATYPE, input_list));
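
The graph_optimizer.cc hunks make the same substitution but also wrap the now-longer map lookup onto a continuation line, which is why they add six lines while removing four. A small sketch of that lookup-and-append step, with hypothetical stand-ins for GE_TENSORFLOW_DATA_TYPE_MAP and the surrounding GE error macros:

// Sketch only: kDataTypeMap, DataType and AppendTfDType are stand-ins, not
// the real GE symbols.
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

enum class DataType : std::int32_t { DT_FLOAT = 0, DT_INT32 = 3 };

// Hypothetical framework-id -> TensorFlow-id mapping.
static const std::map<std::int32_t, std::int32_t> kDataTypeMap = {{0, 1}, {3, 3}};

bool AppendTfDType(DataType data_type, std::vector<std::int64_t> &output_list) {
  // The enum key is converted with static_cast; the old code used (int32_t)data_type.
  const auto iter = kDataTypeMap.find(static_cast<std::int32_t>(data_type));
  if (iter == kDataTypeMap.end()) {
    std::printf("datatype:%d is not supported\n", static_cast<int>(data_type));
    return false;  // mirrors the PARAM_INVALID early return in the real code
  }
  output_list.push_back(static_cast<std::int64_t>(iter->second));
  return true;
}

int main() {
  std::vector<std::int64_t> output_list;
  (void)AppendTfDType(DataType::DT_INT32, output_list);
  return output_list.size() == 1 ? 0 : 1;
}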

