
!644 sync ge_dev to master 20220831

Merge pull request !644 from lipeiyang/ge_dev
pull/650/MERGE
Author: lipeiyang (Gitee), 3 years ago
Commit: c9e8869e41
28 changed files with 191 additions and 242 deletions
  1. parser/common/parser_fp16_t.cc (+60, -60)
  2. parser/common/parser_fp16_t.h (+58, -26)
  3. parser/common/parser_inner_ctx.cc (+1, -1)
  4. parser/common/parser_types.cc (+1, -1)
  5. parser/common/parser_utils.cc (+1, -1)
  6. parser/common/parser_utils.h (+2, -2)
  7. parser/common/pass.h (+1, -2)
  8. parser/common/pass_manager.cc (+2, -2)
  9. parser/common/pass_manager.h (+3, -9)
  10. parser/common/pre_checker.cc (+1, -1)
  11. parser/common/pre_checker.h (+1, -1)
  12. parser/common/proto_file_parser.cc (+1, -1)
  13. parser/common/proto_file_parser.h (+1, -1)
  14. parser/common/prototype_pass_manager.cc (+1, -1)
  15. parser/common/prototype_pass_manager.h (+1, -1)
  16. parser/common/register_tbe.cc (+1, -1)
  17. parser/common/register_tbe.h (+1, -1)
  18. parser/common/tbe_plugin_loader.cc (+1, -1)
  19. parser/common/tbe_plugin_loader.h (+2, -2)
  20. parser/common/thread_pool.cc (+1, -1)
  21. parser/common/thread_pool.h (+0, -2)
  22. parser/common/tuple.h (+20, -39)
  23. parser/common/types_map.h (+1, -1)
  24. parser/tensorflow/tensorflow_parser.cc (+29, -28)
  25. parser/tensorflow/tensorflow_ref_switch_parser.cc (+0, -16)
  26. parser/tensorflow/tensorflow_ref_switch_parser.h (+0, -20)
  27. parser/tensorflow/tensorflow_shape_n_parser.cc (+0, -17)
  28. parser/tensorflow/tensorflow_shape_n_parser.h (+0, -3)

parser/common/parser_fp16_t.cc (+60, -60)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,9 +30,9 @@ const TagFp16RoundMode g_round_mode = TagFp16RoundMode::kRoundToNearest;

void ExtractFp16(const uint16_t &val, uint16_t &s, int16_t &e, uint16_t &m) {
// 1.Extract
s = static_cast<uint16_t>(FP16_EXTRAC_SIGN(val));
e = static_cast<int16_t>(FP16_EXTRAC_EXP(val));
m = static_cast<uint16_t>(FP16_EXTRAC_MAN(val));
s = static_cast<uint16_t>(Fp16ExtracSign(val));
e = static_cast<int16_t>(Fp16ExtracExp(val));
m = static_cast<uint16_t>(Fp16ExtracMan(val));
// Denormal
if (e == 0) {
e = 1;
@@ -104,7 +104,7 @@ static float Fp16ToFloat(const uint16_t &fp_val) {
m_ret = hf_man & kFp16ManMask;
m_ret = m_ret << (kFp32ManLen - kFp16ManLen);
}
uint32_t f_val = FP32_CONSTRUCTOR(s_ret, e_ret, m_ret);
uint32_t f_val = Fp32Constructor(s_ret, e_ret, m_ret);
auto p_ret_v = ge::PtrToPtr<uint32_t, float>(&f_val);

return *p_ret_v;
@@ -172,12 +172,12 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
int8_t ret;
uint8_t ret_v;
// 1.get s_ret and shift it to bit0.
uint8_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint8_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_DENORM(fp_val)) { // Denormalized number
if (Fp16IsDenorm(fp_val)) { // Denormalized number
ret_v = 0;
ret = *(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v));
return ret;
@@ -186,7 +186,7 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
uint64_t long_int_m = hf_m;
uint8_t overflow_flag = 0;
uint16_t shift_out = 0;
if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
overflow_flag = 1;
} else {
while (hf_e != kFp16ExpBias) {
@@ -226,16 +226,16 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
uint8_t m_ret = 0;
// 1.get s_ret and shift it to bit0.
uint8_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint8_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_DENORM(fp_val)) { // Denormalized number
if (Fp16IsDenorm(fp_val)) { // Denormalized number
return 0;
}

if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
m_ret = ~0;
} else {
uint64_t long_int_m = hf_m;
@@ -301,12 +301,12 @@ static int16_t Fp16ToInt16(const uint16_t &fp_val) {
int16_t ret;
uint16_t ret_v;
// 1.get s_ret and shift it to bit0.
uint16_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint16_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_DENORM(fp_val)) { // Denormalized number
if (Fp16IsDenorm(fp_val)) { // Denormalized number
ret_v = 0;
ret = *(ge::PtrToPtr<uint16_t, uint8_t>(&ret_v));
return ret;
@@ -315,7 +315,7 @@ static int16_t Fp16ToInt16(const uint16_t &fp_val) {
uint64_t long_int_m = hf_m;
uint8_t overflow_flag = 0;
uint16_t shift_out = 0;
if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
overflow_flag = 1;
} else {
while (hf_e != kFp16ExpBias) {
@@ -354,16 +354,16 @@ static int16_t Fp16ToInt16(const uint16_t &fp_val) {
static uint16_t Fp16ToUInt16(const uint16_t &fp_val) {
uint16_t m_ret = 0;
// 1.get s_ret and shift it to bit0.
uint16_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint16_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_DENORM(fp_val)) { // Denormalized number
if (Fp16IsDenorm(fp_val)) { // Denormalized number
return 0;
}

if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
m_ret = ~0;
} else {
uint64_t long_int_m = hf_m;
@@ -398,12 +398,12 @@ static uint16_t Fp16ToUInt16(const uint16_t &fp_val) {
static int32_t Fp16ToInt32(const uint16_t &fp_val) {
uint32_t ret_v;
// 1.get s_ret and shift it to bit0.
uint32_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint32_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
ret_v = kInt32Max + s_ret;
} else {
uint64_t long_int_m = hf_m;
@@ -444,16 +444,16 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
static uint32_t Fp16ToUInt32(const uint16_t &fp_val) {
uint32_t m_ret;
// 1.get s_ret and shift it to bit0.
uint32_t s_ret = FP16_EXTRAC_SIGN(fp_val);
uint32_t s_ret = Fp16ExtracSign(fp_val);
// 2.get hf_e and hf_m
uint16_t hf_e = FP16_EXTRAC_EXP(fp_val);
uint16_t hf_m = FP16_EXTRAC_MAN(fp_val);
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);

if (FP16_IS_DENORM(fp_val)) { // Denormalized number
if (Fp16IsDenorm(fp_val)) { // Denormalized number
return 0u;
}

if (FP16_IS_INVALID(fp_val)) { // Inf or NaN
if (Fp16IsInvalid(fp_val)) { // Inf or NaN
m_ret = ~0u;
} else {
uint64_t long_int_m = hf_m;
@@ -513,7 +513,7 @@ static uint16_t Fp16AddCalVal(uint16_t s_ret, int16_t e_ret, uint16_t m_ret, uin
m_ret = m_ret >> 1;
}
Fp16Normalize(e_ret, m_ret);
uint16_t ret = FP16_CONSTRUCTOR(s_ret, static_cast<uint16_t>(e_ret), m_ret);
uint16_t ret = Fp16Constructor(s_ret, static_cast<uint16_t>(e_ret), m_ret);
return ret;
}

@@ -640,7 +640,7 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) {

Fp16Normalize(e_ret, m_ret);

uint16_t ret = FP16_CONSTRUCTOR(s_ret, static_cast<uint16_t>(e_ret), m_ret);
uint16_t ret = Fp16Constructor(s_ret, static_cast<uint16_t>(e_ret), m_ret);
return ret;
}

@@ -651,15 +651,15 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) {
/// @return Return fp16_t result of division this by fp
static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
uint16_t ret;
if (FP16_IS_ZERO(v_2)) { // result is inf
if (Fp16IsZero(v_2)) { // result is inf
// throw "fp16_t division by zero.";
uint16_t s_a, s_b;
uint16_t s_ret;
s_a = FP16_EXTRAC_SIGN(v_1);
s_b = FP16_EXTRAC_SIGN(v_2);
s_a = Fp16ExtracSign(v_1);
s_b = Fp16ExtracSign(v_2);
s_ret = s_a ^ s_b;
ret = FP16_CONSTRUCTOR(s_ret, kFp16MaxExp, 0u);
} else if (FP16_IS_ZERO(v_1)) {
ret = Fp16Constructor(s_ret, kFp16MaxExp, 0u);
} else if (Fp16IsZero(v_1)) {
ret = 0u;
} else {
uint16_t s_a, s_b;
@@ -747,7 +747,7 @@ fp16_t fp16_t::operator/=(const fp16_t fp) {
// compare
bool fp16_t::operator==(const fp16_t &fp) const {
bool result = true;
if (FP16_IS_ZERO(val) && FP16_IS_ZERO(fp.val)) {
if (Fp16IsZero(val) && Fp16IsZero(fp.val)) {
result = true;
} else {
result = ((val & kBitLen16Max) == (fp.val & kBitLen16Max)); // bit compare
@@ -757,7 +757,7 @@ bool fp16_t::operator==(const fp16_t &fp) const {

bool fp16_t::operator!=(const fp16_t &fp) const {
bool result = true;
if (FP16_IS_ZERO(val) && FP16_IS_ZERO(fp.val)) {
if (Fp16IsZero(val) && Fp16IsZero(fp.val)) {
result = false;
} else {
result = ((val & kBitLen16Max) != (fp.val & kBitLen16Max)); // bit compare
@@ -772,17 +772,17 @@ bool fp16_t::operator>(const fp16_t &fp) const {
bool result = true;

// 1.Extract
s_a = FP16_EXTRAC_SIGN(val);
s_b = FP16_EXTRAC_SIGN(fp.val);
e_a = FP16_EXTRAC_EXP(val);
e_b = FP16_EXTRAC_EXP(fp.val);
m_a = FP16_EXTRAC_MAN(val);
m_b = FP16_EXTRAC_MAN(fp.val);
s_a = Fp16ExtracSign(val);
s_b = Fp16ExtracSign(fp.val);
e_a = Fp16ExtracExp(val);
e_b = Fp16ExtracExp(fp.val);
m_a = Fp16ExtracMan(val);
m_b = Fp16ExtracMan(fp.val);

// Compare
if ((s_a == 0) && (s_b > 0)) { // + -
// -0=0
result = !(FP16_IS_ZERO(val) && FP16_IS_ZERO(fp.val));
result = !(Fp16IsZero(val) && Fp16IsZero(fp.val));
} else if ((s_a == 0) && (s_b == 0)) { // + +
if (e_a > e_b) { // e_a - e_b >= 1; Va always larger than Vb
result = true;
@@ -898,7 +898,7 @@ fp16_t &fp16_t::operator=(const float &f_val) {
}

Fp16Normalize(e_ret, m_ret);
val = FP16_CONSTRUCTOR(s_ret, static_cast<uint16_t>(e_ret), m_ret);
val = Fp16Constructor(s_ret, static_cast<uint16_t>(e_ret), m_ret);
return *this;
}

@@ -923,7 +923,7 @@ fp16_t &fp16_t::operator=(const int8_t &i_val) {
e_ret = e_ret + kFp16ExpBias;
}

val = FP16_CONSTRUCTOR(s_ret, e_ret, m_ret);
val = Fp16Constructor(s_ret, e_ret, m_ret);
return *this;
}

@@ -941,7 +941,7 @@ fp16_t &fp16_t::operator=(const uint8_t &ui_val) {
e_ret = e_ret + kFp16ExpBias;
}

val = FP16_CONSTRUCTOR(s_ret, e_ret, m_ret);
val = Fp16Constructor(s_ret, e_ret, m_ret);
return *this;
}

@@ -982,7 +982,7 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
e_ret = e_ret + (len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
ret_val = FP16_CONSTRUCTOR(sign, static_cast<uint16_t>(e_ret), m_ret);
ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
}
}

@@ -1035,7 +1035,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
m_ret = m_ret >> 1;
e_ret = e_ret + 1;
}
if (FP16_IS_INVALID(val)) {
if (Fp16IsInvalid(val)) {
val = kFp16Max;
}
} else {
@@ -1043,7 +1043,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
m_ret = m_ret << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
}
val = FP16_CONSTRUCTOR(0u, static_cast<uint16_t>(e_ret), m_ret);
val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
}
return *this;
}
@@ -1089,7 +1089,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
e_ret = e_ret + (len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
ret_val = FP16_CONSTRUCTOR(sign, static_cast<uint16_t>(e_ret), m_ret);
ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
}

fp16_t &fp16_t::operator=(const int32_t &i_val) {
@@ -1151,7 +1151,7 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
e_ret = e_ret + (len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
val = FP16_CONSTRUCTOR(0u, static_cast<uint16_t>(e_ret), m_ret);
val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
}
return *this;
}
@@ -1175,7 +1175,7 @@ fp16_t &fp16_t::operator=(const double &d_val) {
if (e_d >= 0x410u) { // 0x410:1040=1023+16
e_ret = kFp16MaxExp - 1;
m_ret = kFp16MaxMan;
val = FP16_CONSTRUCTOR(s_ret, static_cast<uint16_t>(e_ret), m_ret);
val = Fp16Constructor(s_ret, static_cast<uint16_t>(e_ret), m_ret);
} else if (e_d <= 0x3F0u) { // Exponent underflow converts to denormalized half or signed zero
// 0x3F0:1008=1023-15
// Signed zeros, denormalized floats, and floats with small
@@ -1211,7 +1211,7 @@ fp16_t &fp16_t::operator=(const double &d_val) {
}

Fp16Normalize(e_ret, m_ret);
val = FP16_CONSTRUCTOR(s_ret, static_cast<uint16_t>(e_ret), m_ret);
val = Fp16Constructor(s_ret, static_cast<uint16_t>(e_ret), m_ret);
return *this;
}



parser/common/parser_fp16_t.h (+58, -26)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -93,8 +93,8 @@ using BitShift = enum {
/// @brief fp16 exponent bias
constexpr uint16_t kFp16ExpBias = 15U;
/// @ingroup fp16 basic parameter
/// @brief the exponent bit length of fp16 is 5
constexpr uint16_t kFp16ExpLen = 5U;
/// @brief the exponent bit length of fp16 is 10
constexpr uint16_t kFp16ExpLen = 10U;
/// @ingroup fp16 basic parameter
/// @brief the mantissa bit length of fp16 is 10
constexpr uint16_t kFp16ManLen = 10U;
@@ -131,37 +131,51 @@ constexpr uint16_t kFp16MaxValidExp = 0x001E;
/// @ingroup fp16 basic parameter
/// @brief maximum mantissa value of fp16(11111 11111)
constexpr uint16_t kFp16MaxMan = 0x03FF;
/// @ingroup fp16 basic parameter
/// @brief absolute minimum normal value of fp16
/// (E=1,M=0 D=2^(-14)=0.00006103515625)
constexpr uint16_t kFp16MinNormal = 1.0f / (2 << 14);
/// @ingroup fp16 basic operator
/// @brief get sign of fp16
#define FP16_EXTRAC_SIGN(x) (((x) >> 15) & 1)
inline uint16_t Fp16ExtracSign(const uint16_t x) {
return (((x) >> kFp16SignIndex) & 1);
}
/// @ingroup fp16 basic operator
/// @brief get exponent of fp16
#define FP16_EXTRAC_EXP(x) (((x) >> 10) & kFp16MaxExp)
inline uint16_t Fp16ExtracExp(const uint16_t x) {
return (((x) >> kFp16ExpLen) & kFp16MaxExp);
}
/// @ingroup fp16 basic operator
/// @brief get mantissa of fp16
#define FP16_EXTRAC_MAN(x) ((((x) >> 0) & 0x3FF) | (((((x) >> 10) & 0x1F) > 0 ? 1 : 0) * 0x400))
inline uint16_t Fp16ExtracMan(const uint16_t x) {
return ((((x) >> 0) & 0x3FF) | (((((x) >> kFp16ManLen) & 0x1F) > 0 ? 1 : 0) * 0x400));
}
/// @ingroup fp16 basic operator
/// @brief constructor of fp16 from sign exponent and mantissa
#define FP16_CONSTRUCTOR(s, e, m) (((s) << kFp16SignIndex) | ((e) << kFp16ManLen) | ((m) & kFp16MaxMan))
inline uint16_t Fp16Constructor(const uint16_t s, const uint16_t e, const uint16_t m) {
return (((s) << kFp16SignIndex) | ((e) << kFp16ManLen) | ((m) & kFp16MaxMan));
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is zero
#define FP16_IS_ZERO(x) (((x) & kFp16AbsMax) == 0)
inline bool Fp16IsZero(const uint16_t x) {
return (((x) & kFp16AbsMax) == 0);
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is a denormalized value
#define FP16_IS_DENORM(x) ((((x) & kFp16ExpMask) == 0))
inline bool Fp16IsDenorm(const uint16_t x) {
return ((((x) & kFp16ExpMask) == 0));
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is infinite
#define FP16_IS_INF(x) (((x)&kFp16AbsMax) == kFp16ExpMask)
inline bool Fp16IsInf(const uint16_t x) {
return (((x)&kFp16AbsMax) == kFp16ExpMask);
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is NaN
#define FP16_IS_NAN(x) ((((x) & kFp16ExpMask) == kFp16ExpMask) && ((x) & kFp16ManMask))
inline bool Fp16IsNan(const uint16_t x) {
return ((((x) & kFp16ExpMask) == kFp16ExpMask) && ((x) & kFp16ManMask));
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is invalid
#define FP16_IS_INVALID(x) (((x) & kFp16ExpMask) == kFp16ExpMask)
inline bool Fp16IsInvalid(const uint16_t x) {
return (((x) & kFp16ExpMask) == kFp16ExpMask);
}
/// @ingroup fp32 basic parameter
/// @brief fp32 exponent bias
constexpr uint16_t kFp32ExpBias = 127U;
@@ -197,25 +211,39 @@ constexpr uint32_t kFp32MaxExp = 0xFFU;
constexpr uint32_t kFp32MaxMan = 0x7FFFFFU;
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is NaN
#define FP32_IS_NAN(x) ((((x) & kFp32ExpMask) == kFp32ExpMask) && ((x) & kFp32ManMask))
inline bool Fp32IsNan(const uint16_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && ((x) & kFp32ManMask));
}
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is infinite
#define FP32_IS_INF(x) ((((x) & kFp32ExpMask) == kFp32ExpMask) && (!((x) & kFp32ManMask)))
inline bool Fp32IsInf(const uint16_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && (!((x) & kFp32ManMask)));
}
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is a denormalized value
#define FP32_IS_DENORM(x) ((((x)&kFp32ExpMask) == 0))
inline bool Fp32IsDenorm(const uint16_t x) {
return ((((x)&kFp32ExpMask) == 0));
}
/// @ingroup fp32 basic operator
/// @brief get sign of fp32
#define FP32_EXTRAC_SIGN(x) (((x) >> kFp32SignIndex) & 1)
inline bool Fp32ExtracSign(const uint16_t x) {
return (((x) >> kFp32SignIndex) & 1);
}
/// @ingroup fp32 basic operator
/// @brief get exponent of fp16
#define FP32_EXTRAC_EXP(x) (((x)&kFp32ExpMask) >> kFp32ManLen)
inline bool Fp32ExtracExp(const uint16_t x) {
return (((x)&kFp32ExpMask) >> kFp32ManLen);
}
/// @ingroup fp32 basic operator
/// @brief get mantissa of fp16
#define FP32_EXTRAC_MAN(x) (((x)&kFp32ManMask) | (((((x) >> kFp32ManLen) & kFp32MaxExp) > 0 ? 1 : 0) * kFp32ManHideBit))
inline uint16_t Fp32ExtracMan(const uint16_t x) {
return (((x)&kFp32ManMask) | (((((x) >> kFp32ManLen) & kFp32MaxExp) > 0 ? 1 : 0) * kFp32ManHideBit));
}
/// @ingroup fp32 basic operator
/// @brief constructor of fp32 from sign exponent and mantissa
#define FP32_CONSTRUCTOR(s, e, m) (((s) << kFp32SignIndex) | ((e) << kFp32ManLen) | ((m) & kFp32MaxMan))
inline uint16_t Fp32Constructor(const uint16_t s, const uint16_t e, const uint16_t m) {
return (((s) << kFp32SignIndex) | ((e) << kFp32ManLen) | ((m) & kFp32MaxMan));
}
/// @ingroup fp64 basic parameter
/// @brief fp64 exponent bias
constexpr uint16_t kFp64ExpBias = 1023U;
@@ -251,10 +279,14 @@ constexpr uint64_t kFp64MaxExp = 0x07FF;
constexpr uint64_t kFp64MaxMan = 0xFFFFFFFFFFFLLu;
/// @ingroup fp64 special value judgment
/// @brief whether a fp64 is NaN
#define FP64_IS_NAN(x) ((((x) & kFp64ExpMask) == kFp64ExpMask) && ((x) & kFp64ManMask))
inline bool Fp64IsNan(const uint16_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && ((x) & kFp64ManMask));
}
/// @ingroup fp64 special value judgment
/// @brief whether a fp64 is infinite
#define FP64_IS_INF(x) ((((x) & kFp64ExpMask) == kFp64ExpMask) && (!((x) & kFp64ManMask)))
inline bool Fp64IsInf(const uint16_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && (!((x) & kFp64ManMask)));
}
/// @ingroup integer special value judgment
/// @brief maximum positive value of int8_t (0111 1111)
constexpr int8_t kInt8Max = 0x7F;
@@ -605,7 +637,7 @@ T GetManSum(int16_t e_a, const T &m_a, int16_t e_b, const T &m_b) {
T sum = 0;
if (e_a != e_b) {
T m_tmp = 0;
int16_t e_tmp = std::abs(e_a - e_b);
int16_t e_tmp = static_cast<int16_t>(std::abs(e_a - e_b));
if (e_a > e_b) {
m_tmp = m_b;
m_tmp = RightShift(m_tmp, e_tmp);
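
The header change above replaces the FP16_* macros with typed inline helpers while keeping the bit layout untouched. Below is a minimal, self-contained sketch of the intended round trip (decompose a half-precision bit pattern, then rebuild it); the constants and helpers are re-declared locally with assumed values mirroring parser_fp16_t.h, so nothing here should be read as the exact library code.

#include <cstdint>
#include <cstdio>

// Local stand-ins with assumed values (sign bit 15, 5-bit exponent field, 10-bit mantissa);
// the real definitions live in parser/common/parser_fp16_t.h.
constexpr uint16_t kFp16SignIndex = 15U;
constexpr uint16_t kFp16ManLen = 10U;
constexpr uint16_t kFp16MaxExp = 0x001FU;
constexpr uint16_t kFp16MaxMan = 0x03FFU;

inline uint16_t Fp16ExtracSign(const uint16_t x) { return static_cast<uint16_t>((x >> kFp16SignIndex) & 1U); }
inline uint16_t Fp16ExtracExp(const uint16_t x) { return static_cast<uint16_t>((x >> kFp16ManLen) & kFp16MaxExp); }
inline uint16_t Fp16ExtracMan(const uint16_t x) { return static_cast<uint16_t>(x & kFp16MaxMan); }  // hidden bit omitted here
inline uint16_t Fp16Constructor(const uint16_t s, const uint16_t e, const uint16_t m) {
  return static_cast<uint16_t>((s << kFp16SignIndex) | (e << kFp16ManLen) | (m & kFp16MaxMan));
}

int main() {
  const uint16_t half_1p5 = 0x3E00U;  // fp16 bit pattern of 1.5: s=0, biased e=15, m=0x200
  const uint16_t s = Fp16ExtracSign(half_1p5);
  const uint16_t e = Fp16ExtracExp(half_1p5);
  const uint16_t m = Fp16ExtracMan(half_1p5);
  const uint16_t rebuilt = Fp16Constructor(s, e, m);
  std::printf("s=%u e=%u m=0x%X rebuilt=0x%X\n",
              static_cast<unsigned>(s), static_cast<unsigned>(e),
              static_cast<unsigned>(m), static_cast<unsigned>(rebuilt));  // rebuilt == 0x3E00
  return 0;
}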


parser/common/parser_inner_ctx.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/parser_types.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/parser_utils.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.

* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/parser_utils.h (+2, -2)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.

* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ class ParserUtils {
static Status HandleInputContext(const NodePtr &node,
const std::vector<NodePtr> &input_nodes,
const ComputeGraphPtr &compute_graph);
static Status HandleOutputContext(const NodePtr &node,
static Status HandleOutputContext(const NodePtr &node,
const std::vector<std::pair<NodePtr, int32_t>> &out_node_index,
OutputMapping &output_mapping);
};


parser/common/pass.h (+1, -2)

@@ -29,10 +29,9 @@ template <typename T>
class Pass {
public:
virtual ~Pass() {}
///
/// run pass
/// @author
///
virtual Status Run(std::shared_ptr<T>) = 0;
};
} // namespace ge


parser/common/pass_manager.cc (+2, -2)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,11 +15,11 @@
*/

#include "common/util.h"
#include "parser/common/pass_manager.h"
#include "framework/omg/parser/parser_types.h"
#include "parser/common/acl_graph_parser_util.h"
#include "graph/utils/node_utils.h"
#include "omg/omg_inner_types.h"
#include "parser/common/pass_manager.h"

namespace ge {
namespace parser {


parser/common/pass_manager.h (+3, -9)

@@ -30,30 +30,24 @@ namespace parser {
///
class PassManager {
public:
///
/// get graph passes
/// @author
///
const std::vector<std::pair<std::string, GraphPass *>> &GraphPasses() const;

///
/// Add graph pass
/// @param [in] pass Pass to be added, it will be destroyed when pass manager destroys.
/// @author
///
Status AddPass(const string &pass_name, GraphPass *const pass);

///
/// Optimize graph with added pass
/// @param [inout] graph graph to be optimized
/// @return SUCCESS optimize successfully
/// @return NOT_CHANGED not optimized
/// @return others optimize failed
/// @author
///
Status Run(const ge::ComputeGraphPtr &graph);

///
/// Optimize graph with specified pass
/// @param [inout] graph graph to be optimized
/// @param [in] passes passes to be used
@@ -61,8 +55,8 @@ public:
/// @return NOT_CHANGED not optimized
/// @return others optimized failed
/// @author
///
static Status Run(const ge::ComputeGraphPtr &graph, std::vector<std::pair<std::string, GraphPass *>> &names_to_passes);
static Status Run(const ge::ComputeGraphPtr &graph,
std::vector<std::pair<std::string, GraphPass *>> &names_to_passes);

~PassManager();
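
For orientation only, here is a hedged sketch of how a pass-manager loop with the Run contract documented above (SUCCESS when some pass optimized the graph, NOT_CHANGED when none did, any other code on failure) is typically driven. All types below are hypothetical stand-ins, not the GE parser's own declarations.

#include <memory>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-ins for the real GE types, used only to illustrate the contract.
using Status = int;
constexpr Status SUCCESS = 0;
constexpr Status NOT_CHANGED = 1;
struct Graph {};  // placeholder for ge::ComputeGraph

struct GraphPass {
  virtual ~GraphPass() = default;
  virtual Status Run(Graph &graph) = 0;  // each pass reports SUCCESS / NOT_CHANGED / error
};

Status RunPasses(Graph &graph,
                 std::vector<std::pair<std::string, std::unique_ptr<GraphPass>>> &passes) {
  bool changed = false;
  for (auto &named_pass : passes) {
    const Status status = named_pass.second->Run(graph);
    if (status == SUCCESS) {
      changed = true;          // at least one pass optimized the graph
    } else if (status != NOT_CHANGED) {
      return status;           // real failure: stop and propagate the error code
    }
  }
  return changed ? SUCCESS : NOT_CHANGED;
}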



parser/common/pre_checker.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/pre_checker.h (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/proto_file_parser.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.

* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/proto_file_parser.h (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.

* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/prototype_pass_manager.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/prototype_pass_manager.h (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/register_tbe.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/register_tbe.h (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/tbe_plugin_loader.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/tbe_plugin_loader.h (+2, -2)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -55,4 +55,4 @@ private:
};
} // namespace ge

#endif //PARSER_COMMON_TBE_PLUGIN_LOADER_H_
#endif // PARSER_COMMON_TBE_PLUGIN_LOADER_H_

parser/common/thread_pool.cc (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/common/thread_pool.h (+0, -2)

@@ -25,13 +25,11 @@
#include <queue>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>

#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"
#include "external/ge/ge_api_error_codes.h"
#include "graph/types.h"
#include "parser/common/acl_graph_parser_util.h"

namespace ge {


parser/common/tuple.h (+20, -39)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,27 +32,23 @@ class Tuple {
delete[] data_heap_;
data_heap_ = nullptr;
}
///
/// @brief copy constructor from another tuple
/// @param s the source tuple
///
inline Tuple(const Tuple<ValueType> &s) { this->assign(s.begin(), s.end()); }
///
/// @brief constructor from initializer list
/// @param init the initializer_list
///
explicit Tuple(const std::initializer_list<ValueType> &init) { this->assign(init.begin(), init.end()); }
///
/// @brief constructor from vector
/// @param init the vector
///
explicit Tuple(const std::vector<ValueType> &init) { // NOLINT(runtime/explicit)
this->assign(init.begin(), init.end());
}
///
/// @brief move constructor from Tuple
/// @param src the source shape
///
inline Tuple(Tuple<ValueType> &&src) { // NOLINT(runtime/explicit)
this->swap(src);
}
@@ -77,102 +73,88 @@ class Tuple {
this->SetDim(end - begin);
(void)std::copy(begin, end, this->begin());
}
///
/// @brief Swap current object with other
/// @param other another object to be swapped.
///
inline void swap(Tuple<ValueType> &other) { // NOLINT(*)
std::swap(ndim_, other.ndim_);
std::swap(num_heap_allocated_, other.num_heap_allocated_);
std::swap(data_stack_, other.data_stack_);
std::swap(data_heap_, other.data_heap_);
}
///
/// @brief assignment from another tuple.
/// @param src source tuple
/// @return reference of self
///
inline Tuple<ValueType> &operator=(const Tuple<ValueType> &src) {
if (&src != this) {
this->assign(src.begin(), src.end());
}
return *this;
}
///
/// @brief assignment from rvalue of another tuple.
/// @param src source tuple
/// @return reference of self
///
inline Tuple<ValueType> &operator=(Tuple<ValueType> &&src) {
if (&src != this) {
Tuple<ValueType>(std::move(src)).swap(*this);
}
return *this;
}
///
/// @brief assignment from initializer list
/// @param init the source initializer list
/// @return reference of self
///
inline Tuple<ValueType> &operator=(std::initializer_list<ValueType> init) {
this->assign(init.begin(), init.end());
return *this;
}
///
/// @return whether two tuple equals
/// @param s the tuple to compare against
///
inline bool operator==(const Tuple<ValueType> &s) const {
if (ndim_ != s.ndim_) {
return false;
}
return std::equal(begin(), end(), s.begin());
}
///
/// @return whether two tuple not equal
/// @param s the tuple to compare against
///
inline bool operator!=(const Tuple<ValueType> &s) const { return !(*this == s); }
///
/// @return the begin data pointer to content of the tuple
///
inline const ValueType *begin() const { return ndim_ <= STACK_CACHE_NUM ? data_stack_ : data_heap_; }
///
/// @return the begin data pointer to content of the tuple
///
inline ValueType *begin() { return ndim_ <= STACK_CACHE_NUM ? data_stack_ : data_heap_; }
///
/// @return the data pointer to end of the tuple
///
inline const ValueType *end() const {
return ndim_ <= STACK_CACHE_NUM ? (data_stack_ + ndim_) : (data_heap_ + ndim_);
}
///
/// @return the data pointer to end the tuple
///
inline ValueType *end() { return ndim_ <= STACK_CACHE_NUM ? (data_stack_ + ndim_) : (data_heap_ + ndim_); }
///
/// @return number of dimension of the tuple
///
inline uint32_t ndim() const { return ndim_; }
///
/// @brief get corresponding index
/// @param i dimension index
/// @return the corresponding dimension size
///
inline ValueType &operator[](size_t i) { return begin()[i]; }
///
/// @brief get corresponding index
/// @param i dimension index
/// @return the corresponding dimension size
///
inline const ValueType &operator[](size_t i) const { return begin()[i]; }
///
/// @brief allow output string of tuple to ostream
/// @param os the output stream
/// @param t the tuple
/// @return the ostream
///
friend std::ostream &operator<<(std::ostream &os, const Tuple<ValueType> &t) {
os << '[';
const ValueType *begin = t.begin();
@@ -186,12 +168,11 @@ class Tuple {
os << ']';
return os;
}
///
/// @brief read tuple from the istream
/// @param is the input stream
/// @param t The tuple
/// @return the istream
///
friend std::istream &operator>>(std::istream &is, Tuple<ValueType> &t) {
// get (
if (!HandleLeftBracket(is, t)) {
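
The doc comments kept above (after the banner-comment trimming) still describe the full Tuple<ValueType> interface. A short usage sketch follows; the include path and the ge namespace are assumptions based on this repository's layout, not something this diff states.

#include <cstdint>
#include <iostream>
#include "parser/common/tuple.h"  // assumed include path for Tuple<ValueType>

void TupleUsageSketch() {
  ge::Tuple<int64_t> dims({1, 3, 224, 224});  // explicit constructor from initializer_list
  ge::Tuple<int64_t> copy(dims);              // copy constructor
  if (copy == dims && copy.ndim() == 4U) {    // operator== and ndim()
    std::cout << dims << std::endl;           // operator<< streams the dims in brackets
  }
  copy[0] = 2;                                // operator[] returns a mutable reference to a dim
}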


parser/common/types_map.h (+1, -1)

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


parser/tensorflow/tensorflow_parser.cc (+29, -28)

@@ -915,8 +915,9 @@ Status TensorFlowModelParser::CheckOpType(const domi::tensorflow::NodeDef *node_
GE_CHK_STATUS_RET(CheckOpShapeDim(node_def, check_dims[op_type], valid), "failed to check op shape");
GE_IF_BOOL_EXEC(!valid, op_type = ge::parser::FRAMEWORKOP;
GELOGI("Set op %s to frameworkop", node_name.c_str());
framework_ops_[node_name] = node_def;);
);
framework_ops_[node_name] = node_def;
);
);

GE_IF_BOOL_EXEC(
op_type == ge::parser::ADD || op_type == ge::parser::MULTIPLY || op_type == ge::parser::MEAN,
@@ -1777,8 +1778,8 @@ bool TensorFlowModelParser::MaybeFusionOp(shared_ptr<ge::ScopeGraph> &scope_grap
std::vector<ge::ScopeFusionOpInfo> info_list;
auto &impl = scope_graph->impl_;
if (impl->IsFusionOpChild(node_def->name(), info_list)) {
GE_IF_BOOL_EXEC(
info_list.size() > 0, for (size_t i = 0; i < info_list.size(); ++i) {
GE_IF_BOOL_EXEC(info_list.size() > 0,
for (size_t i = 0; i < info_list.size(); ++i) {
fusion_op_type_map_[info_list[i].fusion_node_name].push_back(info_list[i].fusion_op_type);
fusion_op_type_map_[info_list[i].fusion_node_name].push_back(info_list[i].description);
fusion_op_nodedef_map_[info_list[i].fusion_node_name].push_back(node_def);
@@ -3480,35 +3481,35 @@ void TensorFlowModelParser::RemoveInputAttr(domi::tensorflow::NodeDef *node_def,
attr_map->find(ge::ATTR_NAME_INPUT_TENSOR_DESC);
if (it == attr_map->end()) {
GELOGW("Failed to find input desc from tf node_def[%s]", node_def->name().c_str());
} else {
domi::tensorflow::AttrValue *input_attr_value = &(it->second);
auto tmp_attr = input_attr_value->mutable_list()->mutable_func();
auto attr_it = tmp_attr->begin();
int index = 0;
for (auto input_it = inputs->begin(); input_it != inputs->end(); ++input_it, ++index) {
// 1.decide whether to remove the input
bool flag = false;
for (auto &remove_input : remove_inputs_map) {
string remove_input_name = remove_input.first;
vector<int> remove_input_indexs = remove_input.second;
if ((*input_it) == remove_input_name &&
std::find(remove_input_indexs.begin(), remove_input_indexs.end(), index) != remove_input_indexs.end()) {
GELOGD("Remove input attr:%s, index:%d", remove_input_name.c_str(), index);
flag = true;
break;
}
return;
}
domi::tensorflow::AttrValue *input_attr_value = &(it->second);
auto tmp_attr = input_attr_value->mutable_list()->mutable_func();
auto attr_it = tmp_attr->begin();
int index = 0;
for (auto input_it = inputs->begin(); input_it != inputs->end(); ++input_it, ++index) {
// 1.decide whether to remove the input
bool flag = false;
for (auto &remove_input : remove_inputs_map) {
string remove_input_name = remove_input.first;
vector<int> remove_input_indexs = remove_input.second;
if ((*input_it) == remove_input_name &&
std::find(remove_input_indexs.begin(), remove_input_indexs.end(), index) != remove_input_indexs.end()) {
GELOGD("Remove input attr:%s, index:%d", remove_input_name.c_str(), index);
flag = true;
break;
}
}

if (flag) {
// 2.1 remove the input attr
if (!tmp_attr->empty() && (attr_it != tmp_attr->end())) {
attr_it = tmp_attr->erase(attr_it);
} else {
++attr_it;
}
if (flag) {
// 2.1 remove the input attr
if (!tmp_attr->empty() && (attr_it != tmp_attr->end())) {
attr_it = tmp_attr->erase(attr_it);
} else {
++attr_it;
}
} else {
++attr_it;
}
}
}


parser/tensorflow/tensorflow_ref_switch_parser.cc (+0, -16)

@@ -64,29 +64,13 @@ Status TensorFlowRefSwitchParser::ParseParams(const Message *op_src, ge::OpDescP
op.Name(node->name());

GELOGI("RefSwitch Op %s ParseParams Begin.", node->name().c_str());
GE_RETURN_IF_ERROR(PreParseParams(node, &op));

GE_RETURN_WITH_LOG_IF_ERROR(ParseT(node, &op), "Parse T for node %s failed.", node->name().c_str());

GE_RETURN_IF_ERROR(PostParseParams(node, &op));

Status status = ConvertToOpDesc(op, op_dest);

return status;
}

// AUTO GEN PLEASE DO NOT MODIFY IT
Status TensorFlowRefSwitchParser::PreParseParams(const domi::tensorflow::NodeDef *node, RefSwitchOperator *op) {
(void)node;
(void)op;
return SUCCESS;
}

Status TensorFlowRefSwitchParser::PostParseParams(const domi::tensorflow::NodeDef *node, RefSwitchOperator *op) {
(void)node;
(void)op;
return SUCCESS;
}

REGISTER_OP_PARSER_CREATOR(TENSORFLOW, REFSWITCH, TensorFlowRefSwitchParser);
} // namespace ge

parser/tensorflow/tensorflow_ref_switch_parser.h (+0, -20)

@@ -35,26 +35,6 @@ class PARSER_FUNC_VISIBILITY TensorFlowRefSwitchParser : public TensorFlowOpPars
Status ParseParams(const Message *op_src, ge::OpDescPtr &op_dest) override;

protected:
/**
* @ingroup domi_omg
* @brief Parse model file information
* @param [in] v_input_const model data to be parsed
* @param [out] node parsed model data
* @return SUCCESS parsing succeeded
* @return FAILED parsing failed
*/
Status PreParseParams(const domi::tensorflow::NodeDef *node, RefSwitchOperator *op);

/**
* @ingroup domi_omg
* @brief Parse model file information
* @param [in] v_input_const model data to be parsed
* @param [out] node parsed model data
* @return SUCCESS parsing succeeded
* @return FAILED parsing failed
*/
Status PostParseParams(const domi::tensorflow::NodeDef *node, RefSwitchOperator *op);

/**
* @ingroup domi_omg
* @brief Parse model file information


parser/tensorflow/tensorflow_shape_n_parser.cc (+0, -17)

@@ -100,16 +100,12 @@ Status TensorFlowShapeNParser::ParseParams(const Message *op_src, ge::OpDescPtr
ShapeNOperator op;
op.Name(node->name());

GE_RETURN_IF_ERROR(PreParseParams(node, &op));

GE_RETURN_WITH_LOG_IF_ERROR(ParseInType(node, &op), "Parse in type for node %s failed.", node->name().c_str());

GE_RETURN_WITH_LOG_IF_ERROR(ParseN(node, &op), "Parse N for node %s failed.", node->name().c_str());

GE_RETURN_WITH_LOG_IF_ERROR(ParseOutType(node, &op), "Parse out type for node %s failed.", node->name().c_str());

GE_RETURN_IF_ERROR(PostParseParams(node, &op));

// add dynamic input/output
domi::tensorflow::AttrValue attr_num;
CHECK_FALSE_EXEC(TensorFlowUtil::FindAttrValue(node, SHAPEN_ATTR_N, attr_num),
@@ -154,18 +150,5 @@ Status TensorFlowShapeNParser::ParseParams(const Message *op_src, ge::OpDescPtr
return SUCCESS;
}

// AUTO GEN PLEASE DO NOT MODIFY IT
Status TensorFlowShapeNParser::PreParseParams(const domi::tensorflow::NodeDef *node, const ShapeNOperator *op) {
(void)node;
(void)op;
return SUCCESS;
}

Status TensorFlowShapeNParser::PostParseParams(const domi::tensorflow::NodeDef *node, const ShapeNOperator *op) {
(void)node;
(void)op;
return SUCCESS;
}

REGISTER_OP_PARSER_CREATOR(TENSORFLOW, SHAPEN, TensorFlowShapeNParser);
} // namespace ge

parser/tensorflow/tensorflow_shape_n_parser.h (+0, -3)

@@ -27,9 +27,6 @@ class PARSER_FUNC_VISIBILITY TensorFlowShapeNParser : public TensorFlowOpParser
Status ParseParams(const Message *op_src, ge::OpDescPtr &op_dest) override;

protected:
Status PreParseParams(const domi::tensorflow::NodeDef *node, const ShapeNOperator *op);
Status PostParseParams(const domi::tensorflow::NodeDef *node, const ShapeNOperator *op);

static Status ParseN(const domi::tensorflow::NodeDef *node, ShapeNOperator *op);
static Status ParseInType(const domi::tensorflow::NodeDef *node, ShapeNOperator *op);
static Status ParseOutType(const domi::tensorflow::NodeDef *node, ShapeNOperator *op);

