|
|
|
@@ -100,7 +100,8 @@ static float Fp16ToFloat(const uint16_t &fp_val) {
     e_ret = 0;
     m_ret = 0;
   } else {
-    e_ret = (static_cast<uint32_t>(hf_exp) - static_cast<uint32_t>(kFp16ExpBias)) + static_cast<uint32_t>(kFp32ExpBias);
+    e_ret = static_cast<uint32_t>((static_cast<uint32_t>(hf_exp) - static_cast<uint32_t>(kFp16ExpBias)) +
+                                  static_cast<uint32_t>(kFp32ExpBias));
     m_ret = hf_man & kFp16ManMask;
     m_ret = m_ret << (kFp32ManLen - kFp16ManLen);
   }
@@ -132,7 +133,8 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
     e_ret = 0;
     m_ret = 0;
   } else {
-    e_ret = (static_cast<uint64_t>(hf_exp) - static_cast<uint64_t>(kFp16ExpBias)) + static_cast<uint64_t>(kFp64ExpBias);
+    e_ret = static_cast<uint64_t>((static_cast<uint64_t>(hf_exp) - static_cast<uint64_t>(kFp16ExpBias)) +
+                                  static_cast<uint64_t>(kFp64ExpBias));
     m_ret = hf_man & kFp16ManMask;
     m_ret = m_ret << (kFp64ManLen - kFp16ManLen);
   }
@@ -150,7 +152,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
 /// @return Return uint8 value of fp16_t object
 static uint8_t GetUint8ValByMan(uint8_t s_ret, const uint64_t &long_int_m, const uint16_t &shift_out) {
   bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-  auto m_ret = static_cast<uint8_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
+  auto m_ret = static_cast<uint8_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen8Max);
   need_round = need_round && ((s_ret == 0 && m_ret < kInt8Max) || (s_ret == 1 && m_ret <= kInt8Max));
   if (need_round) {
     m_ret++;
@@ -172,14 +174,14 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
   int8_t ret;
   uint8_t ret_v;
   // 1.get s_ret and shift it to bit0.
-  uint8_t s_ret = Fp16ExtracSign(fp_val);
+  uint8_t s_ret = static_cast<uint8_t>(Fp16ExtracSign(fp_val));
   // 2.get hf_e and hf_m
   uint16_t hf_e = Fp16ExtracExp(fp_val);
   uint16_t hf_m = Fp16ExtracMan(fp_val);
 
   if (Fp16IsDenorm(fp_val)) { // Denormalized number
     ret_v = 0;
-    ret = *(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v));
+    ret = static_cast<int8_t>(*(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v)));
     return ret;
   }
 
@@ -215,7 +217,7 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
     ret_v = GetUint8ValByMan(s_ret, long_int_m, shift_out);
   }
 
-  ret = *(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v));
+  ret = static_cast<int8_t>(*(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v)));
   return ret;
 }
 
@@ -226,7 +228,7 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
 static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
   uint8_t m_ret = 0;
   // 1.get s_ret and shift it to bit0.
-  uint8_t s_ret = Fp16ExtracSign(fp_val);
+  uint8_t s_ret = static_cast<uint8_t>(Fp16ExtracSign(fp_val));
   // 2.get hf_e and hf_m
   uint16_t hf_e = Fp16ExtracExp(fp_val);
   uint16_t hf_m = Fp16ExtracMan(fp_val);
@@ -258,7 +260,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
   }
   if (overflow_flag == 0U) {
     bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-    m_ret = static_cast<uint8_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
+    m_ret = static_cast<uint8_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen8Max);
     if (need_round && m_ret != kBitLen8Max) {
       m_ret++;
     }
@@ -280,7 +282,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
 /// @return Return uint16 value of fp16_t object
 static uint16_t GetUint16ValByMan(uint16_t s_ret, const uint64_t &long_int_m, const uint16_t &shift_out) {
   bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-  auto m_ret = static_cast<uint16_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen16Max);
+  auto m_ret = static_cast<uint16_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen16Max);
   if (need_round && m_ret < kInt16Max) {
     m_ret++;
   }
@@ -343,7 +345,7 @@ static int16_t Fp16ToInt16(const uint16_t &fp_val) {
     // Generate final result
     ret_v = GetUint16ValByMan(s_ret, long_int_m, shift_out);
   }
-  ret = *(ge::PtrToPtr<uint16_t, uint16_t>(&ret_v));
+  ret = static_cast<int16_t>(*(ge::PtrToPtr<uint16_t, uint16_t>(&ret_v)));
   return ret;
 }
 
@@ -378,7 +380,7 @@ static uint16_t Fp16ToUInt16(const uint16_t &fp_val) {
     }
   }
   bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-  m_ret = static_cast<uint16_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen16Max);
+  m_ret = static_cast<uint16_t>((long_int_m >> static_cast<int16_t>(kFp16ManLen + shift_out)) & kBitLen16Max);
   if (need_round && m_ret != kBitLen16Max) {
     m_ret++;
   }
@@ -419,7 +421,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
     }
   }
   bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-  auto m_ret = static_cast<uint32_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen32Max);
+  auto m_ret = static_cast<uint32_t>((long_int_m >> static_cast<uint32_t>(kFp16ManLen + shift_out)) & kBitLen32Max);
   if (need_round && m_ret < kInt32Max) {
     m_ret++;
   }
@@ -434,7 +436,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
     ret_v = (s_ret << static_cast<uint16_t>(kBitShift31)) | (m_ret);
   }
 
-  return *(ge::PtrToPtr<uint32_t, uint32_t>(&ret_v));
+  return static_cast<int32_t>(*(ge::PtrToPtr<uint32_t, uint32_t>(&ret_v)));
 }
 
 /// @ingroup fp16_t math conversion static method
@@ -468,7 +470,7 @@ static uint32_t Fp16ToUInt32(const uint16_t &fp_val) {
     }
   }
   bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
-  m_ret = static_cast<uint32_t>(long_int_m >> (kFp16ManLen + shift_out)) & kBitLen32Max;
+  m_ret = static_cast<uint32_t>(long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen32Max;
   if (need_round && m_ret != kBitLen32Max) {
     m_ret++;
   }
@@ -483,10 +485,10 @@ static uint32_t Fp16ToUInt32(const uint16_t &fp_val) {
 
 static uint16_t Fp16AddCalVal(uint16_t s_ret, int16_t e_ret, uint16_t m_ret, uint32_t m_trunc, uint16_t shift_out) {
   uint16_t m_min = kFp16ManHideBit << shift_out;
-  uint16_t m_max = m_min << 1;
+  uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
   // Denormal
   while (m_ret < m_min && e_ret > 0) { // the value of m_ret should not be smaller than 2^23
-    m_ret = m_ret << 1;
+    m_ret = static_cast<uint16_t>(m_ret << 1U);
     m_ret += (kFp32SignMask & m_trunc) >> kFp32SignIndex;
     m_trunc = m_trunc << 1;
     e_ret = e_ret - 1;
@@ -560,12 +562,12 @@ static uint16_t Fp16Add(uint16_t v_1, uint16_t v_2) {
 
   uint32_t m_trunc = 0;
   int16_t e_ret = std::max(e_a, e_b);
-  int16_t e_tmp = std::abs(e_a - e_b);
+  int16_t e_tmp = static_cast<int16_t>(std::abs(e_a - e_b));
   if (e_a > e_b) {
-    m_trunc = (m_b << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
+    m_trunc = (m_b << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
     m_b = RightShift(m_b, e_tmp);
   } else if (e_a < e_b) {
-    m_trunc = (m_a << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
+    m_trunc = (m_a << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
     m_a = RightShift(m_a, e_tmp);
   }
   // calculate mantissav
@@ -690,7 +692,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
     m_b = m_tmp;
   }
   m_div = static_cast<float>(m_a * 1.0f / m_b);
-  fp16_t fp_div = m_div;
+  fp16_t fp_div = static_cast<fp16_t>(m_div);
   ret = fp_div.val;
   if (s_a != s_b) {
     ret |= kFp16SignMask;
@@ -917,7 +919,7 @@ fp16_t &fp16_t::operator=(const int8_t &i_val) {
 
     e_ret = kFp16ManLen;
     while ((m_ret & kFp16ManHideBit) == 0) {
-      m_ret = m_ret << 1;
+      m_ret = static_cast<uint16_t>(m_ret << 1U);
       e_ret = e_ret - 1U;
     }
     e_ret = e_ret + kFp16ExpBias;
@@ -935,7 +937,7 @@ fp16_t &fp16_t::operator=(const uint8_t &ui_val) {
   if (static_cast<bool>(m_ret)) {
     e_ret = kFp16ManLen;
     while ((m_ret & kFp16ManHideBit) == 0) {
-      m_ret = m_ret << 1;
+      m_ret = static_cast<uint16_t>(m_ret << 1U);
       e_ret = e_ret - 1U;
     }
     e_ret = e_ret + kFp16ExpBias;
@@ -948,18 +950,18 @@ fp16_t &fp16_t::operator=(const uint8_t &ui_val) {
 static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, uint16_t &ret_val) {
   uint32_t m_tmp = (input_val & kFp32AbsMax);
   uint16_t m_min = kFp16ManHideBit;
-  uint16_t m_max = m_min << 1;
+  uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
   uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
   if (static_cast<bool>(m_tmp)) {
     int16_t e_ret;
     if (len > static_cast<uint16_t>(kDim11)) {
       e_ret = kFp16ExpBias + kFp16ManLen;
-      uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
+      uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
       uint32_t trunc_mask = 1;
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
-      uint32_t m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
+      uint32_t m_trunc = (m_tmp & trunc_mask) << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
       for (int i = 0; i < e_tmp; i++) {
         m_tmp = (m_tmp >> 1);
         e_ret = e_ret + 1;
@@ -978,8 +980,8 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
       }
     } else {
       e_ret = static_cast<int16_t>(kFp16ExpBias);
-      m_tmp = m_tmp << (kManBitLength - len);
-      e_ret = e_ret + (len - 1);
+      m_tmp = m_tmp << static_cast<uint16_t>(kManBitLength - len);
+      e_ret = e_ret + static_cast<int16_t>(len - 1);
     }
     auto m_ret = static_cast<uint16_t>(m_tmp);
     ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
@@ -990,7 +992,7 @@ fp16_t &fp16_t::operator=(const int16_t &i_val) {
   if (i_val == 0) {
     val = 0;
   } else {
-    uint16_t ui_val = *(ge::PtrToPtr<const int16_t, const int16_t>(&i_val));
+    uint16_t ui_val = static_cast<uint16_t>(*(ge::PtrToPtr<const int16_t, const int16_t>(&i_val)));
     auto s_ret = static_cast<uint16_t>(ui_val >> static_cast<uint16_t>(kBitShift15));
     if (static_cast<bool>(s_ret)) {
       int16_t iValM = -i_val;
@@ -1008,17 +1010,17 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
     int16_t e_ret;
     uint16_t m_ret = ui_val;
     uint16_t m_min = kFp16ManHideBit;
-    uint16_t m_max = m_min << 1;
+    uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
     uint16_t len = static_cast<uint16_t>(GetManBitLength(m_ret));
     if (len > kManBitLength) {
       e_ret = kFp16ExpBias + kFp16ManLen;
       uint32_t m_trunc;
       uint32_t trunc_mask = 1;
-      uint16_t e_tmp = len - kManBitLength;
+      uint16_t e_tmp = static_cast<uint16_t>(len - kManBitLength);
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
-      m_trunc = (m_ret & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
+      m_trunc = (m_ret & trunc_mask) << static_cast<int16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
       for (int i = 0; i < e_tmp; i++) {
         m_ret = (m_ret >> 1);
         e_ret = e_ret + 1;
@@ -1040,8 +1042,8 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
       }
     } else {
       e_ret = static_cast<int16_t>(kFp16ExpBias);
-      m_ret = m_ret << (static_cast<uint16_t>(kDim11) - len);
-      e_ret = e_ret + (len - 1);
+      m_ret = m_ret << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
+      e_ret = static_cast<int16_t>(e_ret + (len - 1));
     }
     val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
   }
@@ -1054,15 +1056,15 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
   uint32_t m_min = kFp16ManHideBit;
   uint32_t m_max = m_min << 1;
   uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
-  if (len > kDim11) {
+  if (len > static_cast<uint16_t>(kDim11)) {
     e_ret = kFp16ExpBias + kFp16ManLen;
     uint32_t m_trunc = 0;
     uint32_t trunc_mask = 1;
-    uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
+    uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
    for (int i = 1; i < e_tmp; i++) {
      trunc_mask = (trunc_mask << 1) + 1;
    }
-    m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
+    m_trunc = (m_tmp & trunc_mask) << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
     for (int i = 0; i < e_tmp; i++) {
       m_tmp = (m_tmp >> 1);
       e_ret = e_ret + 1;
@@ -1085,8 +1087,8 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
     }
   } else {
     e_ret = static_cast<int16_t>(kFp16ExpBias);
-    m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
-    e_ret = e_ret + (len - 1);
+    m_tmp = m_tmp << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
+    e_ret = e_ret + static_cast<int16_t>(len - 1);
   }
   auto m_ret = static_cast<uint16_t>(m_tmp);
   ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
@@ -1116,11 +1118,11 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
     uint32_t m_min = kFp16ManHideBit;
     uint32_t m_max = m_min << 1;
     uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
-    if (len > kDim11) {
+    if (len > static_cast<uint16_t>(kDim11)) {
       e_ret = kFp16ExpBias + kFp16ManLen;
       uint32_t m_trunc = 0;
       uint32_t trunc_mask = 1;
-      uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
+      uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
@@ -1147,8 +1149,8 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
       }
     } else {
       e_ret = static_cast<int16_t>(kFp16ExpBias);
-      m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
-      e_ret = e_ret + (len - 1);
+      m_tmp = m_tmp << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
+      e_ret = static_cast<int16_t>(e_ret + (len - 1));
     }
     auto m_ret = static_cast<uint16_t>(m_tmp);
     val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
@@ -1200,7 +1202,7 @@ fp16_t &fp16_t::operator=(const double &d_val) {
   } else { // Regular case with no overflow or underflow
     e_ret = static_cast<int16_t>(e_d - 0x3F0u);
 
-    need_round = IsRoundOne(m_d, m_len_delta);
+    need_round = IsRoundOne(m_d, static_cast<uint16_t>(m_len_delta));
     m_ret = static_cast<uint16_t>(m_d >> m_len_delta);
     if (need_round) {
       m_ret++;
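Aside, not part of the patch above: the common pattern in these hunks is that C++ promotes operands narrower than `int` before arithmetic and shifts, so expressions such as `m_min << 1` or `len - kDim11` have type `int`, and assigning them back to a 16-bit variable is an implicit narrowing that conversion warnings flag; the added `static_cast`s make that narrowing explicit without changing the computed value. A minimal sketch of the promotion behaviour, using hypothetical names and an illustrative value:

```cpp
#include <cstdint>
#include <type_traits>

int main() {
  uint16_t m_min = 0x0400U;  // illustrative value (fp16 hidden mantissa bit)
  // The left operand of << is promoted to int, so the shift result is int, not uint16_t.
  static_assert(std::is_same<decltype(m_min << 1U), int>::value,
                "shift result has the promoted type");
  // Assigning the promoted result back to uint16_t narrows implicitly; the cast
  // states the intent and keeps -Wconversion style builds warning-free.
  uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
  return (m_max == 0x0800) ? 0 : 1;
}
```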