Browse Source

!15108 update example && bug fix

From: @zhujingxuan
Reviewed-by: @wangchengyuan,@hangangqiang
Signed-off-by: @wangchengyuan
pull/15108/MERGE
mindspore-ci-bot Gitee 4 years ago
parent
commit
e9065fd270
15 changed files with 127 additions and 59 deletions
  1. +1
    -0
      mindspore/lite/micro/cmake/package_wrapper.cmake
  2. +10
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc
  3. +2
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.cc
  4. +1
    -0
      mindspore/lite/micro/coder/opcoders/parallel.h
  5. +24
    -0
      mindspore/lite/micro/coder/wrapper/int8/conv3x3_run_int8_wrapper.c
  6. +38
    -0
      mindspore/lite/micro/coder/wrapper/int8/conv3x3_run_int8_wrapper.h
  7. +1
    -0
      mindspore/lite/micro/example/mnist_x86/CMakeLists.txt
  8. +2
    -2
      mindspore/lite/micro/example/mnist_x86/mnist.sh
  9. +1
    -0
      mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt
  10. +35
    -35
      mindspore/lite/micro/example/mnist_x86/src/net.c
  11. +2
    -2
      mindspore/lite/micro/example/mnist_x86/src/session.cc
  12. +1
    -1
      mindspore/lite/micro/example/mnist_x86/src/tensor.h
  13. +5
    -9
      mindspore/lite/micro/example/mnist_x86/src/weight.c
  14. +0
    -4
      mindspore/lite/micro/example/mnist_x86/src/weight.h
  15. +4
    -4
      mindspore/lite/micro/example/mobilenetv2/mobilenetv2.sh

+ 1
- 0
mindspore/lite/micro/cmake/package_wrapper.cmake View File

@@ -14,6 +14,7 @@ set(WRAPPER_SRC
${WRAPPER_DIR}/int8/conv_init_int8_wrapper.c
${WRAPPER_DIR}/int8/conv1x1_init_int8_wrapper.c
${WRAPPER_DIR}/int8/conv1x1_run_int8_wrapper.c
${WRAPPER_DIR}/int8/conv3x3_run_int8_wrapper.c
${WRAPPER_DIR}/int8/convolution_depthwise_int8_wrapper.c
${WRAPPER_DIR}/int8/resize_int8_wrapper.c
${WRAPPER_DIR}/int8/slice_int8_wrapper.c


+ 10
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc View File

@@ -137,6 +137,15 @@ int Conv2D3x3Int8Coder::DoCode(CoderContext *const context) {
"conv3x3_int8.c",
"fixed_point.c",
});
if (thread_num_ > 1) {
Collect(context,
{
"wrapper/int8/conv3x3_run_int8_wrapper.h",
},
{
"conv3x3_run_int8_wrapper.c",
});
}
nnacl::NNaclInt8Serializer code;
code.precision(kPrecision);
// call the op function
@@ -154,7 +163,7 @@ int Conv2D3x3Int8Coder::DoCode(CoderContext *const context) {
if (thread_num_ > 1) {
code.CodeBaseStruct("Conv3x3Int8Args", kRunArgs, c8_input_, transformed_filter_addr_, new_bias_addr_,
output_tensor_, tile_buffer_, block_unit_buffer_, tmp_dst_buffer_, tmp_out_, "&conv_param_");
code.CodeFunction(kParallelLaunch, "THREAD_POOL_DEFAULT", "Conv3x3Int8Run", kRunArgsAddr, "thread_num");
code.CodeFunction(kParallelLaunch, gThreadPool, "Conv3x3Int8Run", kRunArgsAddr, gThreadNum);
} else {
code.CodeFunction("Conv3x3Int8", c8_input_, transformed_filter_addr_, new_bias_addr_, output_tensor_, tile_buffer_,
block_unit_buffer_, tmp_dst_buffer_, tmp_out_, kDefaultTaskId, "&conv_param_");


+ 2
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.cc View File

@@ -62,7 +62,8 @@ void MatMulBaseInt8Coder::ResizeParameter() {
param_->row_align_ = UP_ROUND(param_->row_, row_tile_);
param_->col_align_ = UP_ROUND(param_->col_, col_tile_);
param_->deep_16_ = UP_ROUND(param_->deep_, C16NUM);
thread_count_ = MSMIN(param_->op_parameter_.thread_num_, UP_DIV(param_->col_align_, col_tile_));

thread_count_ = MSMIN(kDefaultThreadNum, UP_DIV(param_->col_align_, col_tile_));
thread_stride_ = UP_DIV(UP_DIV(param_->col_align_, col_tile_), thread_count_);
}



+ 1
- 0
mindspore/lite/micro/coder/opcoders/parallel.h View File

@@ -22,6 +22,7 @@ namespace mindspore::lite::micro {
constexpr auto kDefaultTaskId = 0;

constexpr auto kMaxThreadNumSupported = 4;
constexpr auto kDefaultThreadNum = 1;

// ParallelLaunch is defined in thread_pool
constexpr auto kParallelLaunch = "ParallelLaunch";


+ 24
- 0
mindspore/lite/micro/coder/wrapper/int8/conv3x3_run_int8_wrapper.c View File

@@ -0,0 +1,24 @@
/*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "wrapper/int8/conv3x3_run_int8_wrapper.h"

// Thread-pool trampoline: unpacks the bundled arguments and invokes the
// NNACL 3x3 int8 convolution kernel for the slice owned by task_id.
// Matches the ParallelLaunch callback signature (void *cdata, int task_id).
int Conv3x3Int8Run(void *cdata, int task_id) {
  const Conv3x3Int8Args *run_args = (const Conv3x3Int8Args *)cdata;
  Conv3x3Int8(run_args->input_data, run_args->transed_weight, run_args->bias_data, run_args->output_data,
              run_args->tile_buffer, run_args->block_unit_buffer, run_args->tmp_dst_buffer, run_args->tmp_out,
              task_id, run_args->conv_param);
  return NNACL_OK;
}

+ 38
- 0
mindspore/lite/micro/coder/wrapper/int8/conv3x3_run_int8_wrapper.h View File

@@ -0,0 +1,38 @@
/*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_INT8_CONV3x3_WRAPPER_INT8_WRAPPER_H_
#define MINDSPORE_LITE_MICRO_INT8_CONV3x3_WRAPPER_INT8_WRAPPER_H_

#include "nnacl/errorcode.h"
#include "nnacl/conv_parameter.h"
#include "nnacl/int8/conv3x3_int8.h"

// Argument bundle handed to Conv3x3Int8Run through the thread pool. All
// pointers are owned by the generated code (see conv2d_3x3_int8_coder.cc,
// which populates them from c8_input_/transformed_filter_addr_/... via
// CodeBaseStruct) and must remain valid for the duration of the launch.
typedef struct {
int16_t *input_data;       // C8-packed input feature map (c8_input_)
int16_t *transed_weight;   // transformed 3x3 filter (transformed_filter_addr_)
int32_t *bias_data;        // per-channel bias (new_bias_addr_)
int8_t *output_data;       // final int8 output tensor
int16_t *tile_buffer;      // scratch: input tile workspace
int16_t *block_unit_buffer;  // scratch: per-block transform workspace
int32_t *tmp_dst_buffer;   // scratch: int32 accumulation buffer
int8_t *tmp_out;           // scratch: NC4HW4 output before repack
ConvParameter *conv_param;   // convolution shape/quant parameters ("&conv_param_")
} Conv3x3Int8Args;

int Conv3x3Int8Run(void *cdata, int task_id);

#endif // MINDSPORE_LITE_MICRO_INT8_CONV3x3_WRAPPER_INT8_WRAPPER_H_

+ 1
- 0
mindspore/lite/micro/example/mnist_x86/CMakeLists.txt View File

@@ -26,6 +26,7 @@ endif()

if(PLATFORM_ARM32)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()

set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")


+ 2
- 2
mindspore/lite/micro/example/mnist_x86/mnist.sh View File

@@ -45,9 +45,9 @@ get_version() {
}

download_inference() {
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-inference-linux-x64"
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
local MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}"
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}-rc2/MindSpore/lite/release/linux/${MINDSPORE_FILE}"

if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}


+ 1
- 0
mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt View File

@@ -44,6 +44,7 @@ endif()

if(PLATFORM_ARM32)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()

set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")


+ 35
- 35
mindspore/lite/micro/example/mnist_x86/src/net.c View File

@@ -37,7 +37,7 @@ int CopyOutputsData(void **outputs, int num) {
if (num != 1) {
return RET_ERROR;
}
memcpy(outputs[0], g_Buffer + 32, 40);
memcpy(outputs[0], g_Buffer+32, 40);
return RET_OK;
}

@@ -62,11 +62,11 @@ void FreeResource() {
}
void Inference() {
{
memset((int16_t *)(g_Buffer + 10144), 0, 2048);
memset((int16_t *)(g_Buffer + 12192), 0, 256);
memset((int *)(g_Buffer + 12448), 0, 6144);
memset((int8_t *)(g_Buffer + 18592), 0, 8112);
memset((int16_t *)(g_Buffer + 26704), 0, 12544);
memset((int16_t *)(g_Buffer+10144), 0, 2048);
memset((int16_t *)(g_Buffer+12192), 0, 256);
memset((int *)(g_Buffer+12448), 0, 6144);
memset((int8_t *)(g_Buffer+18592), 0, 8112);
memset((int16_t *)(g_Buffer+26704), 0, 12544);
QuantArg conv_param__quant_arg_in[1] = {{0.003921568859368562698, -128}};
QuantArg conv_param__quant_arg_w[12] = {{0.005689438898116350174, 0}, {0.006241692230105400085, 0}, {0.007301395758986473083, 0}, {0.005148916970938444138, 0}, {0.005132303573191165924, 0}, {0.004976313561201095581, 0}, {0.00564815988764166832, 0}, {0.002269793068990111351, 0}, {0.0030086529441177845, 0}, {0.005234404932707548141, 0}, {0.007580270525068044662, 0}, {0.004589735530316829681, 0}};
QuantArg conv_param__quant_arg_out[1] = {{0.01811622083187103271, 17}};
@@ -78,24 +78,24 @@ int conv_param__out_act_min[1] = {-128};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(2), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 26);
ConvParameter conv_param_ = {{ "", true, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Input0), (int16_t *)(g_Buffer + 26704), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer + 26704), g_Weight10, g_Weight11, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 12192), (int *)(g_Buffer + 12448), (int8_t *)(g_Buffer + 18592), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 18592), (int8_t *)(g_Buffer + 0), 1, 676, 12);
ConvParameter conv_param_ = {{ "", false, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Input0), (int16_t *)(g_Buffer+26704), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer+26704), g_Weight10, g_Weight11, (int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+10144), (int16_t *)(g_Buffer+12192), (int *)(g_Buffer+12448), (int8_t *)(g_Buffer+18592), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+18592), (int8_t *)(g_Buffer+0), 1, 676, 12);
}
{
static QuantArg pooling_parameter_quant_in = {0.01811622083187103271, 17};
static QuantArg pooling_parameter_quant_out = {0.01811622083187103271, 17};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", true, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 8112), (PoolingParameter *)&pooling_parameter, 0);
const PoolingParameter pooling_parameter = {{ "", false, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+8112), (PoolingParameter *)&pooling_parameter, 0);
}
{
memset((int16_t *)(g_Buffer + 10144), 0, 4096);
memset((int16_t *)(g_Buffer + 14240), 0, 256);
memset((int *)(g_Buffer + 14496), 0, 6144);
memset((int8_t *)(g_Buffer + 20640), 0, 1452);
memset((int16_t *)(g_Buffer + 22092), 0, 5408);
memset((int16_t *)(g_Buffer+10144), 0, 4096);
memset((int16_t *)(g_Buffer+14240), 0, 256);
memset((int *)(g_Buffer+14496), 0, 6144);
memset((int8_t *)(g_Buffer+20640), 0, 1452);
memset((int16_t *)(g_Buffer+22092), 0, 5408);
QuantArg conv_param__quant_arg_in[1] = {{0.01811622083187103271, 17}};
QuantArg conv_param__quant_arg_w[12] = {{0.006381968967616558075, 0}, {0.005092236679047346115, 0}, {0.004954888485372066498, 0}, {0.007594361435621976852, 0}, {0.006317862775176763535, 0}, {0.004739056341350078583, 0}, {0.004733041394501924515, 0}, {0.005125139374285936356, 0}, {0.005773660261183977127, 0}, {0.007067613303661346436, 0}, {0.00728381425142288208, 0}, {0.004714466165751218796, 0}};
QuantArg conv_param__quant_arg_out[1] = {{0.118615470826625824, 31}};
@@ -107,26 +107,26 @@ int conv_param__out_act_min[1] = {-128};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 11);
ConvParameter conv_param_ = {{ "", true, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Buffer + 8112), (int16_t *)(g_Buffer + 22092), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer + 22092), g_Weight12, g_Weight13, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 14240), (int *)(g_Buffer + 14496), (int8_t *)(g_Buffer + 20640), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 20640), (int8_t *)(g_Buffer + 0), 1, 121, 12);
ConvParameter conv_param_ = {{ "", false, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Buffer+8112), (int16_t *)(g_Buffer+22092), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer+22092), g_Weight12, g_Weight13, (int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+10144), (int16_t *)(g_Buffer+14240), (int *)(g_Buffer+14496), (int8_t *)(g_Buffer+20640), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+20640), (int8_t *)(g_Buffer+0), 1, 121, 12);
}
{
static QuantArg pooling_parameter_quant_in = {0.118615470826625824, 31};
static QuantArg pooling_parameter_quant_out = {0.118615470826625824, 31};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", true, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 1456), (PoolingParameter *)&pooling_parameter, 0);
const PoolingParameter pooling_parameter = {{ "", false, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+1456), (PoolingParameter *)&pooling_parameter, 0);
}
{
const ReshapeQuantArg reshape_quant_arg = {{0.118615470826625824, 31}, {0.118615470826625824, 31}, -128, 127};
Int8Reshape((int8_t *)(g_Buffer + 1456), (int8_t *)(g_Buffer + 0), 300, reshape_quant_arg);
Int8Reshape((int8_t *)(g_Buffer+1456), (int8_t *)(g_Buffer+0), 300, reshape_quant_arg);
}
{
int32_t tmp_weight_zp = 0;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 0)+0, (int8_t *)(g_Buffer + 10144), 1, 300);
CalcInputSums((int8_t *)(g_Buffer + 0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer + 11360), RowMajor);
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+0)+0, (int8_t *)(g_Buffer+10144), 1, 300);
CalcInputSums((int8_t *)(g_Buffer+0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer+11360), RowMajor);
float filter_scale[1] = {0.007667620200663805008};
int filter_zp[1] = {0};
int left_shift[1] = {0};
@@ -137,12 +137,12 @@ int32_t *cur_left = matmul_quant_parameter.left_shift_;
int32_t *cur_right = matmul_quant_parameter.right_shift_;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight15+0 + 0, (int8_t *)(g_Buffer + 304)+0+0, 1, 20, 304, (int *)(g_Buffer + 11360), g_Weight16+0, -128, 127, 11, cur_mul, cur_left, cur_right, 20, false, cur_zp);
MatmulInt8Opt((int8_t *)(g_Buffer+10144), g_Weight15+0 + 0, (int8_t *)(g_Buffer+304)+0+0, 1, 20, 304, (int *)(g_Buffer+11360), g_Weight16+0, -128, 127, 11, cur_mul, cur_left, cur_right, 20, false, cur_zp);
}
{
int32_t tmp_weight_zp = 0;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 304)+0, (int8_t *)(g_Buffer + 10144), 1, 20);
CalcInputSums((int8_t *)(g_Buffer + 304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer + 10272), RowMajor);
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+304)+0, (int8_t *)(g_Buffer+10144), 1, 20);
CalcInputSums((int8_t *)(g_Buffer+304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer+10272), RowMajor);
float filter_scale[1] = {0.006908571347594261169};
int filter_zp[1] = {0};
int left_shift[1] = {0};
@@ -153,16 +153,16 @@ int32_t *cur_left = matmul_quant_parameter.left_shift_;
int32_t *cur_right = matmul_quant_parameter.right_shift_;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight18+0 + 0, (int8_t *)(g_Buffer + 0)+0+0, 1, 10, 32, (int *)(g_Buffer + 10272), g_Weight19+0, -128, 127, -20, cur_mul, cur_left, cur_right, 10, false, cur_zp);
MatmulInt8Opt((int8_t *)(g_Buffer+10144), g_Weight18+0 + 0, (int8_t *)(g_Buffer+0)+0+0, 1, 10, 32, (int *)(g_Buffer+10272), g_Weight19+0, -128, 127, -20, cur_mul, cur_left, cur_right, 10, false, cur_zp);
}
{
const SoftmaxQuantArg quant_args = {{1.073398709297180176, 20}, {0.00390625, -128}, -128, 127, 1152553088, 27, 27};
const SoftmaxParameter softmax_parameter = {{ "", true, 138, g_thread_num, 0}, 1, {1, 10}, 10, 2};
memset((int *)(g_Buffer + 10144), 0, 40);
memset((int *)(g_Buffer + 10184), 0, 40);
SoftmaxInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 16), 1, (int *)(g_Buffer + 10144), (int *)(g_Buffer + 10184), quant_args, (SoftmaxParameter *)&softmax_parameter);
const SoftmaxParameter softmax_parameter = {{ "", false, 138, g_thread_num, 0}, 1, {1, 10}, 10, 2};
memset((int *)(g_Buffer+10144), 0, 40);
memset((int *)(g_Buffer+10184), 0, 40);
SoftmaxInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+16), 1, (int *)(g_Buffer+10144), (int *)(g_Buffer+10184), quant_args, (SoftmaxParameter *)&softmax_parameter);
}
{
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer + 16), (float *)(g_Buffer + 32), 0.00390625, -128, 10);
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer+16), (float *)(g_Buffer+32), 0.00390625, -128, 10);
}
}

+ 2
- 2
mindspore/lite/micro/example/mnist_x86/src/session.cc View File

@@ -24,7 +24,7 @@ namespace mindspore {
namespace lite {
int LiteSession::CompileGraph(lite::Model *model) {
inputs_.resize(1);
Vector<int32_t> in_shape_0;
Vector<int> in_shape_0;
in_shape_0.resize(4);
in_shape_0[0] = 1;
in_shape_0[1] = 28;
@@ -33,7 +33,7 @@ int LiteSession::CompileGraph(lite::Model *model) {
inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeInt8, in_shape_0);
MS_ERROR_IF_NULL(inputs_[0]);
outputs_.resize(1);
Vector<int32_t> out_shape_0;
Vector<int> out_shape_0;
out_shape_0.resize(2);
out_shape_0[0] = 1;
out_shape_0[1] = 10;


+ 1
- 1
mindspore/lite/micro/example/mnist_x86/src/tensor.h View File

@@ -38,7 +38,7 @@ struct QuantArg {
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(String name, TypeId type, Vector<int32_t> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
MTensor(String name, TypeId type, Vector<int> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
~MTensor() override;

TypeId data_type() const override { return data_type_; }


+ 5
- 9
mindspore/lite/micro/example/mnist_x86/src/weight.c View File

@@ -29,10 +29,6 @@ int32_t *g_Weight16 = NULL;
int32_t *g_Weight17 = NULL;
int8_t *g_Weight18 = NULL;
int32_t *g_Weight19 = NULL;
int8_t g_Weight6[6000];
int32_t g_Weight7[20];
int8_t g_Weight8[200];
int32_t g_Weight9[10];

int Init(void *weight_buffer, int weight_size) {
if (weight_buffer == NULL) {
@@ -43,19 +39,19 @@ int Init(void *weight_buffer, int weight_size) {
size_t size;
size_t offset;
};
int8_t *g_Weight6 = (weight_buffer + 9312);
int32_t *g_Weight7 = (weight_buffer + 15312);
int8_t *g_Weight8 = (weight_buffer + 15392);
int32_t *g_Weight9 = (weight_buffer + 15592);

struct ModelParameter model_params[] = {
{g_Weight10, 3072, 0},
{g_Weight11, 48, 3072},
{g_Weight12, 6144, 3120},
{g_Weight13, 48, 9264},
{g_Weight6, 6000, 9312},
{g_Weight7, 80, 15312},
{g_Weight8, 200, 15392},
{g_Weight9, 40, 15592},
};

for(int i = 0; i < 8; ++i) {
for(int i = 0; i < 4; ++i) {
if (model_params[i].offset + model_params[i].size > weight_size) {
return RET_ERROR;
}


+ 0
- 4
mindspore/lite/micro/example/mnist_x86/src/weight.h View File

@@ -45,7 +45,3 @@ extern int32_t *g_Weight16;
extern int32_t *g_Weight17;
extern int8_t *g_Weight18;
extern int32_t *g_Weight19;
extern int8_t g_Weight6[];
extern int32_t g_Weight7[];
extern int8_t g_Weight8[];
extern int32_t g_Weight9[];

+ 4
- 4
mindspore/lite/micro/example/mobilenetv2/mobilenetv2.sh View File

@@ -67,9 +67,9 @@ download_inference() {
else
local ARM_NAME=aarch32
fi
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-inference-android-${ARM_NAME}"
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-android-${ARM_NAME}"
local MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}"
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}-rc2/MindSpore/lite/release/android/${MINDSPORE_FILE}"

if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
@@ -89,9 +89,9 @@ download_mobile() {
}

gen_mobile() {
local CODEGEN_FILE_NAME="mindspore-lite-${VERSION_STR}-inference-linux-x64"
local CODEGEN_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
local CODEGEN_FILE="${CODEGEN_FILE_NAME}.tar.gz"
local CODEGEN_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${CODEGEN_FILE}"
local CODEGEN_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}-rc2/MindSpore/lite/release/linux/${CODEGEN_FILE}"

if [ ! -e ${BASEPATH}/build/${CODEGEN_FILE} ]; then
wget -c -O ${BASEPATH}/build/${CODEGEN_FILE} --no-check-certificate ${CODEGEN_LITE_DOWNLOAD_URL}


Loading…
Cancel
Save