Browse Source

!7836 add smoke for mindir weight quant

Merge pull request !7836 from ghzl/add-smoke-for-mindir-weight-quant
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
f7d98a5e6b
4 changed files with 50 additions and 12 deletions
  1. +8
    -8
      mindspore/lite/src/ops/primitive_c.cc
  2. +3
    -0
      mindspore/lite/test/models_mindspore_weightquant.cfg
  3. +36
    -1
      mindspore/lite/test/run_benchmark_nets.sh
  4. +3
    -3
      mindspore/lite/tools/anf_exporter/anf_exporter.cc

+ 8
- 8
mindspore/lite/src/ops/primitive_c.cc View File

@@ -240,6 +240,14 @@ void PrimitiveC::PopulaterQuantParam(const Primitive &prim, const std::vector<An
input_quant_param_.emplace_back(quants);
}

// fill input_quant_param_ by not inited quant_param
if (input_quant_param_.size() < inputs.size()) {
quants.clear();
schema::QuantParamT tmpQuantParam;
quants.emplace_back(tmpQuantParam);
input_quant_param_.insert(input_quant_param_.end(), inputs.size() - input_quant_param_.size(), quants);
}

if (input_quant_param_.size() == kDoubleNum) {
quants.clear();
quantParam.min = 0.0;
@@ -250,14 +258,6 @@ void PrimitiveC::PopulaterQuantParam(const Primitive &prim, const std::vector<An
input_quant_param_.emplace_back(quants);
}

// fill input_quant_param_ by not inited quant_parm
if (input_quant_param_.size() < inputs.size()) {
quants.clear();
schema::QuantParamT tmpQuantParam;
quants.emplace_back(tmpQuantParam);
input_quant_param_.insert(input_quant_param_.end(), inputs.size() - 1 - input_quant_param_.size(), quants);
}

quants.clear();
auto outputMin = prim.GetAttr("output_minq");
auto outputMax = prim.GetAttr("output_maxq");


+ 3
- 0
mindspore/lite/test/models_mindspore_weightquant.cfg View File

@@ -0,0 +1,3 @@
retinaface.mindir
mobilefacenet.mindir
#efficientnet.mindir

+ 36
- 1
mindspore/lite/test/run_benchmark_nets.sh View File

@@ -163,7 +163,7 @@ function Run_Converter() {
fi
done < ${models_fp16_config}

# Convert weightquant models:
# Convert tflite weightquant models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
@@ -179,6 +179,22 @@ function Run_Converter() {
fi
done < ${models_tflite_weightquant_config}

# Convert mindir weightquant models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=MINDIR --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'_weightquant --quantType=WeightQuant --bitNum=8 --quantWeightSize=500 --quantWeightChannel=16' >> "${run_converter_log_file}"
./converter_lite --fmk=MINDIR --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_weightquant --quantType=WeightQuant --bitNum=8 --quantWeightSize=500 --quantWeightChannel=16
if [ $? = 0 ]; then
converter_result='converter weight_quant '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter weight_quant '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_mindspore_weightquant_config}

# Convert models which do not need to be cared about the accuracy:
while read line; do
model_name=${line}
@@ -378,6 +394,24 @@ function Run_x86() {
fi
done < ${models_tflite_weightquant_config}

# Run mindir weight quantization converted models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_x86_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_x86_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.weightquant.ms.out' >> "${run_x86_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.weightquant.ms.out >> "${run_x86_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_mindspore_weightquant_config}

# Run converted models which do not need to be cared about the accuracy:
while read line; do
model_name=${line}
@@ -890,6 +924,7 @@ models_fp16_config=${basepath}/models_fp16.cfg
models_mindspore_config=${basepath}/models_mindspore.cfg
models_mindspore_train_config=${basepath}/models_mindspore_train.cfg
models_tflite_gpu_config=${basepath}/models_fp32_gpu.cfg
models_mindspore_weightquant_config=${basepath}/models_mindspore_weightquant.cfg
models_fp16_gpu_config=${basepath}/models_fp16_gpu.cfg
models_arm32_config=${basepath}/models_arm32.cfg
models_compatibility_config=${basepath}/models_compatibility.cfg


+ 3
- 3
mindspore/lite/tools/anf_exporter/anf_exporter.cc View File

@@ -102,9 +102,9 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me
if (!input_quant_params.empty()) {
for (size_t i = 0; i < input_quant_params.size(); i++) {
if (i >= dst_node->inputIndex.size()) {
MS_LOG(ERROR) << "node: " << dst_node->name << " input has " << input_quant_params.size()
<< " quant_params; but only " << dst_node->inputIndex.size() << " input";
return RET_PARAM_INVALID;
MS_LOG(INFO) << "node: " << dst_node->name << " input has " << input_quant_params.size()
<< " quant_params; but only " << dst_node->inputIndex.size() << " input";
break;
}
auto activate_index = dst_node->inputIndex[i];
auto tensor_input = meta_graph->allTensors[activate_index].get();


Loading…
Cancel
Save