Browse Source

!8649 [MSLITE] Release pointer of split parameter in destructor.

From: @wang_shaocong
Reviewed-by: 
Signed-off-by:
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
5b5615fa95
10 changed files with 116 additions and 35 deletions
  1. +2
    -2
      mindspore/lite/src/ops/conv2d.cc
  2. +8
    -4
      mindspore/lite/src/ops/primitive_c.cc
  3. +2
    -2
      mindspore/lite/src/ops/reshape.cc
  4. +2
    -2
      mindspore/lite/src/ops/transpose.cc
  5. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/base/split_base.h
  6. +1
    -0
      mindspore/lite/test/models_onnx.cfg
  7. +0
    -0
      mindspore/lite/test/models_with_several_inputs_or_without_outputs.cfg
  8. +89
    -17
      mindspore/lite/test/run_benchmark_nets.sh
  9. +5
    -6
      mindspore/lite/tools/anf_exporter/anf_exporter.cc
  10. +1
    -1
      mindspore/lite/tools/common/graph_util.cc

+ 2
- 2
mindspore/lite/src/ops/conv2d.cc View File

@@ -219,8 +219,8 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
attr->padRight = pad_list[3]; attr->padRight = pad_list[3];


auto dilation = CastToInt(prim.GetAttr("dilation"), true); auto dilation = CastToInt(prim.GetAttr("dilation"), true);
attr->dilateH = dilation[0];
attr->dilateW = dilation[1];
attr->dilateH = dilation[2];
attr->dilateW = dilation[3];


auto kernel_size = CastToInt(prim.GetAttr("kernel_size"), true); auto kernel_size = CastToInt(prim.GetAttr("kernel_size"), true);
attr->kernelH = kernel_size[0]; attr->kernelH = kernel_size[0];


+ 8
- 4
mindspore/lite/src/ops/primitive_c.cc View File

@@ -191,7 +191,7 @@ std::vector<int> CastToInt(const ValuePtr value, bool is_vector) {
MS_LOG(WARNING) << "valueptr is not a sequence, value may be a scalar."; MS_LOG(WARNING) << "valueptr is not a sequence, value may be a scalar.";
return {}; return {};
} }
if (value->cast<ValueSequeuePtr>()->value().front()->type()->type_name() == "Int64Imm") {
if (value->cast<ValueSequeuePtr>()->value().front()->type()->number_type() == kNumberTypeInt64) {
auto origin_value = GetValue<std::vector<int64_t>>(value); auto origin_value = GetValue<std::vector<int64_t>>(value);
for (size_t index = 0; index < origin_value.size(); ++index) { for (size_t index = 0; index < origin_value.size(); ++index) {
cur_value.push_back(static_cast<int>(origin_value[index])); cur_value.push_back(static_cast<int>(origin_value[index]));
@@ -200,7 +200,7 @@ std::vector<int> CastToInt(const ValuePtr value, bool is_vector) {
cur_value = GetValue<std::vector<int>>(value); cur_value = GetValue<std::vector<int>>(value);
} }
} else { } else {
if (value->type_name() == "Int64Imm") {
if (value->type()->number_type() == kNumberTypeInt64) {
cur_value.push_back(static_cast<int>(GetValue<int64_t>(value))); cur_value.push_back(static_cast<int>(GetValue<int64_t>(value)));
} else { } else {
cur_value.push_back(GetValue<int>(value)); cur_value.push_back(GetValue<int>(value));
@@ -321,9 +321,9 @@ void PrimitiveC::GetAttrDataFromInput(const AnfNodePtr inputNode, std::vector<in
auto tuple = val->cast<ValueTuplePtr>(); auto tuple = val->cast<ValueTuplePtr>();
MS_ASSERT(tuple != nullptr); MS_ASSERT(tuple != nullptr);
for (size_t i = 0; i < tuple->size(); i++) { for (size_t i = 0; i < tuple->size(); i++) {
auto elem = tuple->value()[i]->cast<Int32ImmPtr>();
auto elem = tuple->value()[i];
MS_ASSERT(elem != nullptr); MS_ASSERT(elem != nullptr);
data->emplace_back(static_cast<int>(elem->value()));
data->emplace_back(CastToInt(elem, false).front());
} }
} }
} }
@@ -556,6 +556,10 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<ExpandDims>(prim, inputs, quantType); return NewPrimitiveC<ExpandDims>(prim, inputs, quantType);
} else if (op_type == "UnsortedSegmentSum") { } else if (op_type == "UnsortedSegmentSum") {
return NewPrimitiveC<UnsortedSegmentSum>(prim, inputs, quantType); return NewPrimitiveC<UnsortedSegmentSum>(prim, inputs, quantType);
} else if (op_type == "ResizeNearestNeighbor") {
return NewPrimitiveC<Resize>(prim, inputs, quantType);
} else if (op_type == "ResizeBilinear") {
return NewPrimitiveC<Resize>(prim, inputs, quantType);


#ifdef SUPPORT_TRAIN #ifdef SUPPORT_TRAIN
} else if (op_type == "SoftmaxCrossEntropyWithLogits") { } else if (op_type == "SoftmaxCrossEntropyWithLogits") {


+ 2
- 2
mindspore/lite/src/ops/reshape.cc View File

@@ -58,9 +58,9 @@ int Reshape::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
auto tuple = val->cast<ValueTuplePtr>(); auto tuple = val->cast<ValueTuplePtr>();
MS_ASSERT(tuple != nullptr); MS_ASSERT(tuple != nullptr);
for (size_t i = 0; i < tuple->size(); ++i) { for (size_t i = 0; i < tuple->size(); ++i) {
auto elem = tuple->value()[i]->cast<Int32ImmPtr>();
auto elem = tuple->value()[i];
MS_ASSERT(elem != nullptr); MS_ASSERT(elem != nullptr);
attr->shape.emplace_back(static_cast<int>(elem->value()));
attr->shape.emplace_back(CastToInt(elem, false).front());
} }
} else { } else {
int dim = CastToInt(val, false).front(); int dim = CastToInt(val, false).front();


+ 2
- 2
mindspore/lite/src/ops/transpose.cc View File

@@ -62,9 +62,9 @@ int Transpose::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
auto tuple = val->cast<ValueTuplePtr>(); auto tuple = val->cast<ValueTuplePtr>();
MS_ASSERT(tuple != nullptr); MS_ASSERT(tuple != nullptr);
for (size_t i = 0; i < tuple->size(); i++) { for (size_t i = 0; i < tuple->size(); i++) {
auto elem = tuple->value()[i]->cast<Int32ImmPtr>();
auto elem = tuple->value()[i];
MS_ASSERT(elem != nullptr); MS_ASSERT(elem != nullptr);
attr->perm.emplace_back(static_cast<int>(elem->value()));
attr->perm.emplace_back(CastToInt(elem, false).front());
} }
} }
} }


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/base/split_base.h View File

@@ -32,7 +32,12 @@ class SplitBaseCPUKernel : public LiteKernel {
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
param = reinterpret_cast<SplitParameter *>(op_parameter_); param = reinterpret_cast<SplitParameter *>(op_parameter_);
} }
~SplitBaseCPUKernel() = default;
~SplitBaseCPUKernel() override {
if (param != nullptr && param->split_sizes_ != nullptr) {
free(param->split_sizes_);
param->split_sizes_ = nullptr;
}
}


int Init() override; int Init() override;
int ReSize() override; int ReSize() override;


+ 1
- 0
mindspore/lite/test/models_onnx.cfg View File

@@ -31,3 +31,4 @@ mosaic-9.onnx
pointilism-9.onnx pointilism-9.onnx
rain-princess-9.onnx rain-princess-9.onnx
udnie-9.onnx udnie-9.onnx
adversarial_pruning.onnx

mindspore/lite/test/models_only_for_process.cfg → mindspore/lite/test/models_with_several_inputs_or_without_outputs.cfg View File


+ 89
- 17
mindspore/lite/test/run_benchmark_nets.sh View File

@@ -220,13 +220,13 @@ function Run_Converter() {
fi fi
done < ${models_mindspore_mixbit_config} done < ${models_mindspore_mixbit_config}


# Convert models which do not need to be cared about the accuracy:
# Convert models which have several inputs or whose accuracy does not need to be checked:
while read line; do while read line; do
model_name=${line}
model_type=${line##*.}
if [[ $model_name == \#* ]] || [[ $model_type == \#* ]]; then
if [[ $line == \#* ]]; then
continue continue
fi fi
model_name=${line%%;*}
model_type=${model_name##*.}
case $model_type in case $model_type in
tflite) tflite)
model_fmk="TFLITE" model_fmk="TFLITE"
@@ -462,17 +462,37 @@ function Run_x86() {
fi fi
done < ${models_mindspore_mixbit_config} done < ${models_mindspore_mixbit_config}


# Run converted models which do not need to be cared about the accuracy:
# Run converted models which have several inputs or whose accuracy does not need to be checked:
while read line; do while read line; do
model_name=${line}
if [[ ${line##*.} == "caffemodel" ]]; then
model_name=${line%.*}
if [[ $line == \#* ]]; then
continue
fi
model_name=${line%%;*}
model_name_len=${#model_name}
input_params=${line:model_name_len+1}
input_num=${input_params%%;*}
input_shape=${input_params##*;}
input_files=''
output_file=''
if [[ -z "$input_files" || $input_files == 1 ]] && [ -e ${ms_models_path}/${model_name}'.ms.bin' ]; then
input_files=$model_name'.ms.bin'
elif [[ ! -z "$input_files" && $input_files > 1 ]]; then
for i in $(seq 1 $input_num)
do
input_files=$input_files$model_name'.ms.bin_'$i','
done
fi
if [ -e ${ms_models_path}/${model_name}'.ms.out' ]; then
output_file=${ms_models_path}/${model_name}'.ms.out'
fi
if [[ ${model_name##*.} == "caffemodel" ]]; then
model_name=${model_name%.*}
fi fi
echo ${model_name} >> "${run_x86_log_file}" echo ${model_name} >> "${run_x86_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "{run_x86_log_file}" echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "{run_x86_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1 cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --loopCount=1 --warmUpLoopCount=0' >> "${run_x86_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --loopCount=1 --warmUpLoopCount=0 >> "${run_x86_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile='${input_files}' --benchmarkDataFile='${output_file}' --loopCount=1 --warmUpLoopCount=0' >> "${run_x86_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=${input_files} --benchmarkDataFile=${output_file} --loopCount=1 --warmUpLoopCount=0 >> "${run_x86_log_file}"
if [ $? = 0 ]; then if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else else
@@ -690,17 +710,34 @@ function Run_x86_sse() {
fi fi
done < ${models_mindspore_mixbit_config} done < ${models_mindspore_mixbit_config}


# Run converted models which do not need to be cared about the accuracy:
# Run converted models which have several inputs or whose accuracy does not need to be checked:
while read line; do while read line; do
model_name=${line}
if [[ ${line##*.} == "caffemodel" ]]; then
model_name=${line%.*}
model_name=${line%%;*}
model_name_len=${#model_name}
input_params=${line:model_name_len+1}
input_num=${input_params%%;*}
input_shape=${input_params##*;}
input_files=''
output_file=''
if [[ -z "$input_files" || $input_files == 1 ]] && [ -e ${ms_models_path}/${model_name}'.ms.bin' ]; then
input_files=$model_name'.ms.bin'
elif [[ ! -z "$input_files" && $input_files > 1 ]]; then
for i in $(seq 1 $input_num)
do
input_files=$input_files$model_name'.ms.bin_'$i','
done
fi
if [ -e ${ms_models_path}/${model_name}'.ms.out' ]; then
output_file=${ms_models_path}/${model_name}'.ms.out'
fi
if [[ ${model_name##*.} == "caffemodel" ]]; then
model_name=${model_name%.*}
fi fi
echo ${model_name} >> "${run_x86_sse_log_file}" echo ${model_name} >> "${run_x86_sse_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-sse-'${process_unit_x86} >> "{run_x86_sse_log_file}" echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-sse-'${process_unit_x86} >> "{run_x86_sse_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-sse-${process_unit_x86} || return 1 cd ${x86_path}/mindspore-lite-${version}-runtime-x86-sse-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --loopCount=1 --warmUpLoopCount=0' >> "${run_x86_sse_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --loopCount=1 --warmUpLoopCount=0 >> "${run_x86_sse_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile='${input_files}' --benchmarkDataFile='${output_file}' --loopCount=1 --warmUpLoopCount=0' >> "${run_x86_sse_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=${input_files} --benchmarkDataFile=${output_file} --loopCount=1 --warmUpLoopCount=0 >> "${run_x86_sse_log_file}"
if [ $? = 0 ]; then if [ $? = 0 ]; then
run_result='x86_sse: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} run_result='x86_sse: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else else
@@ -1083,6 +1120,41 @@ function Run_arm64() {
run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi fi
done < ${models_mindspore_weightquant_config} done < ${models_mindspore_weightquant_config}

# Run converted models which have several inputs or whose accuracy does not need to be checked:
while read line; do
model_name=${line%%;*}
model_name_len=${#model_name}
input_params=${line:model_name_len+1}
input_num=${input_params%%;*}
input_shape=${input_params##*;}
input_files=''
output_file=''
if [[ -z "$input_files" || $input_files == 1 ]] && [ -e ${ms_models_path}/${model_name}'.ms.bin' ]; then
input_files=$model_name'.ms.bin'
elif [[ ! -z "$input_files" && $input_files > 1 ]]; then
for i in $(seq 1 $input_num)
do
input_files=$input_files$model_name'.ms.bin_'$i','
done
fi
if [ -e ${ms_models_path}/${model_name}'.ms.out' ]; then
output_file=${ms_models_path}/${model_name}'.ms.out'
fi
if [[ ${model_name##*.} == "caffemodel" ]]; then
model_name=${model_name%.*}
fi
echo ${model_name} >> "${run_arm64_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile='${input_files}' --benchmarkDataFile='${output_file} >> "${run_arm64_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile='${input_files}' --benchmarkDataFile='${output_file} >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_only_for_process_config}
} }


# Run on arm32 platform: # Run on arm32 platform:
@@ -1229,7 +1301,7 @@ models_mindspore_weightquant_config=${basepath}/models_mindspore_weightquant.cfg
models_fp16_gpu_config=${basepath}/models_fp16_gpu.cfg models_fp16_gpu_config=${basepath}/models_fp16_gpu.cfg
models_arm32_config=${basepath}/models_arm32.cfg models_arm32_config=${basepath}/models_arm32.cfg
models_compatibility_config=${basepath}/models_compatibility.cfg models_compatibility_config=${basepath}/models_compatibility.cfg
models_only_for_process_config=${basepath}/models_only_for_process.cfg
models_only_for_process_config=${basepath}/models_with_several_inputs_or_without_outputs.cfg


ms_models_path=${basepath}/ms_models ms_models_path=${basepath}/ms_models




+ 5
- 6
mindspore/lite/tools/anf_exporter/anf_exporter.cc View File

@@ -313,10 +313,10 @@ int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, s
MS_LOG(ERROR) << "cast to ValueNode failed"; MS_LOG(ERROR) << "cast to ValueNode failed";
return RET_ERROR; return RET_ERROR;
} }
auto input_index_key =
get_item_input_cnode->fullname_with_scope() + "_o:" +
std::to_string(value_node->value()->type_name() == "Int64Imm" ? GetValue<int64_t>(value_node->value())
: GetValue<int>(value_node->value()));
auto input_index_key = get_item_input_cnode->fullname_with_scope() + "_o:" +
std::to_string(value_node->value()->type()->number_type() == kNumberTypeInt64
? GetValue<int64_t>(value_node->value())
: GetValue<int>(value_node->value()));
auto iter = node_id_map_.find(input_index_key); auto iter = node_id_map_.find(input_index_key);
if (iter == node_id_map_.end()) { if (iter == node_id_map_.end()) {
#ifdef SUPPORT_TRAIN #ifdef SUPPORT_TRAIN
@@ -415,8 +415,7 @@ int AnfExporter::ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
paramTensor->dataType = typePtr->type_id(); paramTensor->dataType = typePtr->type_id();
paramTensor->dims = {1}; paramTensor->dims = {1};
paramTensor->nodeType = schema::NodeType::NodeType_ValueNode; paramTensor->nodeType = schema::NodeType::NodeType_ValueNode;
auto data = value->cast<mindspore::Int32ImmPtr>();
int real_data = GetValue<int32_t>(data);
int real_data = CastToInt(value, false).front();
paramTensor->data.resize(sizeof(int32_t)); paramTensor->data.resize(sizeof(int32_t));
memcpy(paramTensor->data.data(), &real_data, sizeof(int32_t)); memcpy(paramTensor->data.data(), &real_data, sizeof(int32_t));
node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size(); node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();


+ 1
- 1
mindspore/lite/tools/common/graph_util.cc View File

@@ -669,7 +669,7 @@ STATUS ChangeOpAxis(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNo
MS_ASSERT(node->primitive->value != nullptr); MS_ASSERT(node->primitive->value != nullptr);
auto type = node->primitive->value.type; auto type = node->primitive->value.type;
auto input1_ndim = graph->allTensors.at(node->inputIndex[0])->dims.size(); auto input1_ndim = graph->allTensors.at(node->inputIndex[0])->dims.size();
if (input1_ndim != 4 && input1_ndim != 0) {
if (input1_ndim != 4) {
if (node->inputIndex.size() > 1) { if (node->inputIndex.size() > 1) {
auto input2_ndim = graph->allTensors.at(node->inputIndex[1])->dims.size(); auto input2_ndim = graph->allTensors.at(node->inputIndex[1])->dims.size();
if (input2_ndim != 4 && input2_ndim != 0) { if (input2_ndim != 4 && input2_ndim != 0) {


Loading…
Cancel
Save