modified: model_zoo/official/cv/deeplabv3/README.md
modified: model_zoo/official/cv/deeplabv3/README_CN.md
@@ -482,6 +482,8 @@ Note: Here OS is output stride and MS is multiscale.
## [Export MindIR](#contents)
Currently, batch size can only be set to 1.
```shell
python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
```
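For reference, a hypothetical invocation (the checkpoint path and output name below are placeholders, not files from this repository) might look like:

```shell
# Export a trained checkpoint to MindIR for Ascend 310 inference; adjust the paths to your own files.
python export.py --ckpt_file ./deeplab_v3_s16-800_82.ckpt --file_name deeplabv3 --file_format MINDIR
```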
@@ -508,7 +508,6 @@ python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [
### Usage
Before running inference, the AIR file must be exported with the export.py script on an Ascend 910 environment.
Currently only a batch_size of 1 can be processed.
```shell
@@ -1,14 +1,14 @@
cmake_minimum_required(VERSION 3.14.1)
project(MindSporeCxxTestcase[CXX])
project(Ascend310Infer)
add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
option(MINDSPORE_PATH "mindspore install path" "")
include_directories(${MINDSPORE_PATH})
include_directories(${MINDSPORE_PATH}/include)
include_directories(${PROJECT_SRC_ROOT}/../inc)
include_directories(${PROJECT_SRC_ROOT})
find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
add_executable(main main.cc utils.cc)
add_executable(main src/main.cc src/utils.cc)
target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
@@ -13,6 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
if [ ! -d out ]; then
mkdir out
fi
cmake . -DMINDSPORE_PATH="`pip3.7 show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
make
cd out
if [ -f "Makefile" ]; then
make clean
fi
cmake .. \
-DMINDSPORE_PATH="`pip3.7 show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
make
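A minimal sketch of how the updated build script might be used (the directory name is assumed from the surrounding hunks, not prescribed by the patch itself):

```shell
# Build the Ascend 310 inference executable; the binary is produced at ./out/main.
cd ascend310_infer
bash build.sh
ls out/main
```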
@@ -0,0 +1 @@
ConvBatchnormFusionPass:off
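This new one-line fusion_switch.cfg disables the Conv+BatchNorm fusion pass during graph compilation; it is handed to the inference binary through the --fusion_switch_path flag introduced in the next hunk. A hedged way to (re)create the file, assuming it lives next to the build script:

```shell
# Content copied from the hunk above; the target path is an assumption.
cat > ascend310_infer/fusion_switch.cfg <<'EOF'
ConvBatchnormFusionPass:off
EOF
```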
@@ -52,6 +52,7 @@ using mindspore::dataset::vision::Decode;
DEFINE_string(mindir_path, "", "mindir path");
DEFINE_string(dataset_path, ".", "dataset path");
DEFINE_string(fusion_switch_path, ".", "fusion switch path");
DEFINE_int32(device_id, 0, "device id");
int PadImage(const MSTensor &input, MSTensor *output) {
@@ -122,11 +123,17 @@ int main(int argc, char **argv) {
std::cout << "Invalid mindir" << std::endl;
return 1;
}
if (RealPath(FLAGS_fusion_switch_path).empty()) {
std::cout << "Invalid fusion switch path" << std::endl;
return 1;
}
GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
GlobalContext::SetGlobalDeviceID(FLAGS_device_id);
auto graph = Serialization::LoadModel(FLAGS_mindir_path, ModelType::kMindIR);
auto model_context = std::make_shared<mindspore::Context>();
if (!FLAGS_fusion_switch_path.empty()) {
ModelContext::SetFusionSwitchConfigPath(model_context, FLAGS_fusion_switch_path);
}
Model model(GraphCell(graph), model_context);
Status ret = model.Build();
if (ret != kSuccess) {
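With the new flag wired through, a run of the compiled deeplabv3 inference binary might look like the following (all paths here are illustrative placeholders):

```shell
# Mirrors the call made by run_infer_310.sh later in this patch.
./out/main --mindir_path=./deeplabv3.mindir \
           --dataset_path=./VOCdevkit/VOC2012/JPEGImages \
           --device_id=0 \
           --fusion_switch_path=./fusion_switch.cfg &> infer.log
```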
@@ -75,6 +75,8 @@ def eval_batch_scales(args, eval_net, img_lst, scales,
def acc_cal():
args = parse_args()
args.image_mean = [103.53, 116.28, 123.675]
args.image_std = [57.375, 57.120, 58.395]
# data list
with open(args.data_lst) as f:
img_lst = f.readlines()
@@ -60,10 +60,7 @@ fi
function compile_app()
{
cd ../ascend310_infer/src
if [ -f "Makefile" ]; then
make clean
fi
cd ../ascend310_infer
bash build.sh &> build.log
}
@@ -78,7 +75,7 @@ function infer()
fi
mkdir result_Files
mkdir time_Result
../ascend310_infer/src/main --mindir_path=$model --dataset_path=$data_path --device_id=$device_id &> infer.log
../ascend310_infer/out/main --mindir_path=$model --dataset_path=$data_path --device_id=$device_id --fusion_switch_path=../ascend310_infer/fusion_switch.cfg &> infer.log
}
function cal_acc()
@@ -410,10 +410,11 @@ Currently batch_size can only be set to 1. The precision calculation process needs
```shell
# Ascend310 inference
bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DEVICE_ID]
bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID]
```
`DEVICE_ID` is optional, default value is 0.
- `DVPP` is mandatory and must be chosen from ["DVPP", "CPU"]; it is case-insensitive. Note that the input image shape for ssd_vgg16 inference is [300, 300], while the DVPP hardware requires the width to be 16-aligned and the height to be even-aligned; therefore, this network needs to use the CPU operators to process images.
- `DEVICE_ID` is optional, default value is 0.
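An illustrative invocation with placeholder paths (the MindIR name and dataset location are examples only):

```shell
# DVPP decoding on device 0; pass CPU instead of DVPP to use the host-side preprocessing path.
bash run_infer_310.sh ./ssd_vgg16.mindir ./coco2017/val2017 DVPP 0
```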
### result
@@ -29,7 +29,7 @@
#include "include/api/serialization.h"
#include "include/minddata/dataset/include/vision_ascend.h"
#include "include/minddata/dataset/include/execute.h"
#include "include/minddata/dataset/include/vision.h"
#include "inc/utils.h"
using mindspore::GlobalContext;
@@ -42,13 +42,20 @@ using mindspore::GraphCell;
using mindspore::kSuccess;
using mindspore::MSTensor;
using mindspore::dataset::Execute;
using mindspore::dataset::TensorTransform;
using mindspore::dataset::vision::DvppDecodeResizeJpeg;
using mindspore::dataset::vision::Resize;
using mindspore::dataset::vision::HWC2CHW;
using mindspore::dataset::vision::Normalize;
using mindspore::dataset::vision::Decode;
DEFINE_string(mindir_path, "", "mindir path");
DEFINE_string(dataset_path, ".", "dataset path");
DEFINE_int32(device_id, 0, "device id");
DEFINE_string(aipp_path, "./aipp.cfg", "aipp path");
DEFINE_string(cpu_dvpp, "DVPP", "cpu or dvpp process");
DEFINE_int32(image_height, 640, "image height");
DEFINE_int32(image_width, 640, "image width");
int main(int argc, char **argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
@@ -56,17 +63,18 @@ int main(int argc, char **argv) {
std::cout << "Invalid mindir" << std::endl;
return 1;
}
if (RealPath(FLAGS_aipp_path).empty()) {
std::cout << "Invalid aipp path" << std::endl;
return 1;
}
GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
GlobalContext::SetGlobalDeviceID(FLAGS_device_id);
auto graph = Serialization::LoadModel(FLAGS_mindir_path, ModelType::kMindIR);
auto model_context = std::make_shared<mindspore::ModelContext>();
if (!FLAGS_aipp_path.empty()) {
ModelContext::SetInsertOpConfigPath(model_context, FLAGS_aipp_path);
if (FLAGS_cpu_dvpp == "DVPP") {
if (RealPath(FLAGS_aipp_path).empty()) {
std::cout << "Invalid aipp path" << std::endl;
return 1;
} else {
ModelContext::SetInsertOpConfigPath(model_context, FLAGS_aipp_path);
}
}
Model model(GraphCell(graph), model_context);
@@ -84,7 +92,7 @@ int main(int argc, char **argv) {
std::map<double, double> costTime_map;
size_t size = all_files.size();
Execute resize_op(std::shared_ptr<DvppDecodeResizeJpeg>(new DvppDecodeResizeJpeg({640, 640})));
for (size_t i = 0; i < size; ++i) {
struct timeval start = {0};
struct timeval end = {0};
@@ -93,11 +101,33 @@ int main(int argc, char **argv) {
std::vector<MSTensor> inputs;
std::vector<MSTensor> outputs;
std::cout << "Start predict input files:" << all_files[i] << std::endl;
auto imgDvpp = std::make_shared<MSTensor>();
resize_op(ReadFileToTensor(all_files[i]), imgDvpp.get());
inputs.emplace_back(imgDvpp->Name(), imgDvpp->DataType(), imgDvpp->Shape(),
if (FLAGS_cpu_dvpp == "DVPP") {
auto resizeShape = {static_cast <uint32_t>(FLAGS_image_height), static_cast <uint32_t>(FLAGS_image_width)};
Execute resize_op(std::shared_ptr<DvppDecodeResizeJpeg>(new DvppDecodeResizeJpeg(resizeShape)));
auto imgDvpp = std::make_shared<MSTensor>();
resize_op(ReadFileToTensor(all_files[i]), imgDvpp.get());
inputs.emplace_back(imgDvpp->Name(), imgDvpp->DataType(), imgDvpp->Shape(),
imgDvpp->Data().get(), imgDvpp->DataSize());
} else {
std::shared_ptr<TensorTransform> decode(new Decode());
std::shared_ptr<TensorTransform> hwc2chw(new HWC2CHW());
std::shared_ptr<TensorTransform> normalize(
new Normalize({123.675, 116.28, 103.53}, {58.395, 57.120, 57.375}));
auto resizeShape = {FLAGS_image_height, FLAGS_image_width};
std::shared_ptr<TensorTransform> resize(new Resize(resizeShape));
Execute composeDecode({decode, resize, normalize, hwc2chw});
auto img = MSTensor();
auto image = ReadFileToTensor(all_files[i]);
composeDecode(image, &img);
std::vector<MSTensor> model_inputs = model.GetInputs();
if (model_inputs.empty()) {
std::cout << "Invalid model, inputs is empty." << std::endl;
return 1;
}
inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
img.Data().get(), img.DataSize());
}
gettimeofday(&start, nullptr);
ret = model.Predict(inputs, &outputs);
gettimeofday(&end, nullptr);
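Taken together, the preprocessing path is selected purely by the new --cpu_dvpp flag. A hedged sketch of the two resulting invocations of the compiled binary (all file paths are placeholders):

```shell
# DVPP: JPEG decode and resize run on the Ascend DVPP hardware; an AIPP config is required.
./out/main --mindir_path=./ssd.mindir --dataset_path=./coco2017/val2017 \
           --cpu_dvpp=DVPP --aipp_path=./aipp.cfg --image_height=640 --image_width=640

# CPU: Decode/Resize/Normalize/HWC2CHW run on the host; no AIPP config is needed.
./out/main --mindir_path=./ssd.mindir --dataset_path=./coco2017/val2017 \
           --cpu_dvpp=CPU --image_height=300 --image_width=300
```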
@@ -28,7 +28,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--file_name", type=str, default="ssd", help="output file name.")
parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format')
parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format')
parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
help="device target")
args = parser.parse_args()
@@ -22,7 +22,7 @@ from src.config import config
from src.eval_utils import metrics
batch_size = 1
parser = argparse.ArgumentParser(description="ssd_mobilenet_v1_fpn inference")
parser = argparse.ArgumentParser(description="ssd acc calculation")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--img_path", type=str, required=True, help="image file path.")
parser.add_argument("--drop", action="store_true", help="drop iscrowd images or not.")
@@ -73,9 +73,8 @@ def get_result(result_path, img_id_file_path):
image_shape = np.array([img_size[1], img_size[0]])
result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")
boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(51150, 4)
box_scores = np.fromfile(result_path_1, dtype=np.float32).reshape(51150, 81)
boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(config.num_ssd_boxes, 4)
box_scores = np.fromfile(result_path_1, dtype=np.float32).reshape(config.num_ssd_boxes, config.num_classes)
pred_data.append({
"boxes": boxes,
@@ -14,8 +14,9 @@
# limitations under the License.
# ============================================================================
if [[ $# -lt 2 || $# -gt 3 ]]; then
echo "Usage: sh run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DEVICE_ID]
if [[ $# -lt 3 || $# -gt 4 ]]; then
echo "Usage: sh run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID]
DVPP is mandatory and must be chosen from [DVPP|CPU]; it is case-insensitive
DEVICE_ID is optional, it can be set by the environment variable device_id, otherwise the value is zero"
exit 1
fi
@@ -29,14 +30,16 @@ get_real_path(){
}
model=$(get_real_path $1)
data_path=$(get_real_path $2)
DVPP=${3^^}
device_id=0
if [ $# == 3 ]; then
device_id=$3
if [ $# == 4 ]; then
device_id=$4
fi
echo "mindir name: "$model
echo "dataset path: "$data_path
echo "image process mode: "$DVPP
echo "device id: "$device_id
export ASCEND_HOME=/usr/local/Ascend/
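The `${3^^}` expansion above uppercases the third positional argument (a bash 4+ feature), which is what makes the DVPP/CPU choice case-insensitive. A small stand-alone illustration (the argument values are hypothetical):

```shell
set -- model.mindir /data/coco dvpp   # fake positional arguments for demonstration
DVPP=${3^^}
echo "$DVPP"                          # prints: DVPP
```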
@@ -56,10 +59,7 @@ fi
function compile_app()
{
cd ../ascend310_infer
if [ -f "Makefile" ]; then
make clean
fi
sh build.sh &> build.log
sh build.sh &> build.log
}
function infer()
@@ -73,7 +73,14 @@ function infer()
fi
mkdir result_Files
mkdir time_Result
../ascend310_infer/out/main --mindir_path=$model --dataset_path=$data_path --device_id=$device_id --aipp_path ../src/aipp.cfg &> infer.log
if [ "$DVPP" == "DVPP" ];then
../ascend310_infer/out/main --mindir_path=$model --dataset_path=$data_path --device_id=$device_id --cpu_dvpp=$DVPP --aipp_path=../ascend310_infer/aipp.cfg --image_height=640 --image_width=640 &> infer.log
elif [ "$DVPP" == "CPU" ]; then
../ascend310_infer/out/main --mindir_path=$model --dataset_path=$data_path --cpu_dvpp=$DVPP --device_id=$device_id --image_height=300 --image_width=300 &> infer.log
else
echo "image process mode must be in [DVPP|CPU]"
exit 1
fi
}
function cal_acc()