From: @wangyanling10
Reviewed-by: @jpc_chenjianping, @zhang_xue_tong
Signed-off-by: @jpc_chenjianping, @zhang_xue_tong
pull/15398/MERGE
@@ -11,7 +11,7 @@ ml_hardware_eyeclose
 ml_ocr_detect_20200305
 Mnet6_0312_extract_pay
 pose_3d
-RFB-Epoch-170-no-transpose
+hiai_face_RFB-Epoch-170-no-transpose
 tracking
 mtk_isface
 mtk_landmark
@@ -53,8 +53,8 @@ hiai_face_recognition_1
 hiai_cpu_face_detect
 hiai_cpu_face_attr
 hiai_face_attr1
-detect-mbv1-shortcut-400-400_nopostprocess_simplified
-detect_mbv1_640_480_nopostprocess_simplified
+mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified
+mtk_detect_mbv1_640_480_nopostprocess_simplified
 retinaface
 deconv_test_model
 deconvs_model
@@ -11,14 +11,15 @@ ml_hardware_eyeclose 0.1
 ml_ocr_detect_20200305 10
 Mnet6_0312_extract_pay 15
 pose_3d 90
-RFB-Epoch-170-no-transpose 4
+hiai_face_RFB-Epoch-170-no-transpose 4
 tracking 4
 mtk_landmark 1
 mtk_pose_tuku 1
 mtk_face_recognition_v1 20
 mtk_2012_ATLANTA_10class_20190614_v41 4
 mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified 4
-detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified 1
+# mtk_detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified: precision is 5%
+detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified 5.5
 hiai_face_detect_rfb 4
 hiai_face_isface 0.1
 hiai_face_landmark 0.2
@@ -52,8 +53,9 @@ hiai_face_recognition_1 10
 hiai_cpu_face_detect 4
 hiai_cpu_face_attr 12
 hiai_face_attr1 12
-detect-mbv1-shortcut-400-400_nopostprocess_simplified 8
-detect_mbv1_640_480_nopostprocess_simplified 6
+# mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified: precision is 5%
+mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified 5.5
+mtk_detect_mbv1_640_480_nopostprocess_simplified 5
 retinaface 6
 deconv_test_model 20
 deconvs_model 1
@@ -85,6 +87,7 @@ ml_video_edit_reid 1
 ml_video_edit_v10_best_model_nomean_20200723 5
 ml_video_edit_img_segment 3
 ml_video_edit_video_segment_gauss_adaptis_part1 5
+# When the input range is [-1,1], the precision is poor because the output values are very small (10e-5). If the input range is adjusted to [0,255], the precision error decreases to 15.5415%, and the rest is cumulative error.
 ml_handpose 175
 hdc_Face_Aesthetic_MTI_Aesthetic 22
 ml_face_compare 5.5
@@ -96,10 +99,13 @@ ml_face_isface 0.5
 ml_face_glasses 2.5
 # ml_segmentation_matting 26 # output value unstable
 ml_segmentation_atlanta_10 5
+# ml_bodymask: the output difference is divided by very small reference values, which leads to a large error
 ml_bodymask 14
 ml_Hand_deploy 4
+# ml_hand_3d_detection: the output difference is divided by very small reference values, which leads to a large error
 ml_hand_3d_detection 12
 ml_hand_3d_regression 3
+# ml_ARengine23_bodypose: the output difference is divided by very small reference values, which leads to a large error
 ml_ARengine23_bodypose 56
 ml_ocr_bank_card_detection_inception_tmp 20
 ml_ocr_bank_card_recognition_fcny 0.5
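
Several of the comments above point at the same mechanism: the number after each model name appears to be an allowed error threshold in percent, and a relative-error style metric blows up whenever the reference outputs are tiny, because the output difference gets divided by near-zero values. A minimal sketch of that effect (an assumed metric for illustration, not the benchmark tool's actual implementation):

```python
import numpy as np

def mean_relative_error_percent(actual, expected):
    """Mean relative error in percent; an assumed stand-in for the
    benchmark's accuracy metric, used only to illustrate the comments above."""
    actual = np.asarray(actual, dtype=np.float32)
    expected = np.asarray(expected, dtype=np.float32)
    diff = np.abs(actual - expected)
    return float(np.mean(diff / np.abs(expected)) * 100.0)

# Reference outputs around 1e-5: a 1e-6 absolute drift already reads as ~8%.
expected = np.array([1e-5, 2e-5, -1e-5], dtype=np.float32)
print(mean_relative_error_percent(expected + 1e-6, expected))   # ~8.3

# The same absolute drift against outputs around 1.0 is negligible.
expected = np.array([1.0, 2.0, -1.0], dtype=np.float32)
print(mean_relative_error_percent(expected + 1e-6, expected))   # ~0.0001
```
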
@@ -114,6 +120,7 @@ ml_2012_ocr_detection_caffe_tmp 1
 ml_2012_ocr_rec_caffe 0.5
 ml_lable_model_hebing_device 2
 ml_face_sex 0.5
+# ml_face_mnet: precision loss caused by cumulative error
 ml_face_mnet 12
 ml_segmentation_atlanta_1 0.5
 bolt_deploy_color-server 0.5
@@ -24,4 +24,5 @@ quant_aware_identify_card_detect.onnx
 tiny-yolov3-11.onnx;2;1,416,416,3:1,2
 # cur acc for ml_video_edit_art_transfer is 2+%
 ml_video_edit_art_transfer.onnx;3
+# ml_table_detection.onnx: onnx quantized model
 ml_table_detection.onnx
@@ -2,26 +2,30 @@ ml_vision_guide_detection1.pb 0.5
 ml_vision_guide_detection3.pb 0.5
 ml_video_edit_generate_filter.pb 2
 ml_ocr_jk.pb 0.5
-ml_ocr_latin.pb 135
+# The accumulated error causes the threshold to be exceeded
+ml_ocr_latin.pb 12
 scan_hms_angle.pb 1.5
 scan_hms_detect.pb 2.5
 ml_face_openclose.pb;1,32,32,3 0.5
 ml_object_detect.pb;1,288,288,3 2
-# the inputs of two Q_crnn_screen_slim400w models are between 0-255, but their outputs have small values (e-7).
+# The inputs of the two Q_crnn_screen_slim400w models are between 0 and 255, but their outputs have small values (e-7).
 Q_crnn_screen_slim400w_more_20w.pb 72
 Q_inception-249970-672-11-16.pb 6.5
 hiai_ssd_mobilenetv2_object.pb 15
 hiai_humanDetection.pb 3.5
 hiai_PoseEstimation_Pcm.pb 0.5
+# The last layer has very small values, which leads to a large error
 hiai_cn_recognize_modify_padv2.pb;1,32,512,1 27
 hiai_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 17
-# the output of mtk_model_ckpt.pb has small value
+# The output of mtk_model_ckpt.pb has small values
 mtk_model_ckpt.pb 19
 mtk_age_gender.pb 0.5
+# The output difference is divided by values close to 0, which results in cumulative deviation
 mtk_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10
+# Cumulative error of the conv_batchnorm_fused op
 mtk_AADB_HADB_MBV2_model.pb;1,224,224,3 5.5
 mtk_AADB_HADB_MBV3_model.pb;1,224,224,3 4
-# the output of mtk_face_features_v1.pb has small value
+# The output of mtk_face_features_v1.pb has small values
 mtk_face_features_v1.pb 26
 model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10
 hiai_AADB_HADB_MBV2_model.pb;1,224,224,3 6
@@ -39,12 +43,14 @@ hiai_cpu_face_gazing.pb 0.5
 hiai_cpu_face_emotion.pb 2
 hiai_cv_poseEstimation.pb 103
 Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb 1.5
-# the input of Q_crnn_ori_75w_slim model is between 0-255, but its outputs has small values (e-6).
+# The input of the Q_crnn_ori_75w_slim model is between 0 and 255, but its outputs have small values (e-6).
 Q_crnn_ori_75w_slim_norm.pb 37
-# the output of Q_crnn_ori_v2 model has small values (e-4).
+# The output of the Q_crnn_ori_v2 model has small values (e-4).
 Q_crnn_ori_v2_405001_notrans_nopre.pb 24
-# the input of hiai_latin models are between 0-255
+# The inputs of the hiai_latin models are between 0 and 255
 hiai_latin_ocr.pb 4
 hiai_latin_ocr_1.pb 3.5
 hiai_cpu_face_headpose.pb 4
+# ml_noya_tts_melgan.pb: if the input range is adjusted to [-1,1], the fp16 error can be reduced to 38.9512%
+ml_noya_tts_melgan.pb;16,16,80 70
 bolt_segment.pb 2
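
The ml_noya_tts_melgan.pb note above (like the ml_handpose note in the earlier list) ties the fp16 error to the input range. A plausible explanation is float16's narrow dynamic range and 10-bit mantissa: inputs scaled to [0,255] push activations and accumulations toward magnitudes where the representable step is coarse and overflow becomes possible, whereas [-1,1] inputs stay in the fine-grained part of the format. The snippet below only illustrates that float16 behaviour with NumPy; it is not taken from the converter or the benchmark code:

```python
import numpy as np

# float16 tops out at 65504, so accumulations driven by [0,255] inputs
# can overflow where [-1,1] inputs would not.
print(np.float16(70000))                     # inf: beyond the float16 range

# With a 10-bit mantissa, resolution shrinks as magnitudes grow:
# the representable step is ~0.001 near 1.0 but 0.125 near 255.
print(np.float16(1.0) + np.float16(0.001))   # ~1.001, the increment survives
print(np.float16(255.0) + np.float16(0.05))  # 255.0, the increment is lost
```
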
@@ -31,3 +31,4 @@ add_uint8.tflite;2
 ml_Heatmap_depth_240180;2
 ml_Heatmap_depth_180240;2
 hiai_nlu_model.pb;3;1,16:1,16:1,16
+gts_object_detect_lcs.pb;1;420,630,3
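
Taken together, the entries in these files follow a small ad hoc format: semicolon-separated fields (model file, optionally an input count, then input shapes joined by ':' with ','-separated dimensions) and, in the accuracy lists, a whitespace-separated threshold at the end of the line. A rough parser sketch under those assumptions (field meanings are inferred from the examples above, not from the benchmark's actual config reader):

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class ModelEntry:
    name: str
    input_num: Optional[int] = None
    input_shapes: List[List[int]] = field(default_factory=list)
    threshold: Optional[float] = None     # allowed error in percent (assumed)

def parse_line(line: str) -> Optional[ModelEntry]:
    line = line.strip()
    if not line or line.startswith("#"):
        return None                       # skip blanks and comments
    spec, *rest = line.split()            # optional trailing threshold
    threshold = float(rest[0]) if rest else None
    fields = spec.split(";")
    name, input_num, shape_field = fields[0], None, None
    if len(fields) == 2:
        if "," in fields[1]:              # "ml_face_openclose.pb;1,32,32,3"
            shape_field = fields[1]
        else:                             # "ml_video_edit_art_transfer.onnx;3"
            input_num = int(fields[1])
    elif len(fields) >= 3:                # "tiny-yolov3-11.onnx;2;1,416,416,3:1,2"
        input_num = int(fields[1])
        shape_field = fields[2]
    shapes = []
    if shape_field:
        shapes = [[int(d) for d in s.split(",")] for s in shape_field.split(":")]
    return ModelEntry(name, input_num, shapes, threshold)

print(parse_line("tiny-yolov3-11.onnx;2;1,416,416,3:1,2"))
print(parse_line("hiai_cn_recognize_modify_padv2.pb;1,32,512,1 27"))
print(parse_line("gts_object_detect_lcs.pb;1;420,630,3"))
```
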