- # [first column]: model_name;input_num;input_shape;threads;extra_info. If these parameters do not need to be
- # set, the content after ";" can be omitted.
- # [second column]: accuracy limit for float16 on arm64 devices.
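- # For example, "ml_headpose_pb2tflite.tflite;3;1,64,64,3:16:16 1" below describes a model with 3 inputs of
- # shapes 1,64,64,3 and 16 and 16 (multiple input shapes are separated by ":"), with an accuracy limit of 1.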
- hiai_model_0909_kd_rot_ps_softmax.tflite 10
- hiai_chinese_english_recognize_model_float32.tflite 13
- hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite 10
- hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite 10
- hiai_cn_recognize_modify_padv2.tflite 14
- hiai_model_normalize_object_scene_ps_20200519.tflite 20
- #hiai_detectmodel_06_23_960_480_1180700.tflite 20
- #hiai_detect_curve_model_float32.tflite 20
- #hiai_detectmodel_desnet_256_128_64_32.tflite 20
- mtk_AADB_HADB_MBV2_model_fp32.tflite 2
- mtk_AADB_HADB_MBV3_model_fp32.tflite 6
- mobilenet_v1_0.25_128.tflite 5
- mobilenet_v1_0.25_160.tflite 5
- mobilenet_v1_0.25_192.tflite 3
- mobilenet_v1_0.25_224.tflite 3
- mobilenet_v1_0.5_128.tflite 2.5
- mobilenet_v1_0.5_160.tflite 5
- mobilenet_v1_0.5_192.tflite 6
- mobilenet_v1_0.5_224.tflite 5
- mobilenet_v1_0.75_128.tflite 6
- mobilenet_v1_0.75_160.tflite 6
- mobilenet_v1_0.75_192.tflite 6
- mobilenet_v1_0.75_224.tflite 6
- mobilenet_v1_1.0_128.tflite 10
- mobilenet_v1_1.0_160.tflite 6
- mobilenet_v1_1.0_192.tflite 26
- mobilenet_v1_1.0_224.tflite 6
- mobilenet_v2_1.0_224.tflite 3
- mtk_model_normalize_object_scene_ps_20200519_f32.tflite 20
- mtk_model_ckpt.tflite 20
- mtk_age_gender.tflite 0.1
- mtk_model_face_dress.tflite 2
- mtk_face_features_v1.tflite 21
- densenet.tflite 36
- squeezenet.tflite 6
- resnet_v2_101_299.tflite 3
- mnasnet_1.3_224.tflite 12
- inception_v3.tflite 3
- deeplabv3_257_mv_gpu.tflite 3
- multi_person_mobilenet_v1_075_float.tflite 9
- #hiai_vad.tflite
- ide_label_base.tflite 22
- # ide_label_retrained.tflite involves a softmax-like structure whose output has 12 channels.
- # The values in the first few channels are extremely small and are cast to 0 in the fp16 subgraph.
- # The following slice operator takes the data from the first 6 channels, which are all zero, causing a
- # divide-by-zero error in the subsequent operator.
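- # (Positive values below fp16's smallest subnormal, about 6e-8, become 0 when cast from fp32.)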
- #ide_label_retrained.tflite
- ml_ei_headpose.tflite 3
- ml_ei_landmark.tflite 3
- mnist.tflite 4
- mobilenet.tflite 0.1
- resnet.tflite 120
- scan_hms_angle1.tflite 6
- scan_hms_detect.tflite 12
- hiai_latin_ocr.tflite 45
- hiai_latin_ocr_1.tflite 14.5
- ml_ocr_jk.tflite 2
- nasnet_mobile.tflite 3
- nasnet_large.tflite 3
- inception_resnet_v2.tflite 10
- ml_ocr_latin.tflite 15
- hiai_PoseEstimation_Pcm.tflite 15
- hiai_ssd_mobilenetv2_object.tflite 60
- hiai_cv_focusShootOCRModel_02.tflite 13
- hiai_cv_poseEstimation.tflite 190
- inception_v4.tflite 10
- mtk_model_normalize_object_scene_ps_20200519_f16.tflite 10
- mtk_model_face_dress_fp16.tflite 45
- mtk_AADB_HADB_MBV2_model_f16.tflite 5
- mtk_AADB_HADB_MBV3_model_f16.tflite 10
- mtk_model_emotions_0725_fp16.tflite 3
- mtk_face_features_v1_fp16.tflite 20
- siteAI_digcom_AI_ECN.tflite 0.1
- siteAI_digcom_g2v_keras.tflite 5
- siteAI_trans_nonlinear.tflite 0.2
- siteAI_trans_tcpclassify.tflite 5.3
- siteAI_wireless_depress_w.tflite 8
- siteAI_wireless_restore_w.tflite 0.1
- magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite 5
- ml_object_detect.tflite 3
- ml_object_detect_1.tflite 5
- hiai_cpu_face_emotion.tflite 6
- hiai_cpu_face_gazing.tflite 5
- hiai_cpu_face_headpose.tflite 100
- hiai_humanDetection.tflite 15
- #hiai_cv_focusShootOCRModel_08.tflite 0.3
- #ml_face_openclose.tflite 40
- hiai_face_model_npu.tflite 5
- hiai_ctpn_feature_map.tflite 10
- hiai_cv_labelDetectorModel_v2.tflite 30
- hiai_cv_labelDetectorModel_v4.tflite 3
- hiai_dress_detect.tflite 3
- hiai_cv_saliencyDetectorModel.tflite 3
- hiai_frozen_inference_graph.tflite 5
- hiai_ghostnet.tflite 3
- #hiai_iMaxDN_RGB.tflite 0.4
- #hiai_iMaxSR_RGB.tflite 1
- hiai_label_and_video.tflite 10
- hiai_lm_inference_graph.tflite 3
- efficientnet_lite0_fp32_2.tflite 6
- efficientnet_lite1_fp32_2.tflite 6
- efficientnet_lite2_fp32_2.tflite 3
- efficientnet_lite3_fp32_2.tflite 6
- efficientnet_lite4_fp32_2.tflite 6
- mnasnet_0.50_224_1_metadata_1.tflite 5
- mnasnet_0.75_224_1_metadata_1.tflite 6
- mnasnet_1.0_128_1_metadata_1.tflite 6
- mnasnet_1.0_160_1_metadata_1.tflite 6
- mnasnet_1.0_192_1_metadata_1.tflite 8
- mnasnet_1.0_224_1_metadata_1.tflite 6
- mnasnet_1.0_96_1_metadata_1.tflite 6
- lite-model_on_device_vision_classifier_popular_us_products_V1_1.tflite 16
- lite-model_on_device_vision_classifier_popular_wine_V1_1.tflite 80
- posenet_mobilenet_float_075_1_default_1.tflite 49
- deeplabv3_1_default_1.tflite 6
- lite-model_deeplabv3-mobilenetv2_dm05-float16_1_default_1.tflite 13
- lite-model_deeplabv3-mobilenetv2-float16_1_default_1.tflite 60
- lite-model_east-text-detector_fp16_1.tflite 60
- lite-model_cartoongan_fp16_1.tflite 3
- lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite 6
- gts_detect_5k_tf115.tflite 9.5
- mtk_isface.tflite 0.2
- mtk_landmark.tflite 0.3
- mtk_new_detect.tflite 3
- mtk_pose.tflite 2
- mtk_model_emotions_0727_nosoftmax.tflite 2
- mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 22
- mtk_276landmark_0913.tflite 16
- mtk_face_recognition.tflite 8
- mtk_convert_model.tflite 5.3
- smartreply.tflite 0.1
- mindspore_text_classification_tflite.tflite 9.2 # small output values cause a large bias
- #ml_location.tflite 0.1
- ml_text_correction.tflite 1
- # ml_pic_shopping.tflite involves subtracting two close numbers.
- # In the fp16 case, such a subtraction causes a large relative error compared with fp32,
- # e.g. fp32: 27.5 - 27.4 = 0.1
- #      fp16: 27.6 - 27.4 = 0.2
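- # (This is catastrophic cancellation: each operand is off by at most ~0.1, but the relative error of the
- # difference reaches 100%.)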
- #ml_pic_shopping.tflite 0.1
- ml_vision_guide_detection3_pb2tflite.tflite 0.5
- ml_vision_guide_detection1_pb2tflite.tflite 0.5
- ml_pic_shopping_pb2tflite.tflite 95
- ml_ocr_jk_pb2tflite.tflite 0.5
- ml_ocr_latin_pb2tflite.tflite 11.5
- scan_hms_angle_pb2tflite.tflite 2.5
- scan_hms_detect_pb2tflite.tflite 1.5
- ml_location.tflite 0.6
- ml_face_openclose_tflite.tflite 0.5
- ml_object_detect_pb2tflite.tflite 1.5
- # The bias of the lite-model_on_device_vision_classifier_landmarks_classifier* models is caused by error
- # accumulation and small output values.
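- # (When the reference output is close to 0, even a tiny absolute fp16 error yields a large relative bias.)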
- lite-model_on_device_vision_classifier_landmarks_classifier_africa_V1_1.tflite 10
- lite-model_on_device_vision_classifier_landmarks_classifier_north_america_V1_1.tflite 19
- lite-model_on_device_vision_classifier_landmarks_classifier_asia_V1_1.tflite 25
- lite-model_on_device_vision_classifier_landmarks_classifier_oceania_antarctica_V1_1.tflite 11
- lite-model_on_device_vision_classifier_landmarks_classifier_europe_V1_1.tflite 32
- lite-model_on_device_vision_classifier_landmarks_classifier_south_america_V1_1.tflite 14
- ml_ei_landmark_pb2tflite.tflite 2
- unet_mbv2_05_104pts.tflite 17
- hiai_AADB_HADB_MBV2_model_f16.tflite 3.5
- hiai_AADB_HADB_MBV2_model_fp32.tflite 4.5
- mtk_age_gender_fp16.tflite 26
- hiai_detect_curve_model_float32.tflite 9
- Q_language_model_hrmini_Q4_b4_17w.tflite 3.5
- lite-model_aiy_vision_classifier_food_V1_1.tflite 47.5
- lite-model_disease-classification_1.tflite 70
- lite-model_models_mushroom-identification_v1_1.tflite 5
- smartreply_1_default_1.tflite 0.5
- text_classification.tflite 0.5
- Q_AADB_HADB_MBV2_model.tflite 5
- # the input of the Q_convert model is between 0 and 255
- Q_convert.tflite 12
- # the input of the Q_crnn_ori_75w_slim model is between 0 and 255, but its outputs have small values (~1e-6).
- Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 29
- # the output of the Q_crnn_ori_v2 model has small values (~1e-4).
- Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 36
- # the input of the Q_crnn_screen_slim400w model is between 0 and 255, but its outputs have small values (~1e-7).
- Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 71
- Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite 1.5
- # the inputs of the two Q_focusocr models are between 0 and 255, but their outputs have small values (~1e-6).
- Q_focusocr_cn_recog.tflite 30
- Q_focusocr_jk_recog.tflite 25
- Q_inception-249970-672-11-16_pb2tflite.tflite 6
- # the input of the Q_object_scene model is between 0 and 255
- Q_object_scene.tflite 3
- Q_detect_fpn_add_inception-1448650.tflite 1
- bloom_landmark.tflite 0.5
- # input data: 0~255
- Q888_age_gender_orderd.tflite 1.5
- Q888_face_dress_mv3y.tflite 0.5
- Q888_HADB_AADB_MBV2_model_fp32.tflite 2.5
- Q888_landmark.tflite 0.5
- Q888_pose.tflite 6.1
- # the output contains values less than 1e-7
- Q888_lapa158_unet_0924.tflite 20
- Q888_isface.tflite 1.0
- Q888_new_detect.tflite 1.5
- Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 2
- # input data: -1~1
- Q888_face_emo_dress_mv3_orderd.tflite 2.5
- Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite 1
- Q_iMaxSR_RGB_385_p_pb2tflite.tflite 5
- bloom_new_detect.tflite 3.5
- bloom_model_age_gender.tflite 0.5
- bloom_isface.tflite 0.5
- # The output values of the conv layers range from about -1e5 to 1e5, with magnitudes as small as 1e-5,
- # which almost reaches the representation limits of fp16. In this range the fp16 data has a large bias,
- # and the accumulation of this bias lowers the final precision.
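- # (fp16 normal values cover roughly 6.1e-5 to 65504, so both ends of this range lose precision.)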
- hiai_object_detect_814.tflite 14
- ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 12.1
- ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
- hdc_tb_cn_neg.tflite;3 295
- # The input of hiai_cv_labelDetectorModel_v3.tflite is between 0 and 255.
- hiai_cv_labelDetectorModel_v3.tflite;2 2
- ml_headpose_pb2tflite.tflite;3;1,64,64,3:16:16 1
- ml_ei_headpose_pb2tflite.tflite;3;1,64,64,3:16:16 0.6
- coco_ssd_mobilenet_v1_1.0.tflite
- hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache_fp32.tflite;2
- hiai_asr_ctc.tflite;2