|
- hiai_model_0909_kd_rot_ps_softmax.tflite 10
- hiai_chinese_english_recognize_model_float32.tflite 13
- hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite 10
- hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite 10
- hiai_cn_recognize_modify_padv2.tflite 14
- hiai_model_normalize_object_scene_ps_20200519.tflite 20
- #hiai_detectmodel_06_23_960_480_1180700.tflite 20
- #hiai_detect_curve_model_float32.tflite 20
- #hiai_detectmodel_desnet_256_128_64_32.tflite 20
- mtk_AADB_HADB_MBV2_model_fp32.tflite 5
- mtk_AADB_HADB_MBV3_model_fp32.tflite 6
- mobilenet_v1_0.25_128.tflite 5
- mobilenet_v1_0.25_160.tflite 5
- mobilenet_v1_0.25_192.tflite 3
- mobilenet_v1_0.25_224.tflite 3
- mobilenet_v1_0.5_128.tflite 2.5
- mobilenet_v1_0.5_160.tflite 5
- mobilenet_v1_0.5_192.tflite 6
- mobilenet_v1_0.5_224.tflite 5
- mobilenet_v1_0.75_128.tflite 6
- mobilenet_v1_0.75_160.tflite 6
- mobilenet_v1_0.75_192.tflite 6
- mobilenet_v1_0.75_224.tflite 6
- mobilenet_v1_1.0_128.tflite 10
- mobilenet_v1_1.0_160.tflite 6
- mobilenet_v1_1.0_192.tflite 26
- mobilenet_v1_1.0_224.tflite 6
- mobilenet_v2_1.0_224.tflite 3
- mtk_model_normalize_object_scene_ps_20200519_f32.tflite 20
- mtk_model_ckpt.tflite 20
- mtk_age_gender.tflite 0.1
- mtk_model_face_dress.tflite 2
- mtk_face_features_v1.tflite 20
- densenet.tflite 36
- squeezenet.tflite 6
- resnet_v2_101_299.tflite 3
- mnasnet_1.3_224.tflite 12
- inception_v3.tflite 3
- deeplabv3_257_mv_gpu.tflite 3
- multi_person_mobilenet_v1_075_float.tflite 9
- #hiai_vad.tflite 20
- ide_label_base.tflite 22
- # ide_label_retrained.tflite involves a softmax-like structure whose output channel is 12.
- # The values in the first few channels are extremely small and are cast to 0 in the fp16 subgraph.
- # The next slice operator takes out data from the first 6 channels, which are all zero, causing a
- # division-by-zero error in the following operator.
- #ide_label_retrained.tflite
- ml_ei_headpose.tflite 3
- ml_ei_landmark.tflite 3
- mnist.tflite 4
- mobilenet.tflite 0.1
- resnet.tflite 120
- scan_hms_angle1.tflite 4
- scan_hms_detect.tflite 12
- hiai_latin_ocr.tflite 45
- hiai_latin_ocr_1.tflite 13
- ml_ocr_jk.tflite 2
- nasnet_mobile.tflite 3
- nasnet_large.tflite 3
- inception_resnet_v2.tflite 10
- ml_ocr_latin.tflite 15
- hiai_PoseEstimation_Pcm.tflite 15
- hiai_ssd_mobilenetv2_object.tflite 60
- hiai_cv_focusShootOCRModel_02.tflite 13
- hiai_cv_poseEstimation.tflite 190
- inception_v4.tflite 10
- mtk_model_normalize_object_scene_ps_20200519_f16.tflite 10
- mtk_model_face_dress_fp16.tflite 45
- mtk_AADB_HADB_MBV2_model_f16.tflite 5
- mtk_AADB_HADB_MBV3_model_f16.tflite 10
- mtk_model_emotions_0725_fp16.tflite 3
- mtk_face_features_v1_fp16.tflite 20
- siteAI_digcom_AI_ECN.tflite 0.1
- siteAI_digcom_g2v_keras.tflite 5
- siteAI_trans_nonlinear.tflite 0.1
- siteAI_trans_tcpclassify.tflite 5
- siteAI_wireless_depress_w.tflite 8
- siteAI_wireless_restore_w.tflite 0.1
- magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite 5
- ml_object_detect.tflite 3
- ml_object_detect_1.tflite 5
- hiai_cpu_face_emotion.tflite 6
- hiai_cpu_face_gazing.tflite 5
- hiai_cpu_face_headpose.tflite 100
- hiai_humanDetection.tflite 15
- #hiai_cv_focusShootOCRModel_08.tflite 0.3
- #ml_face_openclose.tflite 40
- hiai_face_model_npu.tflite 5
- hiai_ctpn_feature_map.tflite 10
- hiai_cv_labelDetectorModel_v2.tflite 30
- #hiai_cv_labelDetectorModel_v3.tflite 20
- hiai_cv_labelDetectorModel_v4.tflite 3
- hiai_dress_detect.tflite 3
- hiai_cv_saliencyDetectorModel.tflite 3
- hiai_frozen_inference_graph.tflite 5
- hiai_ghostnet.tflite 3
- #hiai_iMaxDN_RGB.tflite 0.4
- #hiai_iMaxSR_RGB.tflite 1
- hiai_label_and_video.tflite 10
- hiai_lm_inference_graph.tflite 3
- efficientnet_lite0_fp32_2.tflite 6
- efficientnet_lite1_fp32_2.tflite 6
- efficientnet_lite2_fp32_2.tflite 3
- efficientnet_lite3_fp32_2.tflite 6
- efficientnet_lite4_fp32_2.tflite 6
- mnasnet_0.50_224_1_metadata_1.tflite 5
- mnasnet_0.75_224_1_metadata_1.tflite 6
- mnasnet_1.0_128_1_metadata_1.tflite 6
- mnasnet_1.0_160_1_metadata_1.tflite 6
- mnasnet_1.0_192_1_metadata_1.tflite 8
- mnasnet_1.0_224_1_metadata_1.tflite 6
- mnasnet_1.0_96_1_metadata_1.tflite 6
- # ml_vision_guide_detection1.tflite 20
- # ml_vision_guide_detection3.tflite 20
- lite-model_on_device_vision_classifier_popular_us_products_V1_1.tflite 16
- lite-model_on_device_vision_classifier_popular_wine_V1_1.tflite 80
- posenet_mobilenet_float_075_1_default_1.tflite 45
- deeplabv3_1_default_1.tflite 6
- lite-model_deeplabv3-mobilenetv2_dm05-float16_1_default_1.tflite 13
- lite-model_deeplabv3-mobilenetv2-float16_1_default_1.tflite 60
- lite-model_east-text-detector_fp16_1.tflite 60
- lite-model_cartoongan_fp16_1.tflite 3
- lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite 6
- gts_detect_5k_tf115.tflite 6
- mtk_isface.tflite 0.2
- mtk_landmark.tflite 0.1
- mtk_new_detect.tflite 3
- mtk_pose.tflite 2
- mtk_model_emotions_0727_nosoftmax.tflite 2
- mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 22
- mtk_276landmark_0913.tflite 16
- mtk_face_recognition.tflite 8
- mtk_convert_model.tflite 5
- smartreply.tflite 0.1
- mindspore_text_classification_tflite.tflite 4
- #ml_location.tflite 0.1
- ml_text_correction.tflite 1
- # ml_pic_shopping.tflite involves subtracting two numbers that are close in value.
- # In the fp16 case, such a subtraction causes a large relative error compared to fp32.
- # e.g. fp32: 27.5 - 27.4 = 0.1
- # fp16: 27.6 - 27.4 = 0.2
- #ml_pic_shopping.tflite 0.1
|