You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

models_tf_fp16.cfg 4.3 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394
  1. # [first column]:model_name;input_num;input_shape;threads;extra_info. If there is no need to set these parameters, the
  2. # content after ";" can be omitted.
  3. # [second column]:accuracy limit for float16 in arm64 device
  4. ml_vision_guide_detection1.pb 0.5
  5. ml_vision_guide_detection3.pb 0.5
  6. ml_video_edit_generate_filter.pb 2
  7. ml_ocr_jk.pb 0.8
  8. # The accumulated error causes the threshold to be exceeded
  9. ml_ocr_latin.pb 12
  10. scan_hms_angle.pb 7
  11. scan_hms_detect.pb 2.5
  12. ml_face_openclose.pb;1;1,32,32,3 0.5
  13. ml_object_detect.pb;1;1,288,288,3 2
  14. # The inputs of two Q_crnn_screen_slim400w models are between 0-255, but their outputs have small values (e-7).
  15. Q_crnn_screen_slim400w_more_20w.pb 72
  16. Q_inception-249970-672-11-16.pb 6.5
  17. hiai_ssd_mobilenetv2_object.pb 15
  18. hiai_humanDetection.pb 3.5
  19. hiai_PoseEstimation_Pcm.pb 0.5
  20. # The last layer has a very small value, which leads to a large error
  21. hiai_cn_recognize_modify_padv2.pb;1;1,32,512,1 27
  22. hiai_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 17.1
  23. # The output of mtk_model_ckpt.pb has small value
  24. mtk_model_ckpt.pb 19.5
  25. mtk_age_gender.pb 0.5
  26. # The difference of the output node divided by 0 results in a cumulative deviation
  27. mtk_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 10
  28. # Accumulative error of conv_batchnorm_fused op
  29. mtk_AADB_HADB_MBV2_model.pb;1;1,224,224,3 5.5
  30. mtk_AADB_HADB_MBV3_model.pb;1;1,224,224,3 4
  31. # The output of mtk_face_features_v1.pb has small value
  32. mtk_face_features_v1.pb 26
  33. model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 10
  34. hiai_AADB_HADB_MBV2_model.pb;1;1,224,224,3 6
  35. hiai_frozen_inference_graph.pb 12
  36. hiai_lm_inference_graph.pb 1.2
  37. hiai_ghostnet.pb 0.9
  38. hiai_face_model_npu.pb 0.5
  39. hiai_cv_focusShootOCRModel_02.pb 10.5
  40. hiai_label_and_video.pb;1;1,224,224,3 23
  41. hiai_dress_detect.pb;1;1,960,960,3 1.5
  42. hiai_iMaxDN_RGB.pb 0.5
  43. hiai_iMaxSR_RGB.pb 3.5
  44. hiai_ctpn_feature_map.pb 6.5
  45. hiai_cpu_face_gazing.pb 0.5
  46. hiai_cpu_face_emotion.pb 2.2
  47. hiai_cv_poseEstimation.pb 103
  48. Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb 1.5
  49. # The input of the Q_crnn_ori_75w_slim model is between 0-255, but its outputs have small values (e-6).
  50. Q_crnn_ori_75w_slim_norm.pb 37
  51. # The output of Q_crnn_ori_v2 model has small values (e-4).
  52. Q_crnn_ori_v2_405001_notrans_nopre.pb 24
  53. # The inputs of the hiai_latin models are between 0-255
  54. hiai_latin_ocr.pb 4
  55. hiai_latin_ocr_1.pb 3.5
  56. hiai_cpu_face_headpose.pb 4
  57. # ml_noya_tts_melgan.pb: if the input range is adjusted to [-1, 1], the fp16 error can be reduced to 38.9512%
  58. ml_noya_tts_melgan.pb;1;16,16,80 70
  59. bolt_segment.pb 2
  60. siteAI_wireless_depress_w.pb;1;1,36 0.5
  61. siteAI_wireless_restore_w.pb;1;1,36 0.5
  62. siteAI_trans_nonlinear.pb;1;1,137 0.5
  63. siteAI_trans_nonlinear40g.pb;1;1,271 0.6
  64. siteAI_trans_nonlinear134g.pb;1;1,137 0.5
  65. siteAI_trans_nonlinear134g_nrz.pb;1;1,182 0.6
  66. ml_vision_guide_detection2.pb;1;1,320,320,1 1
  67. # ml_tts_encoder.pb has a round op, which will cause round-off error when the decimal of input value is near 0.5
  68. ml_tts_encoder.pb;4;1,44:1:1:1 9
  69. # encoder_0111_control_flow.pb is same as ml_tts_encoder_control_flow.pb
  70. #encoder_0111_control_flow.pb;4;1:1,44:1:1 10
  71. ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 12.1
  72. ml_video_edit_img_segment_adaptise.pb;2 40
  73. ml_video_edit_oneclick_adaptis.pb;3 6
  74. #decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
  75. #decoder_step_201217.pb;5 187
  76. #decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
  77. #decoder_step_201217_modified.pb;5 0.5
  78. #encoder_0111.pb is the same model as ml_tts_encoder.pb.
  79. #encoder_0111.pb;4;1:1,44:1:1
  80. ml_female_model_step6_noiseout.pb;66 2
  81. ml_male_model_step6_noiseout.pb;66 2.5
  82. ml_tts_encoder_control_flow.pb;4;1,22:1:1:1;;input_dependent+need_loop 1.5
  83. ml_tts_decoder_control_flow.pb;5;;;need_loop 1
  84. ml_tts_decoder.pb;5 2.5
  85. ml_tts_vocoder.pb;66 53
  86. hiai_transformer_encoder.pb;15 4
  87. decoder_step_nocumsum_v5.pb;13;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127 1.2
  88. hiai_nlu_model_multi.pb;6;1,32:1,32:1,32:1,74:1,11:1,6
  89. hiai_nlu_model_single.pb;3;1,32:1,32:1,32 4.4
  90. fsr_270_mindspore.pb 6.0
  91. fsr_360_mindspore.pb 6.5
  92. fsr_720_mindspore.pb 2.0
  93. hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache.pb;2
  94. tt_raw_h4800_mel80_ms_fe001_ex_20210506_joint_decoder.pb;14;4:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:1,640 0.5