You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

models_tf.cfg 10 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111
  1. unet_model_reconstruct.pb;1:content;1,256,256,3
  2. ml_video_edit_generate_filter.pb;1:lowres_input
  3. densenet.pb;1:Placeholder;1,224,224,3
  4. inception_resnet_v2.pb;1:input;1,299,299,3
  5. inception_v3.pb;1:input;1,299,299,3
  6. inception_v4.pb;1:input;1,299,299,3
  7. mnasnet_1.0_224.pb;1:input
  8. mnasnet_1.3_224.pb;1:input
  9. mobilenet_v1_0.25_128_frozen.pb;1:input;1,128,128,3
  10. mobilenet_v2_1.0_224_frozen.pb;1:input;1,224,224,3
  11. nasnet_large.pb;1:input;1,331,331,3
  12. nasnet_mobile.pb;1:input;1,224,224,3
  13. squeezenet.pb;1:Placeholder;1,224,224,3
  14. ml_ei_headpose.pb;1:input_1;1,64,64,3
  15. ml_ei_landmark.pb;1:input_image;1,160,160,3
  16. ml_face_openclose.pb;1:input;1,32,32,3
  17. ml_object_detect.pb;1:input/input_data;1,288,288,3
  18. ml_ocr_jk.pb;1:input_0
  19. ml_video_edit_enhance.pb;1:lowres_input
  20. ml_vision_guide_detection1.pb;1:input/input_data
  21. ml_vision_guide_detection3.pb;1:input/input_data
  22. scan_hms_angle.pb;1:normalized_input_image_tensor
  23. scan_hms_detect.pb;1:normalized_input_image_tensor
  24. hiai_AADB_HADB_MBV2_model.pb;1:input_0;1,224,224,3
  25. hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1
  26. hiai_cpu_face_emotion.pb;1:input_0
  27. hiai_cpu_face_gazing.pb;1:input_0
  28. hiai_cpu_face_headpose.pb;1:input_0
  29. hiai_ctpn_feature_map.pb;1:input_image
  30. hiai_cv_focusShootOCRModel_02.pb;1:input_0
  31. hiai_cv_focusShootOCRModel_08.pb;1:input
  32. hiai_cv_poseEstimation.pb;1:Image
  33. hiai_detectmodel_06_23_960_480_1180700.pb;1:input
  34. hiai_dress_detect.pb;1:data;1,960,960,3
  35. hiai_face_model_npu.pb;1:input_0
  36. hiai_frozen_inference_graph.pb;1:image_tensor;1,300,300,3
  37. hiai_ghostnet.pb;1:input
  38. hiai_iMaxDN_RGB.pb;1:input
  39. hiai_iMaxSR_RGB.pb;1:input
  40. hiai_label_and_video.pb;1:input_0;1,224,224,3
  41. hiai_latin_ocr.pb;1:input_0
  42. hiai_latin_ocr_1.pb;1:input_0
  43. hiai_lm_inference_graph.pb;1:image_tensor
  44. hiai_model_0909_kd_rot_ps_softmax.pb;1:input_0;1,224,224,3
  45. hiai_PoseEstimation_Pcm.pb;1:image
  46. model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
  47. mtk_AADB_HADB_MBV2_model.pb;1:input_0;1,224,224,3
  48. mtk_AADB_HADB_MBV3_model.pb;1:input_0;1,224,224,3
  49. mtk_age_gender.pb;1:img
  50. mtk_model_ckpt.pb;1:input
  51. mtk_model_face_dress.pb;1:input;1,128,128,3
  52. mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
  53. ml_ocr_latin.pb;1:input_0
  54. ml_noya_tts_melgan.pb;1:inputs;16,16,80
  55. # Q_hand_0812.pb is not suitable for float16. Out of float16 range.
  56. Q_hand_0812.pb;1:input
  57. Q_inception-249970-672-11-16.pb;1:input
  58. Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb;1:input
  59. Q_crnn_screen_slim400w_more_20w.pb;1:input_0
  60. matmul.pb;1:input0
  61. hiai_ssd_mobilenetv2_object.pb;1:image_tensor
  62. hiai_humanDetection.pb;1:normalized_input_image_tensor
  63. hiai_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
  64. mtk_face_features_v1.pb;1:input
  65. Q_crnn_ori_75w_slim_norm.pb;1:input_0
  66. Q_crnn_ori_v2_405001_notrans_nopre.pb;1:input_0
  67. bolt_segment.pb;1:input
  68. siteAI_wireless_depress_w.pb;1:x-input;1,36
  69. siteAI_wireless_restore_w.pb;1:x-input;1,36
  70. siteAI_trans_nonlinear.pb;1:features_placeholder;1,137
  71. siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271
  72. siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137
  73. siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182
  74. ml_vision_guide_detection2.pb;1:input/input_data;1,320,320,1
  75. ml_tts_encoder.pb;4:phones,alpha,spk_id,input_length;1,44:1:1:1;;input_dependent
  76. # encoder_0111_control_flow.pb is same as ml_tts_encoder_control_flow.pb
  77. #encoder_0111_control_flow.pb;4;1:1,44:1:1;;input_dependent
  78. ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w
  79. ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:backbone_features2,w
  80. #fasterrcnn_crop.pb is the same model as gts_object_detect_Ics.pb.
  81. #fasterrcnn_crop.pb;1;420,630,3
  82. #decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
  83. #decoder_step_201217.pb;5
  84. #decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
  85. #decoder_step_201217_modified.pb;5
  86. #encoder_0111.pb is the same model as ml_tts_encoder.pb.
  87. #encoder_0111.pb;4;1:1,44:1:1
  88. encoder_201228.pb;3:phones,alpha,input_length;1,22:1:1;;input_dependent
  89. ml_video_edit_oneclick_adaptis.pb;3:image_input,point_input,coord_features
  90. tacotron_encoder_stf.pb;5:phones,tones,seg_tags,prosodies,input_length;1,62:1,62:1,62:1,62:1;;input_dependent
  91. # The female/male models and ml_tts_vocoder.pb contain a tensor which is both an input and an output. The tensor name is kept the same as the output.
  92. female_model_step2_int16_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
  93. ml_female_model_step6_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
  94. ml_male_model_step6_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
  95. ml_tts_decoder_control_flow.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0;;;need_loop
  96. ml_tts_decoder.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0
  97. ml_tts_encoder_control_flow.pb;4:phones,alpha,spk_id,input_length;1,22:1:1:1;;input_dependent+need_loop
  98. ml_tts_vocoder.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
  99. hiai_nlu_model.pb;3:input_ids,input_mask,segment_ids;1,16:1,16:1,16
  100. gts_object_detect_Ics.pb;1:image;420,630,3;;input_dependent+need_loop
  101. hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy
  102. decoder_step_nocumsum_v5.pb;13:h_1,h_2,c_2,c_1,c_0,dec_lr_inputs,dec_lr_posmat,dec_ref_frames,time_step,dec_lr_sigma,h_0,previous_output,dec_lr_dend;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127
  103. ml_audio_kit_encoder_v5.pb;6:input_length,seg_tags,prosodies,phones,alpha,tones;1:1,32:1,32:1,32:1:1,32
  104. hiai_nlu_model_v2.pb;7:input_ids,input_mask,segment_ids,prev_intent,filling_slots,followup_intents,history_slots;1,5:1,5:1,5:1,98:1,174:1,6:1,5
  105. hiai_nlu_model_multi.pb;6:input_ids,attention_mask,token_type_ids,prev_intent,filling_slots,followup_intents;1,32:1,32:1,32:1,74:1,11:1,6
  106. hiai_nlu_model_single.pb;3:input_ids,input_mask,segment_ids;1,32:1,32:1,32
  107. fsr_270_mindspore.pb
  108. fsr_360_mindspore.pb
  109. fsr_720_mindspore.pb
  110. hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache.pb;2
  111. tt_raw_h4800_mel80_ms_fe001_ex_20210506_joint_decoder.pb;14;4:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:1,640