|
- unet_model_reconstruct.pb;1:content;1,256,256,3
- ml_video_edit_generate_filter.pb;1:lowres_input
- densenet.pb;1:Placeholder;1,224,224,3
- inception_resnet_v2.pb;1:input;1,299,299,3
- inception_v3.pb;1:input;1,299,299,3
- inception_v4.pb;1:input;1,299,299,3
- mnasnet_1.0_224.pb;1:input
- mnasnet_1.3_224.pb;1:input
- mobilenet_v1_0.25_128_frozen.pb;1:input;1,128,128,3
- mobilenet_v2_1.0_224_frozen.pb;1:input;1,224,224,3
- nasnet_large.pb;1:input;1,331,331,3
- nasnet_mobile.pb;1:input;1,224,224,3
- squeezenet.pb;1:Placeholder;1,224,224,3
- ml_ei_headpose.pb;1:input_1;1,64,64,3
- ml_ei_landmark.pb;1:input_image;1,160,160,3
- ml_face_openclose.pb;1:input;1,32,32,3
- ml_object_detect.pb;1:input/input_data;1,288,288,3
- ml_ocr_jk.pb;1:input_0
- ml_video_edit_enhance.pb;1:lowres_input
- ml_vision_guide_detection1.pb;1:input/input_data
- ml_vision_guide_detection3.pb;1:input/input_data
- scan_hms_angle.pb;1:normalized_input_image_tensor
- scan_hms_detect.pb;1:normalized_input_image_tensor
- hiai_AADB_HADB_MBV2_model.pb;1:input_0;1,224,224,3
- hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1
- hiai_cpu_face_emotion.pb;1:input_0
- hiai_cpu_face_gazing.pb;1:input_0
- hiai_cpu_face_headpose.pb;1:input_0
- hiai_ctpn_feature_map.pb;1:input_image
- hiai_cv_focusShootOCRModel_02.pb;1:input_0
- hiai_cv_focusShootOCRModel_08.pb;1:input
- hiai_cv_poseEstimation.pb;1:Image
- hiai_detectmodel_06_23_960_480_1180700.pb;1:input
- hiai_dress_detect.pb;1:data;1,960,960,3
- hiai_face_model_npu.pb;1:input_0
- hiai_frozen_inference_graph.pb;1:image_tensor;1,300,300,3
- hiai_ghostnet.pb;1:input
- hiai_iMaxDN_RGB.pb;1:input
- hiai_iMaxSR_RGB.pb;1:input
- hiai_label_and_video.pb;1:input_0;1,224,224,3
- hiai_latin_ocr.pb;1:input_0
- hiai_latin_ocr_1.pb;1:input_0
- hiai_lm_inference_graph.pb;1:image_tensor
- hiai_model_0909_kd_rot_ps_softmax.pb;1:input_0;1,224,224,3
- hiai_PoseEstimation_Pcm.pb;1:image
- model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
- mtk_AADB_HADB_MBV2_model.pb;1:input_0;1,224,224,3
- mtk_AADB_HADB_MBV3_model.pb;1:input_0;1,224,224,3
- mtk_age_gender.pb;1:img
- mtk_model_ckpt.pb;1:input
- mtk_model_face_dress.pb;1:input;1,128,128,3
- mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
- ml_ocr_latin.pb;1:input_0
- ml_noya_tts_melgan.pb;1:inputs;16,16,80
- # Q_hand_0812.pb is not suitable for float16. Out of float16 range.
- Q_hand_0812.pb;1:input
- Q_inception-249970-672-11-16.pb;1:input
- Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb;1:input
- Q_crnn_screen_slim400w_more_20w.pb;1:input_0
- matmul.pb;1:input0
- hiai_ssd_mobilenetv2_object.pb;1:image_tensor
- hiai_humanDetection.pb;1:normalized_input_image_tensor
- hiai_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3
- mtk_face_features_v1.pb;1:input
- Q_crnn_ori_75w_slim_norm.pb;1:input_0
- Q_crnn_ori_v2_405001_notrans_nopre.pb;1:input_0
- bolt_segment.pb;1:input
- siteAI_wireless_depress_w.pb;1:x-input;1,36
- siteAI_wireless_restore_w.pb;1:x-input;1,36
- siteAI_trans_nonlinear.pb;1:features_placeholder;1,137
- siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271
- siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137
- siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182
- ml_vision_guide_detection2.pb;1:input/input_data;1,320,320,1
- ml_tts_encoder.pb;4:phones,alpha,spk_id,input_length;1,44:1:1:1;;input_dependent
- # encoder_0111_control_flow.pb is the same as ml_tts_encoder_control_flow.pb
- #encoder_0111_control_flow.pb;4;1:1,44:1:1;;input_dependent
- ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w
- ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:backbone_features2,w
- #fasterrcnn_crop.pb is the same model as gts_object_detect_Ics.pb.
- #fasterrcnn_crop.pb;1;420,630,3
- #decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
- #decoder_step_201217.pb;5
- #decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
- #decoder_step_201217_modified.pb;5
- #encoder_0111.pb is the same model as ml_tts_encoder.pb.
- #encoder_0111.pb;4;1:1,44:1:1
- encoder_201228.pb;3:phones,alpha,input_length;1,22:1:1;;input_dependent
- ml_video_edit_oneclick_adaptis.pb;3:image_input,point_input,coord_features
- tacotron_encoder_stf.pb;5:phones,tones,seg_tags,prosodies,input_length;1,62:1,62:1,62:1,62:1;;input_dependent
- # The female/male models and ml_tts_vocoder.pb contain a tensor which is both an input and an output. The tensor name is kept the same as the output name.
- female_model_step2_int16_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
- ml_female_model_step6_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
- ml_male_model_step6_noiseout.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
- ml_tts_decoder_control_flow.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0;;;need_loop
- ml_tts_decoder.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0
- ml_tts_encoder_control_flow.pb;4:phones,alpha,spk_id,input_length;1,22:1:1:1;;input_dependent+need_loop
- ml_tts_vocoder.pb;66:cur_mel,noise_next,big_mel_c,upsample_net_conv_in_stack,upsample_net_layers_1_stack,upsample_net_layers_2_stack,upsample_net_layers_3_stack,conv_layers_0_stack,conv_layers_1_stack,conv_layers_2_stack,conv_layers_3_stack,conv_layers_4_stack,conv_layers_5_stack,conv_layers_6_stack,conv_layers_7_stack,conv_layers_8_stack,conv_layers_9_stack,conv_layers_10_stack,conv_layers_11_stack,conv_layers_12_stack,conv_layers_13_stack,conv_layers_14_stack,conv_layers_15_stack,conv_layers_16_stack,conv_layers_17_stack,conv_layers_18_stack,conv_layers_19_stack,conv_layers_20_stack,conv_layers_21_stack,conv_layers_22_stack,conv_layers_23_stack,conv_layers_24_stack,conv_layers_25_stack,conv_layers_26_stack,conv_layers_27_stack,conv_layers_28_stack,conv_layers_29_stack,h_0_stack,h_1_stack,h_2_stack,h_3_stack,h_4_stack,h_5_stack,h_6_stack,h_7_stack,h_8_stack,h_9_stack,h_10_stack,h_11_stack,h_12_stack,h_13_stack,h_14_stack,h_15_stack,h_16_stack,h_17_stack,h_18_stack,h_19_stack,h_20_stack,h_21_stack,h_22_stack,h_23_stack,h_24_stack,h_25_stack,h_26_stack,h_27_stack,h_28_stack
- hiai_nlu_model.pb;3:input_ids,input_mask,segment_ids;1,16:1,16:1,16
- gts_object_detect_Ics.pb;1:image;420,630,3;;input_dependent+need_loop
- hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy
- decoder_step_nocumsum_v5.pb;13:h_1,h_2,c_2,c_1,c_0,dec_lr_inputs,dec_lr_posmat,dec_ref_frames,time_step,dec_lr_sigma,h_0,previous_output,dec_lr_dend;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127
- ml_audio_kit_encoder_v5.pb;6:input_length,seg_tags,prosodies,phones,alpha,tones;1:1,32:1,32:1,32:1:1,32
- hiai_nlu_model_v2.pb;7:input_ids,input_mask,segment_ids,prev_intent,filling_slots,followup_intents,history_slots;1,5:1,5:1,5:1,98:1,174:1,6:1,5
- hiai_nlu_model_multi.pb;6:input_ids,attention_mask,token_type_ids,prev_intent,filling_slots,followup_intents;1,32:1,32:1,32:1,74:1,11:1,6
- hiai_nlu_model_single.pb;3:input_ids,input_mask,segment_ids;1,32:1,32:1,32
- fsr_270_mindspore.pb
- fsr_360_mindspore.pb
- fsr_720_mindspore.pb
- hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache.pb;2
- tt_raw_h4800_mel80_ms_fe001_ex_20210506_joint_decoder.pb;14;4:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:4,7,64:1,640
|