Browse Source

[to #42322933]adjust output form

adjust output format for the movie scene segmentation demo

 Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10244194
master
shuying.shu 3 years ago
parent
commit
b876839d51
4 changed files with 15 additions and 17 deletions
  1. +2
    -2
      modelscope/models/cv/movie_scene_segmentation/model.py
  2. +6
    -7
      modelscope/models/cv/movie_scene_segmentation/utils/save_op.py
  3. +5
    -6
      modelscope/outputs.py
  4. +2
    -2
      modelscope/pipelines/cv/movie_scene_segmentation_pipeline.py

+ 2
- 2
modelscope/models/cv/movie_scene_segmentation/model.py View File

@@ -162,11 +162,11 @@ class MovieSceneSegmentationModel(TorchModel):
thres = self.cfg.pipeline.save_threshold

anno_dict = get_pred_boundary(pred_dict, thres)
scene_dict, scene_list = pred2scene(self.shot2keyf, anno_dict)
scene_dict_lst, scene_list = pred2scene(self.shot2keyf, anno_dict)
if self.cfg.pipeline.save_split_scene:
re_dir = scene2video(inputs['input_video_pth'], scene_list, thres)
print(f'Split scene video saved to {re_dir}')
return len(scene_list), scene_dict
return len(scene_list), scene_dict_lst

def preprocess(self, inputs):
logger.info('Begin shot detect......')


+ 6
- 7
modelscope/models/cv/movie_scene_segmentation/utils/save_op.py View File

@@ -21,16 +21,15 @@ def get_pred_boundary(pred_dict, threshold=0.5):
def pred2scene(shot2keyf, anno_dict):
scene_list, pair_list = get_demo_scene_list(shot2keyf, anno_dict)

scene_dict = {}
scene_dict_lst = []
assert len(scene_list) == len(pair_list)
for scene_ind, scene_item in enumerate(scene_list):
scene_dict.update(
{scene_ind: {
'shot': pair_list[scene_ind],
'frame': scene_item
}})
scene_dict_lst.append({
'shot': pair_list[scene_ind],
'frame': scene_item
})

return scene_dict, scene_list
return scene_dict_lst, scene_list


def scene2video(source_movie_fn, scene_list, thres):


+ 5
- 6
modelscope/outputs.py View File

@@ -38,7 +38,7 @@ class OutputKeys(object):
HISTORY = 'history'
TIMESTAMPS = 'timestamps'
SPLIT_VIDEO_NUM = 'split_video_num'
SPLIT_META_DICT = 'split_meta_dict'
SPLIT_META_LIST = 'split_meta_list'


TASK_OUTPUTS = {
@@ -293,18 +293,17 @@ TASK_OUTPUTS = {
# movie scene segmentation result for a single video
# {
# "split_video_num":3,
# "split_meta_dict":
# {
# scene_id:
# "split_meta_list":
# [
# {
# "shot": [0,1,2],
# "frame": [start_frame, end_frame]
# }
# }
# ]
#
# }
Tasks.movie_scene_segmentation:
[OutputKeys.SPLIT_VIDEO_NUM, OutputKeys.SPLIT_META_DICT],
[OutputKeys.SPLIT_VIDEO_NUM, OutputKeys.SPLIT_META_LIST],

# ============ nlp tasks ===================



+ 2
- 2
modelscope/pipelines/cv/movie_scene_segmentation_pipeline.py View File

@@ -60,9 +60,9 @@ class MovieSceneSegmentationPipeline(Pipeline):

def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
data = {'input_video_pth': self.input_video_pth, 'feat': inputs}
video_num, meta_dict = self.model.postprocess(data)
video_num, meta_lst = self.model.postprocess(data)
result = {
OutputKeys.SPLIT_VIDEO_NUM: video_num,
OutputKeys.SPLIT_META_DICT: meta_dict
OutputKeys.SPLIT_META_LIST: meta_lst
}
return result

Loading…
Cancel
Save