siyang.ssy yingda.chen 3 years ago
parent
commit
a8665cc8c5
2 changed files with 4 additions and 4 deletions
  1. +4
    -1
      modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
  2. +0
    -3
      modelscope/models/multi_modal/mmr/models/modeling.py

+ 4
- 1
modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py View File

@@ -42,7 +42,10 @@ class VideoCLIPForMultiModalEmbedding(TorchModel):
self.max_frames = model_config['max_frames']
self.feature_framerate = model_config['feature_framerate']
self.image_resolution = 224
-        self.device = model_config['device']
+        if torch.cuda.is_available():
+            self.device = model_config['device']
+        else:
+            self.device = 'cpu'
self.init_model = f'{model_dir}/{ModelFile.TORCH_MODEL_BIN_FILE}'

self.tokenizer = ClipTokenizer(model_dir)


+ 0
- 3
modelscope/models/multi_modal/mmr/models/modeling.py View File

@@ -85,9 +85,6 @@ class CLIP4Clip(nn.Module):
linear_patch=config['linear_patch'],
use_gc=config['use_gc']).float()

-        if (platform.system() != 'Darwin'):
-            convert_weights(self.clip)  # fp16

if backbone in ['ViT-B/32', 'ViT-B/16']:
cross_config = SimpleNamespace(**{
'hidden_size': 512,


Loading…
Cancel
Save