diff --git a/modelscope/pipelines/builder.py b/modelscope/pipelines/builder.py
index 743ba1cb..eaa6d1c8 100644
--- a/modelscope/pipelines/builder.py
+++ b/modelscope/pipelines/builder.py
@@ -74,7 +74,7 @@ DEFAULT_MODEL_FOR_PIPELINE = {
                           'damo/cv_resnet50_video-category'),
     Tasks.multi_modal_embedding:
     (Pipelines.multi_modal_embedding,
-     'damo/multi-modal_clip-vit-large-patch14-chinese_multi-modal-embedding'),
+     'damo/multi-modal_clip-vit-large-patch14_zh'),
     Tasks.generative_multi_modal_embedding:
     (Pipelines.generative_multi_modal_embedding,
      'damo/multi-modal_gemm-vit-large-patch14_generative-multi-modal-embedding'
diff --git a/tests/pipelines/test_multi_modal_embedding.py b/tests/pipelines/test_multi_modal_embedding.py
index 001bf951..3bf3af87 100644
--- a/tests/pipelines/test_multi_modal_embedding.py
+++ b/tests/pipelines/test_multi_modal_embedding.py
@@ -11,7 +11,7 @@ from modelscope.utils.test_utils import test_level
 
 
 class MultiModalEmbeddingTest(unittest.TestCase):
-    model_id = 'damo/multi-modal_clip-vit-large-patch14-chinese_multi-modal-embedding'
+    model_id = 'damo/multi-modal_clip-vit-large-patch14_zh'
     test_text = {'text': '一张风景图'}
 
     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
diff --git a/tests/trainers/test_clip_multi_modal_embedding_trainer.py b/tests/trainers/test_clip_multi_modal_embedding_trainer.py
index c1b51ec6..03f82854 100644
--- a/tests/trainers/test_clip_multi_modal_embedding_trainer.py
+++ b/tests/trainers/test_clip_multi_modal_embedding_trainer.py
@@ -24,7 +24,7 @@ def clip_train_worker(local_rank, ngpus, node_size, node_rank):
     dist.init_process_group(
         backend='nccl', world_size=dist_world_size, rank=global_rank)
 
-    model_id = 'damo/multi-modal_clip-vit-large-patch14-chinese_multi-modal-embedding'
+    model_id = 'damo/multi-modal_clip-vit-large-patch14_zh'
    local_model_dir = snapshot_download(model_id)
 
     default_args = dict(