diff --git a/modelscope/pipelines/builder.py b/modelscope/pipelines/builder.py
index 6495a5db..3c97c2be 100644
--- a/modelscope/pipelines/builder.py
+++ b/modelscope/pipelines/builder.py
@@ -18,6 +18,7 @@ PIPELINES = Registry('pipelines')
 DEFAULT_MODEL_FOR_PIPELINE = {
     # TaskName: (pipeline_module_name, model_repo)
     Tasks.image_matting: ('image-matting', 'damo/image-matting-person'),
+    Tasks.nli: ('nli', 'damo/nlp_structbert_nli_chinese-base'),
     Tasks.text_classification: ('bert-sentiment-analysis',
                                 'damo/bert-base-sst2'),
     Tasks.text_generation: ('palm', 'damo/nlp_palm_text-generation_chinese'),
diff --git a/tests/pipelines/test_nli.py b/tests/pipelines/test_nli.py
new file mode 100644
index 00000000..9167b897
--- /dev/null
+++ b/tests/pipelines/test_nli.py
@@ -0,0 +1,48 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import unittest
+
+from maas_hub.snapshot_download import snapshot_download
+
+from modelscope.models import Model
+from modelscope.models.nlp import SbertForNLI
+from modelscope.pipelines import NLIPipeline, pipeline
+from modelscope.preprocessors import NLIPreprocessor
+from modelscope.utils.constant import Tasks
+
+
+class NLITest(unittest.TestCase):
+    model_id = 'damo/nlp_structbert_nli_chinese-base'
+    sentence1 = '四川商务职业学院和四川财经职业学院哪个好?'
+    sentence2 = '四川商务职业学院商务管理在哪个校区?'
+
+    def test_run_from_local(self):
+        cache_path = snapshot_download(self.model_id)
+        tokenizer = NLIPreprocessor(cache_path)
+        model = SbertForNLI(cache_path, tokenizer=tokenizer)
+        pipeline1 = NLIPipeline(model, preprocessor=tokenizer)
+        pipeline2 = pipeline(Tasks.nli, model=model, preprocessor=tokenizer)
+        print(f'sentence1: {self.sentence1}\nsentence2: {self.sentence2}\n'
+              f'pipeline1:{pipeline1(input=(self.sentence1, self.sentence2))}')
+        print()
+        print(
+            f'sentence1: {self.sentence1}\nsentence2: {self.sentence2}\n'
+            f'pipeline1: {pipeline2(input=(self.sentence1, self.sentence2))}')
+
+    def test_run_with_model_from_modelhub(self):
+        model = Model.from_pretrained(self.model_id)
+        tokenizer = NLIPreprocessor(model.model_dir)
+        pipeline_ins = pipeline(
+            task=Tasks.nli, model=model, preprocessor=tokenizer)
+        print(pipeline_ins(input=(self.sentence1, self.sentence2)))
+
+    def test_run_with_model_name(self):
+        pipeline_ins = pipeline(task=Tasks.nli, model=self.model_id)
+        print(pipeline_ins(input=(self.sentence1, self.sentence2)))
+
+    def test_run_with_default_model(self):
+        pipeline_ins = pipeline(task=Tasks.nli)
+        print(pipeline_ins(input=(self.sentence1, self.sentence2)))
+
+
+if __name__ == '__main__':
+    unittest.main()