Browse Source

[to #42322933] init

master
思宏 3 years ago
parent
commit
314082810e
2 changed files with 49 additions and 0 deletions
  1. +1
    -0
      modelscope/pipelines/builder.py
  2. +48
    -0
      tests/pipelines/test_nli.py

+ 1
- 0
modelscope/pipelines/builder.py View File

@@ -18,6 +18,7 @@ PIPELINES = Registry('pipelines')
DEFAULT_MODEL_FOR_PIPELINE = {
# TaskName: (pipeline_module_name, model_repo)
Tasks.image_matting: ('image-matting', 'damo/image-matting-person'),
Tasks.nli: ('nli', 'damo/nlp_structbert_nli_chinese-base'),
Tasks.text_classification:
('bert-sentiment-analysis', 'damo/bert-base-sst2'),
Tasks.text_generation: ('palm', 'damo/nlp_palm_text-generation_chinese'),


+ 48
- 0
tests/pipelines/test_nli.py View File

@@ -0,0 +1,48 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
import unittest

from maas_hub.snapshot_download import snapshot_download

from modelscope.models import Model
from modelscope.models.nlp import SbertForNLI
from modelscope.pipelines import NLIPipeline, pipeline
from modelscope.preprocessors import NLIPreprocessor
from modelscope.utils.constant import Tasks


class NLITest(unittest.TestCase):
    """Smoke tests for the NLI (natural language inference) pipeline.

    Each test constructs the pipeline through a different entry point
    (local snapshot, hub model object, model id string, task default)
    and prints the prediction for one fixed Chinese sentence pair.
    """

    # Model repo id on the model hub; also the default model for Tasks.nli
    # registered in modelscope/pipelines/builder.py.
    model_id = 'damo/nlp_structbert_nli_chinese-base'
    sentence1 = '四川商务职业学院和四川财经职业学院哪个好?'
    sentence2 = '四川商务职业学院商务管理在哪个校区?'

    def test_run_from_local(self):
        """Build the pipeline from a locally downloaded snapshot, both by
        direct class construction and via the pipeline() factory."""
        cache_path = snapshot_download(self.model_id)
        tokenizer = NLIPreprocessor(cache_path)
        model = SbertForNLI(cache_path, tokenizer=tokenizer)
        pipeline1 = NLIPipeline(model, preprocessor=tokenizer)
        pipeline2 = pipeline(Tasks.nli, model=model, preprocessor=tokenizer)
        print(f'sentence1: {self.sentence1}\nsentence2: {self.sentence2}\n'
              f'pipeline1:{pipeline1(input=(self.sentence1, self.sentence2))}')
        print()
        # BUG FIX: this label previously read 'pipeline1' while printing the
        # output of pipeline2, making the two results indistinguishable.
        print(
            f'sentence1: {self.sentence1}\nsentence2: {self.sentence2}\n'
            f'pipeline2: {pipeline2(input=(self.sentence1, self.sentence2))}')

    def test_run_with_model_from_modelhub(self):
        """Build the pipeline from a Model object fetched from the hub."""
        model = Model.from_pretrained(self.model_id)
        tokenizer = NLIPreprocessor(model.model_dir)
        pipeline_ins = pipeline(
            task=Tasks.nli, model=model, preprocessor=tokenizer)
        print(pipeline_ins(input=(self.sentence1, self.sentence2)))

    def test_run_with_model_name(self):
        """Build the pipeline from the model id string only; the factory
        resolves the model and preprocessor itself."""
        pipeline_ins = pipeline(task=Tasks.nli, model=self.model_id)
        print(pipeline_ins(input=(self.sentence1, self.sentence2)))

    def test_run_with_default_model(self):
        """Build the pipeline from the task alone, relying on the default
        model registered for Tasks.nli in the pipeline builder."""
        pipeline_ins = pipeline(task=Tasks.nli)
        print(pipeline_ins(input=(self.sentence1, self.sentence2)))


# Allow running this test module directly: python tests/pipelines/test_nli.py
if __name__ == '__main__':
    unittest.main()

Loading…
Cancel
Save