Browse Source

[to #42322933]增加对text-ranking任务中文模型的单元测试,以方便得到官方模型打标。

增加对text-ranking任务中文模型的单元测试,以方便得到官方模型打标。
        Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10492754
master
zhangyanzhao.zyz yingda.chen 3 years ago
parent
commit
c4dbb69d65
2 changed files with 94 additions and 21 deletions
  1. +25
    -18
      tests/pipelines/test_text_ranking.py
  2. +69
    -3
      tests/trainers/test_finetune_text_ranking.py

+ 25
- 18
tests/pipelines/test_text_ranking.py View File

@@ -13,7 +13,11 @@ from modelscope.utils.test_utils import test_level


class TextRankingTest(unittest.TestCase):
model_id = 'damo/nlp_corom_passage-ranking_english-base'
models = [
'damo/nlp_corom_passage-ranking_english-base',
'damo/nlp_rom_passage-ranking_chinese-base'
]

inputs = {
'source_sentence': ["how long it take to get a master's degree"],
'sentences_to_compare': [
@@ -26,29 +30,32 @@ class TextRankingTest(unittest.TestCase):

@unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
def test_run_by_direct_model_download(self):
    """Run text ranking from locally downloaded model snapshots.

    For every model id in ``self.models`` (English and Chinese
    passage-ranking checkpoints), download the snapshot, build the
    preprocessor and model from the local cache path, and run the same
    inputs through both an explicitly constructed TextRankingPipeline
    and the generic ``pipeline()`` factory, printing both results for
    manual comparison.
    """
    # NOTE: the diff view had concatenated the pre-change single-model
    # body (which referenced the removed ``self.model_id``) with the new
    # looped body; only the looped version is kept here.
    for model_id in self.models:
        cache_path = snapshot_download(model_id)
        tokenizer = TextRankingPreprocessor(cache_path)
        model = TextRanking.from_pretrained(cache_path)
        pipeline1 = TextRankingPipeline(model, preprocessor=tokenizer)
        pipeline2 = pipeline(
            Tasks.text_ranking, model=model, preprocessor=tokenizer)
        print(f'sentence: {self.inputs}\n'
              f'pipeline1:{pipeline1(input=self.inputs)}')
        print()
        print(f'pipeline2: {pipeline2(input=self.inputs)}')

@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
def test_run_with_model_from_modelhub(self):
    """Run text ranking with models resolved through the model hub.

    For each model id in ``self.models``, load the model via
    ``Model.from_pretrained``, build the preprocessor from its model
    directory, and print the pipeline output for ``self.inputs``.
    """
    # NOTE: the diff view had concatenated the obsolete single-model
    # body (using the removed ``self.model_id``) with the new loop;
    # only the looped post-change version is kept.
    for model_id in self.models:
        model = Model.from_pretrained(model_id)
        tokenizer = TextRankingPreprocessor(model.model_dir)
        pipeline_ins = pipeline(
            task=Tasks.text_ranking, model=model, preprocessor=tokenizer)
        print(pipeline_ins(input=self.inputs))

@unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
def test_run_with_model_name(self):
    """Run text ranking by passing only a model id string to pipeline().

    Exercises the default preprocessor/model resolution path for each
    model id in ``self.models`` and prints the result.
    """
    # NOTE: the diff view had concatenated the obsolete single-model
    # body (using the removed ``self.model_id``) with the new loop;
    # only the looped post-change version is kept.
    for model_id in self.models:
        pipeline_ins = pipeline(task=Tasks.text_ranking, model=model_id)
        print(pipeline_ins(input=self.inputs))

@unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
def test_run_with_default_model(self):


+ 69
- 3
tests/trainers/test_finetune_text_ranking.py View File

@@ -14,6 +14,7 @@ from modelscope.msdatasets import MsDataset
from modelscope.pipelines import pipeline
from modelscope.trainers import build_trainer
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.test_utils import test_level


class TestFinetuneSequenceClassification(unittest.TestCase):
@@ -58,6 +59,7 @@ class TestFinetuneSequenceClassification(unittest.TestCase):
results_files = os.listdir(self.tmp_dir)
self.assertIn(f'{trainer.timestamp}.log.json', results_files)

@unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
def test_finetune_msmarco(self):

def cfg_modify_fn(cfg):
@@ -70,7 +72,7 @@ class TestFinetuneSequenceClassification(unittest.TestCase):
'query_sequence': 'query',
'pos_sequence': 'positive_passages',
'neg_sequence': 'negative_passages',
'passage_text_fileds': ['title', 'text'],
'text_fileds': ['title', 'text'],
'qid_field': 'query_id'
},
'val': {
@@ -78,7 +80,7 @@ class TestFinetuneSequenceClassification(unittest.TestCase):
'query_sequence': 'query',
'pos_sequence': 'positive_passages',
'neg_sequence': 'negative_passages',
'passage_text_fileds': ['title', 'text'],
'text_fileds': ['title', 'text'],
'qid_field': 'query_id'
},
}
@@ -112,7 +114,7 @@ class TestFinetuneSequenceClassification(unittest.TestCase):
# load dataset
ds = MsDataset.load('passage-ranking-demo', 'zyznull')
train_ds = ds['train'].to_hf_dataset()
dev_ds = ds['train'].to_hf_dataset()
dev_ds = ds['dev'].to_hf_dataset()

model_id = 'damo/nlp_corom_passage-ranking_english-base'
self.finetune(
@@ -124,6 +126,70 @@ class TestFinetuneSequenceClassification(unittest.TestCase):
output_dir = os.path.join(self.tmp_dir, ModelFile.TRAIN_OUTPUT_DIR)
self.pipeline_text_ranking(output_dir)

@unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
def test_finetune_dureader(self):
    """Fine-tune the Chinese passage-ranking model on DuReader.

    Loads the ``dureader-retrieval-ranking`` dataset from the hub,
    adapts the model configuration for the text-ranking task, and runs
    one training epoch with periodic evaluation via ``self.finetune``.
    """

    def make_split_cfg():
        # Field mapping shared by the train and val splits; a fresh dict
        # (and fresh list) is returned per call so the two splits never
        # alias each other.
        # NOTE(review): 'text_fileds' (sic) mirrors the key spelling the
        # preprocessor expects elsewhere in this file — do not "fix" it.
        return {
            'type': 'bert',
            'query_sequence': 'query',
            'pos_sequence': 'positive_passages',
            'neg_sequence': 'negative_passages',
            'text_fileds': ['text'],
            'qid_field': 'query_id'
        }

    def adapt_cfg(cfg):
        # Switch the task and preprocessor to text ranking.
        cfg.task = 'text-ranking'
        cfg['preprocessor'] = {'type': 'text-ranking'}
        cfg.train.optimizer.lr = 2e-5
        cfg['dataset'] = {
            'train': make_split_cfg(),
            'val': make_split_cfg(),
        }
        cfg['train']['neg_samples'] = 4
        cfg['evaluation']['dataloader']['batch_size_per_gpu'] = 30
        cfg.train.max_epochs = 1
        cfg.train.train_batch_size = 4
        # Decay the learning rate linearly to zero, stepping per
        # iteration rather than per epoch.
        cfg.train.lr_scheduler = {
            'type': 'LinearLR',
            'start_factor': 1.0,
            'end_factor': 0.0,
            'options': {
                'by_epoch': False
            }
        }
        cfg.train.hooks = [
            {'type': 'CheckpointHook', 'interval': 1},
            {'type': 'TextLoggerHook', 'interval': 1},
            {'type': 'IterTimerHook'},
            # Evaluate every 5000 iterations instead of per epoch.
            {'type': 'EvaluationHook', 'by_epoch': False, 'interval': 5000},
        ]
        return cfg

    # Fetch the dataset and convert both splits to HF datasets.
    ds = MsDataset.load('dureader-retrieval-ranking', 'zyznull')
    train_ds = ds['train'].to_hf_dataset()
    dev_ds = ds['dev'].to_hf_dataset()

    self.finetune(
        model_id='damo/nlp_rom_passage-ranking_chinese-base',
        train_dataset=train_ds,
        eval_dataset=dev_ds,
        cfg_modify_fn=adapt_cfg)

# Helper: build a text-ranking pipeline from a fine-tuned checkpoint dir.
# NOTE(review): this excerpt appears truncated by the diff view — the
# method body likely continues (e.g. invoking/printing pipeline_ins)
# beyond what is visible here; confirm against the full file.
def pipeline_text_ranking(self, model_dir):
model = Model.from_pretrained(model_dir)
pipeline_ins = pipeline(task=Tasks.text_ranking, model=model)


Loading…
Cancel
Save