
test_text_classification.py 4.2 kB

# Copyright (c) Alibaba, Inc. and its affiliates.
import shutil
import unittest
import zipfile
from pathlib import Path

from modelscope.fileio import File
from modelscope.models import Model
from modelscope.models.nlp import BertForSequenceClassification
from modelscope.pipelines import SequenceClassificationPipeline, pipeline
from modelscope.preprocessors import SequenceClassificationPreprocessor
from modelscope.pydatasets import PyDataset
from modelscope.utils.constant import Hubs, Tasks
from modelscope.utils.hub import get_model_cache_dir


class SequenceClassificationTest(unittest.TestCase):

    def setUp(self) -> None:
        self.model_id = 'damo/bert-base-sst2'
        # switch to False to avoid re-downloading the model on every run
        purge_cache = True
        if purge_cache:
            shutil.rmtree(
                get_model_cache_dir(self.model_id), ignore_errors=True)

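    # Helper: run a pipeline instance over the first few SST-2 test
    # sentences and print the raw results. easynlp is imported lazily
    # inside the helper, so it is only required by tests that call it.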
    def predict(self, pipeline_ins: SequenceClassificationPipeline):
        from easynlp.appzoo import load_dataset
        dataset = load_dataset('glue', 'sst2')
        data = dataset['test']['sentence'][:3]
        results = pipeline_ins(data[0])
        print(results)
        results = pipeline_ins(data[1])
        print(results)
        print(data)

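    # Helper: print at most the first 11 records so test output stays short.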
    def printDataset(self, dataset: PyDataset):
        for i, r in enumerate(dataset):
            if i > 10:
                break
            print(r)

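    # Downloads the model archive directly from OSS on first run, unpacks
    # it into a local cache, then runs inference both through a hand-built
    # SequenceClassificationPipeline and through the pipeline() factory.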
    def test_run(self):
        model_url = 'https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com' \
                    '/release/easynlp_modelzoo/alibaba-pai/bert-base-sst2.zip'
        cache_path_str = '.cache/easynlp/bert-base-sst2.zip'
        cache_path = Path(cache_path_str)
        if not cache_path.exists():
            cache_path.parent.mkdir(parents=True, exist_ok=True)
            cache_path.touch(exist_ok=True)
            with cache_path.open('wb') as ofile:
                ofile.write(File.read(model_url))
            with zipfile.ZipFile(cache_path_str, 'r') as zipf:
                zipf.extractall(cache_path.parent)
        path = '.cache/easynlp/'
        model = BertForSequenceClassification(path)
        preprocessor = SequenceClassificationPreprocessor(
            path, first_sequence='sentence', second_sequence=None)
        pipeline1 = SequenceClassificationPipeline(model, preprocessor)
        self.predict(pipeline1)
        pipeline2 = pipeline(
            Tasks.text_classification, model=model, preprocessor=preprocessor)
        print(pipeline2('Hello world!'))

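    # Same flow, but the model is fetched from the ModelScope hub via
    # Model.from_pretrained() instead of a manual download.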
    def test_run_with_model_from_modelhub(self):
        model = Model.from_pretrained(self.model_id)
        preprocessor = SequenceClassificationPreprocessor(
            model.model_dir, first_sequence='sentence', second_sequence=None)
        pipeline_ins = pipeline(
            task=Tasks.text_classification,
            model=model,
            preprocessor=preprocessor)
        self.predict(pipeline_ins)

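    # Only a model id is passed; pipeline() is expected to resolve the
    # model and preprocessor on its own.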
    def test_run_with_model_name(self):
        text_classification = pipeline(
            task=Tasks.text_classification, model=self.model_id)
        result = text_classification(
            PyDataset.load(
                'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
        self.printDataset(result)

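    # No model is given at all; the task's default model should be used.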
    def test_run_with_default_model(self):
        text_classification = pipeline(task=Tasks.text_classification)
        result = text_classification(
            PyDataset.load(
                'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
        self.printDataset(result)

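    # Feeds an entire PyDataset (SST-2 pulled from the Hugging Face hub)
    # through the pipeline instead of single strings.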
    def test_run_with_dataset(self):
        model = Model.from_pretrained(self.model_id)
        preprocessor = SequenceClassificationPreprocessor(
            model.model_dir, first_sequence='sentence', second_sequence=None)
        text_classification = pipeline(
            Tasks.text_classification, model=model, preprocessor=preprocessor)
        # loaded from huggingface dataset
        # TODO: rename parameter as dataset_name and subset_name
        dataset = PyDataset.load(
            'glue', name='sst2', target='sentence', hub=Hubs.huggingface)
        result = text_classification(dataset)
        self.printDataset(result)


if __name__ == '__main__':
    unittest.main()