From c59833c7eeb0139d0b74150da3ca6cf4ec32f9a1 Mon Sep 17 00:00:00 2001
From: "wenmeng.zwm"
Date: Wed, 15 Jun 2022 14:53:49 +0800
Subject: [PATCH] [to #42461396] feat: test_level support

* add test level support
* update develop doc

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9021354
---
 docs/source/develop.md                       | 57 +++++++++++++++++---
 modelscope/utils/test_utils.py               | 23 ++++++++
 tests/pipelines/test_image_captioning.py     |  1 +
 tests/pipelines/test_image_matting.py        |  4 ++
 tests/pipelines/test_person_image_cartoon.py |  3 ++
 tests/pipelines/test_text_classification.py  |  6 +++
 tests/pipelines/test_text_generation.py      |  6 ++-
 tests/run.py                                 |  9 ++++
 8 files changed, 105 insertions(+), 4 deletions(-)
 create mode 100644 modelscope/utils/test_utils.py

diff --git a/docs/source/develop.md b/docs/source/develop.md
index f0c8b8b0..f96590b0 100644
--- a/docs/source/develop.md
+++ b/docs/source/develop.md
@@ -34,13 +34,64 @@ make linter
 ```
 
 ## 2. Test
-### 2.1 Unit test
+
+### 2.1 Test level
+
+There are mainly three test levels:
+
+* level 0: tests for the basic interfaces and functions of the framework, such as `tests/trainers/test_trainer_base.py`
+* level 1: important functional tests which cover the end-to-end workflow, such as `tests/pipelines/test_image_matting.py`
+* level 2: scenario tests for all the implemented modules, such as models and pipelines in different algorithm fields.
+
+The default test level is 0, which runs only the level-0 cases. You can set the test level
+via the environment variable `TEST_LEVEL`. For more details, refer to [test-doc](https://alidocs.dingtalk.com/i/nodes/mdvQnONayjBJKLXy1Bp38PY2MeXzp5o0?dontjump=true&nav=spaces&navQuery=spaceId%3Dnb9XJNlZxbgrOXyA)
+
+
 ```bash
+# run all tests
+TEST_LEVEL=2 make test
+
+# run important functional tests
+TEST_LEVEL=1 make test
+
+# run core UT and basic functional tests
 make test
 ```
 
-### 2.2 Test data
-TODO
+When writing test cases, you should assign a test level to each case using the
+following decorator. A case without an explicit level defaults to level 0 and
+runs in every test stage.
+
+File: test_module.py
+```python
+import unittest
+
+from modelscope.utils.test_utils import test_level
+
+class ImageCartoonTest(unittest.TestCase):
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    def test_run_by_direct_model_download(self):
+        pass
+```
+
+### 2.2 Run tests
+
+1. Run your own single test case to verify your self-implemented function. You can run your
+test file directly; if it fails to run, please check whether the variable `TEST_LEVEL`
+exists in the environment and unset it.
+```bash
+python tests/path/to/your_test.py
+```
+
+2. Remember to run the core tests locally before starting a code review; by default this
+only runs test cases with level 0.
+```bash
+make test
+```
+
+3. After you start a code review, CI tests will be triggered, which run test cases with level 1 and below.
+
+4. Daily regression tests run all cases at 0:00 each day on the master branch.
 
 
 ## Code Review
diff --git a/modelscope/utils/test_utils.py b/modelscope/utils/test_utils.py
new file mode 100644
index 00000000..c8ea0442
--- /dev/null
+++ b/modelscope/utils/test_utils.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# Copyright (c) Alibaba, Inc. and its affiliates.
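+# Helpers for gating unit tests by level: test_level() reads the TEST_LEVEL
+# environment variable when it is set, and otherwise falls back to the
+# in-process default, which tests/run.py configures via set_test_level().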
+
+import os
+
+TEST_LEVEL = 2
+TEST_LEVEL_STR = 'TEST_LEVEL'
+
+
+def test_level():
+    global TEST_LEVEL
+    if TEST_LEVEL_STR in os.environ:
+        TEST_LEVEL = int(os.environ[TEST_LEVEL_STR])
+
+    return TEST_LEVEL
+
+
+def set_test_level(level: int):
+    global TEST_LEVEL
+    TEST_LEVEL = level
diff --git a/tests/pipelines/test_image_captioning.py b/tests/pipelines/test_image_captioning.py
index 76ffc79d..4fac4658 100644
--- a/tests/pipelines/test_image_captioning.py
+++ b/tests/pipelines/test_image_captioning.py
@@ -7,6 +7,7 @@ import unittest
 from modelscope.fileio import File
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
+from modelscope.utils.test_utils import test_level
 
 
 class ImageCaptionTest(unittest.TestCase):
diff --git a/tests/pipelines/test_image_matting.py b/tests/pipelines/test_image_matting.py
index f1a627a0..ba5d05ad 100644
--- a/tests/pipelines/test_image_matting.py
+++ b/tests/pipelines/test_image_matting.py
@@ -11,6 +11,7 @@ from modelscope.pipelines import pipeline
 from modelscope.pydatasets import PyDataset
 from modelscope.utils.constant import ModelFile, Tasks
 from modelscope.utils.hub import get_model_cache_dir
+from modelscope.utils.test_utils import test_level
 
 
 class ImageMattingTest(unittest.TestCase):
@@ -38,6 +39,7 @@ class ImageMattingTest(unittest.TestCase):
         )
         cv2.imwrite('result.png', result['output_png'])
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_dataset(self):
         input_location = [
             'http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/data/test/maas/image_matting/test.png'
@@ -52,6 +54,7 @@ class ImageMattingTest(unittest.TestCase):
         cv2.imwrite('result.png', next(result)['output_png'])
         print(f'Output written to {osp.abspath("result.png")}')
 
+    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_modelhub(self):
         img_matting = pipeline(Tasks.image_matting, model=self.model_id)
 
@@ -61,6 +64,7 @@ class ImageMattingTest(unittest.TestCase):
         cv2.imwrite('result.png', result['output_png'])
         print(f'Output written to {osp.abspath("result.png")}')
 
+    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
         img_matting = pipeline(Tasks.image_matting)
 
diff --git a/tests/pipelines/test_person_image_cartoon.py b/tests/pipelines/test_person_image_cartoon.py
index 6f352e42..ed912b1c 100644
--- a/tests/pipelines/test_person_image_cartoon.py
+++ b/tests/pipelines/test_person_image_cartoon.py
@@ -8,6 +8,7 @@ import cv2
 from modelscope.pipelines import pipeline
 from modelscope.pipelines.base import Pipeline
 from modelscope.utils.constant import Tasks
+from modelscope.utils.test_utils import test_level
 
 
 class ImageCartoonTest(unittest.TestCase):
@@ -36,10 +37,12 @@ class ImageCartoonTest(unittest.TestCase):
         img_cartoon = pipeline(Tasks.image_generation, model=model_dir)
         self.pipeline_inference(img_cartoon, self.test_image)
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_modelhub(self):
         img_cartoon = pipeline(Tasks.image_generation, model=self.model_id)
         self.pipeline_inference(img_cartoon, self.test_image)
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
         img_cartoon = pipeline(Tasks.image_generation)
         self.pipeline_inference(img_cartoon, self.test_image)
diff --git a/tests/pipelines/test_text_classification.py b/tests/pipelines/test_text_classification.py
index 7f6dc77c..01fdd29b 100644
--- a/tests/pipelines/test_text_classification.py
+++ b/tests/pipelines/test_text_classification.py
@@ -12,6 +12,7 @@ from modelscope.preprocessors import SequenceClassificationPreprocessor
 from modelscope.pydatasets import PyDataset
 from modelscope.utils.constant import Hubs, Tasks
 from modelscope.utils.hub import get_model_cache_dir
+from modelscope.utils.test_utils import test_level
 
 
 class SequenceClassificationTest(unittest.TestCase):
@@ -43,6 +44,7 @@ class SequenceClassificationTest(unittest.TestCase):
                 break
         print(r)
 
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run(self):
         model_url = 'https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com' \
                     '/release/easynlp_modelzoo/alibaba-pai/bert-base-sst2.zip'
@@ -67,6 +69,7 @@ class SequenceClassificationTest(unittest.TestCase):
             Tasks.text_classification, model=model, preprocessor=preprocessor)
         print(pipeline2('Hello world!'))
 
+    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_from_modelhub(self):
         model = Model.from_pretrained(self.model_id)
         preprocessor = SequenceClassificationPreprocessor(
@@ -77,6 +80,7 @@ class SequenceClassificationTest(unittest.TestCase):
             preprocessor=preprocessor)
         self.predict(pipeline_ins)
 
+    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_name(self):
         text_classification = pipeline(
             task=Tasks.text_classification, model=self.model_id)
@@ -85,6 +89,7 @@ class SequenceClassificationTest(unittest.TestCase):
             'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
         self.printDataset(result)
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_default_model(self):
         text_classification = pipeline(task=Tasks.text_classification)
         result = text_classification(
@@ -92,6 +97,7 @@ class SequenceClassificationTest(unittest.TestCase):
             'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
         self.printDataset(result)
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_dataset(self):
         model = Model.from_pretrained(self.model_id)
         preprocessor = SequenceClassificationPreprocessor(
diff --git a/tests/pipelines/test_text_generation.py b/tests/pipelines/test_text_generation.py
index d8f1b495..f98e135d 100644
--- a/tests/pipelines/test_text_generation.py
+++ b/tests/pipelines/test_text_generation.py
@@ -8,6 +8,7 @@ from modelscope.models.nlp import PalmForTextGenerationModel
 from modelscope.pipelines import TextGenerationPipeline, pipeline
 from modelscope.preprocessors import TextGenerationPreprocessor
 from modelscope.utils.constant import Tasks
+from modelscope.utils.test_utils import test_level
 
 
 class TextGenerationTest(unittest.TestCase):
@@ -15,7 +16,7 @@ class TextGenerationTest(unittest.TestCase):
     input1 = "今日天气类型='晴'&温度变化趋势='大幅上升'&最低气温='28℃'&最高气温='31℃'&体感='湿热'"
     input2 = "今日天气类型='多云'&体感='舒适'&最低气温='26℃'&最高气温='30℃'"
 
-    @unittest.skip('skip temporarily to save test time')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run(self):
         cache_path = snapshot_download(self.model_id)
         preprocessor = TextGenerationPreprocessor(
@@ -29,6 +30,7 @@
         print()
         print(f'input: {self.input2}\npipeline2: {pipeline2(self.input2)}')
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_model_from_modelhub(self):
         model = Model.from_pretrained(self.model_id)
         preprocessor = TextGenerationPreprocessor(
@@ -37,11 +39,13 @@ class TextGenerationTest(unittest.TestCase):
             task=Tasks.text_generation, model=model, preprocessor=preprocessor)
         print(pipeline_ins(self.input1))
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_model_name(self):
         pipeline_ins = pipeline(
             task=Tasks.text_generation, model=self.model_id)
         print(pipeline_ins(self.input2))
 
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_default_model(self):
         pipeline_ins = pipeline(task=Tasks.text_generation)
         print(pipeline_ins(self.input2))
diff --git a/tests/run.py b/tests/run.py
index 25404d7a..9f5d62a7 100644
--- a/tests/run.py
+++ b/tests/run.py
@@ -7,6 +7,11 @@ import sys
 import unittest
 from fnmatch import fnmatch
 
+from modelscope.utils.logger import get_logger
+from modelscope.utils.test_utils import set_test_level, test_level
+
+logger = get_logger()
+
 
 def gather_test_cases(test_dir, pattern, list_tests):
     case_list = []
@@ -49,5 +54,9 @@ if __name__ == '__main__':
         '--pattern', default='test_*.py', help='test file pattern')
     parser.add_argument(
         '--test_dir', default='tests', help='directory to be tested')
+    parser.add_argument(
+        '--level', default=0, type=int, help='2 -- all, 1 -- p1, 0 -- p0')
     args = parser.parse_args()
+    set_test_level(args.level)
+    logger.info(f'TEST LEVEL: {test_level()}')
     main(args)
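
One subtlety in `test_level()` is worth calling out: when the `TEST_LEVEL` environment variable is set, it takes precedence over any value passed to `set_test_level()` (and therefore over `tests/run.py --level`), and because each read overwrites the in-process default, the override persists even after the variable is unset. The following sketch illustrates that behavior; the file and class names are hypothetical and not part of this patch, and it assumes the patched `modelscope.utils.test_utils` is importable.

```python
# Hypothetical illustration, not part of this patch: shows how the
# TEST_LEVEL environment variable interacts with set_test_level().
import os
import unittest

from modelscope.utils.test_utils import set_test_level, test_level


class TestLevelPrecedenceTest(unittest.TestCase):

    def test_env_var_overrides_programmatic_level(self):
        os.environ['TEST_LEVEL'] = '1'
        set_test_level(0)  # what tests/run.py does with the default --level
        # The environment variable takes precedence on every read ...
        self.assertEqual(test_level(), 1)
        del os.environ['TEST_LEVEL']
        # ... and the value read from it overwrites the in-process default,
        # so it persists after the variable is unset.
        self.assertEqual(test_level(), 1)


if __name__ == '__main__':
    unittest.main()
```

This is also why the develop doc above recommends unsetting `TEST_LEVEL` before running a test file directly: a stale value would silently re-gate every `skipUnless` check.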