
Merge remote-tracking branch 'origin' into feat/fill_mask

Branch: master
Author: suluyan, 3 years ago
Commit: f279c48396
10 changed files with 19 additions and 24 deletions
  1. tests/pipelines/test_image_matting.py (+1, -1)
  2. tests/pipelines/test_person_image_cartoon.py (+1, -1)
  3. tests/pipelines/test_sentence_similarity.py (+3, -3)
  4. tests/pipelines/test_speech_signal_process.py (+2, -0)
  5. tests/pipelines/test_text_classification.py (+2, -6)
  6. tests/pipelines/test_text_generation.py (+1, -1)
  7. tests/pipelines/test_text_to_speech.py (+4, -7)
  8. tests/pipelines/test_word_segmentation.py (+2, -2)
  9. tests/preprocessors/test_image.py (+0, -1)
  10. tests/pydatasets/test_py_dataset.py (+3, -2)

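Note: nearly every hunk below tightens the test_level() threshold in the @unittest.skipUnless(...) decorators that gate these pipeline tests, so the heavier default-model cases only run at higher test levels; a few files also add the test_level import or gate tests that previously ran unconditionally. As a reading aid, here is a minimal sketch of that gating pattern. The helper name test_level comes from modelscope.utils.test_utils as imported in the hunks below, but reading the level from a TEST_LEVEL environment variable with a default of 0 is an assumption made here for illustration, not necessarily how the project implements it.

    # Hypothetical sketch of the test-level gate pattern seen in the hunks below.
    # The real helper is modelscope.utils.test_utils.test_level; reading a
    # TEST_LEVEL environment variable with a default of 0 is an assumption.
    import os
    import unittest


    def test_level() -> int:
        # Higher values opt in to slower or more resource-hungry tests.
        return int(os.environ.get('TEST_LEVEL', 0))


    class ExampleGatedTest(unittest.TestCase):

        @unittest.skipUnless(test_level() >= 2,
                             'skip test in current test level')
        def test_run_with_default_model(self):
            # Executes only when TEST_LEVEL is 2 or higher, e.g. in a full
            # nightly regression run; otherwise unittest reports it as skipped.
            self.assertTrue(True)


    if __name__ == '__main__':
        unittest.main()

Run normally, such a suite skips the expensive cases; exporting TEST_LEVEL=2 (under the assumption above) before invoking unittest re-enables them.
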
tests/pipelines/test_image_matting.py (+1, -1)

@@ -52,7 +52,7 @@ class ImageMattingTest(unittest.TestCase):
         cv2.imwrite('result.png', result['output_png'])
         print(f'Output written to {osp.abspath("result.png")}')

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
         img_matting = pipeline(Tasks.image_matting)

tests/pipelines/test_person_image_cartoon.py (+1, -1)

@@ -42,7 +42,7 @@ class ImageCartoonTest(unittest.TestCase):
         img_cartoon = pipeline(Tasks.image_generation, model=self.model_id)
         self.pipeline_inference(img_cartoon, self.test_image)

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
         img_cartoon = pipeline(Tasks.image_generation)
         self.pipeline_inference(img_cartoon, self.test_image)

tests/pipelines/test_sentence_similarity.py (+3, -3)

@@ -16,7 +16,7 @@ class SentenceSimilarityTest(unittest.TestCase):
     sentence1 = '今天气温比昨天高么?'
     sentence2 = '今天湿度比昨天高么?'

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run(self):
         cache_path = snapshot_download(self.model_id)
         tokenizer = SequenceClassificationPreprocessor(cache_path)
@@ -32,7 +32,7 @@ class SentenceSimilarityTest(unittest.TestCase):
             f'sentence1: {self.sentence1}\nsentence2: {self.sentence2}\n'
             f'pipeline1: {pipeline2(input=(self.sentence1, self.sentence2))}')

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_model_from_modelhub(self):
         model = Model.from_pretrained(self.model_id)
         tokenizer = SequenceClassificationPreprocessor(model.model_dir)
@@ -48,7 +48,7 @@ class SentenceSimilarityTest(unittest.TestCase):
             task=Tasks.sentence_similarity, model=self.model_id)
         print(pipeline_ins(input=(self.sentence1, self.sentence2)))

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
         pipeline_ins = pipeline(task=Tasks.sentence_similarity)
         print(pipeline_ins(input=(self.sentence1, self.sentence2)))

tests/pipelines/test_speech_signal_process.py (+2, -0)

@@ -6,6 +6,7 @@ from modelscope.fileio import File
 from modelscope.metainfo import Pipelines
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
+from modelscope.utils.test_utils import test_level

 NEAREND_MIC_URL = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/AEC/sample_audio/nearend_mic.wav'
 FAREND_SPEECH_URL = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/AEC/sample_audio/farend_speech.wav'
@@ -33,6 +34,7 @@ class SpeechSignalProcessTest(unittest.TestCase):
         # A temporary hack to provide c++ lib. Download it first.
         download(AEC_LIB_URL, AEC_LIB_FILE)

+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run(self):
         download(NEAREND_MIC_URL, NEAREND_MIC_FILE)
         download(FAREND_SPEECH_URL, FAREND_SPEECH_FILE)

tests/pipelines/test_text_classification.py (+2, -6)

@@ -1,12 +1,8 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import shutil
 import unittest
-import zipfile
-from pathlib import Path

-from modelscope.fileio import File
 from modelscope.models import Model
-from modelscope.models.nlp import BertForSequenceClassification
 from modelscope.pipelines import SequenceClassificationPipeline, pipeline
 from modelscope.preprocessors import SequenceClassificationPreprocessor
 from modelscope.pydatasets import PyDataset
@@ -62,7 +58,7 @@ class SequenceClassificationTest(unittest.TestCase):
                 hub=Hubs.huggingface))
         self.printDataset(result)

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
         text_classification = pipeline(task=Tasks.text_classification)
         result = text_classification(
@@ -74,7 +70,7 @@ class SequenceClassificationTest(unittest.TestCase):
                 hub=Hubs.huggingface))
         self.printDataset(result)

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_dataset(self):
         model = Model.from_pretrained(self.model_id)
         preprocessor = SequenceClassificationPreprocessor(

tests/pipelines/test_text_generation.py (+1, -1)

@@ -68,7 +68,7 @@ class TextGenerationTest(unittest.TestCase):
         pipeline_ins = pipeline(task=Tasks.text_generation, model=model_id)
         print(pipeline_ins(input))

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
         pipeline_ins = pipeline(task=Tasks.text_generation)
         print(pipeline_ins(self.input_zh))

tests/pipelines/test_text_to_speech.py (+4, -7)

@@ -1,7 +1,5 @@
-import time
 import unittest

-import json
 import tensorflow as tf
 # NOTICE: Tensorflow 1.15 seems not so compatible with pytorch.
 # A segmentation fault may be raise by pytorch cpp library
@@ -10,21 +8,20 @@ import tensorflow as tf
 import torch
 from scipy.io.wavfile import write

-from modelscope.fileio import File
 from modelscope.metainfo import Pipelines, Preprocessors
-from modelscope.models import Model, build_model
-from modelscope.models.audio.tts.am import SambertNetHifi16k
-from modelscope.models.audio.tts.vocoder import AttrDict, Hifigan16k
+from modelscope.models import Model
 from modelscope.pipelines import pipeline
 from modelscope.preprocessors import build_preprocessor
-from modelscope.utils.constant import Fields, InputFields, Tasks
+from modelscope.utils.constant import Fields
 from modelscope.utils.logger import get_logger
+from modelscope.utils.test_utils import test_level

 logger = get_logger()


 class TextToSpeechSambertHifigan16kPipelineTest(unittest.TestCase):

+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_pipeline(self):
         lang_type = 'pinyin'
         text = '明天天气怎么样'

tests/pipelines/test_word_segmentation.py (+2, -2)

@@ -37,13 +37,13 @@ class WordSegmentationTest(unittest.TestCase):
             task=Tasks.word_segmentation, model=model, preprocessor=tokenizer)
         print(pipeline_ins(input=self.sentence))

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_model_name(self):
         pipeline_ins = pipeline(
             task=Tasks.word_segmentation, model=self.model_id)
         print(pipeline_ins(input=self.sentence))

-    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
         pipeline_ins = pipeline(task=Tasks.word_segmentation)
         print(pipeline_ins(input=self.sentence))

tests/preprocessors/test_image.py (+0, -1)

@@ -5,7 +5,6 @@ import unittest
 import PIL

 from modelscope.preprocessors import load_image
-from modelscope.utils.logger import get_logger


 class ImagePreprocessorTest(unittest.TestCase):

tests/pydatasets/test_py_dataset.py (+3, -2)

@@ -33,6 +33,7 @@ class ImgPreprocessor(Preprocessor):

 class PyDatasetTest(unittest.TestCase):

+    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_ds_basic(self):
         ms_ds_full = PyDataset.load('squad')
         ms_ds_full_hf = hfdata.load_dataset('squad')
@@ -82,7 +83,7 @@ class PyDatasetTest(unittest.TestCase):
             drop_remainder=True)
         print(next(iter(tf_dataset)))

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     @require_torch
     def test_to_torch_dataset_img(self):
         ms_image_train = PyDataset.from_hf_dataset(
@@ -94,7 +95,7 @@ class PyDatasetTest(unittest.TestCase):
         dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)
         print(next(iter(dataloader)))

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     @require_tf
     def test_to_tf_dataset_img(self):
         import tensorflow as tf
