diff --git a/modelscope/models/cv/video_single_object_tracking/utils/utils.py b/modelscope/models/cv/video_single_object_tracking/utils/utils.py
index 31dd57ff..505b2aa9 100644
--- a/modelscope/models/cv/video_single_object_tracking/utils/utils.py
+++ b/modelscope/models/cv/video_single_object_tracking/utils/utils.py
@@ -238,3 +238,24 @@ def check_box(box: list, image_height, image_width) -> bool:
     if box[3] < 0 or box[3] >= image_height:
         return False
     return True
+
+
+def show_tracking_result(video_in_path, bboxes, video_save_path):
+    cap = cv2.VideoCapture(video_in_path)
+    for i in range(len(bboxes)):
+        box = bboxes[i]
+        success, frame = cap.read()
+        if success is False:
+            raise Exception(video_in_path,
+                            ' can not be correctly decoded by OpenCV.')
+        if i == 0:
+            size = (frame.shape[1], frame.shape[0])
+            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
+            video_writer = cv2.VideoWriter(video_save_path, fourcc,
+                                           cap.get(cv2.CAP_PROP_FPS), size,
+                                           True)
+        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0),
+                      5)
+        video_writer.write(frame)
+    video_writer.release()
+    cap.release()
diff --git a/modelscope/utils/cv/heatmap.py b/modelscope/utils/cv/heatmap.py
new file mode 100644
index 00000000..4d248a92
--- /dev/null
+++ b/modelscope/utils/cv/heatmap.py
@@ -0,0 +1,18 @@
+import cv2
+import numpy as np
+
+
+def numpy_to_cv2img(vis_img):
+    """Convert a np.array heatmap with shape (h, w) to a cv2 img
+
+    Args:
+        vis_img (np.array): input data
+
+    Returns:
+        cv2 img
+    """
+    vis_img = (vis_img - vis_img.min()) / (
+        vis_img.max() - vis_img.min() + 1e-5)
+    vis_img = (vis_img * 255).astype(np.uint8)
+    vis_img = cv2.applyColorMap(vis_img, cv2.COLORMAP_JET)
+    return vis_img
diff --git a/modelscope/utils/cv/image_utils.py b/modelscope/utils/cv/image_utils.py
deleted file mode 100644
index ab076df0..00000000
--- a/modelscope/utils/cv/image_utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import cv2
-import numpy as np
-
-from modelscope.outputs import OutputKeys
-from modelscope.preprocessors.image import load_image
-
-
-def numpy_to_cv2img(img_array):
-    """to convert a np.array with shape(h, w) to cv2 img
-
-    Args:
-        img_array (np.array): input data
-
-    Returns:
-        cv2 img
-    """
-    img_array = (img_array - img_array.min()) / (
-        img_array.max() - img_array.min() + 1e-5)
-    img_array = (img_array * 255).astype(np.uint8)
-    img_array = cv2.applyColorMap(img_array, cv2.COLORMAP_JET)
-    return img_array
-
-
-def draw_joints(image, np_kps, score, threshold=0.2):
-    lst_parent_ids_17 = [0, 0, 0, 1, 2, 0, 0, 5, 6, 7, 8, 5, 6, 11, 12, 13, 14]
-    lst_left_ids_17 = [1, 3, 5, 7, 9, 11, 13, 15]
-    lst_right_ids_17 = [2, 4, 6, 8, 10, 12, 14, 16]
-
-    lst_parent_ids_15 = [0, 0, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 1]
-    lst_left_ids_15 = [2, 3, 4, 8, 9, 10]
-    lst_right_ids_15 = [5, 6, 7, 11, 12, 13]
-
-    if np_kps.shape[0] == 17:
-        lst_parent_ids = lst_parent_ids_17
-        lst_left_ids = lst_left_ids_17
-        lst_right_ids = lst_right_ids_17
-
-    elif np_kps.shape[0] == 15:
-        lst_parent_ids = lst_parent_ids_15
-        lst_left_ids = lst_left_ids_15
-        lst_right_ids = lst_right_ids_15
-
-    for i in range(len(lst_parent_ids)):
-        pid = lst_parent_ids[i]
-        if i == pid:
-            continue
-
-        if (score[i] < threshold or score[1] < threshold):
-            continue
-
-        if i in lst_left_ids and pid in lst_left_ids:
-            color = (0, 255, 0)
-        elif i in lst_right_ids and pid in lst_right_ids:
-            color = (255, 0, 0)
-        else:
-            color = (0, 255, 255)
-
-        cv2.line(image, (int(np_kps[i, 0]), int(np_kps[i, 1])),
-                 (int(np_kps[pid][0]), int(np_kps[pid, 1])), color, 3)
-
-    for i in range(np_kps.shape[0]):
-        if score[i] < threshold:
-            continue
-        cv2.circle(image, (int(np_kps[i, 0]), int(np_kps[i, 1])), 5,
-                   (0, 0, 255), -1)
-
-
-def draw_box(image, box):
-    cv2.rectangle(image, (int(box[0][0]), int(box[0][1])),
-                  (int(box[1][0]), int(box[1][1])), (0, 0, 255), 2)
-
-
-def draw_keypoints(output, original_image):
-    poses = np.array(output[OutputKeys.POSES])
-    scores = np.array(output[OutputKeys.SCORES])
-    boxes = np.array(output[OutputKeys.BOXES])
-    assert len(poses) == len(scores) and len(poses) == len(boxes)
-    image = cv2.imread(original_image, -1)
-    for i in range(len(poses)):
-        draw_box(image, np.array(boxes[i]))
-        draw_joints(image, np.array(poses[i]), np.array(scores[i]))
-    return image
-
-
-def draw_face_detection_result(img_path, detection_result):
-    bboxes = np.array(detection_result[OutputKeys.BOXES])
-    kpss = np.array(detection_result[OutputKeys.KEYPOINTS])
-    scores = np.array(detection_result[OutputKeys.SCORES])
-    img = cv2.imread(img_path)
-    assert img is not None, f"Can't read img: {img_path}"
-    for i in range(len(scores)):
-        bbox = bboxes[i].astype(np.int32)
-        kps = kpss[i].reshape(-1, 2).astype(np.int32)
-        score = scores[i]
-        x1, y1, x2, y2 = bbox
-        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
-        for kp in kps:
-            cv2.circle(img, tuple(kp), 1, (0, 0, 255), 1)
-        cv2.putText(
-            img,
-            f'{score:.2f}', (x1, y2),
-            1,
-            1.0, (0, 255, 0),
-            thickness=1,
-            lineType=8)
-    print(f'Found {len(scores)} faces')
-    return img
-
-
-def created_boxed_image(image_in, box):
-    image = load_image(image_in)
-    img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
-    cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
-                  (0, 255, 0), 3)
-    return image
-
-
-def show_video_tracking_result(video_in_path, bboxes, video_save_path):
-    cap = cv2.VideoCapture(video_in_path)
-    for i in range(len(bboxes)):
-        box = bboxes[i]
-        success, frame = cap.read()
-        if success is False:
-            raise Exception(video_in_path,
-                            ' can not be correctly decoded by OpenCV.')
-        if i == 0:
-            size = (frame.shape[1], frame.shape[0])
-            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
-            video_writer = cv2.VideoWriter(video_save_path, fourcc,
-                                           cap.get(cv2.CAP_PROP_FPS), size,
-                                           True)
-        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0),
-                      5)
-        video_writer.write(frame)
-    video_writer.release
-    cap.release()
diff --git a/modelscope/utils/nlp/nlp_utils.py b/modelscope/utils/nlp/nlp_utils.py
deleted file mode 100644
index 35b374f2..00000000
--- a/modelscope/utils/nlp/nlp_utils.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from typing import List
-
-from modelscope.outputs import OutputKeys
-from modelscope.pipelines.nlp import (ConversationalTextToSqlPipeline,
-                                      DialogStateTrackingPipeline)
-
-
-def text2sql_tracking_and_print_results(
-        test_case, pipelines: List[ConversationalTextToSqlPipeline]):
-    for p in pipelines:
-        last_sql, history = '', []
-        for item in test_case['utterance']:
-            case = {
-                'utterance': item,
-                'history': history,
-                'last_sql': last_sql,
-                'database_id': test_case['database_id'],
-                'local_db_path': test_case['local_db_path']
-            }
-            results = p(case)
-            print({'question': item})
-            print(results)
-            last_sql = results['text']
-            history.append(item)
-
-
-def tracking_and_print_dialog_states(
-        test_case, pipelines: List[DialogStateTrackingPipeline]):
-    import json
-    pipelines_len = len(pipelines)
-    history_states = [{}]
-    utter = {}
-    for step, item in enumerate(test_case):
-        utter.update(item)
-        result = pipelines[step % pipelines_len]({
-            'utter':
-            utter,
-            'history_states':
-            history_states
-        })
-        print(json.dumps(result))
-
-        history_states.extend([result[OutputKeys.OUTPUT], {}])
diff --git a/tests/pipelines/test_action_recognition.py b/tests/pipelines/test_action_recognition.py
index e955eb60..7453f136 100644
--- a/tests/pipelines/test_action_recognition.py
+++ b/tests/pipelines/test_action_recognition.py
@@ -15,6 +15,23 @@ class ActionRecognitionTest(unittest.TestCase):
     def setUp(self) -> None:
         self.model_id = 'damo/cv_TAdaConv_action-recognition'

+    @unittest.skip('deprecated, download model from model hub instead')
+    def test_run_with_direct_file_download(self):
+        model_path = 'https://aquila2-online-models.oss-cn-shanghai.aliyuncs.com/maas_test/pytorch_model.pt'
+        config_path = 'https://aquila2-online-models.oss-cn-shanghai.aliyuncs.com/maas_test/configuration.json'
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            model_file = osp.join(tmp_dir, ModelFile.TORCH_MODEL_FILE)
+            with open(model_file, 'wb') as ofile1:
+                ofile1.write(File.read(model_path))
+            config_file = osp.join(tmp_dir, ModelFile.CONFIGURATION)
+            with open(config_file, 'wb') as ofile2:
+                ofile2.write(File.read(config_path))
+            recognition_pipeline = pipeline(
+                Tasks.action_recognition, model=tmp_dir)
+            result = recognition_pipeline(
+                'data/test/videos/action_recognition_test_video.mp4')
+            print(f'recognition output: {result}.')
+
     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_modelhub(self):
         recognition_pipeline = pipeline(
diff --git a/tests/pipelines/test_body_2d_keypoints.py b/tests/pipelines/test_body_2d_keypoints.py
index d010adc5..eca5e961 100644
--- a/tests/pipelines/test_body_2d_keypoints.py
+++ b/tests/pipelines/test_body_2d_keypoints.py
@@ -9,9 +9,59 @@ from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.pipelines.base import Pipeline
 from modelscope.utils.constant import Tasks
-from modelscope.utils.cv.image_utils import draw_keypoints
 from modelscope.utils.test_utils import test_level

+lst_parent_ids_17 = [0, 0, 0, 1, 2, 0, 0, 5, 6, 7, 8, 5, 6, 11, 12, 13, 14]
+lst_left_ids_17 = [1, 3, 5, 7, 9, 11, 13, 15]
+lst_right_ids_17 = [2, 4, 6, 8, 10, 12, 14, 16]
+lst_spine_ids_17 = [0]
+
+lst_parent_ids_15 = [0, 0, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 1]
+lst_left_ids_15 = [2, 3, 4, 8, 9, 10]
+lst_right_ids_15 = [5, 6, 7, 11, 12, 13]
+lst_spine_ids_15 = [0, 1, 14]
+
+
+def draw_joints(image, np_kps, score, threshold=0.2):
+    if np_kps.shape[0] == 17:
+        lst_parent_ids = lst_parent_ids_17
+        lst_left_ids = lst_left_ids_17
+        lst_right_ids = lst_right_ids_17
+
+    elif np_kps.shape[0] == 15:
+        lst_parent_ids = lst_parent_ids_15
+        lst_left_ids = lst_left_ids_15
+        lst_right_ids = lst_right_ids_15
+
+    for i in range(len(lst_parent_ids)):
+        pid = lst_parent_ids[i]
+        if i == pid:
+            continue
+
+        if (score[i] < threshold or score[1] < threshold):
+            continue
+
+        if i in lst_left_ids and pid in lst_left_ids:
+            color = (0, 255, 0)
+        elif i in lst_right_ids and pid in lst_right_ids:
+            color = (255, 0, 0)
+        else:
+            color = (0, 255, 255)
+
+        cv2.line(image, (int(np_kps[i, 0]), int(np_kps[i, 1])),
+                 (int(np_kps[pid][0]), int(np_kps[pid, 1])), color, 3)
+
+    for i in range(np_kps.shape[0]):
+        if score[i] < threshold:
+            continue
+        cv2.circle(image, (int(np_kps[i, 0]), int(np_kps[i, 1])), 5,
+                   (0, 0, 255), -1)
+
+
+def draw_box(image, box):
+    cv2.rectangle(image, (int(box[0][0]), int(box[0][1])),
+                  (int(box[1][0]), int(box[1][1])), (0, 0, 255), 2)
+

 class Body2DKeypointsTest(unittest.TestCase):
@@ -21,7 +71,14 @@ class Body2DKeypointsTest(unittest.TestCase):

     def pipeline_inference(self, pipeline: Pipeline, pipeline_input):
         output = pipeline(pipeline_input)
-        image = draw_keypoints(output, self.test_image)
+        poses = np.array(output[OutputKeys.POSES])
+        scores = np.array(output[OutputKeys.SCORES])
+        boxes = np.array(output[OutputKeys.BOXES])
+        assert len(poses) == len(scores) and len(poses) == len(boxes)
+        image = cv2.imread(self.test_image, -1)
+        for i in range(len(poses)):
+            draw_box(image, np.array(boxes[i]))
+            draw_joints(image, np.array(poses[i]), np.array(scores[i]))
         cv2.imwrite('pose_keypoint.jpg', image)

     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
diff --git a/tests/pipelines/test_conversational_text_to_sql.py b/tests/pipelines/test_conversational_text_to_sql.py
index 0504cb7c..67a4ce7b 100644
--- a/tests/pipelines/test_conversational_text_to_sql.py
+++ b/tests/pipelines/test_conversational_text_to_sql.py
@@ -9,7 +9,6 @@ from modelscope.pipelines import pipeline
 from modelscope.pipelines.nlp import ConversationalTextToSqlPipeline
 from modelscope.preprocessors import ConversationalTextToSqlPreprocessor
 from modelscope.utils.constant import Tasks
-from modelscope.utils.nlp.nlp_utils import text2sql_tracking_and_print_results
 from modelscope.utils.test_utils import test_level


@@ -26,6 +25,24 @@ class ConversationalTextToSql(unittest.TestCase):
         ]
     }

+    def tracking_and_print_results(
+            self, pipelines: List[ConversationalTextToSqlPipeline]):
+        for my_pipeline in pipelines:
+            last_sql, history = '', []
+            for item in self.test_case['utterance']:
+                case = {
+                    'utterance': item,
+                    'history': history,
+                    'last_sql': last_sql,
+                    'database_id': self.test_case['database_id'],
+                    'local_db_path': self.test_case['local_db_path']
+                }
+                results = my_pipeline(case)
+                print({'question': item})
+                print(results)
+                last_sql = results['text']
+                history.append(item)
+
     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_by_direct_model_download(self):
         cache_path = snapshot_download(self.model_id)
@@ -44,7 +61,7 @@ class ConversationalTextToSql(unittest.TestCase):
                 model=model,
                 preprocessor=preprocessor)
         ]
-        text2sql_tracking_and_print_results(self.test_case, pipelines)
+        self.tracking_and_print_results(pipelines)

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_from_modelhub(self):
@@ -60,7 +77,7 @@ class ConversationalTextToSql(unittest.TestCase):
                 model=model,
                 preprocessor=preprocessor)
         ]
-        text2sql_tracking_and_print_results(self.test_case, pipelines)
+        self.tracking_and_print_results(pipelines)

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_name(self):
@@ -68,12 +85,12 @@ class ConversationalTextToSql(unittest.TestCase):
             pipeline(
                 task=Tasks.conversational_text_to_sql, model=self.model_id)
         ]
-        text2sql_tracking_and_print_results(self.test_case, pipelines)
+        self.tracking_and_print_results(pipelines)

     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
         pipelines = [pipeline(task=Tasks.conversational_text_to_sql)]
-        text2sql_tracking_and_print_results(self.test_case, pipelines)
+        self.tracking_and_print_results(pipelines)


 if __name__ == '__main__':
diff --git a/tests/pipelines/test_crowd_counting.py b/tests/pipelines/test_crowd_counting.py
index 99f5ffd2..1bd5a0dd 100644
--- a/tests/pipelines/test_crowd_counting.py
+++ b/tests/pipelines/test_crowd_counting.py
@@ -2,12 +2,13 @@
 import unittest

 import cv2
+import numpy as np
 from PIL import Image

 from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
-from modelscope.utils.cv.image_utils import numpy_to_cv2img
+from modelscope.utils.cv.heatmap import numpy_to_cv2img
 from modelscope.utils.logger import get_logger
 from modelscope.utils.test_utils import test_level
diff --git a/tests/pipelines/test_dialog_state_tracking.py b/tests/pipelines/test_dialog_state_tracking.py
index b4d05730..2710ec0d 100644
--- a/tests/pipelines/test_dialog_state_tracking.py
+++ b/tests/pipelines/test_dialog_state_tracking.py
@@ -1,14 +1,15 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import unittest
+from typing import List

 from modelscope.hub.snapshot_download import snapshot_download
 from modelscope.models import Model
 from modelscope.models.nlp import SpaceForDialogStateTracking
+from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.pipelines.nlp import DialogStateTrackingPipeline
 from modelscope.preprocessors import DialogStateTrackingPreprocessor
 from modelscope.utils.constant import Tasks
-from modelscope.utils.nlp.nlp_utils import tracking_and_print_dialog_states
 from modelscope.utils.test_utils import test_level


@@ -78,6 +79,24 @@ class DialogStateTrackingTest(unittest.TestCase):
         'User-8': 'Thank you, goodbye',
     }]

+    def tracking_and_print_dialog_states(
+            self, pipelines: List[DialogStateTrackingPipeline]):
+        import json
+        pipelines_len = len(pipelines)
+        history_states = [{}]
+        utter = {}
+        for step, item in enumerate(self.test_case):
+            utter.update(item)
+            result = pipelines[step % pipelines_len]({
+                'utter':
+                utter,
+                'history_states':
+                history_states
+            })
+            print(json.dumps(result))
+
+            history_states.extend([result[OutputKeys.OUTPUT], {}])
+
     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_by_direct_model_download(self):
         cache_path = snapshot_download(self.model_id, revision='update')
@@ -92,7 +111,7 @@ class DialogStateTrackingTest(unittest.TestCase):
                 model=model,
                 preprocessor=preprocessor)
         ]
-        tracking_and_print_dialog_states(pipelines)
+        self.tracking_and_print_dialog_states(pipelines)

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_from_modelhub(self):
@@ -109,7 +128,7 @@ class DialogStateTrackingTest(unittest.TestCase):
             preprocessor=preprocessor)
         ]

-        tracking_and_print_dialog_states(pipelines)
+        self.tracking_and_print_dialog_states(pipelines)

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_model_name(self):
@@ -119,7 +138,7 @@ class DialogStateTrackingTest(unittest.TestCase):
             model=self.model_id,
             model_revision='update')
         ]
-        tracking_and_print_dialog_states(pipelines)
+        self.tracking_and_print_dialog_states(pipelines)


 if __name__ == '__main__':
diff --git a/tests/pipelines/test_face_detection.py b/tests/pipelines/test_face_detection.py
index 03dd75a6..d4872e0a 100644
--- a/tests/pipelines/test_face_detection.py
+++ b/tests/pipelines/test_face_detection.py
@@ -9,7 +9,6 @@ from modelscope.msdatasets import MsDataset
 from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
-from modelscope.utils.cv.image_utils import draw_face_detection_result
 from modelscope.utils.test_utils import test_level


@@ -18,21 +17,46 @@ class FaceDetectionTest(unittest.TestCase):
     def setUp(self) -> None:
         self.model_id = 'damo/cv_resnet_facedetection_scrfd10gkps'

-    def show_result(self, img_path, detection_result):
-        img = draw_face_detection_result(img_path, detection_result)
+    def show_result(self, img_path, bboxes, kpss, scores):
+        bboxes = np.array(bboxes)
+        kpss = np.array(kpss)
+        scores = np.array(scores)
+        img = cv2.imread(img_path)
+        assert img is not None, f"Can't read img: {img_path}"
+        for i in range(len(scores)):
+            bbox = bboxes[i].astype(np.int32)
+            kps = kpss[i].reshape(-1, 2).astype(np.int32)
+            score = scores[i]
+            x1, y1, x2, y2 = bbox
+            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
+            for kp in kps:
+                cv2.circle(img, tuple(kp), 1, (0, 0, 255), 1)
+            cv2.putText(
+                img,
+                f'{score:.2f}', (x1, y2),
+                1,
+                1.0, (0, 255, 0),
+                thickness=1,
+                lineType=8)
         cv2.imwrite('result.png', img)
-        print(f'output written to {osp.abspath("result.png")}')
+        print(
+            f'Found {len(scores)} faces, output written to {osp.abspath("result.png")}'
+        )

     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_dataset(self):
         input_location = ['data/test/images/face_detection.png']
+        # alternatively:
+        # input_location = '/dir/to/images'
         dataset = MsDataset.load(input_location, target='image')
         face_detection = pipeline(Tasks.face_detection, model=self.model_id)
         # note that for dataset output, the inference-output is a Generator that can be iterated.
         result = face_detection(dataset)
         result = next(result)
-        self.show_result(input_location[0], result)
+        self.show_result(input_location[0], result[OutputKeys.BOXES],
+                         result[OutputKeys.KEYPOINTS],
+                         result[OutputKeys.SCORES])

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_modelhub(self):
@@ -40,14 +64,18 @@ class FaceDetectionTest(unittest.TestCase):

         img_path = 'data/test/images/face_detection.png'
         result = face_detection(img_path)
-        self.show_result(img_path, result)
+        self.show_result(img_path, result[OutputKeys.BOXES],
+                         result[OutputKeys.KEYPOINTS],
+                         result[OutputKeys.SCORES])

     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
         face_detection = pipeline(Tasks.face_detection)
         img_path = 'data/test/images/face_detection.png'
         result = face_detection(img_path)
-        self.show_result(img_path, result)
+        self.show_result(img_path, result[OutputKeys.BOXES],
+                         result[OutputKeys.KEYPOINTS],
+                         result[OutputKeys.SCORES])


 if __name__ == '__main__':
diff --git a/tests/pipelines/test_face_image_generation.py b/tests/pipelines/test_face_image_generation.py
index c758ea3a..fc2c58cc 100644
--- a/tests/pipelines/test_face_image_generation.py
+++ b/tests/pipelines/test_face_image_generation.py
@@ -1,4 +1,5 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
+import os
 import os.path as osp
 import unittest
diff --git a/tests/pipelines/test_face_recognition.py b/tests/pipelines/test_face_recognition.py
index 015205d6..20e05f65 100644
--- a/tests/pipelines/test_face_recognition.py
+++ b/tests/pipelines/test_face_recognition.py
@@ -21,6 +21,7 @@ class FaceRecognitionTest(unittest.TestCase):

         face_recognition = pipeline(
             Tasks.face_recognition, model=self.model_id)
+        # note that for dataset output, the inference-output is a Generator that can be iterated.
         emb1 = face_recognition(img1)[OutputKeys.IMG_EMBEDDING]
         emb2 = face_recognition(img2)[OutputKeys.IMG_EMBEDDING]
         sim = np.dot(emb1[0], emb2[0])
diff --git a/tests/pipelines/test_image2image_generation.py b/tests/pipelines/test_image2image_generation.py
index 487fe4d0..81aae81e 100644
--- a/tests/pipelines/test_image2image_generation.py
+++ b/tests/pipelines/test_image2image_generation.py
@@ -3,7 +3,6 @@ import unittest

 from torchvision.utils import save_image

-from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from modelscope.utils.test_utils import test_level
@@ -28,13 +27,13 @@ class Image2ImageGenerationTest(unittest.TestCase):
         result2 = img2img_gen_pipeline(('data/test/images/img2img_input.jpg',
                                         'data/test/images/img2img_style.jpg'))
         save_image(
-            result1[OutputKeys.OUTPUT_IMG].clamp(-1, 1),
+            result1['output_img'].clamp(-1, 1),
             'result1.jpg',
             range=(-1, 1),
             normalize=True,
             nrow=4)
         save_image(
-            result2[OutputKeys.OUTPUT_IMG].clamp(-1, 1),
+            result2['output_img'].clamp(-1, 1),
             'result2.jpg',
             range=(-1, 1),
             normalize=True,
diff --git a/tests/pipelines/test_image_matting.py b/tests/pipelines/test_image_matting.py
index 83b7fee2..1bebf3df 100644
--- a/tests/pipelines/test_image_matting.py
+++ b/tests/pipelines/test_image_matting.py
@@ -18,6 +18,19 @@ class ImageMattingTest(unittest.TestCase):
     def setUp(self) -> None:
         self.model_id = 'damo/cv_unet_image-matting'

+    @unittest.skip('deprecated, download model from model hub instead')
+    def test_run_with_direct_file_download(self):
+        model_path = 'http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs' \
+                     '.com/data/test/maas/image_matting/matting_person.pb'
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            model_file = osp.join(tmp_dir, ModelFile.TF_GRAPH_FILE)
+            with open(model_file, 'wb') as ofile:
+                ofile.write(File.read(model_path))
+            img_matting = pipeline(Tasks.portrait_matting, model=tmp_dir)
+
+            result = img_matting('data/test/images/image_matting.png')
+            cv2.imwrite('result.png', result[OutputKeys.OUTPUT_IMG])
+
     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_dataset(self):
         input_location = ['data/test/images/image_matting.png']
diff --git a/tests/pipelines/test_image_style_transfer.py b/tests/pipelines/test_image_style_transfer.py
index 4e5bb69b..964e47ac 100644
--- a/tests/pipelines/test_image_style_transfer.py
+++ b/tests/pipelines/test_image_style_transfer.py
@@ -15,7 +15,7 @@ class ImageStyleTransferTest(unittest.TestCase):
     def setUp(self) -> None:
         self.model_id = 'damo/cv_aams_style-transfer_damo'

-    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
+    @unittest.skip('deprecated, download model from model hub instead')
     def test_run_by_direct_model_download(self):
         snapshot_path = snapshot_download(self.model_id)
         print('snapshot_path: {}'.format(snapshot_path))
diff --git a/tests/pipelines/test_key_word_spotting_farfield.py b/tests/pipelines/test_key_word_spotting_farfield.py
index 4a732950..0b64831a 100644
--- a/tests/pipelines/test_key_word_spotting_farfield.py
+++ b/tests/pipelines/test_key_word_spotting_farfield.py
@@ -1,6 +1,7 @@
 import os.path
 import unittest

+from modelscope.fileio import File
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from modelscope.utils.test_utils import test_level
diff --git a/tests/pipelines/test_ofa_tasks.py b/tests/pipelines/test_ofa_tasks.py
index 69bccac1..ab10f573 100644
--- a/tests/pipelines/test_ofa_tasks.py
+++ b/tests/pipelines/test_ofa_tasks.py
@@ -4,13 +4,14 @@ import unittest
 from os import path as osp

 import cv2
+import numpy as np
 from PIL import Image

 from modelscope.models import Model
 from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
+from modelscope.preprocessors.image import load_image
 from modelscope.utils.constant import Tasks
-from modelscope.utils.cv.image_utils import created_boxed_image
 from modelscope.utils.test_utils import test_level


@@ -21,9 +22,11 @@ class OfaTasksTest(unittest.TestCase):
         os.makedirs(self.output_dir, exist_ok=True)

     def save_img(self, image_in, box, image_out):
-        cv2.imwrite(
-            osp.join(self.output_dir, image_out),
-            created_boxed_image(image_in, box))
+        image = load_image(image_in)
+        img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
+        cv2.rectangle(img, (int(box[0]), int(box[1])),
+                      (int(box[2]), int(box[3])), (0, 255, 0), 3)
+        cv2.imwrite(osp.join(self.output_dir, image_out), img)

     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_image_captioning_with_model(self):
diff --git a/tests/pipelines/test_person_image_cartoon.py b/tests/pipelines/test_person_image_cartoon.py
index bdbf8b61..8b5384ee 100644
--- a/tests/pipelines/test_person_image_cartoon.py
+++ b/tests/pipelines/test_person_image_cartoon.py
@@ -24,6 +24,19 @@ class ImageCartoonTest(unittest.TestCase):
         cv2.imwrite('result.png', result[OutputKeys.OUTPUT_IMG])
         print(f'Output written to {osp.abspath("result.png")}')

+    @unittest.skip('deprecated, download model from model hub instead')
+    def test_run_by_direct_model_download(self):
+        model_dir = './assets'
+        if not os.path.exists(model_dir):
+            os.system(
+                'wget https://invi-label.oss-cn-shanghai.aliyuncs.com/label/model/cartoon/assets.zip'
+            )
+            os.system('unzip assets.zip')
+
+        img_cartoon = pipeline(
+            Tasks.image_portrait_stylization, model=model_dir)
+        self.pipeline_inference(img_cartoon, self.test_image)
+
     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_modelhub(self):
         img_cartoon = pipeline(
diff --git a/tests/pipelines/test_skin_retouching.py b/tests/pipelines/test_skin_retouching.py
index c6dbee2c..a10af416 100644
--- a/tests/pipelines/test_skin_retouching.py
+++ b/tests/pipelines/test_skin_retouching.py
@@ -23,9 +23,10 @@ class SkinRetouchingTest(unittest.TestCase):
         cv2.imwrite('result_skinretouching.png', result[OutputKeys.OUTPUT_IMG])
         print(f'Output written to {osp.abspath("result_skinretouching.png")}')

-    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
+    @unittest.skip('deprecated, download model from model hub instead')
     def test_run_by_direct_model_download(self):
         model_dir = snapshot_download(self.model_id)
+
         skin_retouching = pipeline(Tasks.skin_retouching, model=model_dir)
         self.pipeline_inference(skin_retouching, self.test_image)

diff --git a/tests/pipelines/test_video_single_object_tracking.py b/tests/pipelines/test_video_single_object_tracking.py
index fc228cd8..f5d4714c 100644
--- a/tests/pipelines/test_video_single_object_tracking.py
+++ b/tests/pipelines/test_video_single_object_tracking.py
@@ -1,10 +1,11 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import unittest

+from modelscope.models.cv.video_single_object_tracking.utils.utils import \
+    show_tracking_result
 from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
-from modelscope.utils.cv.image_utils import show_video_tracking_result
 from modelscope.utils.test_utils import test_level


@@ -21,8 +22,8 @@ class SingleObjectTracking(unittest.TestCase):
         init_bbox = [414, 343, 514, 449]  # [x1, y1, x2, y2]
         result = video_single_object_tracking((video_path, init_bbox))
         print('result is : ', result[OutputKeys.BOXES])
-        show_video_tracking_result(video_path, result[OutputKeys.BOXES],
-                                   './tracking_result.avi')
+        show_tracking_result(video_path, result[OutputKeys.BOXES],
+                             './tracking_result.avi')

     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_modelhub_default_model(self):
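
Usage sketch (illustration only, not part of the patch): a minimal example of calling the two helpers touched above after this refactor, namely numpy_to_cv2img from the new modelscope/utils/cv/heatmap.py and show_tracking_result from the tracking utils. The heatmap array, the box list and the file paths below are made-up placeholders.

import cv2
import numpy as np

from modelscope.models.cv.video_single_object_tracking.utils.utils import \
    show_tracking_result
from modelscope.utils.cv.heatmap import numpy_to_cv2img

# (h, w) float heatmap -> uint8 BGR visualization with a JET colormap
density_map = np.random.rand(240, 320).astype(np.float32)  # placeholder data
cv2.imwrite('heatmap_vis.png', numpy_to_cv2img(density_map))

# one [x1, y1, x2, y2] box per frame, e.g. the list returned under OutputKeys.BOXES
boxes = [[414, 343, 514, 449]] * 10  # placeholder boxes
show_tracking_result('data/test/videos/dog.avi', boxes,  # placeholder input video
                     './tracking_result.avi')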