baiguan.yt (yingda.chen) committed 3 years ago
commit ce66402345
4 changed files with 17 additions and 20 deletions
  1. +0 -4    modelscope/models/cv/product_retrieval_embedding/item_embedding.py
  2. +4 -1    modelscope/pipelines/cv/face_image_generation_pipeline.py
  3. +9 -14   modelscope/pipelines/cv/image_colorization_pipeline.py
  4. +4 -1    modelscope/pipelines/cv/image_super_resolution_pipeline.py

+0 -4  modelscope/models/cv/product_retrieval_embedding/item_embedding.py

@@ -1,9 +1,5 @@
import os
import time

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F



+4 -1  modelscope/pipelines/cv/face_image_generation_pipeline.py

@@ -29,7 +29,10 @@ class FaceImageGenerationPipeline(Pipeline):
             model: model id on modelscope hub.
         """
         super().__init__(model=model, **kwargs)
-        self.device = 'cpu'
+        if torch.cuda.is_available():
+            self.device = torch.device('cuda')
+        else:
+            self.device = torch.device('cpu')
         self.size = 1024
         self.latent = 512
         self.n_mlp = 8

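The CUDA check added above is the same device-selection pattern applied to all three pipelines in this commit. As a minimal standalone sketch (the helper name is hypothetical, not part of this change):

import torch

def select_device() -> torch.device:
    # Prefer the GPU when CUDA is available, otherwise fall back to the CPU,
    # mirroring the if/else added in these pipelines.
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')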

+9 -14  modelscope/pipelines/cv/image_colorization_pipeline.py

@@ -12,7 +12,7 @@ from modelscope.models.cv.image_colorization import (DynamicUnetDeep,
 from modelscope.outputs import OutputKeys
 from modelscope.pipelines.base import Input, Pipeline
 from modelscope.pipelines.builder import PIPELINES
-from modelscope.preprocessors import load_image
+from modelscope.preprocessors import LoadImage, load_image
 from modelscope.utils.constant import ModelFile, Tasks
 from modelscope.utils.logger import get_logger

@@ -31,7 +31,13 @@ class ImageColorizationPipeline(Pipeline):
"""
super().__init__(model=model, **kwargs)
self.cut = 8
self.size = 1024 if self.device_name == 'cpu' else 512
self.size = 512
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.size = 1024

self.orig_img = None
self.model_type = 'stable'
self.norm = transforms.Compose([
@@ -82,18 +88,7 @@ class ImageColorizationPipeline(Pipeline):
         logger.info('load model done')

     def preprocess(self, input: Input) -> Dict[str, Any]:
-        if isinstance(input, str):
-            img = load_image(input).convert('LA').convert('RGB')
-        elif isinstance(input, Image.Image):
-            img = input.convert('LA').convert('RGB')
-        elif isinstance(input, np.ndarray):
-            if len(input.shape) == 2:
-                input = cv2.cvtColor(input, cv2.COLOR_GRAY2BGR)
-            img = input[:, :, ::-1]  # in rgb order
-            img = PIL.Image.fromarray(img).convert('LA').convert('RGB')
-        else:
-            raise TypeError(f'input should be either str, PIL.Image,'
-                            f' np.array, but got {type(input)}')
+        img = LoadImage.convert_to_img(input).convert('LA').convert('RGB')

         self.wide, self.height = img.size
         if self.wide * self.height > self.size * self.size:

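The removed isinstance branches are collapsed into a single call to LoadImage.convert_to_img, whose body is not shown in this diff. A rough sketch of what that helper is assumed to do, based on the branches it replaces (Image.open stands in for the original load_image, which also accepts URLs):

import cv2
import numpy as np
from PIL import Image

def convert_to_img(input):
    # Assumed behavior: normalize a path string, a PIL image, or a numpy
    # array (BGR order, as read by OpenCV) into a PIL image in RGB mode.
    if isinstance(input, str):
        img = Image.open(input).convert('RGB')
    elif isinstance(input, Image.Image):
        img = input.convert('RGB')
    elif isinstance(input, np.ndarray):
        if input.ndim == 2:
            input = cv2.cvtColor(input, cv2.COLOR_GRAY2BGR)
        img = Image.fromarray(input[:, :, ::-1])  # BGR -> RGB
    else:
        raise TypeError(f'input should be str, PIL.Image or np.ndarray, '
                        f'but got {type(input)}')
    return img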

+4 -1  modelscope/pipelines/cv/image_super_resolution_pipeline.py

@@ -28,7 +28,10 @@ class ImageSuperResolutionPipeline(Pipeline):
             model: model id on modelscope hub.
         """
         super().__init__(model=model, **kwargs)
-        self.device = 'cpu'
+        if torch.cuda.is_available():
+            self.device = torch.device('cuda')
+        else:
+            self.device = torch.device('cpu')
         self.num_feat = 64
         self.num_block = 23
         self.scale = 4

