
[to #43727050] remove decord from requirements and print installation hint when using specific model

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9615408
master · wenmeng.zwm committed 3 years ago · commit 0e2202fa1a
8 changed files with 19 additions and 10 deletions
  1. +1 -1  modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
  2. +1 -1  modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py
  3. +2 -2  modelscope/pipelines/cv/live_category_pipeline.py
  4. +2 -2  modelscope/pipelines/cv/video_category_pipeline.py
  5. +1 -1  modelscope/preprocessors/video.py
  6. +5 -0  modelscope/utils/error.py
  7. +7 -2  modelscope/utils/import_utils.py
  8. +0 -1  requirements/cv.txt

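Taken together, the changes below make decord an optional dependency: it is dropped from requirements/cv.txt, the pipelines that need it import it at module level again, and the import machinery is taught to print an installation hint when the package is missing. A minimal sketch of that optional-dependency pattern, for illustration only (modelscope's real check lives in modelscope/utils/import_utils.py and modelscope/utils/error.py):

# Illustrative guard, not modelscope code: fail early with a pip hint
# when decord is not installed, then import it normally.
import importlib.util

if importlib.util.find_spec('decord') is None:
    raise ImportError(
        'This model requires the decord library but it was not found in your '
        'environment. You can install it with pip: `pip install decord>=0.6.0`')

from decord import VideoReader, cpu  # safe once the check above has passed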
modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py  (+1, -1)

@@ -5,6 +5,7 @@ from typing import Any, Dict
 import json
 import numpy as np
 import torch
+from decord import VideoReader, cpu
 from PIL import Image

 from modelscope.metainfo import Models
@@ -131,7 +132,6 @@ class VideoCLIPForMultiModalEmbedding(TorchModel):
         end_time = end_time + 1

         if exists(video_path):
-            from decord import VideoReader, cpu
             vreader = VideoReader(video_path, ctx=cpu(0))
         else:
             logger.error('non video input, output is wrong!!!')

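The removed in-function import above was the only decord usage in this model file; the surrounding code opens the clip with a CPU-backed VideoReader between start_time and end_time. A hedged sketch of that access pattern (the helper name and the frame-selection logic are illustrative, not the model's exact sampling code):

# Illustrative: map a [start_time, end_time] window to frame indices and decode them.
import numpy as np
from decord import VideoReader, cpu

def read_clip(video_path, start_time, end_time, num_frames=12):
    vreader = VideoReader(video_path, ctx=cpu(0))      # decode on CPU
    fps = vreader.get_avg_fps()
    start = max(int(start_time * fps), 0)
    end = min(int(end_time * fps), len(vreader) - 1)
    indices = np.linspace(start, end, num_frames).astype(int)
    return vreader.get_batch(indices).asnumpy()        # (num_frames, H, W, 3) uint8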

modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py  (+1, -1)

@@ -1,6 +1,7 @@
 import os.path as osp
 from typing import Any, Dict

+import decord
 import numpy as np
 import torch
 import torchvision.transforms.functional as TF
@@ -45,7 +46,6 @@ class CMDSSLVideoEmbeddingPipeline(Pipeline):
         logger.info('load model done')

     def preprocess(self, input: Input) -> Dict[str, Any]:
-        import decord
         decord.bridge.set_bridge('native')

         transforms = VCompose([

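This pipeline's preprocess calls decord.bridge.set_bridge('native'), which controls the container type decord returns frames in. A small sketch of the difference (the file path is a placeholder):

# Illustrative: decord's bridge decides the return type of frame reads.
import decord
from decord import VideoReader, cpu

vr = VideoReader('example.mp4', ctx=cpu(0))   # placeholder path

decord.bridge.set_bridge('native')            # frames come back as decord NDArrays
np_frame = vr[0].asnumpy()                    # convert explicitly when numpy is needed

decord.bridge.set_bridge('torch')             # frames come back as torch tensors
torch_frame = vr[0]                           # torch.Tensor of shape (H, W, 3)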

modelscope/pipelines/cv/live_category_pipeline.py  (+2, -2)

@@ -2,12 +2,14 @@
 import os.path as osp
 from typing import Any, Dict

+import decord
 import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import torchvision.models as models
 import torchvision.transforms.functional as TF
+from decord import VideoReader, cpu
 from PIL import Image

 from modelscope.metainfo import Pipelines
@@ -56,8 +58,6 @@ class LiveCategoryPipeline(Pipeline):


     def preprocess(self, input: Input) -> Dict[str, Any]:
         if isinstance(input, str):
-            import decord
-            from decord import VideoReader, cpu
             decord.bridge.set_bridge('native')
             vr = VideoReader(input, ctx=cpu(0))
             indices = np.linspace(0, len(vr) - 1, 4).astype(int)

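The preprocess shown above samples 4 evenly spaced frames across the whole video before handing them to the torchvision transforms this file imports. A hedged sketch of that sampling pattern (the helper name and the PIL conversion are illustrative, not the pipeline's exact code; video_category_pipeline.py below does the same with 16 frames):

# Illustrative: N evenly spaced frames, returned as PIL images for torchvision transforms.
import decord
import numpy as np
from decord import VideoReader, cpu
from PIL import Image

def sample_frames(video_path, num_frames=4):
    decord.bridge.set_bridge('native')
    vr = VideoReader(video_path, ctx=cpu(0))
    indices = np.linspace(0, len(vr) - 1, num_frames).astype(int)
    batch = vr.get_batch(indices).asnumpy()   # (num_frames, H, W, 3)
    return [Image.fromarray(frame) for frame in batch]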

modelscope/pipelines/cv/video_category_pipeline.py  (+2, -2)

@@ -2,6 +2,7 @@
 import os.path as osp
 from typing import Any, Dict

+import decord
 import json
 import numpy as np
 import torch
@@ -9,6 +10,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import torchvision.models as models
 import torchvision.transforms.functional as TF
+from decord import VideoReader, cpu
 from PIL import Image

 from modelscope.metainfo import Pipelines
@@ -67,8 +69,6 @@ class VideoCategoryPipeline(Pipeline):


     def preprocess(self, input: Input) -> Dict[str, Any]:
         if isinstance(input, str):
-            import decord
-            from decord import VideoReader, cpu
             decord.bridge.set_bridge('native')
             vr = VideoReader(input, ctx=cpu(0))
             indices = np.linspace(0, len(vr) - 1, 16).astype(int)


modelscope/preprocessors/video.py  (+1, -1)

@@ -6,6 +6,7 @@ import torch
 import torch.utils.data
 import torch.utils.dlpack as dlpack
 import torchvision.transforms._transforms_video as transforms
+from decord import VideoReader
 from torchvision.transforms import Compose

@@ -124,7 +125,6 @@ def _decode_video(cfg, path):
     Returns:
         frames (Tensor): video tensor data
     """
-    from decord import VideoReader
     vr = VideoReader(path)

     num_clips_per_video = cfg.TEST.NUM_ENSEMBLE_VIEWS


modelscope/utils/error.py  (+5, -0)

@@ -91,3 +91,8 @@ GENERAL_IMPORT_ERROR = """
 {0} requires the REQ library but it was not found in your environment. You can install it with pip:
 `pip install REQ`
 """
+
+DECORD_IMPORT_ERROR = """
+{0} requires the decord library but it was not found in your environment. You can install it with pip:
+`pip install decord>=0.6.0`
+"""

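DECORD_IMPORT_ERROR follows the same convention as GENERAL_IMPORT_ERROR above: a template whose {0} slot is filled with the name of whatever needs the package. A minimal illustration of how the template is rendered (the module name is just an example):

# Illustrative rendering of the new error template.
DECORD_IMPORT_ERROR = """
{0} requires the decord library but it was not found in your environment. You can install it with pip:
`pip install decord>=0.6.0`
"""

print(DECORD_IMPORT_ERROR.format('modelscope.pipelines.cv.live_category_pipeline'))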
modelscope/utils/import_utils.py  (+7, -2)

@@ -287,7 +287,8 @@ REQUIREMENTS_MAAPING = OrderedDict([
     ('espnet', (is_espnet_available,
                 GENERAL_IMPORT_ERROR.replace('REQ', 'espnet'))),
     ('easyasr', (is_package_available('easyasr'), AUDIO_IMPORT_ERROR)),
-    ('kwsbp', (is_package_available('kwsbp'), AUDIO_IMPORT_ERROR))
+    ('kwsbp', (is_package_available('kwsbp'), AUDIO_IMPORT_ERROR)),
+    ('decord', (is_package_available('decord'), DECORD_IMPORT_ERROR)),
 ])

 SYSTEM_PACKAGE = set(['os', 'sys', 'typing'])
@@ -308,7 +309,7 @@ def requires(obj, requirements):
         if req in REQUIREMENTS_MAAPING:
             check = REQUIREMENTS_MAAPING[req]
         else:
-            check_fn = functools.partial(is_package_available, req)
+            check_fn = is_package_available(req)
             err_msg = GENERAL_IMPORT_ERROR.replace('REQ', req)
             check = (check_fn, err_msg)
         checks.append(check)
@@ -433,6 +434,10 @@ class LazyImportModule(ModuleType):
         if signature in LazyImportModule.AST_INDEX[INDEX_KEY]:
             mod_index = LazyImportModule.AST_INDEX[INDEX_KEY][signature]
             module_name = mod_index[MODULE_KEY]
+            if module_name in LazyImportModule.AST_INDEX[REQUIREMENT_KEY]:
+                requirements = LazyImportModule.AST_INDEX[REQUIREMENT_KEY][
+                    module_name]
+                requires(module_name, requirements)
             importlib.import_module(module_name)
         else:
             logger.warning(f'{signature} not found in ast index file')

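These two hunks are what actually surface the hint: the REQUIREMENTS_MAAPING entry pairs decord with DECORD_IMPORT_ERROR, and LazyImportModule now runs requires() over a module's recorded requirements before importing it, so a missing package fails with the pip hint instead of a bare ModuleNotFoundError. A condensed, hedged sketch of that flow; the helper implementations and the deferred-check behaviour of is_package_available are assumptions, not the exact modelscope code:

# Condensed sketch of the requirement check; names mirror the diff, implementations are simplified.
import functools
import importlib.util

def is_package_available(pkg_name):
    # Assumed behaviour: return a deferred check that answers "is pkg_name importable?"
    # only when it is actually called.
    return functools.partial(
        lambda name: importlib.util.find_spec(name) is not None, pkg_name)

DECORD_IMPORT_ERROR = """
{0} requires the decord library but it was not found in your environment. You can install it with pip:
`pip install decord>=0.6.0`
"""

REQUIREMENTS_MAAPING = {
    'decord': (is_package_available('decord'), DECORD_IMPORT_ERROR),
}

def requires(obj, requirements):
    # Collect every unmet requirement and raise a single ImportError with the hints.
    failed = []
    for req in requirements:
        check_fn, err_msg = REQUIREMENTS_MAAPING.get(
            req, (is_package_available(req), f'{{0}} requires the {req} library.'))
        if not check_fn():
            failed.append(err_msg.format(obj))
    if failed:
        raise ImportError(''.join(failed))

# e.g. what the lazy importer would do before loading a video pipeline module:
requires('modelscope.pipelines.cv.live_category_pipeline', ['decord'])

Under this reading, the old fallback check_fn = functools.partial(is_package_available, req) would, when called, return another callable rather than a boolean, so it would always look truthy and the fallback check could never fail; that is presumably why the hunk swaps it for a direct is_package_available(req) call. This is an inference from the diff, not something stated in the commit message.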
requirements/cv.txt  (+0, -1)

@@ -1,4 +1,3 @@
-decord>=0.6.0
 easydict
 # tensorflow 1.x compatability requires numpy version to be cap at 1.18
 numpy<=1.18

