From 2e884cfdcbfea39772bc7ac80c7bd57c6eaed43b Mon Sep 17 00:00:00 2001
From: "yingda.chen"
Date: Mon, 1 Aug 2022 16:50:55 +0800
Subject: [PATCH] [to #42322933] specify torch model to inherit from TorchModel

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9588834
---
 modelscope/models/audio/ans/frcrn.py                      | 5 +++--
 modelscope/models/cv/action_recognition/models.py         | 1 -
 modelscope/models/multi_modal/clip/clip_model.py          | 4 ++--
 modelscope/models/multi_modal/imagen/imagen_model.py      | 3 +--
 .../mmr/models/clip_for_mm_video_embedding.py             | 5 ++---
 .../multi_modal/mplug_for_visual_question_answering.py    | 5 +++--
 modelscope/models/multi_modal/ofa_for_all_tasks.py        | 5 +++--
 .../models/nlp/bert_for_sequence_classification.py        | 4 ++--
 modelscope/models/nlp/csanmt_for_translation.py           | 5 +----
 modelscope/models/nlp/masked_language.py                  | 7 ++++---
 .../models/nlp/nncrf_for_named_entity_recognition.py      | 7 ++-----
 .../models/nlp/sbert_for_sequence_classification.py       | 4 ++--
 modelscope/models/nlp/sbert_for_token_classification.py   | 5 +++--
 .../models/nlp/sbert_for_zero_shot_classification.py      | 4 ++--
 .../models/nlp/space_for_dialog_intent_prediction.py      | 7 ++++---
 modelscope/models/nlp/space_for_dialog_modeling.py        | 7 ++++---
 modelscope/models/nlp/space_for_dialog_state_tracking.py  | 9 ++++-----
 17 files changed, 42 insertions(+), 45 deletions(-)

diff --git a/modelscope/models/audio/ans/frcrn.py b/modelscope/models/audio/ans/frcrn.py
index 0ed18d6a..cc580117 100644
--- a/modelscope/models/audio/ans/frcrn.py
+++ b/modelscope/models/audio/ans/frcrn.py
@@ -6,7 +6,8 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import ModelFile, Tasks
 from .conv_stft import ConviSTFT, ConvSTFT
@@ -59,7 +60,7 @@ class FTB(nn.Module):
 
 @MODELS.register_module(
     Tasks.speech_signal_process, module_name=Models.speech_frcrn_ans_cirm_16k)
-class FRCRNModel(Model):
+class FRCRNModel(TorchModel):
     r""" A decorator of FRCRN for integrating into modelscope framework """
 
     def __init__(self, model_dir: str, *args, **kwargs):
diff --git a/modelscope/models/cv/action_recognition/models.py b/modelscope/models/cv/action_recognition/models.py
index e85b6d81..48e75ae1 100644
--- a/modelscope/models/cv/action_recognition/models.py
+++ b/modelscope/models/cv/action_recognition/models.py
@@ -1,4 +1,3 @@
-import torch
 import torch.nn as nn
 
 from .tada_convnext import TadaConvNeXt
diff --git a/modelscope/models/multi_modal/clip/clip_model.py b/modelscope/models/multi_modal/clip/clip_model.py
index eafb3902..e092f4af 100644
--- a/modelscope/models/multi_modal/clip/clip_model.py
+++ b/modelscope/models/multi_modal/clip/clip_model.py
@@ -13,7 +13,7 @@ from torch.distributed.nn.functional import \
 from torchvision.transforms import Compose, Normalize, Resize, ToTensor
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 from modelscope.models.builder import MODELS
 from modelscope.models.multi_modal.clip.clip_bert import TextTransformer
 from modelscope.models.multi_modal.clip.clip_vit import VisionTransformer
@@ -116,7 +116,7 @@ class CLIPModel(nn.Module):
 
 
 @MODELS.register_module(Tasks.multi_modal_embedding, module_name=Models.clip)
-class CLIPForMultiModalEmbedding(Model):
+class CLIPForMultiModalEmbedding(TorchModel):
 
     def __init__(self, model_dir, device_id=-1):
         super().__init__(model_dir=model_dir, device_id=device_id)
diff --git a/modelscope/models/multi_modal/imagen/imagen_model.py b/modelscope/models/multi_modal/imagen/imagen_model.py
index dd00ca07..37dacb71 100644
--- a/modelscope/models/multi_modal/imagen/imagen_model.py
+++ b/modelscope/models/multi_modal/imagen/imagen_model.py
@@ -6,10 +6,9 @@ import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from PIL import Image
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import Model
 from modelscope.models.builder import MODELS
 from modelscope.models.multi_modal.imagen.diffusion import (GaussianDiffusion,
                                                             beta_schedule)
diff --git a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
index 18f61cc4..657f52f8 100644
--- a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
+++ b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
@@ -1,4 +1,3 @@
-import os
 import random
 from os.path import exists
 from typing import Any, Dict
@@ -9,7 +8,7 @@ import torch
 from PIL import Image
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import ModelFile, Tasks
 from modelscope.utils.logger import get_logger
@@ -22,7 +21,7 @@ logger = get_logger()
 
 @MODELS.register_module(
     Tasks.video_multi_modal_embedding, module_name=Models.video_clip)
-class VideoCLIPForMultiModalEmbedding(Model):
+class VideoCLIPForMultiModalEmbedding(TorchModel):
 
     def __init__(self, model_dir, device_id=-1):
         super().__init__(model_dir=model_dir, device_id=device_id)
diff --git a/modelscope/models/multi_modal/mplug_for_visual_question_answering.py b/modelscope/models/multi_modal/mplug_for_visual_question_answering.py
index dc4fcce0..88875fda 100644
--- a/modelscope/models/multi_modal/mplug_for_visual_question_answering.py
+++ b/modelscope/models/multi_modal/mplug_for_visual_question_answering.py
@@ -1,7 +1,8 @@
 from typing import Dict
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
 
@@ -10,7 +11,7 @@ __all__ = ['MPlugForVisualQuestionAnswering']
 
 @MODELS.register_module(
     Tasks.visual_question_answering, module_name=Models.mplug)
-class MPlugForVisualQuestionAnswering(Model):
+class MPlugForVisualQuestionAnswering(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the mplug model from the `model_dir` path.
diff --git a/modelscope/models/multi_modal/ofa_for_all_tasks.py b/modelscope/models/multi_modal/ofa_for_all_tasks.py
index aaeccaf9..0ec87d66 100644
--- a/modelscope/models/multi_modal/ofa_for_all_tasks.py
+++ b/modelscope/models/multi_modal/ofa_for_all_tasks.py
@@ -8,7 +8,8 @@ import torch.cuda
 import torch.nn.functional as F
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.outputs import OutputKeys
 from modelscope.preprocessors.ofa.utils.collate import collate_tokens
@@ -32,7 +33,7 @@ __all__ = ['OfaForAllTasks']
 @MODELS.register_module(Tasks.image_classification, module_name=Models.ofa)
 @MODELS.register_module(Tasks.summarization, module_name=Models.ofa)
 @MODELS.register_module(Tasks.text_classification, module_name=Models.ofa)
-class OfaForAllTasks(Model):
+class OfaForAllTasks(TorchModel):
 
     def __init__(self, model_dir, *args, **kwargs):
         super().__init__(model_dir=model_dir, *args, **kwargs)
diff --git a/modelscope/models/nlp/bert_for_sequence_classification.py b/modelscope/models/nlp/bert_for_sequence_classification.py
index 530ba786..75105f36 100644
--- a/modelscope/models/nlp/bert_for_sequence_classification.py
+++ b/modelscope/models/nlp/bert_for_sequence_classification.py
@@ -5,7 +5,7 @@ import json
 import numpy as np
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
 
@@ -13,7 +13,7 @@ __all__ = ['BertForSequenceClassification']
 
 
 @MODELS.register_module(Tasks.text_classification, module_name=Models.bert)
-class BertForSequenceClassification(Model):
+class BertForSequenceClassification(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         # Model.__init__(self, model_dir, model_cls, first_sequence, *args, **kwargs)
diff --git a/modelscope/models/nlp/csanmt_for_translation.py b/modelscope/models/nlp/csanmt_for_translation.py
index 5ff24fea..41abd701 100644
--- a/modelscope/models/nlp/csanmt_for_translation.py
+++ b/modelscope/models/nlp/csanmt_for_translation.py
@@ -1,10 +1,7 @@
 import math
-import os
 from collections import namedtuple
-from typing import Any, Dict
+from typing import Dict
 
-import json
-import numpy as np
 import tensorflow as tf
 
 from modelscope.metainfo import Models
diff --git a/modelscope/models/nlp/masked_language.py b/modelscope/models/nlp/masked_language.py
index 8f3ba0f7..ffe9631d 100644
--- a/modelscope/models/nlp/masked_language.py
+++ b/modelscope/models/nlp/masked_language.py
@@ -1,16 +1,17 @@
-from typing import Any, Dict, Optional, Union
+from typing import Dict
 
 import numpy as np
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
 
 __all__ = ['BertForMaskedLM', 'StructBertForMaskedLM', 'VecoForMaskedLM']
 
 
-class MaskedLanguageModelBase(Model):
+class MaskedLanguageModelBase(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         super().__init__(model_dir, *args, **kwargs)
diff --git a/modelscope/models/nlp/nncrf_for_named_entity_recognition.py b/modelscope/models/nlp/nncrf_for_named_entity_recognition.py
index de16f8bf..de6bef65 100644
--- a/modelscope/models/nlp/nncrf_for_named_entity_recognition.py
+++ b/modelscope/models/nlp/nncrf_for_named_entity_recognition.py
@@ -1,15 +1,12 @@
 import os
 from typing import Any, Dict, List, Optional
 
-import json
-import numpy as np
 import torch
 import torch.nn as nn
-from torch.autograd import Variable
 from transformers import AutoConfig, AutoModel
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import ModelFile, Tasks
 
@@ -18,7 +15,7 @@ __all__ = ['TransformerCRFForNamedEntityRecognition']
 
 @MODELS.register_module(
     Tasks.named_entity_recognition, module_name=Models.tcrf)
-class TransformerCRFForNamedEntityRecognition(Model):
+class TransformerCRFForNamedEntityRecognition(TorchModel):
 
     def __init__(self, model_dir, *args, **kwargs):
         super().__init__(model_dir, *args, **kwargs)
diff --git a/modelscope/models/nlp/sbert_for_sequence_classification.py b/modelscope/models/nlp/sbert_for_sequence_classification.py
index 20ccdb83..59fcf6fa 100644
--- a/modelscope/models/nlp/sbert_for_sequence_classification.py
+++ b/modelscope/models/nlp/sbert_for_sequence_classification.py
@@ -7,7 +7,7 @@ import torch
 from sofa.models.sbert.modeling_sbert import SbertModel, SbertPreTrainedModel
 from torch import nn
 
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 
 
 class SbertTextClassfier(SbertPreTrainedModel):
@@ -43,7 +43,7 @@ class SbertTextClassfier(SbertPreTrainedModel):
         return SbertTextClassfier.from_pretrained(model_dir, **model_args)
 
 
-class SbertForSequenceClassificationBase(Model):
+class SbertForSequenceClassificationBase(TorchModel):
 
     def __init__(self, model_dir: str, model_args=None, *args, **kwargs):
         super().__init__(model_dir, *args, **kwargs)
diff --git a/modelscope/models/nlp/sbert_for_token_classification.py b/modelscope/models/nlp/sbert_for_token_classification.py
index 784d2bd1..748c4107 100644
--- a/modelscope/models/nlp/sbert_for_token_classification.py
+++ b/modelscope/models/nlp/sbert_for_token_classification.py
@@ -4,7 +4,8 @@ import numpy as np
 import torch
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
 
@@ -12,7 +13,7 @@ __all__ = ['SbertForTokenClassification']
 
 
 @MODELS.register_module(Tasks.word_segmentation, module_name=Models.structbert)
-class SbertForTokenClassification(Model):
+class SbertForTokenClassification(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the word segmentation model from the `model_dir` path.
diff --git a/modelscope/models/nlp/sbert_for_zero_shot_classification.py b/modelscope/models/nlp/sbert_for_zero_shot_classification.py
index e9ee2026..b772cf45 100644
--- a/modelscope/models/nlp/sbert_for_zero_shot_classification.py
+++ b/modelscope/models/nlp/sbert_for_zero_shot_classification.py
@@ -3,7 +3,7 @@ from typing import Any, Dict
 import numpy as np
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model
+from modelscope.models import TorchModel
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
 
@@ -12,7 +12,7 @@ __all__ = ['SbertForZeroShotClassification']
 
 @MODELS.register_module(
     Tasks.zero_shot_classification, module_name=Models.structbert)
-class SbertForZeroShotClassification(Model):
+class SbertForZeroShotClassification(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the zero shot classification model from the `model_dir` path.
diff --git a/modelscope/models/nlp/space_for_dialog_intent_prediction.py b/modelscope/models/nlp/space_for_dialog_intent_prediction.py
index 2759547e..bd0eb63b 100644
--- a/modelscope/models/nlp/space_for_dialog_intent_prediction.py
+++ b/modelscope/models/nlp/space_for_dialog_intent_prediction.py
@@ -1,10 +1,11 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
 import os
-from typing import Any, Dict
+from typing import Dict
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase
 from modelscope.preprocessors.space import IntentBPETextField
@@ -16,7 +17,7 @@ __all__ = ['SpaceForDialogIntent']
 
 @MODELS.register_module(
     Tasks.dialog_intent_prediction, module_name=Models.space)
-class SpaceForDialogIntent(Model):
+class SpaceForDialogIntent(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the test generation model from the `model_dir` path.
diff --git a/modelscope/models/nlp/space_for_dialog_modeling.py b/modelscope/models/nlp/space_for_dialog_modeling.py
index 21060e31..60713c3d 100644
--- a/modelscope/models/nlp/space_for_dialog_modeling.py
+++ b/modelscope/models/nlp/space_for_dialog_modeling.py
@@ -1,10 +1,11 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
 import os
-from typing import Any, Dict, Optional
+from typing import Dict
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase
 from modelscope.preprocessors.space import MultiWOZBPETextField
@@ -15,7 +16,7 @@ __all__ = ['SpaceForDialogModeling']
 
 
 @MODELS.register_module(Tasks.dialog_modeling, module_name=Models.space)
-class SpaceForDialogModeling(Model):
+class SpaceForDialogModeling(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the test generation model from the `model_dir` path.
diff --git a/modelscope/models/nlp/space_for_dialog_state_tracking.py b/modelscope/models/nlp/space_for_dialog_state_tracking.py
index 7cfb1c54..de5f95ce 100644
--- a/modelscope/models/nlp/space_for_dialog_state_tracking.py
+++ b/modelscope/models/nlp/space_for_dialog_state_tracking.py
@@ -1,17 +1,16 @@
-import os
-from typing import Any, Dict
+from typing import Dict
 
 from modelscope.metainfo import Models
-from modelscope.models.base import Model, Tensor
+from modelscope.models import TorchModel
+from modelscope.models.base import Tensor
 from modelscope.models.builder import MODELS
 from modelscope.utils.constant import Tasks
-from modelscope.utils.nlp.space.utils_dst import batch_to_device
 
 __all__ = ['SpaceForDialogStateTracking']
 
 
 @MODELS.register_module(Tasks.dialog_state_tracking, module_name=Models.space)
-class SpaceForDialogStateTracking(Model):
+class SpaceForDialogStateTracking(TorchModel):
 
     def __init__(self, model_dir: str, *args, **kwargs):
         """initialize the test generation model from the `model_dir` path.
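
The pattern this patch applies across all of the touched files is sketched below: a task model registers itself with MODELS and subclasses TorchModel instead of the generic Model base, the assumption being that TorchModel keeps Model's model_dir handling while also behaving as a torch.nn.Module. This is an illustrative sketch only, not code from the diff; the class name, task, and module_name are placeholders, and the forward body is a stub.

from modelscope.metainfo import Models
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks


# Placeholder registration: a real model uses its own task/module_name pair.
@MODELS.register_module(Tasks.text_classification, module_name=Models.bert)
class ExampleTorchModel(TorchModel):

    def __init__(self, model_dir: str, *args, **kwargs):
        # TorchModel is assumed to accept model_dir like Model does, so the
        # existing constructors in this patch stay unchanged.
        super().__init__(model_dir, *args, **kwargs)

    def forward(self, inputs):
        # Stub: a real model would run its torch network here and benefit
        # from nn.Module behaviour (to(), eval(), state_dict(), ...).
        return inputs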