Browse Source

[to #42322933] specify torch model to inherit from TorchModel

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9588834
master
yingda.chen 3 years ago
parent
commit
2e884cfdcb
17 changed files with 42 additions and 45 deletions
  1. +3
    -2
      modelscope/models/audio/ans/frcrn.py
  2. +0
    -1
      modelscope/models/cv/action_recognition/models.py
  3. +2
    -2
      modelscope/models/multi_modal/clip/clip_model.py
  4. +1
    -2
      modelscope/models/multi_modal/imagen/imagen_model.py
  5. +2
    -3
      modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
  6. +3
    -2
      modelscope/models/multi_modal/mplug_for_visual_question_answering.py
  7. +3
    -2
      modelscope/models/multi_modal/ofa_for_all_tasks.py
  8. +2
    -2
      modelscope/models/nlp/bert_for_sequence_classification.py
  9. +1
    -4
      modelscope/models/nlp/csanmt_for_translation.py
  10. +4
    -3
      modelscope/models/nlp/masked_language.py
  11. +2
    -5
      modelscope/models/nlp/nncrf_for_named_entity_recognition.py
  12. +2
    -2
      modelscope/models/nlp/sbert_for_sequence_classification.py
  13. +3
    -2
      modelscope/models/nlp/sbert_for_token_classification.py
  14. +2
    -2
      modelscope/models/nlp/sbert_for_zero_shot_classification.py
  15. +4
    -3
      modelscope/models/nlp/space_for_dialog_intent_prediction.py
  16. +4
    -3
      modelscope/models/nlp/space_for_dialog_modeling.py
  17. +4
    -5
      modelscope/models/nlp/space_for_dialog_state_tracking.py

+ 3
- 2
modelscope/models/audio/ans/frcrn.py View File

@@ -6,7 +6,8 @@ import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import ModelFile, Tasks from modelscope.utils.constant import ModelFile, Tasks
from .conv_stft import ConviSTFT, ConvSTFT from .conv_stft import ConviSTFT, ConvSTFT
@@ -59,7 +60,7 @@ class FTB(nn.Module):


@MODELS.register_module( @MODELS.register_module(
Tasks.speech_signal_process, module_name=Models.speech_frcrn_ans_cirm_16k) Tasks.speech_signal_process, module_name=Models.speech_frcrn_ans_cirm_16k)
class FRCRNModel(Model):
class FRCRNModel(TorchModel):
r""" A decorator of FRCRN for integrating into modelscope framework """ r""" A decorator of FRCRN for integrating into modelscope framework """


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):


+ 0
- 1
modelscope/models/cv/action_recognition/models.py View File

@@ -1,4 +1,3 @@
import torch
import torch.nn as nn import torch.nn as nn


from .tada_convnext import TadaConvNeXt from .tada_convnext import TadaConvNeXt


+ 2
- 2
modelscope/models/multi_modal/clip/clip_model.py View File

@@ -13,7 +13,7 @@ from torch.distributed.nn.functional import \
from torchvision.transforms import Compose, Normalize, Resize, ToTensor from torchvision.transforms import Compose, Normalize, Resize, ToTensor


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.models.multi_modal.clip.clip_bert import TextTransformer from modelscope.models.multi_modal.clip.clip_bert import TextTransformer
from modelscope.models.multi_modal.clip.clip_vit import VisionTransformer from modelscope.models.multi_modal.clip.clip_vit import VisionTransformer
@@ -116,7 +116,7 @@ class CLIPModel(nn.Module):




@MODELS.register_module(Tasks.multi_modal_embedding, module_name=Models.clip) @MODELS.register_module(Tasks.multi_modal_embedding, module_name=Models.clip)
class CLIPForMultiModalEmbedding(Model):
class CLIPForMultiModalEmbedding(TorchModel):


def __init__(self, model_dir, device_id=-1): def __init__(self, model_dir, device_id=-1):
super().__init__(model_dir=model_dir, device_id=device_id) super().__init__(model_dir=model_dir, device_id=device_id)


+ 1
- 2
modelscope/models/multi_modal/imagen/imagen_model.py View File

@@ -6,10 +6,9 @@ import numpy as np
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from PIL import Image


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import Model
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.models.multi_modal.imagen.diffusion import (GaussianDiffusion, from modelscope.models.multi_modal.imagen.diffusion import (GaussianDiffusion,
beta_schedule) beta_schedule)


+ 2
- 3
modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py View File

@@ -1,4 +1,3 @@
import os
import random import random
from os.path import exists from os.path import exists
from typing import Any, Dict from typing import Any, Dict
@@ -9,7 +8,7 @@ import torch
from PIL import Image from PIL import Image


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import ModelFile, Tasks from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.logger import get_logger from modelscope.utils.logger import get_logger
@@ -22,7 +21,7 @@ logger = get_logger()


@MODELS.register_module( @MODELS.register_module(
Tasks.video_multi_modal_embedding, module_name=Models.video_clip) Tasks.video_multi_modal_embedding, module_name=Models.video_clip)
class VideoCLIPForMultiModalEmbedding(Model):
class VideoCLIPForMultiModalEmbedding(TorchModel):


def __init__(self, model_dir, device_id=-1): def __init__(self, model_dir, device_id=-1):
super().__init__(model_dir=model_dir, device_id=device_id) super().__init__(model_dir=model_dir, device_id=device_id)


+ 3
- 2
modelscope/models/multi_modal/mplug_for_visual_question_answering.py View File

@@ -1,7 +1,8 @@
from typing import Dict from typing import Dict


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks


@@ -10,7 +11,7 @@ __all__ = ['MPlugForVisualQuestionAnswering']


@MODELS.register_module( @MODELS.register_module(
Tasks.visual_question_answering, module_name=Models.mplug) Tasks.visual_question_answering, module_name=Models.mplug)
class MPlugForVisualQuestionAnswering(Model):
class MPlugForVisualQuestionAnswering(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the mplug model from the `model_dir` path. """initialize the mplug model from the `model_dir` path.


+ 3
- 2
modelscope/models/multi_modal/ofa_for_all_tasks.py View File

@@ -8,7 +8,8 @@ import torch.cuda
import torch.nn.functional as F import torch.nn.functional as F


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.outputs import OutputKeys from modelscope.outputs import OutputKeys
from modelscope.preprocessors.ofa.utils.collate import collate_tokens from modelscope.preprocessors.ofa.utils.collate import collate_tokens
@@ -32,7 +33,7 @@ __all__ = ['OfaForAllTasks']
@MODELS.register_module(Tasks.image_classification, module_name=Models.ofa) @MODELS.register_module(Tasks.image_classification, module_name=Models.ofa)
@MODELS.register_module(Tasks.summarization, module_name=Models.ofa) @MODELS.register_module(Tasks.summarization, module_name=Models.ofa)
@MODELS.register_module(Tasks.text_classification, module_name=Models.ofa) @MODELS.register_module(Tasks.text_classification, module_name=Models.ofa)
class OfaForAllTasks(Model):
class OfaForAllTasks(TorchModel):


def __init__(self, model_dir, *args, **kwargs): def __init__(self, model_dir, *args, **kwargs):
super().__init__(model_dir=model_dir, *args, **kwargs) super().__init__(model_dir=model_dir, *args, **kwargs)


+ 2
- 2
modelscope/models/nlp/bert_for_sequence_classification.py View File

@@ -5,7 +5,7 @@ import json
import numpy as np import numpy as np


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks


@@ -13,7 +13,7 @@ __all__ = ['BertForSequenceClassification']




@MODELS.register_module(Tasks.text_classification, module_name=Models.bert) @MODELS.register_module(Tasks.text_classification, module_name=Models.bert)
class BertForSequenceClassification(Model):
class BertForSequenceClassification(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
# Model.__init__(self, model_dir, model_cls, first_sequence, *args, **kwargs) # Model.__init__(self, model_dir, model_cls, first_sequence, *args, **kwargs)


+ 1
- 4
modelscope/models/nlp/csanmt_for_translation.py View File

@@ -1,10 +1,7 @@
import math import math
import os
from collections import namedtuple from collections import namedtuple
from typing import Any, Dict
from typing import Dict


import json
import numpy as np
import tensorflow as tf import tensorflow as tf


from modelscope.metainfo import Models from modelscope.metainfo import Models


+ 4
- 3
modelscope/models/nlp/masked_language.py View File

@@ -1,16 +1,17 @@
from typing import Any, Dict, Optional, Union
from typing import Dict


import numpy as np import numpy as np


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks


__all__ = ['BertForMaskedLM', 'StructBertForMaskedLM', 'VecoForMaskedLM'] __all__ = ['BertForMaskedLM', 'StructBertForMaskedLM', 'VecoForMaskedLM']




class MaskedLanguageModelBase(Model):
class MaskedLanguageModelBase(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
super().__init__(model_dir, *args, **kwargs) super().__init__(model_dir, *args, **kwargs)


+ 2
- 5
modelscope/models/nlp/nncrf_for_named_entity_recognition.py View File

@@ -1,15 +1,12 @@
import os import os
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional


import json
import numpy as np
import torch import torch
import torch.nn as nn import torch.nn as nn
from torch.autograd import Variable
from transformers import AutoConfig, AutoModel from transformers import AutoConfig, AutoModel


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import ModelFile, Tasks from modelscope.utils.constant import ModelFile, Tasks


@@ -18,7 +15,7 @@ __all__ = ['TransformerCRFForNamedEntityRecognition']


@MODELS.register_module( @MODELS.register_module(
Tasks.named_entity_recognition, module_name=Models.tcrf) Tasks.named_entity_recognition, module_name=Models.tcrf)
class TransformerCRFForNamedEntityRecognition(Model):
class TransformerCRFForNamedEntityRecognition(TorchModel):


def __init__(self, model_dir, *args, **kwargs): def __init__(self, model_dir, *args, **kwargs):
super().__init__(model_dir, *args, **kwargs) super().__init__(model_dir, *args, **kwargs)


+ 2
- 2
modelscope/models/nlp/sbert_for_sequence_classification.py View File

@@ -7,7 +7,7 @@ import torch
from sofa.models.sbert.modeling_sbert import SbertModel, SbertPreTrainedModel from sofa.models.sbert.modeling_sbert import SbertModel, SbertPreTrainedModel
from torch import nn from torch import nn


from modelscope.models.base import Model
from modelscope.models import TorchModel




class SbertTextClassfier(SbertPreTrainedModel): class SbertTextClassfier(SbertPreTrainedModel):
@@ -43,7 +43,7 @@ class SbertTextClassfier(SbertPreTrainedModel):
return SbertTextClassfier.from_pretrained(model_dir, **model_args) return SbertTextClassfier.from_pretrained(model_dir, **model_args)




class SbertForSequenceClassificationBase(Model):
class SbertForSequenceClassificationBase(TorchModel):


def __init__(self, model_dir: str, model_args=None, *args, **kwargs): def __init__(self, model_dir: str, model_args=None, *args, **kwargs):
super().__init__(model_dir, *args, **kwargs) super().__init__(model_dir, *args, **kwargs)


+ 3
- 2
modelscope/models/nlp/sbert_for_token_classification.py View File

@@ -4,7 +4,8 @@ import numpy as np
import torch import torch


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks


@@ -12,7 +13,7 @@ __all__ = ['SbertForTokenClassification']




@MODELS.register_module(Tasks.word_segmentation, module_name=Models.structbert) @MODELS.register_module(Tasks.word_segmentation, module_name=Models.structbert)
class SbertForTokenClassification(Model):
class SbertForTokenClassification(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the word segmentation model from the `model_dir` path. """initialize the word segmentation model from the `model_dir` path.


+ 2
- 2
modelscope/models/nlp/sbert_for_zero_shot_classification.py View File

@@ -3,7 +3,7 @@ from typing import Any, Dict
import numpy as np import numpy as np


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model
from modelscope.models import TorchModel
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks


@@ -12,7 +12,7 @@ __all__ = ['SbertForZeroShotClassification']


@MODELS.register_module( @MODELS.register_module(
Tasks.zero_shot_classification, module_name=Models.structbert) Tasks.zero_shot_classification, module_name=Models.structbert)
class SbertForZeroShotClassification(Model):
class SbertForZeroShotClassification(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the zero shot classification model from the `model_dir` path. """initialize the zero shot classification model from the `model_dir` path.


+ 4
- 3
modelscope/models/nlp/space_for_dialog_intent_prediction.py View File

@@ -1,10 +1,11 @@
# Copyright (c) Alibaba, Inc. and its affiliates. # Copyright (c) Alibaba, Inc. and its affiliates.


import os import os
from typing import Any, Dict
from typing import Dict


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase
from modelscope.preprocessors.space import IntentBPETextField from modelscope.preprocessors.space import IntentBPETextField
@@ -16,7 +17,7 @@ __all__ = ['SpaceForDialogIntent']


@MODELS.register_module( @MODELS.register_module(
Tasks.dialog_intent_prediction, module_name=Models.space) Tasks.dialog_intent_prediction, module_name=Models.space)
class SpaceForDialogIntent(Model):
class SpaceForDialogIntent(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the test generation model from the `model_dir` path. """initialize the test generation model from the `model_dir` path.


+ 4
- 3
modelscope/models/nlp/space_for_dialog_modeling.py View File

@@ -1,10 +1,11 @@
# Copyright (c) Alibaba, Inc. and its affiliates. # Copyright (c) Alibaba, Inc. and its affiliates.


import os import os
from typing import Any, Dict, Optional
from typing import Dict


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase from modelscope.models.nlp.backbones import SpaceGenerator, SpaceModelBase
from modelscope.preprocessors.space import MultiWOZBPETextField from modelscope.preprocessors.space import MultiWOZBPETextField
@@ -15,7 +16,7 @@ __all__ = ['SpaceForDialogModeling']




@MODELS.register_module(Tasks.dialog_modeling, module_name=Models.space) @MODELS.register_module(Tasks.dialog_modeling, module_name=Models.space)
class SpaceForDialogModeling(Model):
class SpaceForDialogModeling(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the test generation model from the `model_dir` path. """initialize the test generation model from the `model_dir` path.


+ 4
- 5
modelscope/models/nlp/space_for_dialog_state_tracking.py View File

@@ -1,17 +1,16 @@
import os
from typing import Any, Dict
from typing import Dict


from modelscope.metainfo import Models from modelscope.metainfo import Models
from modelscope.models.base import Model, Tensor
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS from modelscope.models.builder import MODELS
from modelscope.utils.constant import Tasks from modelscope.utils.constant import Tasks
from modelscope.utils.nlp.space.utils_dst import batch_to_device


__all__ = ['SpaceForDialogStateTracking'] __all__ = ['SpaceForDialogStateTracking']




@MODELS.register_module(Tasks.dialog_state_tracking, module_name=Models.space) @MODELS.register_module(Tasks.dialog_state_tracking, module_name=Models.space)
class SpaceForDialogStateTracking(Model):
class SpaceForDialogStateTracking(TorchModel):


def __init__(self, model_dir: str, *args, **kwargs): def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the test generation model from the `model_dir` path. """initialize the test generation model from the `model_dir` path.


Loading…
Cancel
Save