From a8fd9c4afea7fbe0a8d478587911702f0d470a53 Mon Sep 17 00:00:00 2001 From: "shichen.fsc" Date: Wed, 7 Sep 2022 20:12:44 +0800 Subject: [PATCH] [to #42322933] add new pipeline - PoNet for fill-mask Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10019083 --- modelscope/metainfo.py | 3 + modelscope/models/nlp/__init__.py | 2 + modelscope/models/nlp/ponet/__init__.py | 41 + .../models/nlp/ponet/configuration_ponet.py | 117 ++ modelscope/models/nlp/ponet/modeling_ponet.py | 1591 +++++++++++++++++ .../models/nlp/ponet/tokenization_ponet.py | 155 ++ .../models/nlp/ponet_for_masked_language.py | 53 + modelscope/pipelines/nlp/__init__.py | 2 + .../pipelines/nlp/fill_mask_ponet_pipeline.py | 136 ++ modelscope/preprocessors/__init__.py | 8 +- modelscope/preprocessors/nlp.py | 306 +++- modelscope/preprocessors/slp.py | 223 --- modelscope/utils/nlp/nlp_utils.py | 20 + tests/pipelines/test_fill_mask_ponet.py | 48 + 14 files changed, 2475 insertions(+), 230 deletions(-) create mode 100644 modelscope/models/nlp/ponet/__init__.py create mode 100644 modelscope/models/nlp/ponet/configuration_ponet.py create mode 100644 modelscope/models/nlp/ponet/modeling_ponet.py create mode 100644 modelscope/models/nlp/ponet/tokenization_ponet.py create mode 100644 modelscope/models/nlp/ponet_for_masked_language.py create mode 100644 modelscope/pipelines/nlp/fill_mask_ponet_pipeline.py delete mode 100644 modelscope/preprocessors/slp.py create mode 100644 tests/pipelines/test_fill_mask_ponet.py diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index 994095c3..f904b5df 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -62,6 +62,7 @@ class Models(object): gpt3 = 'gpt3' plug = 'plug' bert_for_ds = 'bert-for-document-segmentation' + ponet = 'ponet' # audio models sambert_hifigan = 'sambert-hifigan' @@ -179,6 +180,7 @@ class Pipelines(object): sentiment_classification = 'sentiment-classification' text_classification = 'text-classification' fill_mask = 'fill-mask' + fill_mask_ponet = 'fill-mask-ponet' csanmt_translation = 'csanmt-translation' nli = 'nli' dialog_intent_prediction = 'dialog-intent-prediction' @@ -281,6 +283,7 @@ class Preprocessors(object): sequence_labeling_tokenizer = 'sequence-labeling-tokenizer' word_segment_text_to_label_preprocessor = 'word-segment-text-to-label-preprocessor' fill_mask = 'fill-mask' + fill_mask_ponet = 'fill-mask-ponet' faq_question_answering_preprocessor = 'faq-question-answering-preprocessor' conversational_text_to_sql = 'conversational-text-to-sql' re_tokenizer = 're-tokenizer' diff --git a/modelscope/models/nlp/__init__.py b/modelscope/models/nlp/__init__.py index 40be8665..a3a12c22 100644 --- a/modelscope/models/nlp/__init__.py +++ b/modelscope/models/nlp/__init__.py @@ -13,6 +13,7 @@ if TYPE_CHECKING: from .gpt3 import GPT3ForTextGeneration from .masked_language import (StructBertForMaskedLM, VecoForMaskedLM, BertForMaskedLM, DebertaV2ForMaskedLM) + from .ponet_for_masked_language import PoNetForMaskedLM from .nncrf_for_named_entity_recognition import ( TransformerCRFForNamedEntityRecognition, LSTMCRFForNamedEntityRecognition) @@ -46,6 +47,7 @@ else: 'TransformerCRFForNamedEntityRecognition', 'LSTMCRFForNamedEntityRecognition' ], + 'ponet_for_masked_language': ['PoNetForMaskedLM'], 'palm_v2': ['PalmForTextGeneration'], 'sbert_for_faq_question_answering': ['SbertForFaqQuestionAnswering'], 'star_text_to_sql': ['StarForTextToSql'], diff --git a/modelscope/models/nlp/ponet/__init__.py b/modelscope/models/nlp/ponet/__init__.py new file mode 
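With the registry entries above ('ponet' under Models, 'fill-mask-ponet' under Pipelines and Preprocessors) plus the lazy export of PoNetForMaskedLM, the new pipeline becomes reachable through the standard ModelScope entry point. A minimal invocation sketch follows; the model id is a placeholder, since the actual checkpoint name lives in the hub model's configuration.json rather than in this patch:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Placeholder model id: the real PoNet fill-mask checkpoint name is not part of this diff.
fill_mask = pipeline(Tasks.fill_mask, model='<your-ponet-fill-mask-model-id>')

result = fill_mask('Everything in [MASK] you call reality is really [MASK] a reflection of your consciousness.')
print(result)
```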
100644 index 00000000..6d26b194 --- /dev/null +++ b/modelscope/models/nlp/ponet/__init__.py @@ -0,0 +1,41 @@ +# Copyright 2021-2022 The Alibaba DAMO Team Authors. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from modelscope.utils.import_utils import LazyImportModule + +if TYPE_CHECKING: + from .configuration_ponet import PoNetConfig + from .modeling_ponet import (PoNetForMaskedLM, PoNetModel, + PoNetPreTrainedModel) + from .tokenization_ponet import PoNetTokenizer +else: + _import_structure = { + 'configuration_ponet': ['PoNetConfig'], + 'modeling_ponet': + ['PoNetForMaskedLM', 'PoNetModel', 'PoNetPreTrainedModel'], + 'tokenization_ponet': ['PoNetTokenizer'], + } + + import sys + + sys.modules[__name__] = LazyImportModule( + __name__, + globals()['__file__'], + _import_structure, + module_spec=__spec__, + extra_objects={}, + ) diff --git a/modelscope/models/nlp/ponet/configuration_ponet.py b/modelscope/models/nlp/ponet/configuration_ponet.py new file mode 100644 index 00000000..70294fc2 --- /dev/null +++ b/modelscope/models/nlp/ponet/configuration_ponet.py @@ -0,0 +1,117 @@ +# Copyright 2021-2022 The Alibaba DAMO Team Authors. +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PoNet model configuration, mainly copied from :class:`~transformers.BertConfig` """ +from transformers import PretrainedConfig + +from modelscope.utils import logger as logging + +logger = logging.get_logger(__name__) + + +class PoNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration + of a :class:`~modelscope.models.nlp.ponet.PoNetModel`. + It is used to instantiate a PoNet model according to the specified arguments. + + Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model + outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. + + + Args: + vocab_size (:obj:`int`, `optional`, defaults to 30522): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + :obj:`inputs_ids` passed when calling :class:`~transformers.BertModel` or + :class:`~transformers.TFBertModel`. + hidden_size (:obj:`int`, `optional`, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. 
+ num_hidden_layers (:obj:`int`, `optional`, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (:obj:`int`, `optional`, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (:obj:`int`, `optional`, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, + :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported. + hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (:obj:`int`, `optional`, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (:obj:`int`, `optional`, defaults to 2): + The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.BertModel` or + :class:`~transformers.TFBertModel`. + initializer_range (:obj:`float`, `optional`, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`): + Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`, + :obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on + :obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.) + `__. For more information on :obj:`"relative_key_query"`, please refer to + `Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.) + `__. + use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if ``config.is_decoder=True``. + classifier_dropout (:obj:`float`, `optional`): + The dropout ratio for the classification head. + clsgsepg (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not use a trick to make sure the segment and local information will not leak. 
+ """ + model_type = 'ponet' + + def __init__(self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act='gelu', + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type='absolute', + use_cache=True, + classifier_dropout=None, + clsgsepg=True, + **kwargs): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.clsgsepg = clsgsepg diff --git a/modelscope/models/nlp/ponet/modeling_ponet.py b/modelscope/models/nlp/ponet/modeling_ponet.py new file mode 100644 index 00000000..f37954db --- /dev/null +++ b/modelscope/models/nlp/ponet/modeling_ponet.py @@ -0,0 +1,1591 @@ +# Copyright 2021-2022 The Alibaba DAMO Team Authors. +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch PoNet model. 
""" + +import math +from dataclasses import dataclass +from distutils.version import LooseVersion +from typing import Optional, Tuple + +import torch +import torch.utils.checkpoint +from packaging import version +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from transformers.activations import ACT2FN +from transformers.file_utils import (ModelOutput, add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, MaskedLMOutput, + SequenceClassifierOutput, TokenClassifierOutput) +from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) +from transformers.models.bert.modeling_bert import \ + load_tf_weights_in_bert as load_tf_weights_in_ponet + +from modelscope.utils.logger import get_logger +from .configuration_ponet import PoNetConfig + +logger = get_logger(__name__) + +is_pytorch_12plus = LooseVersion(torch.__version__) >= LooseVersion('1.12.0') + +_CHECKPOINT_FOR_DOC = 'ponet-base-uncased' +_CONFIG_FOR_DOC = 'PoNetConfig' +_TOKENIZER_FOR_DOC = 'PoNetTokenizer' + +CLS_ID = 101 +EOS_ID = 102 + + +def segment_max(src, index, dim=1): + if is_pytorch_12plus: + out = torch.zeros_like(src).scatter_reduce( + dim, + index[:, :, None].expand_as(src), + src, + reduce='amax', + include_self=False) + else: + dummy_scatter_index = index[:, :, None].expand_as(src) + min_value = src.min() - 1 + dummpy_scatter_shape = (*src.shape[:-1], index.max() + 1, + src.shape[-1]) + dummy_scatter_index_expand = dummy_scatter_index.unsqueeze(-2).expand( + *dummpy_scatter_shape) + index_reconstruct_expand = torch.arange( + index.max() + 1, + device=src.device)[None, None, :, + None].expand(*dummpy_scatter_shape) + src_expand = src.unsqueeze(-2).expand(*dummpy_scatter_shape) + out, _ = src_expand.masked_scatter( + dummy_scatter_index_expand != index_reconstruct_expand, + torch.full_like(src_expand, min_value.item())).max(dim=1) + + dummy = index.unsqueeze(-1).expand(*index.shape[:2], out.size(-1)) + return torch.gather(out, dim, dummy).to(dtype=src.dtype) + + +def get_segment_index(input_ids, cls_id=CLS_ID, eos_id=EOS_ID): + mask = (input_ids == cls_id).to( + dtype=torch.long) + (input_ids == eos_id).to(dtype=torch.long) + mask = mask + torch.cat([torch.zeros_like(mask[:, 0:1]), mask[:, :-1]], + dim=1) + return mask.cumsum(dim=1) - 1 + + +def get_token_type_mask(input_ids, cls_id=CLS_ID, eos_id=EOS_ID): + mask = (input_ids == cls_id) | (input_ids == eos_id) + return mask + + +def get_win_max(hidden_states, kernel_size=3): + m = nn.MaxPool1d(kernel_size, stride=1, padding=kernel_size // 2) + out = m(hidden_states.permute(0, 2, 1)).permute(0, 2, 1) + return out + + +class PoNetEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint 
file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + if version.parse(torch.__version__) > version.parse('1.6.0'): + self.register_buffer( + 'token_type_ids', + torch.zeros( + self.position_ids.size(), + dtype=torch.long, + device=self.position_ids.device), + persistent=False, + ) + + def forward(self, + input_ids=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, + past_key_values_length:seq_length + + past_key_values_length] + + if token_type_ids is None: + if hasattr(self, 'token_type_ids'): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros( + input_shape, + dtype=torch.long, + device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class PoNetSelfAttention(nn.Module): + + def __init__(self, config): + super().__init__() + + self.dense_local = nn.Linear(config.hidden_size, config.hidden_size) + self.dense_segment = nn.Linear(config.hidden_size, config.hidden_size) + + self.num_attention_heads = config.num_attention_heads + self.clsgsepg = getattr(config, 'clsgsepg', True) + self.attention_head_size = int(config.hidden_size + / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.dense_q = nn.Linear(config.hidden_size, self.all_head_size) + self.dense_k = nn.Linear(config.hidden_size, self.all_head_size) + self.dense_o = nn.Linear(config.hidden_size, self.all_head_size) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, + self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) # bz, head, len, head_size + + def forward( + self, + hidden_states, + segment_index, + token_type_mask, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + + context_layer_q = self.transpose_for_scores( + self.dense_q(hidden_states)) + context_layer_k = self.transpose_for_scores( + self.dense_k(hidden_states)) + context_layer_v = context_layer_k + context_layer_o = self.transpose_for_scores( + self.dense_o(hidden_states)) + + if attention_mask is not None: + _attention_mask = (attention_mask.squeeze(1).unsqueeze(-1) < -1) + + if attention_mask is not None: + context_layer_q.masked_fill_(_attention_mask, 0.0) + q = 
context_layer_q.sum(dim=-2) / torch.ones_like( + _attention_mask).to(dtype=context_layer_q.dtype).masked_fill( + _attention_mask, 0.0).sum(dim=-2) + else: + q = context_layer_q.mean(dim=-2) + att = torch.einsum('bdh,bdlh -> bdl', q, context_layer_k) / math.sqrt( + context_layer_q.shape[-1]) + if attention_mask is not None: + att = att + attention_mask.squeeze(1) + att_prob = att.softmax(dim=-1) + v = torch.einsum('bdlh,bdl->bdh', context_layer_v, att_prob) + + context_layer_segment = self.dense_segment(hidden_states) + context_layer_local = self.dense_local(hidden_states) + if attention_mask is not None: + context_layer_local.masked_fill_( + _attention_mask.squeeze(1), -10000) + context_layer_segment.masked_fill_( + _attention_mask.squeeze(1), -10000) + + if self.clsgsepg: + # XXX: a trick to make sure the segment and local information will not leak + context_layer_local = get_win_max( + context_layer_local.masked_fill( + token_type_mask.unsqueeze(dim=-1), -10000)) + context_layer_segment = segment_max( + context_layer_segment, index=segment_index) + + context_layer_segment.masked_fill_( + token_type_mask.unsqueeze(dim=-1), 0.0) + context_layer_local.masked_fill_( + token_type_mask.unsqueeze(dim=-1), 0.0) + else: + context_layer_local = get_win_max(context_layer_local) + context_layer_segment = segment_max( + context_layer_segment, index=segment_index) + + context_layer_local = self.transpose_for_scores(context_layer_local) + context_layer_segment = self.transpose_for_scores( + context_layer_segment) + + context_layer = (v.unsqueeze(dim=-2) + context_layer_segment + ) * context_layer_o + context_layer_local + context_layer = context_layer.permute(0, 2, 1, 3).reshape( + *hidden_states.shape[:2], -1) + + if attention_mask is not None: + context_layer.masked_fill_(_attention_mask.squeeze(1), 0.0) + + outputs = (context_layer, + att_prob) if output_attentions else (context_layer, ) + return outputs + + +class PoNetSelfOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class PoNetIntermediate(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class PoNetOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class PoNetAttention(nn.Module): + + def __init__(self, config): + super().__init__() + 
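The forward pass above replaces softmax self-attention with three pooling branches: a global branch (a sequence-pooled query attends over all keys), a segment branch (segment_max) and a local branch (get_win_max). The einsum shapes of the global branch are the least obvious part, so here is a standalone shape sketch with random tensors and no masking:

```python
import math
import torch

batch, heads, seq_len, head_size = 2, 12, 16, 64
q = torch.randn(batch, heads, head_size)            # sequence-pooled query, one vector per head
k = torch.randn(batch, heads, seq_len, head_size)   # per-token keys (PoNet reuses them as values)

att = torch.einsum('bdh,bdlh->bdl', q, k) / math.sqrt(head_size)   # (2, 12, 16)
v = torch.einsum('bdlh,bdl->bdh', k, att.softmax(dim=-1))          # (2, 12, 64)
print(att.shape, v.shape)
```

The resulting per-head vector v is broadcast back over the sequence (v.unsqueeze(dim=-2)) and combined with the segment and local branches, which is what keeps the mixing step linear in sequence length.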
self.self = PoNetSelfAttention(config) + self.output = PoNetSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, + self.self.attention_head_size, self.pruned_heads) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len( + heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + segment_index, + token_type_mask, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + segment_index, + token_type_mask, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output, + ) + self_outputs[1:] # add attentions if we output them + return outputs + + +class PoNetLayer(nn.Module): + + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = PoNetAttention(config) + + config.is_decoder = False # XXX: Decoder is not yet impletemented. + self.is_decoder = config.is_decoder + + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + assert self.is_decoder, f'{self} should be used as a decoder model if cross attention is added' + self.crossattention = PoNetAttention(config) + self.intermediate = PoNetIntermediate(config) + self.output = PoNetOutput(config) + + def forward( + self, + hidden_states, + segment_index, + token_type_mask, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[: + 2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + segment_index, + token_type_mask, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[ + 1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + assert hasattr( + self, 'crossattention' + ), f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`' # noqa * + + cross_attn_past_key_value = past_key_value[ + -2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + 
attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[ + 1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward(self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output) + outputs = (layer_output, ) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class PoNetEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [PoNetLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward( + self, + hidden_states, + segment_index, + token_type_mask, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + ) if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + if getattr(self.config, 'gradient_checkpointing', + False) and self.training: + + if use_cache: + logger.warning( + '`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting ' + '`use_cache=False`...') + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + segment_index, + token_type_mask, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + segment_index, + token_type_mask, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + all_self_attentions = all_self_attentions + ( + layer_outputs[1], ) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + ( + layer_outputs[2], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class PoNetPooler(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class PoNetPredictionHeadTransform(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class PoNetLMPredictionHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.transform = PoNetPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
+ self.decoder = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class PoNetOnlyMLMHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = PoNetLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class PoNetPreTrainingHeads(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = PoNetLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 3) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class PoNetPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = PoNetConfig + load_tf_weights = load_tf_weights_in_ponet + base_model_prefix = 'ponet' + _keys_to_ignore_on_load_missing = [r'position_ids'] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +@dataclass +class PoNetForPreTrainingOutput(ModelOutput): + """ + Output type of :class:`~transformers.PoNetForPreTraining`. + + Args: + loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + mlm_loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): + Masked language modeling loss. + sop_loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): + sop loss. + prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). 
+ hidden_states + (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed + or when ``config.output_hidden_states=True``): + Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape :obj:`(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed + or when ``config.output_attentions=True``): + Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, + sequence_length, sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + mlm_loss: Optional[torch.FloatTensor] = None + sop_loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + seq_relationship_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +PONET_START_DOCSTRING = r""" + + This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic + methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, + pruning heads etc.) + + This model is also a PyTorch `torch.nn.Module `__ + subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to + general usage and behavior. + + Parameters: + config (:class:`~modelscope.models.nlp.ponet.PoNetConfig`): + Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model + weights. +""" + +PONET_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using :class:`~modelscope.models.nlp.ponet.PoNetTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + + `What are input IDs? <../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + `What are attention masks? <../glossary.html#attention-mask>`__ + token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, + 1]``: + + - 0 corresponds to a `sentence A` token, + - 1 corresponds to a `sentence B` token. + + `What are token type IDs? <../glossary.html#token-type-ids>`_ + position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, + config.max_position_embeddings - 1]``. + + `What are position IDs? 
<../glossary.html#position-ids>`_ + head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): + Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): + Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert :obj:`input_ids` indices into associated + vectors than the model's internal embedding lookup matrix. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + + +@add_start_docstrings( + 'The bare PoNet Model transformer outputting raw hidden-states without any specific head on top.', + PONET_START_DOCSTRING, +) +class PoNetModel(PoNetPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration + set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder` + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = PoNetEmbeddings(config) + self.encoder = PoNetEncoder(config) + + self.pooler = PoNetPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + segment_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + encoder_hidden_states + (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` + with each tuple having 4 tensors of shape :obj: + `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + 'You cannot specify both input_ids and inputs_embeds at the same time' + ) + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + else: + raise ValueError( + 'You have to specify either input_ids or inputs_embeds') + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[ + 2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + if token_type_ids is None: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape, device) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size( + ) + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + + segment_index = get_segment_index( + input_ids) if segment_ids is None else segment_ids + token_type_mask = get_token_type_mask(input_ids) + encoder_outputs = self.encoder( + embedding_output, + segment_index, + token_type_mask, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler( 
+ sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + PoNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next + sentence prediction (classification)` head. + """, + PONET_START_DOCSTRING, +) +class PoNetForPreTraining(PoNetPreTrainedModel): + + def __init__(self, config): + super().__init__(config) + + self.ponet = PoNetModel(config) + self.cls = PoNetPreTrainingHeads(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @replace_return_docstrings( + output_type=PoNetForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + segment_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + next_sentence_label=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`): + Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): + Used to hide legacy arguments that have been deprecated. 
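PoNetModel behaves as a drop-in BERT-style backbone; when segment_ids are omitted, the forward pass derives them from the [CLS]/[SEP] ids as shown earlier. A minimal smoke test with a tiny, randomly initialised config (no pretrained weights involved), assuming the modules land at the paths added in this patch:

```python
import torch
from modelscope.models.nlp.ponet import PoNetConfig, PoNetModel

config = PoNetConfig(vocab_size=200, hidden_size=32, num_hidden_layers=2,
                     num_attention_heads=4, intermediate_size=64,
                     max_position_embeddings=64)
model = PoNetModel(config)

input_ids = torch.tensor([[101, 7, 8, 9, 102]])          # [CLS] ... [SEP]
outputs = model(input_ids=input_ids,
                attention_mask=torch.ones_like(input_ids))
print(outputs.last_hidden_state.shape)   # torch.Size([1, 5, 32])
print(outputs.pooler_output.shape)       # torch.Size([1, 32])
```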
+ + Returns: + + Example:: + + >>> from transformers import PoNetTokenizer, PoNetForPreTraining + >>> import torch + + >>> tokenizer = PoNetTokenizer.from_pretrained('ponet-base-uncased') + >>> model = PoNetForPreTraining.from_pretrained('ponet-base-uncased') + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.prediction_logits + >>> seq_relationship_logits = outputs.seq_relationship_logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ponet( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + segment_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls( + sequence_output, pooled_output) + + total_loss = None + masked_lm_loss = None + next_sentence_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + next_sentence_loss = loss_fct( + seq_relationship_score.view(-1, 3), + next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss, masked_lm_loss, next_sentence_loss) + + output) if total_loss is not None else output + + return PoNetForPreTrainingOutput( + loss=total_loss, + mlm_loss=masked_lm_loss, + sop_loss=next_sentence_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """PoNet Model with a `language modeling` head on top for CLM fine-tuning. 
""", + PONET_START_DOCSTRING) +class PoNetLMHeadModel(PoNetPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning( + 'If you want to use `PoNetLMHeadModel` as a standalone, add `is_decoder=True.`' + ) + + self.ponet = PoNetModel(config, add_pooling_layer=False) + self.cls = PoNetOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @replace_return_docstrings( + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + segment_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` + with each tuple having 4 tensors of shape : + obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ + Returns: + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.ponet( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + segment_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past=None, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'past_key_values': past + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + +@add_start_docstrings( + """PoNet Model with a `language modeling` head on top. 
""", + PONET_START_DOCSTRING) +class PoNetForMaskedLM(PoNetPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + 'If you want to use `PoNetForMaskedLM` make sure `config.is_decoder=False` for ' + 'bi-directional self-attention.') + + self.ponet = PoNetModel(config, add_pooling_layer=False) + self.cls = PoNetOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + segment_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ponet( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + segment_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((masked_lm_loss, ) + + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + PoNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. 
+ """, + PONET_START_DOCSTRING, +) +class PoNetForSequenceClassification(PoNetPreTrainedModel): + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.ponet = PoNetModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + segment_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., + config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), + If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ponet( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + segment_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = 'regression' + elif self.num_labels > 1 and (labels.dtype == torch.long + or labels.dtype == torch.int): + self.config.problem_type = 'single_label_classification' + else: + self.config.problem_type = 'multi_label_classification' + + if self.config.problem_type == 'regression': + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == 'single_label_classification': + loss_fct = CrossEntropyLoss() + loss = loss_fct( + logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == 'multi_label_classification': + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits, ) + outputs[2:] + return ((loss, ) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + PoNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + PONET_START_DOCSTRING, +) +class PoNetForTokenClassification(PoNetPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r'pooler'] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.ponet = PoNetModel(config, add_pooling_layer=False) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward( + PONET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + segment_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - + 1]``. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ponet( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + segment_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, labels.view(-1), + torch.tensor(loss_fct.ignore_index).type_as(labels)) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct( + logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits, ) + outputs[2:] + return ((loss, ) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/modelscope/models/nlp/ponet/tokenization_ponet.py b/modelscope/models/nlp/ponet/tokenization_ponet.py new file mode 100644 index 00000000..21544886 --- /dev/null +++ b/modelscope/models/nlp/ponet/tokenization_ponet.py @@ -0,0 +1,155 @@ +# Copyright 2021-2022 The Alibaba DAMO Team Authors. +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes for PoNet """ + +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from transformers.file_utils import PaddingStrategy +from transformers.models.bert.tokenization_bert import BertTokenizer + +from modelscope.utils.constant import ModelFile +from modelscope.utils.logger import get_logger + +logger = get_logger(__name__) + +VOCAB_FILES_NAMES = {'vocab_file': ModelFile.VOCAB_FILE} + +PRETRAINED_VOCAB_FILES_MAP = {'vocab_file': {}} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + 'nlp_ponet_fill-mask_chinese-base': 512, + 'nlp_ponet_fill-mask_english-base': 512, +} + +PRETRAINED_INIT_CONFIGURATION = { + 'nlp_ponet_fill-mask_chinese-base': { + 'do_lower_case': True + }, + 'nlp_ponet_fill-mask_english-base': { + 'do_lower_case': True + }, +} + + +class PoNetTokenizer(BertTokenizer): + r""" + Construct an PoNet tokenizer. Based on BertTokenizer. + + This tokenizer inherits from :class:`~transformers.BertTokenizer` which contains most of the main methods. + Users should refer to this superclass for more information regarding those methods. + + Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning + parameters. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or + batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + >= 7.5 (Volta). 
+ return_attention_mask: (optional) Set to False to avoid returning + attention mask (default: set to model specifics) + """ + # Load from model defaults + if return_attention_mask is None: + return_attention_mask = 'attention_mask' in self.model_input_names + + required_input = encoded_inputs[self.model_input_names[0]] + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and ( + max_length % pad_to_multiple_of != 0): + max_length = ( + (max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len( + required_input) != max_length + + if needs_to_be_padded: + difference = max_length - len(required_input) + if self.padding_side == 'right': + if return_attention_mask: + encoded_inputs['attention_mask'] = [1] * len( + required_input) + [0] * difference + if 'token_type_ids' in encoded_inputs: + encoded_inputs['token_type_ids'] = ( + encoded_inputs['token_type_ids'] + + [self.pad_token_type_id] * difference) + if 'special_tokens_mask' in encoded_inputs: + encoded_inputs['special_tokens_mask'] = encoded_inputs[ + 'special_tokens_mask'] + [1] * difference + if 'segment_ids' in encoded_inputs: + encoded_inputs[ + 'segment_ids'] = encoded_inputs['segment_ids'] + [ + encoded_inputs['segment_ids'][-1] + 1 + ] * difference # noqa * + encoded_inputs[self.model_input_names[ + 0]] = required_input + [self.pad_token_id] * difference + elif self.padding_side == 'left': + if return_attention_mask: + encoded_inputs['attention_mask'] = [0] * difference + [ + 1 + ] * len(required_input) + if 'token_type_ids' in encoded_inputs: + encoded_inputs['token_type_ids'] = [ + self.pad_token_type_id + ] * difference + encoded_inputs['token_type_ids'] + if 'segment_ids' in encoded_inputs: + encoded_inputs['segment_ids'] = [encoded_inputs['segment_ids'][-1] + 1] * difference + \ + encoded_inputs['segment_ids'] # noqa * + if 'special_tokens_mask' in encoded_inputs: + encoded_inputs['special_tokens_mask'] = [ + 1 + ] * difference + encoded_inputs['special_tokens_mask'] + encoded_inputs[self.model_input_names[ + 0]] = [self.pad_token_id] * difference + required_input + else: + raise ValueError('Invalid padding strategy:' + + str(self.padding_side)) + elif return_attention_mask and 'attention_mask' not in encoded_inputs: + encoded_inputs['attention_mask'] = [1] * len(required_input) + + return encoded_inputs diff --git a/modelscope/models/nlp/ponet_for_masked_language.py b/modelscope/models/nlp/ponet_for_masked_language.py new file mode 100644 index 00000000..11f4bc11 --- /dev/null +++ b/modelscope/models/nlp/ponet_for_masked_language.py @@ -0,0 +1,53 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +from typing import Any, Dict + +from modelscope.metainfo import Models +from modelscope.models.base import TorchModel +from modelscope.models.builder import MODELS +from modelscope.models.nlp.ponet import \ + PoNetForMaskedLM as PoNetForMaskedLMTransformer +from modelscope.outputs import OutputKeys +from modelscope.utils.constant import Tasks + +__all__ = ['PoNetForMaskedLM'] + + +@MODELS.register_module(Tasks.fill_mask, module_name=Models.ponet) +class PoNetForMaskedLM(TorchModel, PoNetForMaskedLMTransformer): + """PoNet for MLM model.'. + + Inherited from ponet.PoNetForMaskedLM and TorchModel, so this class can be registered into Model sets. 
+    """
+
+    def __init__(self, config, model_dir):
+        super(TorchModel, self).__init__(model_dir)
+        PoNetForMaskedLMTransformer.__init__(self, config)
+
+    def forward(self,
+                input_ids=None,
+                attention_mask=None,
+                token_type_ids=None,
+                segment_ids=None,
+                position_ids=None,
+                head_mask=None,
+                labels=None):
+        output = PoNetForMaskedLMTransformer.forward(
+            self,
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            segment_ids=segment_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            labels=labels)
+        output[OutputKeys.INPUT_IDS] = input_ids
+        return output
+
+    @classmethod
+    def _instantiate(cls, **kwargs):
+        model_dir = kwargs.get('model_dir')
+        return super(PoNetForMaskedLMTransformer,
+                     PoNetForMaskedLM).from_pretrained(
+                         pretrained_model_name_or_path=model_dir,
+                         model_dir=model_dir)
diff --git a/modelscope/pipelines/nlp/__init__.py b/modelscope/pipelines/nlp/__init__.py
index 9baeefbb..42dfc972 100644
--- a/modelscope/pipelines/nlp/__init__.py
+++ b/modelscope/pipelines/nlp/__init__.py
@@ -11,6 +11,7 @@ if TYPE_CHECKING:
     from .document_segmentation_pipeline import DocumentSegmentationPipeline
     from .faq_question_answering_pipeline import FaqQuestionAnsweringPipeline
     from .fill_mask_pipeline import FillMaskPipeline
+    from .fill_mask_ponet_pipeline import FillMaskPonetPipeline
    from .information_extraction_pipeline import InformationExtractionPipeline
     from .named_entity_recognition_pipeline import NamedEntityRecognitionPipeline
     from .pair_sentence_classification_pipeline import PairSentenceClassificationPipeline
@@ -36,6 +37,7 @@ else:
         'document_segmentation_pipeline': ['DocumentSegmentationPipeline'],
         'faq_question_answering_pipeline': ['FaqQuestionAnsweringPipeline'],
         'fill_mask_pipeline': ['FillMaskPipeline'],
+        'fill_mask_ponet_pipeline': ['FillMaskPonetPipeline'],
         'named_entity_recognition_pipeline':
         ['NamedEntityRecognitionPipeline'],
         'information_extraction_pipeline': ['InformationExtractionPipeline'],
diff --git a/modelscope/pipelines/nlp/fill_mask_ponet_pipeline.py b/modelscope/pipelines/nlp/fill_mask_ponet_pipeline.py
new file mode 100644
index 00000000..0bb72430
--- /dev/null
+++ b/modelscope/pipelines/nlp/fill_mask_ponet_pipeline.py
@@ -0,0 +1,136 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import os
+from typing import Any, Dict, Optional, Union
+
+import torch
+
+from modelscope.metainfo import Pipelines
+from modelscope.models import Model
+from modelscope.outputs import OutputKeys
+from modelscope.pipelines.base import Pipeline, Tensor
+from modelscope.pipelines.builder import PIPELINES
+from modelscope.preprocessors import FillMaskPoNetPreprocessor, Preprocessor
+from modelscope.utils.config import Config
+from modelscope.utils.constant import ModelFile, Tasks
+
+__all__ = ['FillMaskPonetPipeline']
+_type_map = {'ponet': 'bert'}
+
+
+@PIPELINES.register_module(
+    Tasks.fill_mask, module_name=Pipelines.fill_mask_ponet)
+class FillMaskPonetPipeline(Pipeline):
+
+    def __init__(self,
+                 model: Union[Model, str],
+                 preprocessor: Optional[Preprocessor] = None,
+                 first_sequence='sentence',
+                 **kwargs):
+        """Use `model` and `preprocessor` to create an NLP fill-mask pipeline for prediction.
+
+        Args:
+            model (str or Model): Supply either a local model dir which supports the fill-mask task,
+            or a fill-mask model id from the model hub, or a torch model instance.
+            preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for
+            the model if supplied.
+            first_sequence: The key to read the sentence in.
+
+            NOTE: Inputs of type 'str' are also supported. In this scenario, the 'first_sequence'
+            param will have no effect.
+
+        Example:
+            >>> from modelscope.pipelines import pipeline
+            >>> pipeline_ins = pipeline(
+                'fill-mask', model='damo/nlp_ponet_fill-mask_english-base')
+            >>> input = 'Everything in [MASK] you call reality is really [MASK] a reflection of your [MASK].'
+            >>> print(pipeline_ins(input))
+
+            NOTE2: Please pay attention to the model's special tokens.
+            If a BERT-based model (bert, structbert, etc.) is used, the mask token is '[MASK]'.
+            If an XLM-RoBERTa-based model (xlm-roberta, veco, etc.) is used, the mask token is '<mask>'.
+            For more examples, please check tests/pipelines/test_fill_mask.py.
+        """
+        fill_mask_model = model if isinstance(
+            model, Model) else Model.from_pretrained(model)
+
+        self.config = Config.from_file(
+            os.path.join(fill_mask_model.model_dir, ModelFile.CONFIGURATION))
+
+        if preprocessor is None:
+            preprocessor = FillMaskPoNetPreprocessor(
+                fill_mask_model.model_dir,
+                first_sequence=first_sequence,
+                second_sequence=None,
+                sequence_length=kwargs.pop('sequence_length', 512))
+
+        fill_mask_model.eval()
+        super().__init__(
+            model=fill_mask_model, preprocessor=preprocessor, **kwargs)
+
+        self.preprocessor = preprocessor
+
+        self.tokenizer = preprocessor.tokenizer
+        self.mask_id = {'roberta': 250001, 'bert': 103}
+
+        self.rep_map = {
+            'bert': {
+                '[unused0]': '',
+                '[PAD]': '',
+                '[unused1]': '',
+                r' +': ' ',
+                '[SEP]': '',
+                '[unused2]': '',
+                '[CLS]': '',
+                '[UNK]': ''
+            },
+            'roberta': {
+                r' +': ' ',
+                '<mask>': '',
+                '<pad>': '',
+                '<s>': '',
+                '</s>': '',
+                '<unk>': ' '
+            }
+        }
+
+    def forward(self, inputs: Dict[str, Any],
+                **forward_params) -> Dict[str, Any]:
+        with torch.no_grad():
+            return self.model(inputs, **forward_params)
+
+    def postprocess(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
+        """Process the prediction results.
+
+        Args:
+            inputs (Dict[str, Any]): the model outputs, containing logits and input ids
+
+        Returns:
+            Dict[str, str]: the prediction results
+        """
+        import numpy as np
+        logits = inputs[OutputKeys.LOGITS].detach().cpu().numpy()
+        input_ids = inputs[OutputKeys.INPUT_IDS].detach().cpu().numpy()
+        pred_ids = np.argmax(logits, axis=-1)
+        model_type = self.model.config.model_type
+        process_type = model_type if model_type in self.mask_id else _type_map[
+            model_type]
+        rst_ids = np.where(input_ids == self.mask_id[process_type], pred_ids,
+                           input_ids)
+
+        def rep_tokens(string, rep_map):
+            for k, v in rep_map.items():
+                string = string.replace(k, v)
+            return string.strip()
+
+        pred_strings = []
+        for ids in rst_ids:  # batch
+            if 'language' in self.config.model and self.config.model.language == 'zh':
+                pred_string = self.tokenizer.convert_ids_to_tokens(ids)
+                pred_string = ''.join(pred_string)
+            else:
+                pred_string = self.tokenizer.decode(ids)
+            pred_string = rep_tokens(pred_string, self.rep_map[process_type])
+            pred_strings.append(pred_string)
+
+        return {OutputKeys.TEXT: pred_strings}
diff --git a/modelscope/preprocessors/__init__.py b/modelscope/preprocessors/__init__.py
index 0123b32e..6012b5ba 100644
--- a/modelscope/preprocessors/__init__.py
+++ b/modelscope/preprocessors/__init__.py
@@ -22,8 +22,8 @@ if TYPE_CHECKING:
         PairSentenceClassificationPreprocessor, FillMaskPreprocessor,
         ZeroShotClassificationPreprocessor, NERPreprocessor,
         TextErrorCorrectionPreprocessor, FaqQuestionAnsweringPreprocessor,
-        SequenceLabelingPreprocessor, RelationExtractionPreprocessor)
-    from .slp import DocumentSegmentationPreprocessor
+
SequenceLabelingPreprocessor, RelationExtractionPreprocessor, + DocumentSegmentationPreprocessor, FillMaskPoNetPreprocessor) from .space import (DialogIntentPredictionPreprocessor, DialogModelingPreprocessor, DialogStateTrackingPreprocessor) @@ -52,9 +52,9 @@ else: 'ZeroShotClassificationPreprocessor', 'NERPreprocessor', 'TextErrorCorrectionPreprocessor', 'FaqQuestionAnsweringPreprocessor', 'SequenceLabelingPreprocessor', - 'RelationExtractionPreprocessor' + 'RelationExtractionPreprocessor', + 'DocumentSegmentationPreprocessor', 'FillMaskPoNetPreprocessor' ], - 'slp': ['DocumentSegmentationPreprocessor'], 'space': [ 'DialogIntentPredictionPreprocessor', 'DialogModelingPreprocessor', 'DialogStateTrackingPreprocessor', 'InputFeatures' diff --git a/modelscope/preprocessors/nlp.py b/modelscope/preprocessors/nlp.py index aaa83ed1..84e7ca4d 100644 --- a/modelscope/preprocessors/nlp.py +++ b/modelscope/preprocessors/nlp.py @@ -1,6 +1,7 @@ # Copyright (c) Alibaba, Inc. and its affiliates. import os.path as osp +import re import uuid from typing import Any, Dict, Iterable, Optional, Tuple, Union @@ -11,13 +12,17 @@ from transformers import AutoTokenizer, BertTokenizerFast from modelscope.metainfo import Models, Preprocessors from modelscope.models.nlp.structbert import SbertTokenizerFast from modelscope.outputs import OutputKeys -from modelscope.utils.config import ConfigFields -from modelscope.utils.constant import Fields, InputFields, ModeKeys +from modelscope.utils.config import Config, ConfigFields +from modelscope.utils.constant import Fields, InputFields, ModeKeys, ModelFile from modelscope.utils.hub import get_model_type, parse_label_mapping +from modelscope.utils.logger import get_logger +from modelscope.utils.nlp.nlp_utils import import_external_nltk_data from modelscope.utils.type_assert import type_assert from .base import Preprocessor from .builder import PREPROCESSORS +logger = get_logger() + __all__ = [ 'Tokenize', 'SequenceClassificationPreprocessor', 'TextGenerationPreprocessor', 'TokenClassificationPreprocessor', @@ -25,7 +30,8 @@ __all__ = [ 'SingleSentenceClassificationPreprocessor', 'FillMaskPreprocessor', 'ZeroShotClassificationPreprocessor', 'NERPreprocessor', 'TextErrorCorrectionPreprocessor', 'FaqQuestionAnsweringPreprocessor', - 'SequenceLabelingPreprocessor', 'RelationExtractionPreprocessor' + 'SequenceLabelingPreprocessor', 'RelationExtractionPreprocessor', + 'DocumentSegmentationPreprocessor', 'FillMaskPoNetPreprocessor' ] @@ -903,3 +909,297 @@ class FaqQuestionAnsweringPreprocessor(Preprocessor): max_length = self.MAX_LEN return self.tokenizer.batch_encode_plus( sentence_list, padding=True, max_length=max_length) + + +@PREPROCESSORS.register_module( + Fields.nlp, module_name=Preprocessors.document_segmentation) +class DocumentSegmentationPreprocessor(Preprocessor): + + def __init__(self, model_dir: str, config, *args, **kwargs): + """preprocess the data + + Args: + model_dir (str): model path + """ + + super().__init__(*args, **kwargs) + + self.tokenizer = BertTokenizerFast.from_pretrained( + model_dir, + use_fast=True, + ) + self.question_column_name = 'labels' + self.context_column_name = 'sentences' + self.example_id_column_name = 'example_id' + self.label_to_id = {'B-EOP': 0, 'O': 1} + self.target_specical_ids = set() + self.target_specical_ids.add(self.tokenizer.eos_token_id) + self.max_seq_length = config.max_position_embeddings + self.label_list = ['B-EOP', 'O'] + + def __call__(self, examples) -> Dict[str, Any]: + questions = examples[self.question_column_name] 
+ contexts = examples[self.context_column_name] + example_ids = examples[self.example_id_column_name] + num_examples = len(questions) + + sentences = [] + for sentence_list in contexts: + sentence_list = [_ + '[EOS]' for _ in sentence_list] + sentences.append(sentence_list) + + try: + tokenized_examples = self.tokenizer( + sentences, + is_split_into_words=True, + add_special_tokens=False, + return_token_type_ids=True, + return_attention_mask=True, + ) + except Exception as e: + logger.error(e) + return {} + + segment_ids = [] + token_seq_labels = [] + for example_index in range(num_examples): + example_input_ids = tokenized_examples['input_ids'][example_index] + example_labels = questions[example_index] + example_labels = [ + self.label_to_id[_] if _ in self.label_to_id else -100 + for _ in example_labels + ] + example_token_labels = [] + segment_id = [] + cur_seg_id = 1 + for token_index in range(len(example_input_ids)): + if example_input_ids[token_index] in self.target_specical_ids: + example_token_labels.append(example_labels[cur_seg_id - 1]) + segment_id.append(cur_seg_id) + cur_seg_id += 1 + else: + example_token_labels.append(-100) + segment_id.append(cur_seg_id) + + segment_ids.append(segment_id) + token_seq_labels.append(example_token_labels) + + tokenized_examples['segment_ids'] = segment_ids + tokenized_examples['token_seq_labels'] = token_seq_labels + + new_segment_ids = [] + new_token_seq_labels = [] + new_input_ids = [] + new_token_type_ids = [] + new_attention_mask = [] + new_example_ids = [] + new_sentences = [] + + for example_index in range(num_examples): + example_input_ids = tokenized_examples['input_ids'][example_index] + example_token_type_ids = tokenized_examples['token_type_ids'][ + example_index] + example_attention_mask = tokenized_examples['attention_mask'][ + example_index] + example_segment_ids = tokenized_examples['segment_ids'][ + example_index] + example_token_seq_labels = tokenized_examples['token_seq_labels'][ + example_index] + example_sentences = contexts[example_index] + example_id = example_ids[example_index] + example_total_num_sentences = len(questions[example_index]) + example_total_num_tokens = len( + tokenized_examples['input_ids'][example_index]) + accumulate_length = [ + i for i, x in enumerate(tokenized_examples['input_ids'] + [example_index]) + if x == self.tokenizer.eos_token_id + ] + samples_boundary = [] + left_index = 0 + sent_left_index = 0 + sent_i = 0 + + # for sent_i, length in enumerate(accumulate_length): + while sent_i < len(accumulate_length): + length = accumulate_length[sent_i] + right_index = length + 1 + sent_right_index = sent_i + 1 + if right_index - left_index >= self.max_seq_length - 1 or right_index == example_total_num_tokens: + samples_boundary.append([left_index, right_index]) + + sample_input_ids = [ + self.tokenizer.cls_token_id + ] + example_input_ids[left_index:right_index] + sample_input_ids = sample_input_ids[:self.max_seq_length] + + sample_token_type_ids = [ + 0 + ] + example_token_type_ids[left_index:right_index] + sample_token_type_ids = sample_token_type_ids[:self. + max_seq_length] + + sample_attention_mask = [ + 1 + ] + example_attention_mask[left_index:right_index] + sample_attention_mask = sample_attention_mask[:self. + max_seq_length] + + sample_segment_ids = [ + 0 + ] + example_segment_ids[left_index:right_index] + sample_segment_ids = sample_segment_ids[:self. 
+ max_seq_length] + + sample_token_seq_labels = [ + -100 + ] + example_token_seq_labels[left_index:right_index] + sample_token_seq_labels = sample_token_seq_labels[:self. + max_seq_length] + + if sent_right_index - 1 == sent_left_index: + left_index = right_index + sample_input_ids[-1] = self.tokenizer.eos_token_id + sample_token_seq_labels[-1] = -100 + else: + left_index = accumulate_length[sent_i - 1] + 1 + if sample_token_seq_labels[-1] != -100: + sample_token_seq_labels[-1] = -100 + + if sent_right_index - 1 == sent_left_index or right_index == example_total_num_tokens: + sample_sentences = example_sentences[ + sent_left_index:sent_right_index] + sent_left_index = sent_right_index + sent_i += 1 + else: + sample_sentences = example_sentences[ + sent_left_index:sent_right_index - 1] + sent_left_index = sent_right_index - 1 + + if (len([_ for _ in sample_token_seq_labels if _ != -100 + ])) != len(sample_sentences) - 1 and (len([ + _ + for _ in sample_token_seq_labels if _ != -100 + ])) != len(sample_sentences): + tmp = [] + for w_i, w, l in zip( + sample_input_ids, + self.tokenizer.decode(sample_input_ids).split( + ' '), sample_token_seq_labels): + tmp.append((w_i, w, l)) + while len(sample_input_ids) < self.max_seq_length: + sample_input_ids.append(self.tokenizer.pad_token_id) + sample_token_type_ids.append(0) + sample_attention_mask.append(0) + sample_segment_ids.append(example_total_num_sentences + + 1) + sample_token_seq_labels.append(-100) + + new_input_ids.append(sample_input_ids) + new_token_type_ids.append(sample_token_type_ids) + new_attention_mask.append(sample_attention_mask) + new_segment_ids.append(sample_segment_ids) + new_token_seq_labels.append(sample_token_seq_labels) + new_example_ids.append(example_id) + new_sentences.append(sample_sentences) + else: + sent_i += 1 + continue + + output_samples = {} + + output_samples['input_ids'] = new_input_ids + output_samples['token_type_ids'] = new_token_type_ids + output_samples['attention_mask'] = new_attention_mask + + output_samples['segment_ids'] = new_segment_ids + output_samples['example_id'] = new_example_ids + output_samples['labels'] = new_token_seq_labels + output_samples['sentences'] = new_sentences + + return output_samples + + +@PREPROCESSORS.register_module( + Fields.nlp, module_name=Preprocessors.fill_mask_ponet) +class FillMaskPoNetPreprocessor(NLPTokenizerPreprocessorBase): + """The tokenizer preprocessor used in MLM task. 
+ """ + + def __init__(self, model_dir: str, mode=ModeKeys.INFERENCE, **kwargs): + kwargs['truncation'] = kwargs.get('truncation', True) + kwargs['padding'] = kwargs.get('padding', 'max_length') + kwargs['max_length'] = kwargs.pop('sequence_length', 512) + kwargs['return_token_type_ids'] = kwargs.get('return_token_type_ids', + True) + super().__init__(model_dir, pair=False, mode=mode, **kwargs) + + self.cfg = Config.from_file( + osp.join(model_dir, ModelFile.CONFIGURATION)) + self.language = self.cfg.model.get('language', 'en') + if self.language == 'en': + from nltk.tokenize import sent_tokenize + import_external_nltk_data( + osp.join(model_dir, 'nltk_data'), 'tokenizers/punkt') + elif self.language in ['zh', 'cn']: + + def sent_tokenize(para): + para = re.sub(r'([。!!?\?])([^”’])', r'\1\n\2', para) # noqa * + para = re.sub(r'(\.{6})([^”’])', r'\1\n\2', para) # noqa * + para = re.sub(r'(\…{2})([^”’])', r'\1\n\2', para) # noqa * + para = re.sub(r'([。!?\?][”’])([^,。!?\?])', r'\1\n\2', + para) # noqa * + para = para.rstrip() + return [_ for _ in para.split('\n') if _] + else: + raise NotImplementedError + + self.sent_tokenize = sent_tokenize + self.max_length = kwargs['max_length'] + + def __call__(self, data: Union[str, Tuple, Dict]) -> Dict[str, Any]: + """process the raw input data + + Args: + data (tuple): [sentence1, sentence2] + sentence1 (str): a sentence + Example: + 'you are so handsome.' + sentence2 (str): a sentence + Example: + 'you are so beautiful.' + Returns: + Dict[str, Any]: the preprocessed data + """ + + text_a, text_b, labels = self.parse_text_and_label(data) + output = self.tokenizer( + text_a, + text_b, + return_tensors='pt' if self._mode == ModeKeys.INFERENCE else None, + **self.tokenize_kwargs) + max_seq_length = self.max_length + + if text_b is None: + segment_ids = [] + seg_lens = list( + map( + len, + self.tokenizer( + self.sent_tokenize(text_a), + add_special_tokens=False, + truncation=True)['input_ids'])) + segment_id = [0] + sum( + [[i] * sl for i, sl in enumerate(seg_lens, start=1)], []) + segment_id = segment_id[:max_seq_length - 1] + segment_ids.append(segment_id + [segment_id[-1] + 1] + * (max_seq_length - len(segment_id))) + output['segment_ids'] = segment_ids + + output = { + k: np.array(v) if isinstance(v, list) else v + for k, v in output.items() + } + + self.labels_to_id(labels, output) + return output diff --git a/modelscope/preprocessors/slp.py b/modelscope/preprocessors/slp.py deleted file mode 100644 index d9c2d9b7..00000000 --- a/modelscope/preprocessors/slp.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. 
- -from typing import Any, Dict - -from transformers import BertTokenizerFast - -from modelscope.metainfo import Preprocessors -from modelscope.utils.constant import Fields -from modelscope.utils.hub import get_model_type, parse_label_mapping -from modelscope.utils.type_assert import type_assert -from .base import Preprocessor -from .builder import PREPROCESSORS - -__all__ = ['DocumentSegmentationPreprocessor'] - - -@PREPROCESSORS.register_module( - Fields.nlp, module_name=Preprocessors.document_segmentation) -class DocumentSegmentationPreprocessor(Preprocessor): - - def __init__(self, model_dir: str, config, *args, **kwargs): - """preprocess the data - - Args: - model_dir (str): model path - """ - - super().__init__(*args, **kwargs) - - self.tokenizer = BertTokenizerFast.from_pretrained( - model_dir, - use_fast=True, - ) - self.question_column_name = 'labels' - self.context_column_name = 'sentences' - self.example_id_column_name = 'example_id' - self.label_to_id = {'B-EOP': 0, 'O': 1} - self.target_specical_ids = set() - self.target_specical_ids.add(self.tokenizer.eos_token_id) - self.max_seq_length = config.max_position_embeddings - self.label_list = ['B-EOP', 'O'] - - def __call__(self, examples) -> Dict[str, Any]: - questions = examples[self.question_column_name] - contexts = examples[self.context_column_name] - example_ids = examples[self.example_id_column_name] - num_examples = len(questions) - - sentences = [] - for sentence_list in contexts: - sentence_list = [_ + '[EOS]' for _ in sentence_list] - sentences.append(sentence_list) - - try: - tokenized_examples = self.tokenizer( - sentences, - is_split_into_words=True, - add_special_tokens=False, - return_token_type_ids=True, - return_attention_mask=True, - ) - except Exception as e: - print(str(e)) - return {} - - segment_ids = [] - token_seq_labels = [] - for example_index in range(num_examples): - example_input_ids = tokenized_examples['input_ids'][example_index] - example_labels = questions[example_index] - example_labels = [ - self.label_to_id[_] if _ in self.label_to_id else -100 - for _ in example_labels - ] - example_token_labels = [] - segment_id = [] - cur_seg_id = 1 - for token_index in range(len(example_input_ids)): - if example_input_ids[token_index] in self.target_specical_ids: - example_token_labels.append(example_labels[cur_seg_id - 1]) - segment_id.append(cur_seg_id) - cur_seg_id += 1 - else: - example_token_labels.append(-100) - segment_id.append(cur_seg_id) - - segment_ids.append(segment_id) - token_seq_labels.append(example_token_labels) - - tokenized_examples['segment_ids'] = segment_ids - tokenized_examples['token_seq_labels'] = token_seq_labels - - new_segment_ids = [] - new_token_seq_labels = [] - new_input_ids = [] - new_token_type_ids = [] - new_attention_mask = [] - new_example_ids = [] - new_sentences = [] - - for example_index in range(num_examples): - example_input_ids = tokenized_examples['input_ids'][example_index] - example_token_type_ids = tokenized_examples['token_type_ids'][ - example_index] - example_attention_mask = tokenized_examples['attention_mask'][ - example_index] - example_segment_ids = tokenized_examples['segment_ids'][ - example_index] - example_token_seq_labels = tokenized_examples['token_seq_labels'][ - example_index] - example_sentences = contexts[example_index] - example_id = example_ids[example_index] - example_total_num_sentences = len(questions[example_index]) - example_total_num_tokens = len( - tokenized_examples['input_ids'][example_index]) - accumulate_length = [ - i for i, x in 
enumerate(tokenized_examples['input_ids'] - [example_index]) - if x == self.tokenizer.eos_token_id - ] - samples_boundary = [] - left_index = 0 - sent_left_index = 0 - sent_i = 0 - - # for sent_i, length in enumerate(accumulate_length): - while sent_i < len(accumulate_length): - length = accumulate_length[sent_i] - right_index = length + 1 - sent_right_index = sent_i + 1 - if right_index - left_index >= self.max_seq_length - 1 or right_index == example_total_num_tokens: - samples_boundary.append([left_index, right_index]) - - sample_input_ids = [ - self.tokenizer.cls_token_id - ] + example_input_ids[left_index:right_index] - sample_input_ids = sample_input_ids[:self.max_seq_length] - - sample_token_type_ids = [ - 0 - ] + example_token_type_ids[left_index:right_index] - sample_token_type_ids = sample_token_type_ids[:self. - max_seq_length] - - sample_attention_mask = [ - 1 - ] + example_attention_mask[left_index:right_index] - sample_attention_mask = sample_attention_mask[:self. - max_seq_length] - - sample_segment_ids = [ - 0 - ] + example_segment_ids[left_index:right_index] - sample_segment_ids = sample_segment_ids[:self. - max_seq_length] - - sample_token_seq_labels = [ - -100 - ] + example_token_seq_labels[left_index:right_index] - sample_token_seq_labels = sample_token_seq_labels[:self. - max_seq_length] - - if sent_right_index - 1 == sent_left_index: - left_index = right_index - sample_input_ids[-1] = self.tokenizer.eos_token_id - sample_token_seq_labels[-1] = -100 - else: - left_index = accumulate_length[sent_i - 1] + 1 - if sample_token_seq_labels[-1] != -100: - sample_token_seq_labels[-1] = -100 - - if sent_right_index - 1 == sent_left_index or right_index == example_total_num_tokens: - sample_sentences = example_sentences[ - sent_left_index:sent_right_index] - sent_left_index = sent_right_index - sent_i += 1 - else: - sample_sentences = example_sentences[ - sent_left_index:sent_right_index - 1] - sent_left_index = sent_right_index - 1 - - if (len([_ for _ in sample_token_seq_labels if _ != -100 - ])) != len(sample_sentences) - 1 and (len([ - _ - for _ in sample_token_seq_labels if _ != -100 - ])) != len(sample_sentences): - tmp = [] - for w_i, w, l in zip( - sample_input_ids, - self.tokenizer.decode(sample_input_ids).split( - ' '), sample_token_seq_labels): - tmp.append((w_i, w, l)) - while len(sample_input_ids) < self.max_seq_length: - sample_input_ids.append(self.tokenizer.pad_token_id) - sample_token_type_ids.append(0) - sample_attention_mask.append(0) - sample_segment_ids.append(example_total_num_sentences - + 1) - sample_token_seq_labels.append(-100) - - new_input_ids.append(sample_input_ids) - new_token_type_ids.append(sample_token_type_ids) - new_attention_mask.append(sample_attention_mask) - new_segment_ids.append(sample_segment_ids) - new_token_seq_labels.append(sample_token_seq_labels) - new_example_ids.append(example_id) - new_sentences.append(sample_sentences) - else: - sent_i += 1 - continue - - output_samples = {} - - output_samples['input_ids'] = new_input_ids - output_samples['token_type_ids'] = new_token_type_ids - output_samples['attention_mask'] = new_attention_mask - - output_samples['segment_ids'] = new_segment_ids - output_samples['example_id'] = new_example_ids - output_samples['labels'] = new_token_seq_labels - output_samples['sentences'] = new_sentences - - return output_samples diff --git a/modelscope/utils/nlp/nlp_utils.py b/modelscope/utils/nlp/nlp_utils.py index 35b374f2..64b12007 100644 --- a/modelscope/utils/nlp/nlp_utils.py +++ 
b/modelscope/utils/nlp/nlp_utils.py @@ -1,3 +1,4 @@ +import os.path as osp from typing import List from modelscope.outputs import OutputKeys @@ -41,3 +42,22 @@ def tracking_and_print_dialog_states( print(json.dumps(result)) history_states.extend([result[OutputKeys.OUTPUT], {}]) + + +def import_external_nltk_data(nltk_data_dir, package_name): + """import external nltk_data, and extract nltk zip package. + + Args: + nltk_data_dir (str): external nltk_data dir path, eg. /home/xx/nltk_data + package_name (str): nltk package name, eg. tokenizers/punkt + """ + import nltk + nltk.data.path.append(nltk_data_dir) + + filepath = osp.join(nltk_data_dir, package_name + '.zip') + zippath = osp.join(nltk_data_dir, package_name) + packagepath = osp.dirname(zippath) + if not osp.exists(zippath): + import zipfile + with zipfile.ZipFile(filepath) as zf: + zf.extractall(osp.join(packagepath)) diff --git a/tests/pipelines/test_fill_mask_ponet.py b/tests/pipelines/test_fill_mask_ponet.py new file mode 100644 index 00000000..707cc201 --- /dev/null +++ b/tests/pipelines/test_fill_mask_ponet.py @@ -0,0 +1,48 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import unittest + +from modelscope.metainfo import Pipelines +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.utils.test_utils import test_level + + +class FillMaskPonetTest(unittest.TestCase): + model_id_ponet = { + 'zh': 'damo/nlp_ponet_fill-mask_chinese-base', + 'en': 'damo/nlp_ponet_fill-mask_english-base' + } + + ori_texts = { + 'zh': + '段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。' + '你师父差得动你,你师父可差不动我。', + 'en': + 'Everything in what you call reality is really just a reflection of your ' + 'consciousness. Your whole universe is just a mirror reflection of your story.' + } + + test_inputs = { + 'zh': + '段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK],你' + '师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。', + 'en': + 'Everything in [MASK] you call reality is really [MASK] a reflection of your ' + '[MASK]. Your [MASK] universe is just a mirror [MASK] of your story.' + } + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_run_with_ponet_model(self): + for language in ['zh', 'en']: + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language] + + pipeline_ins = pipeline( + task=Tasks.fill_mask, model=self.model_id_ponet[language]) + + print(f'\nori_text: {ori_text}\ninput: {test_input}\npipeline: ' + f'{pipeline_ins(test_input)}\n') + + +if __name__ == '__main__': + unittest.main()
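Reviewer note: the sketch below is a minimal way to exercise the new pipeline end to end. It mirrors tests/pipelines/test_fill_mask_ponet.py and assumes the patched modelscope is installed and that the model id used in the test ('damo/nlp_ponet_fill-mask_english-base') is reachable on the model hub.

# Minimal usage sketch (assumptions: patched modelscope installed, hub access available).
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

fill_mask_en = pipeline(
    task=Tasks.fill_mask, model='damo/nlp_ponet_fill-mask_english-base')
text = ('Everything in [MASK] you call reality is really [MASK] a '
        'reflection of your [MASK].')
# Expect a dict keyed by OutputKeys.TEXT holding the filled-in sentence(s).
print(fill_mask_en(text))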
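Reviewer note: the PoNet-specific model input added by this patch is `segment_ids`, which FillMaskPoNetPreprocessor derives by sentence-splitting the text (nltk punkt for English, a regex splitter for Chinese) and numbering each sentence's tokens. The snippet below is a self-contained mirror of that arithmetic on hypothetical per-sentence token counts, so the feature's shape can be checked without loading a real tokenizer.

def build_segment_ids(sentence_token_lens, max_seq_length):
    # Mirror of the logic in FillMaskPoNetPreprocessor.__call__:
    # id 0 for the leading [CLS] slot, id i for the tokens of the i-th sentence,
    # and (last id + 1) for the padding tail up to max_seq_length.
    segment_id = [0] + sum(
        [[i] * n for i, n in enumerate(sentence_token_lens, start=1)], [])
    segment_id = segment_id[:max_seq_length - 1]
    return segment_id + [segment_id[-1] + 1] * (
        max_seq_length - len(segment_id))

# Hypothetical example: three sentences of 4, 3 and 5 tokens, padded to length 16.
print(build_segment_ids([4, 3, 5], 16))
# -> [0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4]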
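Reviewer note: PoNetTokenizer._pad differs from the stock BERT padder mainly in how it extends `segment_ids`: the pad region gets a fresh segment id (last id + 1) instead of zeros, so padding never merges into the last real sentence segment. A small illustration of the right-padding branch on a hypothetical, already-encoded input follows; the ids are made up and pad_token_id is assumed to be 0.

# Illustrative only; not the tokenizer itself.
encoded = {
    'input_ids': [101, 2023, 2003, 102],
    'attention_mask': [1, 1, 1, 1],
    'segment_ids': [0, 1, 1, 2],
}
max_length, pad_token_id = 6, 0
difference = max_length - len(encoded['input_ids'])

encoded['attention_mask'] += [0] * difference
encoded['segment_ids'] += [encoded['segment_ids'][-1] + 1] * difference
encoded['input_ids'] += [pad_token_id] * difference
print(encoded['segment_ids'])  # -> [0, 1, 1, 2, 3, 3]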