Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10550136master
@@ -254,6 +254,7 @@ class Pipelines(object):
     translation_en_to_de = 'translation_en_to_de'  # keep it underscore
     translation_en_to_ro = 'translation_en_to_ro'  # keep it underscore
     translation_en_to_fr = 'translation_en_to_fr'  # keep it underscore
+    token_classification = 'token-classification'

     # audio tasks
     sambert_hifigan_tts = 'sambert-hifigan-tts'
@@ -66,7 +66,6 @@ class TokenClassificationModel(SingleBackboneTaskModelBase):
             attentions=outputs.attentions,
             offset_mapping=input['offset_mapping'],
         )
-        return outputs

     def extract_logits(self, outputs):
         return outputs[OutputKeys.LOGITS].cpu().detach()
@@ -17,6 +17,8 @@ from modelscope.utils.tensor_utils import (torch_nested_detach,

 __all__ = ['TokenClassificationPipeline']

+@PIPELINES.register_module(
+    Tasks.token_classification, module_name=Pipelines.token_classification)
 @PIPELINES.register_module(
     Tasks.token_classification, module_name=Pipelines.part_of_speech)
 @PIPELINES.register_module(
@@ -41,7 +43,7 @@ class TokenClassificationPipeline(Pipeline):
                 str) else model
         if preprocessor is None:
-            preprocessor = Model.from_pretrained(
+            preprocessor = Preprocessor.from_pretrained(
                 model.model_dir,
                 sequence_length=kwargs.pop('sequence_length', 128))
         model.eval()