Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9608407 (branch: master)
| @@ -112,7 +112,7 @@ class Pipelines(object): | |||||
| csanmt_translation = 'csanmt-translation' | csanmt_translation = 'csanmt-translation' | ||||
| nli = 'nli' | nli = 'nli' | ||||
| dialog_intent_prediction = 'dialog-intent-prediction' | dialog_intent_prediction = 'dialog-intent-prediction' | ||||
| dialog_modeling = 'dialog-modeling' | |||||
| task_oriented_conversation = 'task-oriented-conversation' | |||||
| dialog_state_tracking = 'dialog-state-tracking' | dialog_state_tracking = 'dialog-state-tracking' | ||||
| zero_shot_classification = 'zero-shot-classification' | zero_shot_classification = 'zero-shot-classification' | ||||
| text_error_correction = 'text-error-correction' | text_error_correction = 'text-error-correction' | ||||
| @@ -15,7 +15,8 @@ from modelscope.utils.constant import ModelFile, Tasks | |||||
| __all__ = ['SpaceForDialogModeling'] | __all__ = ['SpaceForDialogModeling'] | ||||
| @MODELS.register_module(Tasks.dialog_modeling, module_name=Models.space) | |||||
| @MODELS.register_module( | |||||
| Tasks.task_oriented_conversation, module_name=Models.space) | |||||
| class SpaceForDialogModeling(TorchModel): | class SpaceForDialogModeling(TorchModel): | ||||
| def __init__(self, model_dir: str, *args, **kwargs): | def __init__(self, model_dir: str, *args, **kwargs): | ||||
| @@ -33,8 +34,8 @@ class SpaceForDialogModeling(TorchModel): | |||||
| Config.from_file( | Config.from_file( | ||||
| os.path.join(self.model_dir, ModelFile.CONFIGURATION))) | os.path.join(self.model_dir, ModelFile.CONFIGURATION))) | ||||
| import torch | |||||
| self.config.use_gpu = self.config.use_gpu and torch.cuda.is_available() | |||||
| self.config.use_gpu = True if 'device' not in kwargs or kwargs[ | |||||
| 'device'] == 'gpu' else False | |||||
| self.text_field = kwargs.pop( | self.text_field = kwargs.pop( | ||||
| 'text_field', | 'text_field', | ||||
| @@ -326,7 +326,7 @@ TASK_OUTPUTS = { | |||||
| # (Deprecated) dialog modeling prediction result for single sample | # (Deprecated) dialog modeling prediction result for single sample | ||||
| # sys : ['you', 'are', 'welcome', '.', 'have', 'a', 'great', 'day', '!'] | # sys : ['you', 'are', 'welcome', '.', 'have', 'a', 'great', 'day', '!'] | ||||
| Tasks.dialog_modeling: [OutputKeys.RESPONSE], | |||||
| Tasks.task_oriented_conversation: [OutputKeys.RESPONSE], | |||||
| # (Deprecated) dialog state tracking result for single sample | # (Deprecated) dialog state tracking result for single sample | ||||
| # { | # { | ||||
| @@ -51,8 +51,8 @@ DEFAULT_MODEL_FOR_PIPELINE = { | |||||
| Tasks.dialog_intent_prediction: | Tasks.dialog_intent_prediction: | ||||
| (Pipelines.dialog_intent_prediction, | (Pipelines.dialog_intent_prediction, | ||||
| 'damo/nlp_space_dialog-intent-prediction'), | 'damo/nlp_space_dialog-intent-prediction'), | ||||
| Tasks.dialog_modeling: (Pipelines.dialog_modeling, | |||||
| 'damo/nlp_space_dialog-modeling'), | |||||
| Tasks.task_oriented_conversation: (Pipelines.task_oriented_conversation, | |||||
| 'damo/nlp_space_dialog-modeling'), | |||||
| Tasks.dialog_state_tracking: (Pipelines.dialog_state_tracking, | Tasks.dialog_state_tracking: (Pipelines.dialog_state_tracking, | ||||
| 'damo/nlp_space_dialog-state-tracking'), | 'damo/nlp_space_dialog-state-tracking'), | ||||
| Tasks.text_error_correction: | Tasks.text_error_correction: | ||||
| @@ -5,7 +5,7 @@ from modelscope.utils.import_utils import LazyImportModule | |||||
| if TYPE_CHECKING: | if TYPE_CHECKING: | ||||
| from .dialog_intent_prediction_pipeline import DialogIntentPredictionPipeline | from .dialog_intent_prediction_pipeline import DialogIntentPredictionPipeline | ||||
| from .dialog_modeling_pipeline import DialogModelingPipeline | |||||
| from .task_oriented_conversation_pipeline import TaskOrientedConversationPipeline | |||||
| from .dialog_state_tracking_pipeline import DialogStateTrackingPipeline | from .dialog_state_tracking_pipeline import DialogStateTrackingPipeline | ||||
| from .fill_mask_pipeline import FillMaskPipeline | from .fill_mask_pipeline import FillMaskPipeline | ||||
| from .named_entity_recognition_pipeline import NamedEntityRecognitionPipeline | from .named_entity_recognition_pipeline import NamedEntityRecognitionPipeline | ||||
| @@ -24,7 +24,8 @@ else: | |||||
| _import_structure = { | _import_structure = { | ||||
| 'dialog_intent_prediction_pipeline': | 'dialog_intent_prediction_pipeline': | ||||
| ['DialogIntentPredictionPipeline'], | ['DialogIntentPredictionPipeline'], | ||||
| 'dialog_modeling_pipeline': ['DialogModelingPipeline'], | |||||
| 'task_oriented_conversation_pipeline': | |||||
| ['TaskOrientedConversationPipeline'], | |||||
| 'dialog_state_tracking_pipeline': ['DialogStateTrackingPipeline'], | 'dialog_state_tracking_pipeline': ['DialogStateTrackingPipeline'], | ||||
| 'fill_mask_pipeline': ['FillMaskPipeline'], | 'fill_mask_pipeline': ['FillMaskPipeline'], | ||||
| 'single_sentence_classification_pipeline': | 'single_sentence_classification_pipeline': | ||||
| @@ -11,12 +11,13 @@ from modelscope.pipelines.builder import PIPELINES | |||||
| from modelscope.preprocessors import DialogModelingPreprocessor | from modelscope.preprocessors import DialogModelingPreprocessor | ||||
| from modelscope.utils.constant import Tasks | from modelscope.utils.constant import Tasks | ||||
| __all__ = ['DialogModelingPipeline'] | |||||
| __all__ = ['TaskOrientedConversationPipeline'] | |||||
| @PIPELINES.register_module( | @PIPELINES.register_module( | ||||
| Tasks.dialog_modeling, module_name=Pipelines.dialog_modeling) | |||||
| class DialogModelingPipeline(Pipeline): | |||||
| Tasks.task_oriented_conversation, | |||||
| module_name=Pipelines.task_oriented_conversation) | |||||
| class TaskOrientedConversationPipeline(Pipeline): | |||||
| def __init__(self, | def __init__(self, | ||||
| model: Union[SpaceForDialogModeling, str], | model: Union[SpaceForDialogModeling, str], | ||||
| @@ -79,7 +79,7 @@ class NLPTasks(object): | |||||
| token_classification = 'token-classification' | token_classification = 'token-classification' | ||||
| conversational = 'conversational' | conversational = 'conversational' | ||||
| text_generation = 'text-generation' | text_generation = 'text-generation' | ||||
| dialog_modeling = 'dialog-modeling' | |||||
| task_oriented_conversation = 'task-oriented-conversation' | |||||
| dialog_intent_prediction = 'dialog-intent-prediction' | dialog_intent_prediction = 'dialog-intent-prediction' | ||||
| dialog_state_tracking = 'dialog-state-tracking' | dialog_state_tracking = 'dialog-state-tracking' | ||||
| table_question_answering = 'table-question-answering' | table_question_answering = 'table-question-answering' | ||||
| @@ -6,13 +6,13 @@ from modelscope.hub.snapshot_download import snapshot_download | |||||
| from modelscope.models import Model | from modelscope.models import Model | ||||
| from modelscope.models.nlp import SpaceForDialogModeling | from modelscope.models.nlp import SpaceForDialogModeling | ||||
| from modelscope.pipelines import pipeline | from modelscope.pipelines import pipeline | ||||
| from modelscope.pipelines.nlp import DialogModelingPipeline | |||||
| from modelscope.pipelines.nlp import TaskOrientedConversationPipeline | |||||
| from modelscope.preprocessors import DialogModelingPreprocessor | from modelscope.preprocessors import DialogModelingPreprocessor | ||||
| from modelscope.utils.constant import Tasks | from modelscope.utils.constant import Tasks | ||||
| from modelscope.utils.test_utils import test_level | from modelscope.utils.test_utils import test_level | ||||
| class DialogModelingTest(unittest.TestCase): | |||||
| class TaskOrientedConversationTest(unittest.TestCase): | |||||
| model_id = 'damo/nlp_space_dialog-modeling' | model_id = 'damo/nlp_space_dialog-modeling' | ||||
| test_case = { | test_case = { | ||||
| 'sng0073': { | 'sng0073': { | ||||
| @@ -92,7 +92,7 @@ class DialogModelingTest(unittest.TestCase): | |||||
| } | } | ||||
| def generate_and_print_dialog_response( | def generate_and_print_dialog_response( | ||||
| self, pipelines: List[DialogModelingPipeline]): | |||||
| self, pipelines: List[TaskOrientedConversationPipeline]): | |||||
| result = {} | result = {} | ||||
| for step, item in enumerate(self.test_case['sng0073']['log']): | for step, item in enumerate(self.test_case['sng0073']['log']): | ||||
| @@ -108,39 +108,37 @@ class DialogModelingTest(unittest.TestCase): | |||||
| @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') | @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') | ||||
| def test_run_by_direct_model_download(self): | def test_run_by_direct_model_download(self): | ||||
| cache_path = snapshot_download(self.model_id) | |||||
| cache_path = snapshot_download( | |||||
| self.model_id, revision='task_oriented_conversation') | |||||
| preprocessor = DialogModelingPreprocessor(model_dir=cache_path) | preprocessor = DialogModelingPreprocessor(model_dir=cache_path) | ||||
| model = SpaceForDialogModeling( | model = SpaceForDialogModeling( | ||||
| model_dir=cache_path, | model_dir=cache_path, | ||||
| text_field=preprocessor.text_field, | text_field=preprocessor.text_field, | ||||
| config=preprocessor.config, | |||||
| device='cpu') | |||||
| config=preprocessor.config) | |||||
| pipelines = [ | pipelines = [ | ||||
| DialogModelingPipeline( | |||||
| model=model, preprocessor=preprocessor, device='cpu'), | |||||
| TaskOrientedConversationPipeline( | |||||
| model=model, preprocessor=preprocessor), | |||||
| pipeline( | pipeline( | ||||
| task=Tasks.dialog_modeling, | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model=model, | model=model, | ||||
| preprocessor=preprocessor, | |||||
| device='cpu') | |||||
| preprocessor=preprocessor) | |||||
| ] | ] | ||||
| self.generate_and_print_dialog_response(pipelines) | self.generate_and_print_dialog_response(pipelines) | ||||
| @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') | @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') | ||||
| def test_run_with_model_from_modelhub(self): | def test_run_with_model_from_modelhub(self): | ||||
| model = Model.from_pretrained(self.model_id) | |||||
| preprocessor = DialogModelingPreprocessor( | |||||
| model_dir=model.model_dir, device='cpu') | |||||
| model = Model.from_pretrained( | |||||
| self.model_id, revision='task_oriented_conversation') | |||||
| preprocessor = DialogModelingPreprocessor(model_dir=model.model_dir) | |||||
| pipelines = [ | pipelines = [ | ||||
| DialogModelingPipeline( | |||||
| model=model, preprocessor=preprocessor, device='cpu'), | |||||
| TaskOrientedConversationPipeline( | |||||
| model=model, preprocessor=preprocessor), | |||||
| pipeline( | pipeline( | ||||
| task=Tasks.dialog_modeling, | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model=model, | model=model, | ||||
| preprocessor=preprocessor, | |||||
| device='cpu') | |||||
| preprocessor=preprocessor) | |||||
| ] | ] | ||||
| self.generate_and_print_dialog_response(pipelines) | self.generate_and_print_dialog_response(pipelines) | ||||
| @@ -149,17 +147,25 @@ class DialogModelingTest(unittest.TestCase): | |||||
| def test_run_with_model_name(self): | def test_run_with_model_name(self): | ||||
| pipelines = [ | pipelines = [ | ||||
| pipeline( | pipeline( | ||||
| task=Tasks.dialog_modeling, model=self.model_id, device='cpu'), | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model=self.model_id, | |||||
| model_revision='task_oriented_conversation'), | |||||
| pipeline( | pipeline( | ||||
| task=Tasks.dialog_modeling, model=self.model_id, device='cpu') | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model=self.model_id, | |||||
| model_revision='task_oriented_conversation') | |||||
| ] | ] | ||||
| self.generate_and_print_dialog_response(pipelines) | self.generate_and_print_dialog_response(pipelines) | ||||
| @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') | @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') | ||||
| def test_run_with_default_model(self): | def test_run_with_default_model(self): | ||||
| pipelines = [ | pipelines = [ | ||||
| pipeline(task=Tasks.dialog_modeling, device='cpu'), | |||||
| pipeline(task=Tasks.dialog_modeling, device='cpu') | |||||
| pipeline( | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model_revision='task_oriented_conversation'), | |||||
| pipeline( | |||||
| task=Tasks.task_oriented_conversation, | |||||
| model_revision='task_oriented_conversation') | |||||
| ] | ] | ||||
| self.generate_and_print_dialog_response(pipelines) | self.generate_and_print_dialog_response(pipelines) | ||||