From 4dcc61f550a60f152cddcdd67c9baf32a5322d14 Mon Sep 17 00:00:00 2001 From: haixuanTao Date: Wed, 19 Mar 2025 11:01:01 +0100 Subject: [PATCH] make history conditional --- node-hub/dora-transformers/dora_transformers/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node-hub/dora-transformers/dora_transformers/main.py b/node-hub/dora-transformers/dora_transformers/main.py index de386d07..e11e331f 100644 --- a/node-hub/dora-transformers/dora_transformers/main.py +++ b/node-hub/dora-transformers/dora_transformers/main.py @@ -20,6 +20,7 @@ MODEL_NAME = os.getenv("MODEL_NAME", "Qwen/Qwen2.5-0.5B-Instruct") MAX_TOKENS = int(os.getenv("MAX_TOKENS", "512")) DEVICE = os.getenv("DEVICE", "auto") TORCH_DTYPE = os.getenv("TORCH_DTYPE", "auto") +HISTORY = os.getenv("HISTORY", "False").lower() == "true" # Words that trigger the model to respond @@ -90,8 +91,8 @@ def main(): if len(ACTIVATION_WORDS) == 0 or any( word in ACTIVATION_WORDS for word in words ): - response, _history = generate_response(model, tokenizer, text, history) - + response, tmp_history = generate_response(model, tokenizer, text, history) + history = tmp_history if HISTORY else history node.send_output( output_id="text", data=pa.array([response]), metadata={} )