Browse Source

fix: 解决 agent_enable 为 false 时无法输出的 bug

main
gjl 1 year ago
parent
commit
740d8058e0
4 changed files with 28 additions and 110 deletions
  1. +1
    -1
      requirements.txt
  2. +11
    -6
      src/mindpilot/app/chat/chat.py
  3. +3
    -2
      src/mindpilot/app/configs/__init__.py
  4. +13
    -101
      src/mindpilot/app/configs/prompt_config.py

+ 1
- 1
requirements.txt View File

@@ -3,7 +3,7 @@ mindnlp == 0.3.1
fastapi == 0.111.0
uvicorn == 0.30.1
dataclasses-json == 0.6.7
langchain == 0.2.7
langchain~=0.2.13
starlette~=0.37.2
sse-starlette == 2.1.2
openai == 1.35.14


+ 11
- 6
src/mindpilot/app/chat/chat.py View File

@@ -16,7 +16,7 @@ from ..callback_handler.agent_callback_handler import (
AgentStatus,
)
from ..chat.utils import History
from ..configs import MODEL_CONFIG, TOOL_CONFIG, OPENAI_PROMPT
from ..configs import MODEL_CONFIG, TOOL_CONFIG, OPENAI_PROMPT, PROMPT_TEMPLATES
from ..utils.system_utils import get_ChatOpenAI, get_tool, wrap_done, MsgType
from ..agent.utils import get_agent_from_id

@@ -59,16 +59,21 @@ def create_models_chains(
):
if history:
history = [History.from_data(h) for h in history]
input_msg = History(role="user", content=PROMPT_TEMPLATES["llm_model"]["with_history"]).to_msg_template(
False
)
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_template() for i in history]
[i.to_msg_template() for i in history] + [input_msg]
)
else:
# TODO 完善
chat_prompt = ChatPromptTemplate.from_messages([("system", "您好,我是智能Agent桌面助手MindPilot,请问有什么可以帮您?")])
input_msg = History(role="user", content=PROMPT_TEMPLATES["llm_model"]["default"]).to_msg_template(
False
)
chat_prompt = ChatPromptTemplate.from_messages([input_msg])

llm = models
llm.callbacks = callbacks
chain = LLMChain(prompt=chat_prompt, llm=llm)
chain = LLMChain(prompt=chat_prompt, llm=llm, verbose=True)

if agent_enable:
agent_executor = agents_registry(
@@ -77,7 +82,7 @@ def create_models_chains(
full_chain = {"input": lambda x: x["input"], "chat_history": lambda x: x["chat_history"]} | agent_executor
else:
chain.llm.callbacks = callbacks
full_chain = {"input": lambda x: x["input"]} | chain
full_chain = {"input": lambda x: x["input"], "chat_history": lambda x: x["chat_history"]} | chain
return full_chain




+ 3
- 2
src/mindpilot/app/configs/__init__.py View File

@@ -1,6 +1,6 @@
from .system_config import HOST, PORT
from .model_config import MODEL_CONFIG
from .prompt_config import OPENAI_PROMPT
from .prompt_config import OPENAI_PROMPT, PROMPT_TEMPLATES
from .tool_config import TOOL_CONFIG

__all__ = [
@@ -9,4 +9,5 @@ __all__ = [
"MODEL_CONFIG",
"OPENAI_PROMPT",
"TOOL_CONFIG",
]
"PROMPT_TEMPLATES"
]

+ 13
- 101
src/mindpilot/app/configs/prompt_config.py View File

@@ -1,104 +1,16 @@
# PROMPT_TEMPLATES = {
# "preprocess_model": {
# "default": "你只要回复0 和 1 ,代表不需要使用工具。以下几种问题不需要使用工具:"
# "1. 需要联网查询的内容\n"
# "2. 需要计算的内容\n"
# "3. 需要查询实时性的内容\n"
# "如果我的输入满足这几种情况,返回1。其他输入,请你回复0,你只要返回一个数字\n"
# "这是我的问题:"
# },
# "llm_model": {
# "default": "{{input}}",
# "with_history": "The following is a friendly conversation between a human and an AI. "
# "The AI is talkative and provides lots of specific details from its context. "
# "If the AI does not know the answer to a question, it truthfully says it does not know.\n\n"
# "Current conversation:\n"
# "{{history}}\n"
# "Human: {{input}}\n"
# "AI:",
# "rag": "【指令】根据已知信息,简洁和专业的来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,不允许在答案中添加编造成分,答案请使用中文。\n\n"
# "【已知信息】{{context}}\n\n"
# "【问题】{{question}}\n",
# "rag_default": "{{question}}",
# },
# "agent_prompt": {
# "default": '''Respond to the human as helpfully and accurately as possible. You have access to the following tools:
#
# {tools}
#
# Use a JSON blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
#
# Valid "action" values: "Final Answer" or {tool_names}
#
# Provide only ONE action per $JSON_BLOB, as shown:
#
# ```
# {{
# "action": $TOOL_NAME,
# "action_input": $INPUT
# }}
# ```
#
# Please strictly follow format below:
#
# Question: input question to answer
# Thought: consider previous and subsequent steps
# Action:
# ```
# $JSON_BLOB
# ```
# Observation: action result
# ... (repeat Thought/Action/Observation N times)
# Thought: I know what to respond
# Action:
# ```
# {{
# "action": "Final Answer",
# "action_input": "Final response to human"
# }}
# ```
#
# Begin! Reminder to ALWAYS respond with a valid JSON blob of a single action. Use tools if necessary. Try to reply in Chinese as much as possible.Don't forget the Question, Thought, and Observation sections.Please provide as much output content as possible for the Final Answer.
# ''',
#
# "ChatGLM": """You can answer using the tools.Respond to the human as helpfully and accurately as possible.\n
# You have access to the following tools:\n
# {tools}\n
# Use a json blob to specify a tool by providing an action key {tool name}\n
# and an action_input key (tool input).\n
# Valid "action" values: "Final Answer" or {tool_names}\n
# Provide only ONE action per $JSON_BLOB, as shown:\n\n
# ```\n
# {{{{\n
# "action": $TOOL_NAME,\n
# "action_input": $INPUT\n
# }}}}\n
# ```\n\n
# Follow this format:\n\n
# Question: input question to answer\n
# Thought: consider previous and subsequent steps\n
# Action:\n
# ```\n
# $JSON_BLOB\n
# ```\n
# Observation: action result\n
# ... (repeat Thought/Action/Observation N times)\n
# Thought: I know what to respond\n
# Action:\n
# ```\n
# {{{{\n
# "action": "Final Answer",\n
# "action_input": "Final response to human"\n
# }}}}\n
# Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary.\n
# Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n
# Question: {input}\n\n
# {agent_scratchpad}\n""",
# },
# "postprocess_model": {
# "default": "{{input}}",
# },
# }
# Prompt templates keyed by model role. chat.py (see this commit's diff) picks
# "with_history" when prior turns exist and "default" otherwise when building
# its ChatPromptTemplate.
# NOTE(review): the double-brace placeholders ({{input}}, {{chat_history}})
# appear intended to survive History.to_msg_template(False) as single-brace
# template variables — confirm against the History implementation.
PROMPT_TEMPLATES = {
    "llm_model": {
        # Bare pass-through of the user's input.
        "default": "{{input}}",
        # Conversation prompt that injects prior turns via {{chat_history}}.
        # Fix: inserted the missing space between "...does not know." and
        # "Please reply in Chinese." (the adjacent string literals previously
        # concatenated to "know.Please").
        "with_history": "The following is a friendly conversation between a human and an AI. "
                        "The AI is talkative and provides lots of specific details from its context. "
                        "If the AI does not know the answer to a question, it truthfully says it does not know. "
                        "Please reply in Chinese.\n\n"
                        "Current conversation:\n"
                        "{{chat_history}}\n"
                        "Human: {{input}}\n"
                        "AI:",
    }
}

OPENAI_PROMPT = '''
Respond to the human as helpfully and accurately as possible. You have access to the following tools:


Loading…
Cancel
Save