From 624501c86d2a93378e4e67d8a2f26fd0a520ae4d Mon Sep 17 00:00:00 2001 From: gjl <2802427218@qq.com> Date: Tue, 23 Jul 2024 13:31:41 +0800 Subject: [PATCH] V0.1.0 --- src/mindpilot/app/agent/agents_registry.py | 3 + src/mindpilot/app/api/chat_routes.py | 76 +--------------------- src/mindpilot/app/chat/chat.py | 20 +++--- src/mindpilot/app/configs/model_config.py | 4 +- src/mindpilot/app/configs/prompt_config.py | 66 +++++++++---------- 5 files changed, 49 insertions(+), 120 deletions(-) diff --git a/src/mindpilot/app/agent/agents_registry.py b/src/mindpilot/app/agent/agents_registry.py index 3a29ff8..0fca645 100644 --- a/src/mindpilot/app/agent/agents_registry.py +++ b/src/mindpilot/app/agent/agents_registry.py @@ -1,5 +1,7 @@ +import typing from typing import List, Sequence +import langchain_core from langchain import hub from langchain.agents import AgentExecutor, create_structured_chat_agent from langchain_core.callbacks import BaseCallbackHandler @@ -20,6 +22,7 @@ def agents_registry( prompt = ChatPromptTemplate.from_messages([SystemMessage(content=prompt)]) else: prompt = hub.pull("hwchase17/structured-chat-agent") # default prompt + print(prompt) agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt) agent_executor = AgentExecutor( diff --git a/src/mindpilot/app/api/chat_routes.py b/src/mindpilot/app/api/chat_routes.py index 504ee46..72ac6e2 100644 --- a/src/mindpilot/app/api/chat_routes.py +++ b/src/mindpilot/app/api/chat_routes.py @@ -1,85 +1,11 @@ from __future__ import annotations - -from typing import Dict, List - from fastapi import APIRouter, Request -from langchain.prompts.prompt import PromptTemplate - -# from app.api.api_schemas import MsgType, OpenAIChatInput from ..chat.chat import chat -from ..utils import ( - get_OpenAIClient, - get_prompt_template, - get_tool, - get_tool_config, -) - -from .openai_routes import openai_request chat_router = APIRouter(prefix="/chat", tags=["MindPilot对话"]) chat_router.post( "/chat/online", - 
summary="与llm模型对话", + summary="以API方式与在线llm模型进行对话", )(chat) - -# @chat_router.post("/chat/completions", summary="兼容 openai 的统一 chat 接口") -# async def chat_completions( -# request: Request, -# body: OpenAIChatInput, -# ) -> Dict: -# """ -# 请求参数与 openai.chat.completions.create 一致,可以通过 extra_body 传入额外参数 -# tools 和 tool_choice 可以直接传工具名称,会根据项目里包含的 tools 进行转换 -# 通过不同的参数组合调用不同的 chat 功能: -# - tool_choice -# - extra_body 中包含 tool_input: 直接调用 tool_choice(tool_input) -# - extra_body 中不包含 tool_input: 通过 agent 调用 tool_choice -# - tools: agent 对话 -# - 其它:LLM 对话 -# 返回与 openai 兼容的 Dict -# """ -# client = get_OpenAIClient(model_name=body.model, is_async=True) -# extra = {**body.model_extra} or {} -# for key in list(extra): -# delattr(body, key) -# -# global global_model_name -# global_model_name = body.model -# -# if isinstance(body.tools, list): -# for i in range(len(body.tools)): -# if isinstance(body.tools[i], str): -# if t := get_tool(body.tools[i]): -# body.tools[i] = { -# "type": "function", -# "function": { -# "name": t.name, -# "description": t.description, -# "parameters": t.args, -# }, -# } -# # agent chat with tool calls -# if body.tools: -# chat_model_config = {} -# tool_names = [x["function"]["name"] for x in body.tools] -# tool_config = {name: get_tool_config(name) for name in tool_names} -# # print(tool_config) -# result = await chat( -# query=body.messages[-1]["content"], -# # query="查询北京的天气状况,并搜索互联网给出北京的旅游攻略", -# metadata=extra.get("metadata", {}), -# conversation_id=extra.get("conversation_id", ""), -# # message_id=message_id, -# history_len=-1, -# history=body.messages[:-1], -# stream=body.stream, -# chat_model_config=extra.get("chat_model_config", chat_model_config), -# tool_config=extra.get("tool_config", tool_config), -# ) -# return result -# else: -# # TODO 使用用户指定工具 -# pass -# diff --git a/src/mindpilot/app/chat/chat.py b/src/mindpilot/app/chat/chat.py index d8e714b..c795a0a 100644 --- a/src/mindpilot/app/chat/chat.py +++ b/src/mindpilot/app/chat/chat.py @@ 
-4,7 +4,7 @@ import uuid from typing import AsyncIterable, List from fastapi import Body -from langchain.schema.runnable import RunnableSequence +from langchain.chains import LLMChain from langchain.prompts.chat import ChatPromptTemplate from langchain_core.messages import AIMessage, HumanMessage, convert_to_messages from sse_starlette.sse import EventSourceResponse @@ -48,7 +48,7 @@ def create_models_from_config(configs, callbacks, stream): def create_models_chains( - history, prompts, models, tools, callbacks, metadata + history, prompts, models, tools, callbacks, metadata, agent_enable ): chat_prompt = None @@ -65,22 +65,20 @@ def create_models_chains( False ) chat_prompt = ChatPromptTemplate.from_messages([input_msg]) + print(chat_prompt) llm = models["llm_model"] llm.callbacks = callbacks + chain = LLMChain(prompt=chat_prompt, llm=llm) - if "action_model" in models and tools is not None: + if agent_enable: agent_executor = agents_registry( llm=llm, callbacks=callbacks, tools=tools, prompt=None, verbose=True ) full_chain = {"input": lambda x: x["input"]} | agent_executor else: - full_chain = RunnableSequence( - steps=[ - {"input": lambda x: x["input"]}, - chat_prompt | llm - ] - ) + chain.llm.callbacks = callbacks + full_chain = {"input": lambda x: x["input"]} | chain return full_chain @@ -99,7 +97,8 @@ async def chat( ), stream: bool = Body(True, description="流式输出"), chat_model_config: dict = Body({}, description="LLM 模型配置", examples=[]), - tool_config: List[str] = Body([], description="工具配置", examples=["weather_check"]), + tool_config: List[str] = Body([], description="工具配置", examples=[]), + agent_enable: bool = Body(True, description="是否启用Agent") ): """Agent 对话""" @@ -121,6 +120,7 @@ async def chat( callbacks=callbacks, history=history, metadata=metadata, + agent_enable=agent_enable ) _history = [History.from_data(h) for h in history] diff --git a/src/mindpilot/app/configs/model_config.py b/src/mindpilot/app/configs/model_config.py index ee814fa..36fffc4 
100644 --- a/src/mindpilot/app/configs/model_config.py +++ b/src/mindpilot/app/configs/model_config.py @@ -10,7 +10,7 @@ MODEL_CONFIG = { }, "llm_model": { "glm-4": { - "temperature": 0.8, + "temperature": 0.9, "max_tokens": 4096, "history_len": 10, "prompt_name": "default", @@ -21,7 +21,7 @@ MODEL_CONFIG = { "glm-4": { "temperature": 0.1, "max_tokens": 4096, - "prompt_name": "GPT-4", + "prompt_name": "ChatGLM3", "callbacks": True, }, }, diff --git a/src/mindpilot/app/configs/prompt_config.py b/src/mindpilot/app/configs/prompt_config.py index d1b15f8..5a20b2f 100644 --- a/src/mindpilot/app/configs/prompt_config.py +++ b/src/mindpilot/app/configs/prompt_config.py @@ -50,39 +50,39 @@ PROMPT_TEMPLATES = { "Question:{input}\n" "Thought:{agent_scratchpad}\n", - "ChatGLM3": "You can answer using the tools.Respond to the human as helpfully and accurately as possible.\n" - "You have access to the following tools:\n" - "{tools}\n" - "Use a json blob to specify a tool by providing an action key (tool name)\n" - "and an action_input key (tool input).\n" - 'Valid "action" values: "Final Answer" or [{tool_names}]\n' - "Provide only ONE action per $JSON_BLOB, as shown:\n\n" - "```\n" - "{{{{\n" - ' "action": $TOOL_NAME,\n' - ' "action_input": $INPUT\n' - "}}}}\n" - "```\n\n" - "Follow this format:\n\n" - "Question: input question to answer\n" - "Thought: consider previous and subsequent steps\n" - "Action:\n" - "```\n" - "$JSON_BLOB\n" - "```\n" - "Observation: action result\n" - "... (repeat Thought/Action/Observation N times)\n" - "Thought: I know what to respond\n" - "Action:\n" - "```\n" - "{{{{\n" - ' "action": "Final Answer",\n' - ' "action_input": "Final response to human"\n' - "}}}}\n" - "Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary.\n" - "Respond directly if appropriate. 
Format is Action:```$JSON_BLOB```then Observation:.\n" - "Question: {input}\n\n" - "{agent_scratchpad}\n", + "ChatGLM3": """You can answer using the tools.Respond to the human as helpfully and accurately as possible.\n
+You have access to the following tools:\n
+{tools}\n
+Use a json blob to specify a tool by providing an action key (tool name)\n
+and an action_input key (tool input).\n
+Valid "action" values: "Final Answer" or {tool_names}\n
+Provide only ONE action per $JSON_BLOB, as shown:\n\n
+```\n
+{{{{\n
+ "action": $TOOL_NAME,\n
+ "action_input": $INPUT\n
+}}}}\n
+```\n\n
+Follow this format:\n\n
+Question: input question to answer\n
+Thought: consider previous and subsequent steps\n
+Action:\n
+```\n
+$JSON_BLOB\n
+```\n
+Observation: action result\n
+... (repeat Thought/Action/Observation N times)\n
+Thought: I know what to respond\n
+Action:\n
+```\n
+{{{{\n
+ "action": "Final Answer",\n
+ "action_input": "Final response to human"\n
+}}}}\n
+Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary.\n
+Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n
+Question: {input}\n\n
+{agent_scratchpad}\n""", "qwen": "Answer the following questions as best you can. You have access to the following APIs:\n\n" "{tools}\n\n"