import requests
from langchain.tools import tool #LangChain的工具装饰器,将函数转换为AI可调用的工具from typing import Type
from pydantic import BaseModel, Field
import os
# 1. Order-query tool.
class OrderQueryInput(BaseModel):
    # Schema the LLM sees when deciding how to call the tool.
    order_id: str = Field(..., description="The order ID to query")


# The @tool decorator wraps a plain function into a Tool the LLM can invoke;
# args_schema describes parameter names/types so the model calls it correctly.
@tool(args_schema=OrderQueryInput)
def query_order_tool(order_id: str) -> str:
    """
    Look up a customer's order details. Requires prior identity verification.
    """
    # Simulated call to the internal order system. In production this would be
    # requests.get(f"{ORDER_API_URL}/{order_id}", headers=...).
    print(f" [Tool Call] Querying order: {order_id}")
    # Mocked response: product, size, date and delivery status for the order.
    mock_order_data = {
        "order_id": order_id,
        "product": "Running Shoes (Model X)",
        "size": "42",
        "order_date": "2024-09-15",
        "status": "Delivered",
        "customer_id": "cust_12345",
    }
    return f"Order Details: {str(mock_order_data)}"


# 2. Return-policy lookup tool (RAG-style).
@tool
def query_return_policy_tool(product_category: str) -> str:
    """
    Look up the return/exchange policy for a product category; falls back to
    the general policy when the category is unknown.
    """
    # In production the policy text would be retrieved from a vector store.
    print(f" [Tool Call] Querying return policy for: {product_category}")
    policies = {
        "general": "You can return most items within 30 days of delivery. Items must be unworn and in original packaging.",
        "shoes": "Shoes can be exchanged for a different size within 45 days. Must have original box and no signs of wear.",
        "electronics": "Electronics can be returned within 14 days. Must be factory reset and all accessories included.",
    }
    # Case-insensitive lookup with the general policy as default.
    policy = policies.get(product_category.lower(), policies["general"])
    return f"Our return policy for {product_category}: {policy}"


# 3. Support-ticket creation tool.
class CreateTicketInput(BaseModel):
    issue_summary: str = Field(..., description="A summary of the customer's issue")
    priority: str = Field("medium", description="Priority of the ticket: low, medium, high")


@tool(args_schema=CreateTicketInput)
def create_support_ticket_tool(issue_summary: str, priority: str = "medium") -> str:
    """
    Create a support ticket in a third-party system (e.g. Zendesk, Jira).
    Used when the agent cannot resolve the issue itself.
    """
    print(f" [Tool Call] Creating Support Ticket. Priority: {priority}. Issue: {issue_summary}")
    # Simulated ticket-creation API call.
    ticket_id = "TICKET-0987"
    return f"Successfully created a support ticket for you. Your ticket ID is {ticket_id}. A human agent will contact you shortly."
生产环境改进建议
def query_order_tool(order_id: str) -> str:
    """
    Production variant of the order-query tool: calls the real order API.

    Returns a string with the order details on success, or a user-facing
    apology message (never an exception) on any network/HTTP failure, so the
    agent loop keeps running.
    """
    try:
        # BUG FIX: the URL must be an f-string. The original used a raw string
        # r"{os.getenv('ORDER_API_URL')}/{order_id}", so the placeholders were
        # sent literally and the request could never reach the real endpoint.
        response = requests.get(
            f"{os.getenv('ORDER_API_URL')}/{order_id}",
            headers={"Authorization": f"Bearer {os.getenv('API_TOKEN')}"},
            timeout=10,  # never hang the agent on a slow backend
        )
        # Surface HTTP 4xx/5xx as RequestException so the handler below
        # produces the friendly error message instead of returning an error body.
        response.raise_for_status()
        return f"Order Details:{response.json()}"
    except requests.RequestException as e:
        # Fail soft: report the problem to the user rather than crashing.
        return f"Sorry, I couldn't retrieve your order details. Error: {str(e)}"
四、构建智能体工作流
+-------------+
| agent | # 决策节点
+-------------+
|
v (条件路由)
+---------+---------+
| | |
v v v
+-------+ END +-------------+
| tools | | human_agent |
+-------+ +-------------+
| |
+--------+ |
v v
+-------------+
| agent | # 循环回到agent
+-------------+
agent.py
智能体决策节点:接收当前对话状态并调用 LLM;LLM 会自动决定是否需要调用工具,并将生成的 AI 消息写回状态中
路由函数 - 核心决策逻辑
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_openai import ChatOpenAI
from models import AgentState
from tools import query_order_tool, query_return_policy_tool, create_support_ticket_tool
from typing import Literal, Dict, Any
# Initialize the LLM: gpt-4-turbo for strong reasoning; temperature=0 gives
# deterministic output, which suits a customer-support agent.
llm = ChatOpenAI(model="gpt-4-turbo", temperature=0)

# bind_tools() advertises the available tools (names, schemas, descriptions)
# to the LLM so it can emit structured tool calls.
llm_with_tools = llm.bind_tools(
    [query_order_tool, query_return_policy_tool, create_support_ticket_tool]
)

# ToolNode executes whatever tool calls the last AI message requested.
tool_node = ToolNode(tools=[query_order_tool, query_return_policy_tool, create_support_ticket_tool])


def agent_step(state: AgentState):
    """Agent decision node: run the LLM over the current conversation state."""
    print(f" [Agent Step] Current Step: {state['current_step']}")
    messages = state["messages"]
    # The bound LLM decides on its own whether a tool call is needed.
    response = llm_with_tools.invoke(messages)
    # Append the AI message to the state.
    return {"messages": [response]}


def route_to_tools(state: AgentState) -> Literal["tools", "end", "human_agent"]:
    """Routing function: decide whether to run tools, end, or hand off to a human."""
    ai_msg = state["messages"][-1]
    # If the AI message carries tool calls, execute them next.
    if not hasattr(ai_msg, 'tool_calls') or len(ai_msg.tool_calls) == 0:
        # No tool calls: inspect the preceding (user) message to decide.
        # BUG FIX: guard the [-2] access — with fewer than two messages in the
        # state, the original raised IndexError instead of routing to "end".
        if len(state["messages"]) >= 2:
            prev_content = state["messages"][-2].content.lower()
            if "thank you" in prev_content:  # user is closing the conversation
                return "end"
            if "human" in prev_content:  # explicit escalation request
                return "human_agent"
        return "end"
    return "tools"


def call_human_agent(state: AgentState):
    """Create a support ticket via the ticket tool and end the conversation."""
    issue_summary = f"Customer requested human agent. Conversation history: {state['messages']}"
    tool_input = {"issue_summary": issue_summary, "priority": "medium"}
    result = create_support_ticket_tool.invoke(tool_input)
    # Hand back a friendly hand-off message including the tool's confirmation.
    return {"messages": [AIMessage(content=f"I've escalated your issue to our human team. {result}")]}
# Assemble the LangGraph state machine.
workflow = StateGraph(AgentState)

# Nodes: the LLM decision step, the tool executor, and the human hand-off.
workflow.add_node("agent", agent_step)
workflow.add_node("tools", tool_node)
workflow.add_node("human_agent", call_human_agent)

# Every turn starts at the agent node.
workflow.set_entry_point("agent")

# After the agent speaks, route_to_tools picks the next hop.
workflow.add_conditional_edges(
    "agent",
    route_to_tools,
    {"tools": "tools", "end": END, "human_agent": "human_agent"},
)

# Tool results loop back to the agent; a human hand-off ends the run.
workflow.add_edge("tools", "agent")
workflow.add_edge("human_agent", END)

# Compile the graph into a runnable app.
app = workflow.compile()
import asyncio

# BUG FIX: HumanMessage is used below but was never imported in this script
# (it was only imported in the agent module), causing a NameError at runtime.
from langchain_core.messages import HumanMessage
from models import AgentState, UserIdentity
from agent import app

# A simulated, already-authenticated test user.
test_user = UserIdentity(user_id="cust_12345", session_id="test_session_001", is_authenticated=True)


def test_conversation():
    """Drive one scripted conversation through the compiled agent graph."""
    # Simulated user message: ask for an order's status.
    test_messages = ["Hi, I want to check the status of my order ORD-67890"]
    state = AgentState(messages=[], user_identity=test_user, current_step="start")
    for msg in test_messages:
        state["messages"].append(HumanMessage(content=msg))
        print(f"User: {msg}")
        # Invoke the agent graph on the accumulated state.
        state = app.invoke(state)
        agent_msg = state["messages"][-1]
        print(f"Agent: {agent_msg.content}")
        # Show any tool calls the agent decided to make.
        if hasattr(agent_msg, 'tool_calls') and agent_msg.tool_calls:
            print(f"Agent called tools: {agent_msg.tool_calls}")
        print("---")


if __name__ == "__main__":
    test_conversation()
# Smoke-test the chat endpoint with a return-policy question.
curl -X 'POST' \
  'http://localhost:8000/chat' \
  -H 'Authorization: Bearer fake_jwt_token_for_testing' \
  -H 'Content-Type: application/json' \
  -d '{"message": "I need to return my shoes, what is your policy?"}'