重构架构:恢复统一的 llm_call 节点,移除错误的 final_response 节点
All checks were successful
构建并部署 AI Agent 服务 / deploy (push) Successful in 5m50s

This commit is contained in:
2026-05-01 14:01:48 +08:00
parent 1e15a0e550
commit 4ee769a79f
4 changed files with 94 additions and 161 deletions

View File

@@ -3,7 +3,6 @@ React 模式节点模块 - 带超时和重试功能
包含:
- react_reason_node: 使用 intent.py 进行推理
- error_handling_node: 错误处理节点
- final_response_node: 最终回答节点
- init_state_node: 初始化状态节点
注意:为了兼容 LangGraph 的同步接口,我们保留了同步的 react_reason 调用
@@ -233,98 +232,6 @@ def error_handling_node(state: MainGraphState) -> MainGraphState:
return state
# ========== 3. 最终回答节点 ==========
from langchain_core.runnables.config import RunnableConfig
from langchain_core.messages import AIMessage
async def final_response_node(state: "MainGraphState", config: "RunnableConfig") -> "MainGraphState":
    """Final-answer node: stream an LLM response and fold it into the state.

    Flow:
      1. Mark the phase as ``"finalizing"``; if ``state.final_result`` is
         already set, short-circuit to ``"done"`` without calling the model.
      2. Build the prompt chain, inject any RAG context as a system message
         just before the first human message, and stream the LLM output.
      3. Merge the streamed chunks into one ``AIMessage``, record it on the
         state, and mark the run as finished.

    Args:
        state: Mutable graph state; read for messages / RAG context and
            updated in place with the final answer and bookkeeping fields.
        config: Runnable config forwarded to ``chain.astream`` so streaming
            callbacks registered upstream keep firing.

    Returns:
        The same ``state`` object, mutated in place.
    """
    state.current_phase = "finalizing"

    # A previous node may already have produced the answer — skip the LLM call.
    if state.final_result:
        state.current_phase = "done"
        return state

    import time
    start_time = time.time()
    try:
        # Build the LLM chain (local imports keep module import cheap and
        # avoid circular-import issues at package load time).
        from functools import reduce
        from operator import add
        from app.agent.prompts import create_system_prompt
        from app.model_services.chat_services import get_chat_service
        from app.logger import info

        llm = get_chat_service()
        prompt = create_system_prompt(tools=[])
        chain = prompt | llm

        # Optional long-term-memory context; falls back to a placeholder string.
        memory_context = getattr(state, "memory_context", "暂无用户信息")

        # Copy the conversation so injection does not mutate state.messages.
        messages_with_context = list(state.messages)
        if state.rag_context:
            # Surface retrieved documents to the model as a system message.
            from langchain_core.messages import SystemMessage
            rag_system_msg = SystemMessage(
                content=f"以下是检索到的相关信息:\n{state.rag_context}"
            )
            # Insert just before the first human message so it reads as
            # context for the user's question; prepend if none is found.
            insert_at = next(
                (i for i, msg in enumerate(messages_with_context) if msg.type == "human"),
                0,
            )
            messages_with_context.insert(insert_at, rag_system_msg)

        # Stream the model output, collecting every chunk.
        chunks = []
        async for chunk in chain.astream(
            {"messages": messages_with_context, "memory_context": memory_context},
            config=config,
        ):
            chunks.append(chunk)

        # Fold the chunks into a single message ("+" concatenates chunk objects).
        response = reduce(add, chunks) if chunks else AIMessage(content="")

        elapsed_time = time.time() - start_time

        # Record the answer and run bookkeeping on the shared state.
        state.messages.append(response)
        state.final_result = response.content
        state.success = True
        state.current_phase = "done"
        state.end_time = datetime.now().isoformat()
        state.llm_calls = getattr(state, "llm_calls", 0) + 1
        info(f"⏱️ [LLM统计] 调用耗时: {elapsed_time:.2f}")
    except Exception as e:
        from app.logger import error
        import traceback
        error(f"❌ [LLM错误] 调用失败: {e}")
        traceback.print_exc()
        # Degrade gracefully: return a canned apology instead of crashing the graph.
        state.final_result = "抱歉,模型暂时无法响应,请稍后再试。"
        state.success = False
        state.current_phase = "done"
    return state
# ========== 4. 初始化状态节点 ==========
def init_state_node(state: MainGraphState) -> MainGraphState:
@@ -353,11 +260,11 @@ def route_by_reasoning(state: MainGraphState) -> str:
"""
# 先检查特殊情况
if state.current_phase == "max_steps_exceeded":
return "final_response"
return "llm_call"
if state.current_phase == "error_handling" or state.current_error:
return "handle_error"
if state.current_phase == "finalizing" or state.current_phase == "done":
return "final_response"
return "llm_call"
if state.current_phase == "retrying":
if state.retry_action and "rag" in state.retry_action.lower():
return "rag_retrieve"
@@ -367,7 +274,7 @@ def route_by_reasoning(state: MainGraphState) -> str:
reasoning_result: Optional[ReasoningResult] = state.debug_info.get("reasoning_result")
if not reasoning_result:
return "final_response"
return "llm_call"
# 使用 intent.py 提供的路由函数
route = get_route_by_reasoning(reasoning_result)
@@ -375,18 +282,18 @@ def route_by_reasoning(state: MainGraphState) -> str:
# 映射到我们的节点名称
# 注意:这些名称必须与 main_graph_builder.py 中定义的节点名称一致
route_mapping = {
"direct_response": "final_response",
"direct_response": "llm_call",
"retrieve_rag": "rag_retrieve",
"re_retrieve_rag": "rag_retrieve",
"web_search": "web_search", # ⭐ 新增:联网搜索
"clarify": "final_response",
"call_tool": "final_response", # 暂时映射到 final_response后续可以扩展
"web_search": "web_search",
"clarify": "llm_call",
"call_tool": "llm_call",
"contact": "contact_subgraph",
"dictionary": "dictionary_subgraph",
"news_analysis": "news_analysis_subgraph",
}
return route_mapping.get(route, "final_response")
return route_mapping.get(route, "llm_call")
# ========== 导出 ==========
@@ -394,8 +301,7 @@ def route_by_reasoning(state: MainGraphState) -> str:
__all__ = [
"init_state_node",
"react_reason_node",
"web_search_node", # ⭐ 新增
"web_search_node",
"error_handling_node",
"final_response_node",
"route_by_reasoning"
]