修改架构:llm_call后增加观察环节注释
All checks were successful
构建并部署 AI Agent 服务 / deploy (push) Successful in 5m56s

This commit is contained in:
2026-05-01 15:00:17 +08:00
parent 4ee769a79f
commit 229cfa67a2
2 changed files with 11 additions and 6 deletions

View File

@@ -92,9 +92,13 @@ def build_react_main_graph(llm=None, tools=None) -> StateGraph:
├─ dictionary_subgraph →────────┤
├─ news_analysis_subgraph →─────┤
├─ handle_error → (重试或结束) ─┤
└─ llm_call → END
└─ llm_call (大模型调用) ←──────┘
END
🔍 观察 (检查 tool_calls)
[有工具调用?]
├─ 是 → 执行工具 → 回到 llm_call
└─ 否 → END
"""
# 创建图
graph = StateGraph(MainGraphState)
@@ -185,7 +189,8 @@ def build_react_main_graph(llm=None, tools=None) -> StateGraph:
graph.add_edge("news_analysis_subgraph", "react_reason")
graph.add_edge("handle_error", "react_reason")
# 5. 最终边:llm_call → END
# 5. 条件路由:llm_call 后检查是否有工具调用
# 注意:这里简化处理,先直接 END,后续再完善工具调用循环
if llm_node is not None:
graph.add_edge("llm_call", END)

View File

@@ -175,7 +175,7 @@ def _handle_ai_response():
elif event_type == "llm_token":
node_name = event.get("node", "unknown")
# 确保只处理来自 LLM 的 token,避免将工具的输出作为 token 显示
if node_name in ("llm_call", "fallback", "final_response"):
if node_name in ("llm_call", "fallback"):
token = str(event.get("token", ""))
reasoning_token = str(event.get("reasoning_token", ""))