Fix: frontend supports tokens from the final_response node
All checks were successful
Build and deploy AI Agent service / deploy (push) Successful in 6m20s
@@ -175,7 +175,7 @@ def _handle_ai_response():
     elif event_type == "llm_token":
         node_name = event.get("node", "unknown")
         # Only handle tokens coming from the LLM, so tool output is never displayed as a token
-        if node_name in ("llm_call", "fallback"):
+        if node_name in ("llm_call", "fallback", "final_response"):
             token = str(event.get("token", ""))
             reasoning_token = str(event.get("reasoning_token", ""))
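For context, below is a minimal sketch of how a handler like _handle_ai_response() might consume these streamed events. Only the event shape ("type", "node", "token", "reasoning_token") and the accepted node names ("llm_call", "fallback", "final_response") are taken from the diff above; the surrounding function and variable names are illustrative assumptions, not the project's actual code.

# Sketch (assumed, not the repository's real handler): filter streamed
# llm_token events by node name before rendering them as model output.

# Nodes whose llm_token events should be rendered as LLM output.
LLM_NODES = ("llm_call", "fallback", "final_response")

def handle_event(event: dict, output: list, reasoning: list) -> None:
    """Append token text from LLM nodes; ignore tool output and other events."""
    if event.get("type") != "llm_token":
        return
    node_name = event.get("node", "unknown")
    # Accept only tokens emitted by LLM nodes, so tool output is never
    # shown as if it were model-generated text.
    if node_name in LLM_NODES:
        output.append(str(event.get("token", "")))
        reasoning.append(str(event.get("reasoning_token", "")))

if __name__ == "__main__":
    out, think = [], []
    events = [
        {"type": "llm_token", "node": "final_response", "token": "Hello"},
        {"type": "llm_token", "node": "tool_call", "token": "ignored"},
    ]
    for ev in events:
        handle_event(ev, out, think)
    print("".join(out))  # prints "Hello"; the tool_call token is dropped

The one-line change in the diff simply adds "final_response" to this allow-list, so tokens streamed by that node are rendered instead of being silently discarded.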