diff --git a/frontend/src/components/chat_area.py b/frontend/src/components/chat_area.py
index 4711773..f07e227 100644
--- a/frontend/src/components/chat_area.py
+++ b/frontend/src/components/chat_area.py
@@ -137,13 +137,45 @@ def _handle_ai_response():
 
             # [DEBUG] 可以在前端终端看到接收到的事件
             import logging
-            if event_type == "llm_token":
-                logging.debug(f"[Frontend Stream] token: {repr(event.get('token'))}, reasoning: {repr(event.get('reasoning_token'))}")
+            logging.debug(f"[Frontend Stream] 收到事件: {event_type}, 完整内容: {repr(event)}")
 
-            # 1. 处理 LLM Token 流 (打字机效果)
-            if event_type == "llm_token":
+            # 1. 处理各种控制事件,显示到思考过程
+            if event_type == "intent_classified":
+                intent = event.get("intent", "unknown")
+                confidence = event.get("confidence", 0)
+                reasoning = event.get("reasoning", "")
+                api_thought += f"\n\n🧠 **意图识别**: {intent} (置信度: {confidence:.2f})"
+                if reasoning:
+                    api_thought += f"\n 推理: {reasoning}"
+                display_thought = api_thought
+                thought_placeholder.info(f"**🤔 思考过程 (正在思考...)**\n\n{display_thought}▌")
+
+            elif event_type == "path_decision":
+                path = event.get("path", "unknown")
+                intent = event.get("intent", "")
+                api_thought += f"\n\n🛤️ **路径决策**: 选择路径 '{path}'"
+                if intent:
+                    api_thought += f" (基于意图: {intent})"
+                display_thought = api_thought
+                thought_placeholder.info(f"**🤔 思考过程 (正在思考...)**\n\n{display_thought}▌")
+
+            elif event_type == "node_start":
+                node_name = event.get("node", "unknown")
+                api_thought += f"\n\n▶️ **开始节点**: {node_name}"
+                display_thought = api_thought
+                thought_placeholder.info(f"**🤔 思考过程 (正在思考...)**\n\n{display_thought}▌")
+
+            elif event_type == "node_end":
+                node_name = event.get("node", "unknown")
+                api_thought += f"\n\n⏹️ **完成节点**: {node_name}"
+                display_thought = api_thought
+                thought_placeholder.info(f"**🤔 思考过程 (正在思考...)**\n\n{display_thought}▌")
+
+            # 2. 处理 LLM Token 流 (打字机效果)
+            elif event_type == "llm_token":
+                node_name = event.get("node", "unknown")
                 # 确保只处理来自 LLM 的 token,避免将工具的输出作为 token 显示
-                if event.get("node") in ("llm_call", "fallback"):
+                if node_name in ("llm_call", "fallback"):
                     token = str(event.get("token", ""))
                     reasoning_token = str(event.get("reasoning_token", ""))
 
@@ -197,6 +229,14 @@ def _handle_ai_response():
                     if display_text or not is_thinking:
                         cursor = "▌" if not is_thinking else ""
                         message_placeholder.markdown(display_text + cursor)
+                else:
+                    # 对于非 llm_call/fallback 的 token,记录到思考过程用于调试
+                    token = str(event.get("token", ""))
+                    reasoning_token = str(event.get("reasoning_token", ""))
+                    if token:
+                        api_thought += f"\n[调试] 节点 {node_name} 输出: {repr(token)}"
+                        display_thought = api_thought
+                        thought_placeholder.info(f"**🤔 思考过程 (正在思考...)**\n\n{display_thought}▌")
 
             # 2. 处理状态更新 (节点完成、工具结果等)
             elif event_type == "state_update":