feat: 集成混合路由快速路径和前端 SSE 事件支持
Some checks failed
构建并部署 AI Agent 服务 / deploy (push) Failing after 6m11s

This commit is contained in:
2026-04-26 17:44:52 +08:00
parent 87fb32a967
commit 5bf75459b8
3 changed files with 389 additions and 134 deletions

View File

@@ -4,6 +4,7 @@ AI Agent 服务类 - 支持多模型动态切换
"""
import json
import asyncio
# 本地模块
from ..graph.graph_builder import GraphBuilder, GraphContext
@@ -21,6 +22,8 @@ class AIAgentService:
self.tools_by_name = TOOLS_BY_NAME.copy()
# 添加:意图分类器
self.intent_classifier = get_intent_classifier()
# RAG 管道(可选,需要时设置)
self.rag_pipeline = None
async def initialize(self):
# 1. 初始化 RAG 工具(如果需要)
@@ -100,7 +103,7 @@ class AIAgentService:
return str(value)
async def process_message_stream(self, message: str, thread_id: str, model_name: str, user_id: str = "default_user"):
"""流式处理消息,返回异步生成器"""
"""流式处理消息,返回异步生成器(支持混合路由)"""
graph = self.graphs.get(model_name)
if not graph:
raise ValueError(f"模型 '{model_name}' 未找到或未初始化")
@@ -112,6 +115,38 @@ class AIAgentService:
input_state = {"messages": [{"role": "user", "content": message}]}
context = GraphContext(user_id=user_id)
# ========== 新增:混合路由 ==========
intent_result = await self.intent_classifier.classify(message)
info(f"🧠 意图识别: {intent_result.intent_type} (置信度: {intent_result.confidence:.2f})")
info(f"📝 推理: {intent_result.reasoning}")
# 发送意图分类事件
yield {
"type": "intent_classified",
"intent": intent_result.intent_type.value,
"confidence": intent_result.confidence,
"reasoning": intent_result.reasoning
}
# 根据意图决定路径
use_react_loop = True
if intent_result.confidence >= 0.6:
intent_str = intent_result.intent_type.value
if intent_str in ["chitchat", "clarify"]:
use_react_loop = False
elif intent_str == "knowledge" and self.rag_pipeline:
use_react_loop = False
# 发送路径决策事件
yield {
"type": "path_decision",
"path": "react_loop" if use_react_loop else "fast",
"intent": intent_result.intent_type.value
}
# ====================================
if use_react_loop:
# ========== React 循环路径 ==========
current_node = None
tool_calls_in_progress = {}
@@ -239,3 +274,123 @@ class AIAgentService:
yield {
"type": "done"
}
else:
# ========== 快速路径 ==========
intent_str = intent_result.intent_type.value
if intent_str == "chitchat":
# 闲聊直接回答
reply = await self._generate_fast_reply(
message,
"你是一个友好的助手,请礼貌回应用户的问候或闲聊。"
)
for char in reply:
yield {
"type": "llm_token",
"node": "fast_path",
"content": char
}
await asyncio.sleep(0.03)
elif intent_str == "clarify":
# 澄清反问
reply = await self._generate_fast_reply(
message,
"用户的问题不够明确,请礼貌地询问更多细节,以便更好地帮助用户。"
)
for char in reply:
yield {
"type": "llm_token",
"node": "fast_path",
"content": char
}
await asyncio.sleep(0.03)
elif intent_str == "knowledge" and self.rag_pipeline:
# 快速 RAG
yield {
"type": "node_start",
"node": "fast_rag"
}
yield {
"type": "reasoning",
"node": "fast_rag",
"content": "正在查询知识库..."
}
# 模拟 RAG 检索
await asyncio.sleep(0.3)
# 使用 RAG 生成回答
reply = await self._generate_rag_reply(message)
yield {
"type": "node_end",
"node": "fast_rag"
}
for char in reply:
yield {
"type": "llm_token",
"node": "fast_path",
"content": char
}
await asyncio.sleep(0.03)
else:
# 兜底:直接回答
reply = await self._generate_fast_reply(
message,
"请简洁回答用户的问题。"
)
for char in reply:
yield {
"type": "llm_token",
"node": "fast_path",
"content": char
}
await asyncio.sleep(0.03)
yield {
"type": "done"
}
async def _generate_fast_reply(self, message: str, system_prompt: str) -> str:
    """Generate a quick reply without running the full React loop.

    Resolves the first initialized model (falling back to "zhipu"),
    prepends the system prompt to the user message, and performs a
    single LLM call.

    Args:
        message: The raw user message.
        system_prompt: Instructions steering the reply style.

    Returns:
        The model's reply text, or a canned apology when no chat
        service is available.
    """
    # Pick the first available model; "zhipu" is the fallback default.
    default_model = next(iter(self.graphs), "zhipu")
    chat_service = get_all_chat_services().get(default_model)
    if not chat_service:
        return "抱歉,服务暂时不可用。"
    combined_prompt = f"{system_prompt}\n\n用户: {message}"
    result = await chat_service.ainvoke(combined_prompt)
    # LangChain-style responses carry .content; otherwise stringify.
    if hasattr(result, 'content'):
        return result.content
    return str(result)
async def _generate_rag_reply(self, message: str) -> str:
    """Answer the user's question grounded in RAG-retrieved documents.

    Degrades to a plain fast reply when no RAG pipeline is configured.
    Otherwise retrieves documents, formats them into a context block,
    and asks the default model to answer based on that context.

    Args:
        message: The raw user question.

    Returns:
        The model's grounded reply, or a canned apology when no chat
        service is available.
    """
    # No pipeline configured — fall back to a direct, concise answer.
    if not self.rag_pipeline:
        return await self._generate_fast_reply(message, "请简洁回答用户的问题。")
    # Retrieve supporting documents and render them as prompt context.
    retrieved = await self.rag_pipeline.aretrieve(message)
    doc_context = self.rag_pipeline.format_context(retrieved)
    # Resolve the chat service for the first available model.
    chosen_model = next(iter(self.graphs), "zhipu")
    service = get_all_chat_services().get(chosen_model)
    if not service:
        return "抱歉,服务暂时不可用。"
    rag_prompt = f"""请根据以下参考文档回答用户问题。
参考文档:
{doc_context or "(无相关文档)"}
用户问题: {message}
"""
    reply = await service.ainvoke(rag_prompt)
    # LangChain-style responses carry .content; otherwise stringify.
    if hasattr(reply, 'content'):
        return reply.content
    return str(reply)

View File

@@ -216,6 +216,44 @@ export const HumanReviewCard: React.FC<HumanReviewCardProps> = ({ review, onActi
);
};
// Path indicator: shows whether the reply took the fast path or the
// React loop, plus the classified intent and its confidence.
interface PathIndicatorProps {
  path?: string;
  intent?: string;
  confidence?: number;
}

export const PathIndicator: React.FC<PathIndicatorProps> = ({
  path,
  intent,
  confidence
}) => {
  // Nothing to render until a path decision has arrived.
  if (!path) return null;

  const getPathIcon = () => path === 'fast' ? '⚡' : '🔄';
  const getPathText = () => path === 'fast' ? '快速路径' : 'React 循环';
  const getPathColor = () => path === 'fast' ? 'text-green-600' : 'text-blue-600';

  return (
    <div className="flex items-center gap-2 text-xs text-gray-500 mb-2 flex-wrap">
      <span>{getPathIcon()}</span>
      <span className={getPathColor()}>{getPathText()}</span>
      {intent && (
        <>
          {/* NOTE(review): separator span is empty — looks like a lost glyph; confirm intended content */}
          <span></span>
          <span>: {intent}</span>
        </>
      )}
      {/* Fix: `confidence && …` rendered nothing for a legitimate confidence of 0
          (React's falsy-0 pitfall); guard on the type instead. */}
      {typeof confidence === 'number' && (
        <>
          <span></span>
          <span>: {(confidence * 100).toFixed(0)}%</span>
        </>
      )}
    </div>
  );
};
interface AssistantMessageProps {
message: Message;
onReviewAction?: (review: HumanReview, action: 'approve' | 'reject' | 'modify', comment?: string, modifiedContent?: string) => void;
@@ -228,6 +266,15 @@ export const AssistantMessage: React.FC<AssistantMessageProps> = ({ message, onR
AI
</div>
<div className="flex-1">
{/* 新增:路径指示器 */}
{message.metadata && (
<PathIndicator
path={message.metadata.path}
intent={message.metadata.intent}
confidence={message.metadata.confidence}
/>
)}
<ReasoningSection content={message.reasoning} />
{message.toolCalls.map(toolCall => (
@@ -329,7 +376,7 @@ interface ChatContainerProps {
export const ChatContainer: React.FC<ChatContainerProps> = ({ model = 'zhipu', threadId: propThreadId }) => {
const [threadId] = useState(() => propThreadId || Date.now().toString());
const { messages, isLoading, sendMessage, handleReviewAction } = useChat();
const { messages, isLoading, sendMessage, handleReviewAction, lastIntent } = useChat();
const handleSend = (text: string) => {
sendMessage(text, threadId, model);
@@ -376,4 +423,4 @@ export const ChatContainer: React.FC<ChatContainerProps> = ({ model = 'zhipu', t
);
};
export default ChatContainer;
export default ChatContainer

View File

@@ -5,6 +5,21 @@ export interface SSEEvent {
[key: string]: any;
}
// New: intent-classification SSE event, emitted by the backend after
// classifying the user's message (see the 'intent_classified' yield
// in the agent service).
export interface IntentClassifiedEvent extends SSEEvent {
type: 'intent_classified';
// Intent label as a string on the wire, e.g. "chitchat" / "clarify" / "knowledge".
intent: string;
// Classifier confidence — presumably in [0, 1]; backend compares it against 0.6.
confidence: number;
// Free-text explanation of the classification decision.
reasoning: string;
}
// New: path-decision SSE event — tells the client whether the backend
// will answer via the fast path or the full React loop.
export interface PathDecisionEvent extends SSEEvent {
type: 'path_decision';
// Chosen execution path for this reply.
path: 'fast' | 'react_loop';
// The intent that drove the decision (same value as in IntentClassifiedEvent).
intent: string;
}
export interface ToolCall {
id: string;
tool: string;
@@ -30,6 +45,12 @@ export interface Message {
humanReview?: HumanReview;
isLoading: boolean;
timestamp: Date;
// 新增:元数据
metadata?: {
intent?: string;
confidence?: number;
path?: 'fast' | 'react_loop';
};
}
const API_BASE = 'http://localhost:8079';
@@ -117,6 +138,8 @@ export class ApiClient {
export function useChat() {
const [messages, setMessages] = useState<Message[]>([]);
const [isLoading, setIsLoading] = useState(false);
// 新增:最后一次意图识别结果
const [lastIntent, setLastIntent] = useState<{ intent: string; confidence: number } | null>(null);
const [apiClient] = useState(() => new ApiClient());
const currentMessageRef = useRef<Message | null>(null);
@@ -129,6 +152,8 @@ export function useChat() {
toolCalls: [],
isLoading: role === 'assistant',
timestamp: new Date(),
// 新增:初始化元数据
metadata: undefined,
};
setMessages(prev => [...prev, message]);
currentMessageRef.current = message;
@@ -149,12 +174,39 @@ export function useChat() {
const sendMessage = useCallback(async (text: string, threadId: string, model: string = 'zhipu') => {
setIsLoading(true);
setLastIntent(null); // 重置
addMessage('user', text);
addMessage('assistant', '');
try {
for await (const event of apiClient.chatStream(text, threadId, model)) {
switch (event.type) {
// 新增:处理意图分类事件
case 'intent_classified':
console.log(`🧠 意图识别: ${event.intent} (置信度: ${event.confidence})`);
setLastIntent({
intent: event.intent,
confidence: event.confidence,
});
updateCurrentMessage({
metadata: {
intent: event.intent,
confidence: event.confidence,
},
});
break;
// 新增:处理路径决策事件
case 'path_decision':
console.log(`🧭 路径决策: ${event.path}`);
updateCurrentMessage({
metadata: {
...currentMessageRef.current?.metadata,
path: event.path,
},
});
break;
case 'node_start':
console.log('Node started:', event.node);
break;
@@ -268,5 +320,6 @@ export function useChat() {
isLoading,
sendMessage,
handleReviewAction,
lastIntent, // 新增:导出最后意图
};
}