RAG数据库生成

This commit is contained in:
2026-04-19 15:01:40 +08:00
parent c18e8a9860
commit cc8ef41ef9
17 changed files with 1089 additions and 577 deletions

View File

@@ -1,16 +1,17 @@
"""
Embedding model wrapper for llama.cpp service.
嵌入模型包装器,用于 llama.cpp 服务。
"""
import os
import httpx
from typing import List, Optional
from urllib.parse import urljoin
from langchain_openai import OpenAIEmbeddings
from langchain_core.embeddings import Embeddings
class LlamaCppEmbedder:
"""Wrapper for llama.cpp embedding service via OpenAI-compatible API."""
"""通过 OpenAI 兼容 API 封装 llama.cpp 嵌入服务。"""
def __init__(
self,
@@ -22,47 +23,66 @@ class LlamaCppEmbedder:
self.api_key = api_key or os.getenv("LLAMACPP_API_KEY", "")
self.model = model
# Ensure URL ends with /v1
self.base_url = urljoin(self.base_url.rstrip("/") + "/", "v1")
def as_langchain_embeddings(self) -> Embeddings:
    """Return a LangChain-compatible ``Embeddings`` object.

    The returned adapter delegates all embedding calls back to this
    embedder, so LangChain components can consume the llama.cpp service
    without knowing about the underlying HTTP client.
    """
    adapter = _LlamaCppLangchainAdapter(self)
    return adapter
def embed_documents(self, texts: List[str]) -> List[List[float]]:
    """Embed a batch of documents.

    Sends all of *texts* in a single request to the embedding API and
    returns one vector per input, in the same order.
    """
    vectors = self._call_embedding_api(texts)
    return vectors
def embed_query(self, text: str) -> List[float]:
    """Embed a single query string.

    Wraps *text* in a one-element batch and unwraps the single
    resulting vector.
    """
    batch = self._call_embedding_api([text])
    return batch[0]
def get_embedding_dimension(self) -> int:
    """Determine the embedding dimensionality.

    Probes the service by embedding a throwaway string and measuring
    the length of the returned vector.
    """
    return len(self.embed_query("test"))
def _call_embedding_api(self, texts: List[str]) -> List[List[float]]:
    """POST *texts* to the llama.cpp ``/embeddings`` endpoint.

    Normalizes the base URL to end with ``/v1``, attaches a Bearer token
    when an API key is configured, and parses either response shape the
    server may produce.

    Raises:
        httpx.HTTPStatusError: on a non-2xx response.
        ValueError: when the response body matches neither known format.
    """
    # __init__ already appends /v1, but re-check so a caller-supplied
    # base_url without the suffix still works.
    base = self.base_url.rstrip("/")
    if not base.endswith("/v1"):
        base += "/v1"

    headers = {"Content-Type": "application/json"}
    if self.api_key:
        headers["Authorization"] = f"Bearer {self.api_key}"

    with httpx.Client(timeout=120) as client:
        response = client.post(
            f"{base}/embeddings",
            headers=headers,
            json={"input": texts, "model": self.model},
        )
        response.raise_for_status()
        data = response.json()

    # llama.cpp may return a bare list of items; OpenAI-style servers
    # wrap them in {"data": [...]} with explicit indices.
    if isinstance(data, list):
        return [item["embedding"] for item in data]
    if isinstance(data, dict) and "data" in data:
        ordered = sorted(data["data"], key=lambda x: x["index"])
        return [item["embedding"] for item in ordered]
    raise ValueError(f"未知的嵌入 API 响应格式: {data}")
class _LlamaCppLangchainAdapter(Embeddings):
    """Adapt a :class:`LlamaCppEmbedder` to the LangChain ``Embeddings`` interface.

    A thin delegation layer: every call is forwarded verbatim to the
    wrapped embedder, which owns all HTTP details.
    """

    def __init__(self, embedder: LlamaCppEmbedder):
        # Keep a reference to the concrete embedder that does the work.
        self._embedder = embedder

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Forward a batch-embedding request to the wrapped embedder."""
        return self._embedder.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        """Forward a single-query embedding request to the wrapped embedder."""
        return self._embedder.embed_query(text)