75 lines
2.6 KiB
Python
75 lines
2.6 KiB
Python
"""
|
|
重排序器模块 (适配版)
|
|
使用远程 llama.cpp 服务 (兼容 OpenAI Rerank API) 替代本地 Cross-Encoder
|
|
"""
|
|
import logging
from typing import List

import requests
from langchain_core.documents import Document
|
|
|
|
class LLaMaCPPReranker:
    """Rerank retrieval results via a remote llama.cpp service.

    Talks to a llama.cpp server exposing the OpenAI-compatible ``/rerank``
    endpoint, replacing a local Cross-Encoder reranker.
    """

    def __init__(self,
                 base_url: str,
                 api_key: str,
                 top_n: int = 5,
                 timeout: int = 60,
                 model: str = "bge-reranker-v2-m3"):
        """
        Initialize the remote reranker.

        Args:
            base_url: Address and port of the llama.cpp service
                (e.g. "http://127.0.0.1:8083").
            api_key: API key sent as a Bearer token.
            top_n: Number of top results to return.
            timeout: Request timeout in seconds.
            model: Reranker model name sent in the request payload.
        """
        self.base_url = base_url
        self.api_key = api_key
        self.top_n = top_n
        self.timeout = timeout
        self.model = model
        # rstrip avoids a double slash when base_url ends with "/".
        self.endpoint = f"{self.base_url.rstrip('/')}/rerank"

    def compress_documents(
        self, documents: List[Document], query: str
    ) -> List[Document]:
        """
        Rerank documents against a query.

        Args:
            documents: Documents to be reordered.
            query: Query string to score relevance against.

        Returns:
            Up to ``top_n`` documents ordered by descending relevance.
            On any request or parsing failure, falls back to the first
            ``top_n`` documents in their original order.
        """
        if not documents:
            return []

        # Per llama.cpp's OpenAI compatibility, "documents" is a plain
        # list of strings.
        payload = {
            "model": self.model,
            "query": query,
            "documents": [doc.page_content for doc in documents],
            "top_n": self.top_n,
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        try:
            response = requests.post(
                self.endpoint, json=payload, headers=headers, timeout=self.timeout
            )
            response.raise_for_status()  # raise on HTTP error status
            results = response.json()

            # Response shape:
            # {"results": [{"index": 0, "document": "...", "relevance_score": 0.8}, ...]}
            # sorted by descending relevance_score; map indices back to docs.
            sorted_indices = [item["index"] for item in results["results"]]
            return [documents[idx] for idx in sorted_indices]

        except (requests.RequestException, KeyError, TypeError,
                ValueError, IndexError) as e:
            # Best-effort fallback: keep original order, truncated to top_n.
            logging.getLogger(__name__).warning(
                "Remote rerank failed; falling back to original order. Error: %s", e
            )
            return documents[:self.top_n]