Files
ailine/.env.docker
root efa8bbcd03
Some checks failed
构建并部署 AI Agent 服务 / deploy (push) Failing after 5m11s
添加配置
2026-04-21 22:07:20 +08:00

84 lines
3.8 KiB
Docker
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# =============================================================================
# Docker Compose server deployment configuration template
# Usage: cp .env.docker .env, then fill in the sensitive secrets
# =============================================================================
# -----------------------------------------------------------------------------
# AI model API keys (⭐ sensitive — real values required)
# -----------------------------------------------------------------------------
# NOTE: comments are kept on their own lines — some env-file parsers
# (e.g. `docker run --env-file`) do not support inline comments and would
# include " # ..." in the value.
# ⭐ sensitive secret
ZHIPUAI_API_KEY=your_zhipuai_api_key_here
# ⭐ sensitive secret
DEEPSEEK_API_KEY=your_deepseek_api_key_here
# ⭐ sensitive secret
LLAMACPP_API_KEY=your_llamacpp_api_key_here
# -----------------------------------------------------------------------------
# PostgreSQL database configuration (split fields, easier to manage)
# -----------------------------------------------------------------------------
DB_HOST=115.190.121.151
DB_PORT=5432
DB_USER=postgres
# ⭐ sensitive secret (kept on its own line — parsers without inline-comment
# support would otherwise include the comment in the password value)
DB_PASSWORD=your_db_password_here
DB_NAME=langgraph_db
# Full connection string (direct configuration is also supported; the split
# fields above take precedence). Built from the split fields so the
# host/port/user/db are defined in exactly one place. Requires an
# interpolating parser (Docker Compose .env interpolation supports this).
DB_URI=postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}?sslmode=disable
# -----------------------------------------------------------------------------
# Qdrant vector database configuration (URL + API key pair)
# -----------------------------------------------------------------------------
QDRANT_URL=http://115.190.121.151:6333
# ⭐ sensitive secret (own line — inline comments are not portable across
# env-file parsers)
QDRANT_API_KEY=your_qdrant_api_key_here
QDRANT_COLLECTION_NAME=mem0_user_memories
# -----------------------------------------------------------------------------
# llama.cpp service configuration (URL + API key pair)
# -----------------------------------------------------------------------------
# Main LLM service (Gemma-4-E2B GGUF) - port 18000 (mapped on the Docker host)
# NOTE(review): the variable is named VLLM_BASE_URL although this section is
# for llama.cpp — presumably kept for compatibility with consumers that read
# this name; confirm before renaming.
VLLM_BASE_URL=http://host.docker.internal:18000/v1
# Embedding service (Qwen3-Embedding-0.6B GGUF) - port 18001
LLAMACPP_EMBEDDING_URL=http://host.docker.internal:18001/v1
# LLAMACPP_API_KEY=your_llamacpp_api_key_here (already configured above)
# Reranker service (bge-reranker-v2-m3) - port 18002
LLAMACPP_RERANKER_URL=http://host.docker.internal:18002/v1
# -----------------------------------------------------------------------------
# RAG index-building configuration (non-sensitive, usable as-is)
# -----------------------------------------------------------------------------
RAG_COLLECTION_NAME=rag_documents
# Flat-chunking parameters (size unit — characters vs. tokens — is defined by
# the application; confirm there)
RAG_CHUNK_SIZE=500
RAG_CHUNK_OVERLAP=50
# Parent/child chunk sizes and overlaps, used by the parent-child strategy
# selected below
RAG_PARENT_CHUNK_SIZE=1000
RAG_CHILD_CHUNK_SIZE=200
RAG_PARENT_CHUNK_OVERLAP=100
RAG_CHILD_CHUNK_OVERLAP=20
RAG_STRATEGY=parent-child
RAG_STORAGE_TYPE=postgres
# -----------------------------------------------------------------------------
# Logging / debugging configuration (freely adjustable at deploy time)
# -----------------------------------------------------------------------------
# Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
# WARNING is recommended in production; switch to DEBUG when troubleshooting
LOG_LEVEL=WARNING
# Whether DEBUG mode is enabled
# true: verbose diagnostics, including full tool calls, database queries, etc.
# false: key information only — suitable for production
DEBUG=false
# Whether graph-transition tracing is enabled
# true: log each node's input/output state — useful when debugging the workflow
# false: tracing off, reduces log volume
ENABLE_GRAPH_TRACE=false
# -----------------------------------------------------------------------------
# Application behavior configuration
# -----------------------------------------------------------------------------
# presumably: summarize conversation memory every N messages/turns — confirm
# the exact unit against the application code
MEMORY_SUMMARIZE_INTERVAL=10
# -----------------------------------------------------------------------------
# Frontend configuration
# -----------------------------------------------------------------------------
# Docker Compose internal network — address the backend by its service name
# 'backend' (resolvable only from inside the compose network)
API_URL=http://backend:8079/chat