Files
ailine/.env.example
root 626bae54ff
Some checks failed
构建并部署 AI Agent 服务 / deploy (push) Failing after 18s
前端修改
2026-04-16 03:21:38 +08:00

42 lines
1.7 KiB
Plaintext
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# =============================================================================
# Agent1 环境配置模板
# 用法: cp .env.example .env 然后根据实际需求修改
# =============================================================================
# ⭐ 敏感密钥配置
# =============================================================================
# AI 模型 API 密钥
ZHIPUAI_API_KEY=your_zhipuai_api_key_here
DEEPSEEK_API_KEY=your_deepseek_api_key_here
# llama.cpp 服务认证 Token(需与容器启动参数一致)
LLAMACPP_API_KEY=token-abc123
# =============================================================================
# ⭐ 日志调试配置(本地开发可灵活调整)
# =============================================================================
# 日志级别:DEBUG, INFO, WARNING, ERROR, CRITICAL
# 本地开发推荐 DEBUG,生产环境使用 WARNING
LOG_LEVEL=DEBUG
# 是否启用 DEBUG 模式
DEBUG=true
# 是否启用 Graph 流转追踪
ENABLE_GRAPH_TRACE=true
# =============================================================================
# ⭐ 可选配置(如需覆盖 Dockerfile/docker-compose.yml 中的默认值)
# =============================================================================
# 数据库连接(如需使用本地数据库而非远程服务器)
# DB_URI=postgresql://postgres:mysecretpassword@localhost:5432/langgraph_db?sslmode=disable
# Qdrant 地址(如需使用本地 Qdrant 而非远程服务器)
# QDRANT_URL=http://localhost:6333
QDRANT_COLLECTION_NAME=mem0_user_memories
# llama.cpp 服务地址(如端口有变化)
# VLLM_BASE_URL=http://localhost:8081/v1
# VLLM_EMBEDDING_URL=http://localhost:8082/v1
# 前端 API 地址(本地开发时需显式配置)
# 注意:这里只需要域名和端口,不需要 /chat 路径
API_URL=http://localhost:8083