"""
前端配置管理模块

集中管理所有配置项,支持环境变量覆盖
"""

import os
from dataclasses import dataclass
from typing import Optional

from dotenv import load_dotenv

# Load variables from a local .env file into the process environment,
# so os.getenv below can pick up user-supplied overrides.
load_dotenv()


@dataclass
class FrontendConfig:
    """Frontend configuration - single place for all settings.

    Defaults can be overridden via environment variables (currently only
    ``API_URL``); see :meth:`_load_from_env`.
    """

    # ==================== API settings ====================
    # Base URL of the backend API; populated from the API_URL environment
    # variable in __post_init__ (the class-level default is a placeholder).
    api_base: str = ""

    # ==================== Page settings ====================
    page_title: str = "AI 个人助手"
    page_icon: str = "🤖"
    layout: str = "wide"

    # ==================== Model settings ====================
    default_model: str = "local"  # local llama.cpp model is the default
    # Mapping of model key -> human-readable label; when left as None it is
    # filled with the built-in options in __post_init__.
    model_options: Optional[dict] = None

    # ==================== User settings ====================
    default_user_id: str = "default_user"

    # ==================== History settings ====================
    history_limit: int = 50
    summary_max_length: int = 30

    # ==================== Streaming settings ====================
    stream_timeout: int = 120  # presumably seconds — TODO confirm with caller

    def __post_init__(self) -> None:
        """Fill in derived defaults, then apply environment overrides."""
        if self.model_options is None:
            self.model_options = {
                "local": "本地 llama.cpp(Gemma-4)",  # local model first
                "deepseek": "DeepSeek V3.2(在线)",  # DeepSeek in the middle
                "zhipu": "智谱 GLM-4.7-Flash(在线)",  # GLM last
            }

        # Environment variables have the highest priority, so load them last.
        self._load_from_env()

    def _load_from_env(self) -> None:
        """Load configuration from environment variables (highest priority).

        Reads ``API_URL`` (default ``http://127.0.0.1:8079``), drops a
        trailing ``/chat`` path segment plus any trailing slash, and stores
        the result in :attr:`api_base`.
        """
        api_url = os.getenv("API_URL", "http://127.0.0.1:8079").rstrip("/")
        # BUGFIX: the previous implementation used
        # api_url.replace("/chat", ""), which removed *every* occurrence of
        # "/chat" anywhere in the URL (e.g. "http://host/chatbot" became
        # "http://host/bot"). Only a trailing "/chat" suffix should go.
        if api_url.endswith("/chat"):
            api_url = api_url[: -len("/chat")]
        self.api_base = api_url
# Shared module-level configuration object (singleton pattern): import this
# instance rather than constructing FrontendConfig yourself.
config = FrontendConfig()