Merge branch 'main' into feat-pictureAnalysis

This commit is contained in:
SuperManTouX 2026-03-03 16:57:46 +08:00
commit 2d4e777056
6 changed files with 458 additions and 6 deletions

151
LOGGING_SYSTEM.md Normal file
View File

@ -0,0 +1,151 @@
# 统一日志管理系统说明
## 功能特性
1. **多级别日志支持**:DEBUG, INFO, WARNING, ERROR, CRITICAL
2. **结构化日志**:支持JSON格式的结构化日志输出
3. **文件轮转**:自动按日期和大小分割日志文件
4. **系统监控**:记录系统状态和性能指标
5. **请求追踪**:记录API请求和响应信息
6. **错误追踪**:详细记录异常和错误信息
## 使用方法
### 1. 基本日志记录
```python
from utils.logger import log_debug, log_info, log_warning, log_error, log_critical
log_info("服务启动成功")
log_warning("内存使用率较高")
log_error("API请求失败")
```
### 2. 结构化日志
```python
from utils.logger import log_structured
log_structured(
"info",
"用户登录成功",
user_id="12345",
ip_address="192.168.1.100",
timestamp=datetime.now().isoformat()
)
```
### 3. 请求日志
```python
from utils.logger import log_request_info, log_response_info
log_request_info("POST", "/api/chat", "192.168.1.100", "Mozilla/5.0...")
log_response_info(200, 150.5, "/api/chat", "POST", "192.168.1.100")
```
### 4. 错误详情记录
```python
from utils.logger import log_error_detail
try:
# 可能出错的代码
pass
except Exception as e:
log_error_detail(
type(e).__name__,
str(e),
str(e.__traceback__),
context={"user_id": "123", "action": "chat_request"}
)
```
### 5. 对话交互记录
```python
from utils.logger import log_chat_interaction
log_chat_interaction(
user_input="你好,请帮我分析这张图片",
ai_response="这张图片显示了一座山和一片湖泊",
model="qwen-vl-plus",
conversation_id="conv_abc123"
)
```
### 6. 系统状态记录
```python
from utils.logger import log_system_status
log_system_status(
status="healthy",
uptime=3600.5,
cpu_usage=45.2,
memory_usage=60.8,
disk_usage=75.1
)
```
## 配置
### 环境变量配置
| 环境变量 | 默认值 | 说明 |
|---------|--------|------|
| LOG_LEVEL | INFO | 日志级别 |
| LOG_DIR | logs | 日志文件目录 |
| LOG_MAX_BYTES | 10485760 | 单个日志文件最大大小 |
| LOG_BACKUP_COUNT | 5 | 保留的备份日志数量 |
### 配置文件
创建 `logging.conf` 文件来配置日志系统:
```
LOG_LEVEL=INFO
LOG_DIR=logs
LOG_MAX_BYTES=10485760
LOG_BACKUP_COUNT=5
```
## 日志文件组织
- 日志文件按日期分割:`ai-chat-api_2026-03-03.log`
- 自动轮转,当日志文件达到指定大小时创建新文件
- 保留最近5个日志文件,旧文件会被自动删除
- 日志文件存放在 `logs/` 目录下
## 集成到现有代码
在你的 FastAPI 应用中:
```python
from utils.logger import setup_global_logger
# 初始化日志系统
logger = setup_global_logger()
@app.middleware("http")
async def logging_middleware(request, call_next):
start_time = time.time()
# 记录请求
logger.info(f"Request: {request.method} {request.url.path}")
response = await call_next(request)
# 记录响应
process_time = time.time() - start_time
logger.info(f"Response: {response.status_code} in {process_time:.2f}s")
return response
```
## 注意事项
1. 使用结构化日志,便于日志分析和查询
2. 避免在日志中记录敏感信息,如密码、token等
3. 适当使用日志级别,避免过度记录DEBUG信息
4. 定期清理旧的日志文件,防止磁盘空间不足

38
server/init_logging.py Normal file
View File

@ -0,0 +1,38 @@
#!/usr/bin/env python
"""
初始化日志系统
"""
import os
from utils.logger import setup_global_logger
def init_logging_system():
    """
    Initialize the application-wide logging system.

    Configuration precedence (later wins): built-in defaults, then
    environment variables (LOG_LEVEL, LOG_DIR, LOG_MAX_BYTES,
    LOG_BACKUP_COUNT), then an optional ``logging.conf`` file of
    KEY=VALUE lines (``#`` starts a comment).

    Returns:
        The configured global logger.
    """
    # Defaults, overridable via environment variables.
    log_level = os.getenv("LOG_LEVEL", "INFO")
    log_dir = os.getenv("LOG_DIR", "logs")
    # These two keys are documented in LOGGING_SYSTEM.md / logging.conf but
    # were previously never read — they now flow through to the logger setup.
    max_bytes = _parse_int(os.getenv("LOG_MAX_BYTES"), 10 * 1024 * 1024)
    backup_count = _parse_int(os.getenv("LOG_BACKUP_COUNT"), 5)

    # Optional config file overrides; a missing file is not an error.
    try:
        with open("logging.conf", "r", encoding="utf-8") as f:
            for raw_line in f:
                # Strip inline/full-line comments, then whitespace.
                line = raw_line.split("#", 1)[0].strip()
                if "=" not in line:
                    continue
                key, value = (part.strip() for part in line.split("=", 1))
                if key == "LOG_LEVEL":
                    log_level = value
                elif key == "LOG_DIR":
                    log_dir = value
                elif key == "LOG_MAX_BYTES":
                    max_bytes = _parse_int(value, max_bytes)
                elif key == "LOG_BACKUP_COUNT":
                    backup_count = _parse_int(value, backup_count)
    except FileNotFoundError:
        pass  # Fall back to env vars / defaults.

    # Set up the global logging system.
    logger = setup_global_logger(
        name="ai-chat-api",
        log_level=log_level,
        log_dir=log_dir,
        max_bytes=max_bytes,
        backup_count=backup_count,
    )
    return logger


def _parse_int(value, default: int) -> int:
    """Parse *value* as an int, returning *default* on None or bad input."""
    try:
        return int(value)
    except (TypeError, ValueError):
        return default
if __name__ == "__main__":
    # Running this module directly doubles as a smoke test of the setup.
    app_logger = init_logging_system()
    app_logger.info("Logging system initialized successfully")

14
server/logging.conf Normal file
View File

@ -0,0 +1,14 @@
# 日志配置文件
# 可以在 .env 文件中设置以下环境变量来控制日志行为
# 日志级别: DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_LEVEL=INFO
# 日志文件目录
LOG_DIR=logs
# 日志文件最大大小(字节),默认 10MB
LOG_MAX_BYTES=10485760
# 保留的备份日志文件数量
LOG_BACKUP_COUNT=5

View File

@ -2,12 +2,18 @@
数据模型定义
"""
from pydantic import BaseModel
from typing import Dict, List, Optional, Any
from typing import Dict, List, Optional, Any, Union
class ChatMessageContentItem(BaseModel):
    """One item of a multimodal chat-message payload (text or image reference)."""

    type: str  # "text" or "image_url"
    # Presumably populated when type == "text" — confirm against callers.
    text: Optional[str] = None
    image_url: Optional[Dict[str, str]] = None  # {"url": "...", "detail": "auto|low|high"}
class ChatMessage(BaseModel):
    """A single chat message; content may be a plain string or multimodal items."""

    # NOTE(review): role values look like the usual "user"/"assistant" set,
    # but the valid set is not visible here — confirm against callers.
    role: str
    # Supports a plain string or a list of content items (text / image_url).
    # Removed the dead duplicate `content: str` annotation that was left
    # alongside this one (merge residue; the later annotation wins anyway).
    content: Union[str, List[ChatMessageContentItem]]
    images: Optional[List[str]] = None
    files: Optional[List[str]] = None

View File

@ -7,6 +7,8 @@ import uuid
from datetime import datetime
from typing import Dict
from .logger import log_request_info, log_response_info, log_error_detail, log_chat_interaction
def get_current_timestamp():
"""获取当前时间戳"""
@ -31,14 +33,12 @@ def format_api_response(content: str, conversation_id: str = None, model: str =
def log_request(method: str, path: str, client_ip: str = "unknown"):
    """Log an incoming HTTP request (method, path, client IP)."""
    # NOTE(review): this hunk shows BOTH the old plain-print logging and the
    # new structured call — looks like diff/merge residue; after the merge
    # only the log_request_info call should remain. Verify the merged file.
    print(f"[INFO] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} - "
          f"HTTP {method} {path} - IP: {client_ip}")
    log_request_info(method, path, client_ip)
def log_response(status_code: int, process_time: float):
    """Log an outgoing HTTP response (status code, processing time in ms)."""
    # NOTE(review): diff residue — the plain print below appears to be the
    # removed line and log_response_info its replacement; confirm that only
    # the structured call survives in the merged file.
    print(f"[INFO] {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} - "
          f"Response {status_code}, Process Time: {process_time:.2f}ms")
    log_response_info(status_code, process_time)
def extract_delta_content(full_content: str, previous_content: str) -> str:

243
server/utils/logger.py Normal file
View File

@ -0,0 +1,243 @@
"""
统一日志管理系统
提供结构化日志记录功能支持不同日志级别文件输出轮转等
"""
import logging
import os
import sys
from datetime import datetime
from pathlib import Path
from logging.handlers import RotatingFileHandler
import json
class LoggerSetup:
    """Configure a named logger with a stdout handler and a size-rotated,
    date-stamped file handler."""

    def __init__(self, name: str = "ai-chat-server", log_level: str = "INFO",
                 log_dir: str = "logs", max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5):
        """
        Initialize the logging system.

        Args:
            name: logger name (also embedded in the log-file name)
            log_level: 'DEBUG' | 'INFO' | 'WARNING' | 'ERROR' | 'CRITICAL';
                unknown values silently fall back to INFO
            log_dir: directory for log files (created if missing)
            max_bytes: maximum size of a single log file before rotation
            backup_count: number of rotated backup files to keep
        """
        self.name = name
        self.log_level = getattr(logging, log_level.upper(), logging.INFO)
        self.log_dir = Path(log_dir)
        self.max_bytes = max_bytes
        self.backup_count = backup_count
        # parents=True so nested paths like "var/app/logs" work; the original
        # mkdir(exist_ok=True) raised FileNotFoundError for such paths.
        self.log_dir.mkdir(parents=True, exist_ok=True)
        # Log line format shared by console and file handlers.
        self.formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'
        )
        # Create the logger instance.
        self.logger = self._setup_logger()

    def _setup_logger(self):
        """Build and return the configured logger instance."""
        logger = logging.getLogger(self.name)
        logger.setLevel(self.log_level)
        # Avoid duplicate handlers on repeated setup — but close them first,
        # otherwise clearing the list leaks open log-file descriptors.
        for handler in list(logger.handlers):
            handler.close()
            logger.removeHandler(handler)
        # Console handler.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(self.log_level)
        console_handler.setFormatter(self.formatter)
        logger.addHandler(console_handler)
        # File handler — one file per date, rotated by size within the day.
        date_str = datetime.now().strftime("%Y-%m-%d")
        log_file = self.log_dir / f"{self.name}_{date_str}.log"
        file_handler = RotatingFileHandler(
            str(log_file),
            maxBytes=self.max_bytes,
            backupCount=self.backup_count,
            encoding='utf-8'
        )
        file_handler.setLevel(self.log_level)
        file_handler.setFormatter(self.formatter)
        logger.addHandler(file_handler)
        return logger

    def get_logger(self):
        """Return the configured logger instance."""
        return self.logger
# Module-level singleton shared by the convenience functions below.
_logger_instance = None


def setup_global_logger(name: str = "ai-chat-server", log_level: str = "INFO",
                        log_dir: str = "logs", max_bytes: int = 10 * 1024 * 1024,
                        backup_count: int = 5):
    """
    Configure the process-wide logger and return it.

    Args:
        name: logger name
        log_level: log level name
        log_dir: directory for log files
        max_bytes: maximum size of a single log file
        backup_count: number of rotated backups to keep
    """
    global _logger_instance
    _logger_instance = LoggerSetup(
        name, log_level, log_dir, max_bytes, backup_count
    ).get_logger()
    return _logger_instance
def get_logger(name: str = None):
    """
    Return the global logger, lazily initializing it with defaults.

    Args:
        name: when given (and different from the global logger's own name),
            a child logger of the global one is returned instead.
    """
    global _logger_instance
    if _logger_instance is None:
        # Not configured yet — fall back to a default setup.
        _logger_instance = setup_global_logger()
    if not name or name == _logger_instance.name:
        return _logger_instance
    return _logger_instance.getChild(name)
# Convenience wrappers that forward to the global logger.


def log_debug(message: str, *args, **kwargs):
    """Log a DEBUG-level message via the global logger."""
    get_logger().debug(message, *args, **kwargs)


def log_info(message: str, *args, **kwargs):
    """Log an INFO-level message via the global logger."""
    get_logger().info(message, *args, **kwargs)


def log_warning(message: str, *args, **kwargs):
    """Log a WARNING-level message via the global logger."""
    get_logger().warning(message, *args, **kwargs)


def log_error(message: str, *args, **kwargs):
    """Log an ERROR-level message via the global logger."""
    get_logger().error(message, *args, **kwargs)


def log_critical(message: str, *args, **kwargs):
    """Log a CRITICAL-level message via the global logger."""
    get_logger().critical(message, *args, **kwargs)


def log_exception(message: str = ""):
    """Log the current exception with traceback (call inside an except block)."""
    get_logger().exception(message)
def log_structured(level: str, message: str, **details):
    """
    Emit a JSON-formatted structured log record.

    Args:
        level: log level name ('debug', 'info', 'warning', 'error',
            'critical'); an unknown name now falls back to 'info' instead of
            raising AttributeError/TypeError (bug fix)
        message: human-readable log message
        **details: extra structured fields; must be JSON-serializable
            (non-serializable values raise TypeError, as before)
    """
    logger = get_logger()
    structured_log = {
        "timestamp": datetime.now().isoformat(),
        "level": level.upper(),
        "message": message,
        "details": details
    }
    payload = json.dumps(structured_log, ensure_ascii=False)
    # getattr on an arbitrary string crashed on bad levels; fall back safely.
    log_method = getattr(logger, level.lower(), None)
    if not callable(log_method):
        log_method = logger.info
    log_method(payload)
def log_request_info(method: str, path: str, client_ip: str = "unknown",
                     user_agent: str = "", referer: str = ""):
    """Emit a structured "API Request" log entry."""
    fields = {
        "method": method,
        "path": path,
        "client_ip": client_ip,
        "user_agent": user_agent,
        "referer": referer,
    }
    log_structured("info", "API Request", **fields)
def log_response_info(status_code: int, process_time: float, path: str = "",
                      method: str = "", client_ip: str = ""):
    """Emit a structured "API Response" log entry (process time in ms)."""
    fields = {
        "status_code": status_code,
        "process_time_ms": process_time,
        "path": path,
        "method": method,
        "client_ip": client_ip,
    }
    log_structured("info", "API Response", **fields)
def log_error_detail(error_type: str, error_message: str, traceback_info: str = "",
                     context: dict = None):
    """Emit a structured error log with optional traceback text and context."""
    extra_context = context if context else {}
    log_structured(
        "error",
        f"{error_type}: {error_message}",
        traceback=traceback_info,
        context=extra_context,
    )
def log_chat_interaction(user_input: str, ai_response: str, model: str = "",
                         conversation_id: str = "", tokens_used: dict = None):
    """Emit a structured log of one user/AI exchange (texts clipped to 100 chars)."""
    def _clip(text: str) -> str:
        # Keep log entries small by truncating long message bodies.
        return text[:100] + "..." if len(text) > 100 else text

    log_structured(
        "info",
        "Chat Interaction",
        user_input=_clip(user_input),
        ai_response=_clip(ai_response),
        model=model,
        conversation_id=conversation_id,
        tokens_used=tokens_used
    )
def log_system_status(status: str, uptime: float = 0, cpu_usage: float = 0,
                      memory_usage: float = 0, disk_usage: float = 0):
    """Emit a structured snapshot of process/system health metrics."""
    metrics = {
        "status": status,
        "uptime_seconds": uptime,
        "cpu_percent": cpu_usage,
        "memory_percent": memory_usage,
        "disk_percent": disk_usage,
    }
    log_structured("info", "System Status", **metrics)