ai-chat-ui/server_python/api/chat_routes.py

376 lines
15 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

"""
API 路由定义
"""
import os
import json
import uuid
from datetime import datetime
from typing import Dict, List
from pathlib import Path
from fastapi import HTTPException, File, UploadFile
from fastapi.responses import JSONResponse, StreamingResponse
import dashscope
from dashscope import Generation
# 导入模型和工具函数(使用绝对路径)
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from models.chat_models import ChatRequest, ModelInfo
from utils.helpers import (
get_current_timestamp,
generate_unique_id,
format_api_response,
extract_delta_content
)
# In-memory "database" of conversations. A real deployment should use
# persistent storage — everything here is lost on process restart.
conversations_db: Dict[str, dict] = {}
# Directory that receives uploaded files; created at import time if missing.
upload_dir = Path("uploads")
upload_dir.mkdir(exist_ok=True)
def _normalize_request(body: dict):
    """Normalize an incoming chat request into common call parameters.

    Two request shapes are accepted:
      * OpenAI-compatible (has a ``messages`` key, sent by ``streamChat``);
      * the simplified front-end shape (``message`` / ``systemPrompt`` /
        ``maxTokens``, sent by the ``chat`` function).

    Returns:
        Tuple ``(messages, model, stream, temperature, max_tokens)``.
    """
    if 'messages' in body:
        # Already OpenAI-compatible; streaming defaults to True here.
        return (
            body.get('messages', []),
            body.get('model', 'qwen-plus'),
            body.get('stream', True),
            body.get('temperature', 0.7),
            body.get('max_tokens', 2000),
        )
    # Simplified front-end format — convert to OpenAI-compatible messages.
    message_text = body.get('message', '')
    if isinstance(message_text, list):
        # Already a structured content list (e.g. text parts plus images).
        user_content = message_text
    else:
        # Plain string: wrap in the standard content-part format.
        user_content = [{"type": "text", "text": message_text}]
    messages = [
        {"role": "system", "content": body.get('systemPrompt', '你是一个支持视觉理解的助手。')},
        {"role": "user", "content": user_content},
    ]
    return (
        messages,
        body.get('model', 'qwen-plus'),
        body.get('stream', False),  # this shape defaults to non-streaming
        body.get('temperature', 0.7),
        body.get('maxTokens', 2000),
    )


def _extract_content(response):
    """Pull the generated text out of a DashScope response, or return None.

    DashScope responses carry the text either under
    ``output.choices[0]['message']['content']`` (OpenAI-style) or under
    ``output['text']`` (DashScope-specific format); both are tried in order.
    """
    if not (hasattr(response, 'output') and response.output):
        return None
    output = response.output
    choices = getattr(output, 'choices', None)
    if (choices is not None and len(choices) > 0 and
            'message' in choices[0] and 'content' in choices[0]['message']):
        return choices[0]['message']['content']
    if 'text' in output:
        return output.get('text')
    return None


def _sse_chunk(model: str, delta: dict, finish_reason) -> str:
    """Serialize one OpenAI-compatible ``chat.completion.chunk`` SSE event."""
    data = {
        "id": f"chatcmpl-{generate_unique_id()}",
        "object": "chat.completion.chunk",
        "created": get_current_timestamp(),
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": delta,
                "finish_reason": finish_reason,
            }
        ],
    }
    return f"data: {json.dumps(data, ensure_ascii=False)}\n\n"


async def chat_endpoint_handler(body: dict):
    """Chat endpoint handler compatible with the Aliyun DashScope (Bailian) API.

    Receives the front-end chat request (either OpenAI-compatible or the
    simplified front-end shape) and forwards it to DashScope, returning
    either an SSE stream of completion chunks or a single JSON response.

    Raises:
        HTTPException 500: DashScope error or missing content (non-stream path).
    """
    try:
        messages, model, stream, temperature, max_tokens = _normalize_request(body)

        if stream:
            # Streaming: translate DashScope's cumulative snapshots into
            # OpenAI-style incremental delta chunks over SSE.
            async def event_generator():
                try:
                    responses = Generation.call(
                        model=model,
                        messages=messages,
                        stream=True,
                        max_tokens=max_tokens,
                        temperature=temperature,
                    )
                    full_content = ""  # accumulated text seen so far
                    for response in responses:
                        if response.status_code != 200:
                            error_data = {
                                "error": {
                                    "message": f"API Error: {response.code} - {response.message}",
                                    "type": "api_error",
                                    "param": None,
                                    "code": response.code,
                                }
                            }
                            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
                            break
                        content = _extract_content(response)
                        # DashScope sends cumulative content; emit only the
                        # newly-appended suffix, and skip whitespace-only deltas.
                        if content is not None and len(content) > len(full_content):
                            delta_content = extract_delta_content(content, full_content)
                            full_content = content
                            if delta_content.strip():
                                yield _sse_chunk(model, {"content": delta_content}, None)
                    # Terminate the stream the way OpenAI clients expect.
                    yield _sse_chunk(model, {}, "stop")
                    yield "data: [DONE]\n\n"
                except Exception as e:
                    error_data = {
                        "error": {
                            "message": str(e),
                            "type": "server_error",
                        }
                    }
                    yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"

            return StreamingResponse(event_generator(), media_type="text/event-stream")

        # Non-streaming: one blocking call, one JSON response.
        response = Generation.call(
            model=model,
            messages=messages,
            stream=False,
            max_tokens=max_tokens,
            temperature=temperature,
        )
        if response.status_code != 200:
            raise HTTPException(
                status_code=500,
                detail=f"API Error: {response.code} - {response.message}",
            )
        content = _extract_content(response)
        if not content:
            raise HTTPException(
                status_code=500,
                detail="API Response does not contain expected content",
            )
        chat_response = format_api_response(
            content=content,
            conversation_id=body.get('conversationId'),
            model=model,
        )
        if hasattr(response, 'usage') and response.usage:
            chat_response["usage"] = {
                "promptTokens": response.usage.input_tokens,
                "completionTokens": response.usage.output_tokens,
                "totalTokens": response.usage.total_tokens,
            }
        # BUG FIX: JSONResponse has no ``ensure_ascii`` parameter — the old
        # call raised TypeError on every successful non-stream response.
        return JSONResponse(content=chat_response)
    except HTTPException:
        # BUG FIX: re-raise as-is so deliberate status codes are not
        # re-wrapped by the generic handler below.
        raise
    except Exception as e:
        print(f"[ERROR] Error in chat endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
async def get_models_handler():
    """Return the catalogue of selectable chat models as plain dicts."""
    # (id, display name, description) — shared fields are filled in below.
    catalog = [
        ("qwen-max", "通义千问 Max", "最强大的模型"),
        ("qwen-plus", "通义千问 Plus", "能力均衡"),
        ("qwen-turbo", "通义千问 Turbo", "速度更快、成本更低"),
    ]
    return [
        ModelInfo(
            id=model_id,
            name=display_name,
            description=description,
            maxTokens=8192,
            provider="Aliyun",
        ).dict()
        for model_id, display_name, description in catalog
    ]
async def get_conversations_handler():
    """Return every stored conversation from the in-memory store."""
    return [*conversations_db.values()]
async def get_conversation_handler(conversation_id: str):
    """Fetch a single conversation by id; responds 404 when unknown."""
    conversation = conversations_db.get(conversation_id)
    if conversation:
        return conversation
    raise HTTPException(status_code=404, detail="对话不存在")
async def save_conversation_handler(data: dict):
    """Create or update a conversation record and return the stored copy."""
    try:
        now = datetime.utcnow().isoformat()
        # Reuse the caller's id when present, otherwise mint a new one.
        conversation_id = data.get('id') or generate_unique_id()
        record = {
            "id": conversation_id,
            "title": data.get('title', '新对话'),
            "messages": data.get('messages', []),
            "updatedAt": now,
            "createdAt": data.get('createdAt', now),
        }
        conversations_db[conversation_id] = record
        return record
    except Exception as e:
        print(f"[ERROR] Error saving conversation: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
async def delete_conversation_handler(conversation_id: str):
    """Delete a conversation; responds 404 when the id does not exist."""
    if conversation_id not in conversations_db:
        raise HTTPException(status_code=404, detail="对话不存在")
    del conversations_db[conversation_id]
    return {"success": True, "message": "删除成功"}
async def upload_file_handler(file: UploadFile = File(...)):
    """File upload handler.

    Validates the MIME type, stores the file under ``upload_dir`` with a
    collision-safe generated name, and returns url/name/size/mimeType
    metadata for the front end.

    Raises:
        HTTPException 400: unsupported content type.
        HTTPException 500: any failure while reading or saving the file.
    """
    # Whitelist of accepted MIME types (images, plain text, PDF).
    allowed_types = {
        'image/jpeg', 'image/png', 'image/gif', 'image/webp',
        'text/plain', 'application/pdf',
    }
    # BUG FIX: validate *outside* the try block — previously the 400 raised
    # here was caught by the blanket ``except Exception`` below and
    # re-wrapped as a 500 "上传失败", hiding the real status code.
    if file.content_type not in allowed_types:
        raise HTTPException(status_code=400, detail=f"不支持的文件类型: {file.content_type}")
    try:
        # ``filename`` may be absent for some clients; fall back to no extension
        # instead of letting Path(None) raise.
        file_extension = Path(file.filename).suffix.lower() if file.filename else ""
        # Timestamp + unique id keeps names collision-free and roughly sortable.
        unique_filename = f"{int(datetime.utcnow().timestamp())}_{generate_unique_id()}{file_extension}"
        file_path = upload_dir / unique_filename
        content = await file.read()
        file_path.write_bytes(content)
        file_url = f"http://localhost:8000/uploads/{unique_filename}"
        result = {
            "url": file_url,
            "name": file.filename,
            "size": len(content),
            "mimeType": file.content_type,
        }
        print(f"[INFO] File uploaded: {result}")
        return result
    except Exception as e:
        print(f"[ERROR] Upload error: {str(e)}")
        raise HTTPException(status_code=500, detail=f"上传失败: {str(e)}")
def serve_upload_handler(filename: str):
    """Serve a previously uploaded file from the uploads directory.

    SECURITY FIX: ``filename`` arrives straight from the URL, so the
    requested path is resolved and must remain inside ``upload_dir`` —
    otherwise a crafted name like ``../../etc/passwd`` would escape the
    uploads directory. Out-of-tree or missing paths both answer 404.
    """
    from fastapi.responses import FileResponse
    file_path = (upload_dir / filename).resolve()
    # Confine to upload_dir (subdirectories allowed) and require existence.
    if upload_dir.resolve() not in file_path.parents or not file_path.exists():
        raise HTTPException(status_code=404, detail="文件不存在")
    return FileResponse(str(file_path))
async def stop_generation_handler(message_id: str = None):
    """Acknowledge a stop-generation request.

    No actual cancellation happens here — the handler only reports that
    the stop instruction was issued, optionally echoing the message id.
    """
    if message_id:
        return {"success": True, "message": f"已发出停止指令消息ID: {message_id}"}
    return {"success": True, "message": "已发出停止指令"}