Merge branch 'fix/more-model-and-prompt-lost' into main

This commit is contained in:
肖应宇 2026-03-11 14:55:18 +08:00
commit 0da4e06050
12 changed files with 520 additions and 123 deletions

View File

@@ -14,12 +14,40 @@ from core import get_logger
logger = get_logger()
# Models that support deep thinking
THINKING_MODELS = {"qwen3-max", "qwen3.5-plus"}
# Models that must use the multimodal API (qwen3.5 series)
MULTIMODAL_API_MODELS = {"qwen3.5-plus", "qwen3.5-flash"}
# Bailian (DashScope) model configuration
DASHSCOPE_MODELS = [
ModelInfo(
id="qwen-max",
name="通义千问 Max",
description="最强大的模型",
id="qwen3-max",
name="Qwen3-Max",
description="千问系列效果最好的模型,适合复杂、多步骤的任务。",
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="qwen3.5-plus",
name="Qwen3.5-Plus",
description="能力均衡推理效果、成本和速度介于千问Max和千问Flash之间适合中等复杂任务。",
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_vision=True,
supports_files=False,
),
ModelInfo(
id="qwen3.5-flash",
name="Qwen3.5-Flash",
description="千问系列速度最快、成本极低的模型适合简单任务。千问Flash采用灵活的阶梯定价相比千问Turbo计费更合理。",
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
@@ -27,28 +55,6 @@ DASHSCOPE_MODELS = [
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="qwen-plus",
name="通义千问 Plus",
description="能力均衡",
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="qwen-turbo",
name="通义千问 Turbo",
description="速度更快、成本更低",
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="qwen-vl-max",
name="通义万相 VL-Max",
@@ -60,17 +66,6 @@ DASHSCOPE_MODELS = [
supports_vision=True,
supports_files=False,
),
ModelInfo(
id="qwen-vl-plus",
name="通义万相 VL-Plus",
description="支持视觉理解的多模态模型",
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=True,
supports_files=False,
),
]
@@ -89,6 +84,14 @@ class DashScopeAdapter(BaseAdapter):
"""Get the API key."""
return os.getenv("ALIYUN_API_KEY") or os.getenv("DASHSCOPE_API_KEY", "")
def _needs_multimodal_api(self, model: str) -> bool:
"""Check whether the model must use the multimodal API."""
return model.lower() in MULTIMODAL_API_MODELS
def _supports_thinking(self, model: str) -> bool:
"""Check whether the model supports deep thinking."""
return model.lower() in THINKING_MODELS
def list_models(self) -> List[ModelInfo]:
return DASHSCOPE_MODELS
@@ -104,6 +107,7 @@ class DashScopeAdapter(BaseAdapter):
logger.info(f" - temperature: {request.temperature}")
logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - files: {request.files}")
logger.info(f" - deep_thinking: {request.deep_thinking}")
logger.info(
f" - messages: {json.dumps(request.messages, ensure_ascii=False, indent=2)}"
)
@@ -112,7 +116,11 @@ class DashScopeAdapter(BaseAdapter):
has_multimodal = self._has_multimodal_content(request)
logger.info(f" - has_multimodal: {has_multimodal}")
if has_multimodal:
# Check whether the multimodal API is required (qwen3.5 series)
needs_multimodal_api = self._needs_multimodal_api(request.model)
logger.info(f" - needs_multimodal_api: {needs_multimodal_api}")
if has_multimodal or needs_multimodal_api:
return await self._multimodal_chat(request)
else:
return await self._text_chat(request)
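The dispatch above routes a request to the multimodal endpoint when either the message content is multimodal or the model itself requires the multimodal API. A minimal standalone sketch of that rule, reusing the MULTIMODAL_API_MODELS set defined at the top of the file (helper name hypothetical):

def pick_endpoint(model: str, has_multimodal_content: bool) -> str:
    # qwen3.5 models must call MultiModalConversation even for plain text
    if has_multimodal_content or model.lower() in MULTIMODAL_API_MODELS:
        return "multimodal"
    return "text"

assert pick_endpoint("qwen3-max", False) == "text"
assert pick_endpoint("qwen3.5-plus", False) == "multimodal"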
@@ -136,6 +144,9 @@ class DashScopeAdapter(BaseAdapter):
# Convert the message format
messages = self._build_text_messages(request)
logger.info(f"[DashScope] 文本聊天 - 转换后的消息:")
logger.info(f" - messages_count: {len(messages)}")
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
if request.stream:
return self._stream_text_chat(messages, request)
@@ -163,26 +174,97 @@ class DashScopeAdapter(BaseAdapter):
"""Streaming text chat."""
logger.info(f"[DashScope] Starting streaming text response...")
# Check whether deep thinking is enabled
thinking_enabled = request.deep_thinking and self._supports_thinking(request.model)
logger.info(f"[DashScope] Deep thinking: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(request.model)})")
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
from dashscope import Generation
full_content = ""
full_reasoning = ""
chunk_count = 0
responses = Generation.call(
model=request.model,
messages=messages,
stream=True,
temperature=request.temperature,
max_tokens=request.max_tokens,
result_format="message",
)
error_occurred = False
# Build the API call parameters
api_params = {
"model": request.model,
"messages": messages,
"stream": True,
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"result_format": "message",
}
# Add the deep-thinking parameter
if thinking_enabled:
api_params["enable_thinking"] = True
# Log the API call parameters
logger.info(f"[DashScope] API call parameters:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - result_format: {api_params['result_format']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
try:
responses = Generation.call(**api_params)
except Exception as e:
error_occurred = True
logger.error(f"[DashScope] API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
# Return an error response
error_data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [{
"index": 0,
"delta": {"content": f"API 调用失败: {str(e)}"},
"finish_reason": "stop",
}],
}
yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
return
for resp in responses:
if resp.status_code == 200:
chunk_count += 1
content = resp.output.choices[0].message.content
choice = resp.output.choices[0]
# Handle deep-thinking content (reasoning_content)
reasoning_content = getattr(choice.message, "reasoning_content", None)
if reasoning_content:
# Compute the delta
if len(reasoning_content) > len(full_reasoning):
delta_reasoning = reasoning_content[len(full_reasoning):]
full_reasoning = reasoning_content
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [
{
"index": 0,
"delta": {"reasoning_content": delta_reasoning},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
continue
# Handle regular content
content = choice.message.content
if content and len(content) > len(full_content):
# DashScope's text stream returns the full accumulated content; compute the delta
delta = content[len(full_content) :]
@@ -201,6 +283,9 @@ class DashScopeAdapter(BaseAdapter):
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
else:
# Log non-200 responses
logger.warning(f"[DashScope] Non-200 response: status_code={resp.status_code}, code={resp.code}, message={resp.message}")
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
@@ -216,6 +301,8 @@ class DashScopeAdapter(BaseAdapter):
logger.info(f"[DashScope] Streaming text response finished:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} chars")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} chars")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
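Because the text Generation stream returns the full accumulated content on every chunk, the generator above slices off the already-seen prefix to produce deltas. A self-contained sketch of that accumulation rule:

def cumulative_to_deltas(snapshots):
    # Convert cumulative stream snapshots ("He", "Hello", ...) into increments
    seen = ""
    for snapshot in snapshots:
        if snapshot and len(snapshot) > len(seen):
            yield snapshot[len(seen):]
            seen = snapshot

assert list(cumulative_to_deltas(["He", "Hello", "Hello!"])) == ["He", "llo", "!"]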
@@ -230,17 +317,57 @@ class DashScopeAdapter(BaseAdapter):
from dashscope import Generation
resp = Generation.call(
model=request.model,
messages=messages,
stream=False,
temperature=request.temperature,
max_tokens=request.max_tokens,
result_format="message",
)
# Check whether deep thinking is enabled
thinking_enabled = request.deep_thinking and self._supports_thinking(request.model)
logger.info(f"[DashScope] Deep thinking: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(request.model)})")
# Build the API call parameters
api_params = {
"model": request.model,
"messages": messages,
"stream": False,
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"result_format": "message",
}
# Add the deep-thinking parameter
if thinking_enabled:
api_params["enable_thinking"] = True
# Log the API call parameters
logger.info(f"[DashScope] API call parameters:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - result_format: {api_params['result_format']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
try:
resp = Generation.call(**api_params)
except Exception as e:
logger.error(f"[DashScope] API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
return JSONResponse(
status_code=500,
content={"error": f"DashScope API 调用异常: {str(e)}"},
)
if resp.status_code == 200:
content = resp.output.choices[0].message.content
message = resp.output.choices[0].message
content = message.content or ""
# Build the response message
response_message = {"role": "assistant", "content": content}
# Handle deep-thinking content
reasoning_content = getattr(message, "reasoning_content", None)
if reasoning_content:
response_message["reasoning_content"] = reasoning_content
response = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion",
@@ -249,7 +376,7 @@ class DashScopeAdapter(BaseAdapter):
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": content},
"message": response_message,
"finish_reason": "stop",
}
],
@@ -263,8 +390,11 @@ class DashScopeAdapter(BaseAdapter):
}
# Log the response
logger.info(f"[DashScope] Response:")
logger.info(f"[DashScope] Response succeeded:")
logger.info(f" - status_code: {resp.status_code}")
logger.info(f" - content_length: {len(content)} chars")
if reasoning_content:
logger.info(f" - reasoning_length: {len(reasoning_content)} chars")
logger.info(
f" - content_preview: {content[:200]}..."
if len(content) > 200
@@ -275,7 +405,10 @@ class DashScopeAdapter(BaseAdapter):
return JSONResponse(content=response)
logger.error(f"[DashScope] Request failed: {resp.code} - {resp.message}")
logger.error(f"[DashScope] Request failed:")
logger.error(f" - status_code: {resp.status_code}")
logger.error(f" - code: {resp.code}")
logger.error(f" - message: {resp.message}")
return JSONResponse(
status_code=500,
content={"error": f"DashScope Error: {resp.code} - {resp.message}"},
@@ -288,13 +421,20 @@ class DashScopeAdapter(BaseAdapter):
dashscope.api_key = self._get_api_key()
logger.info(f"[DashScope] Starting multimodal chat...")
# Convert the message format
messages = self._build_multimodal_messages(request)
logger.info(f"[DashScope] 多模态消息转换完成:")
logger.info(f" - messages_count: {len(messages)}")
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
# Select a multimodal model
model = request.model
if "qwen-" in model and "vl" not in model:
original_model = model
model = model.replace("qwen-", "qwen-vl-")
logger.info(f"[DashScope] 模型自动切换: {original_model} -> {model}")
if request.stream:
return self._stream_multimodal_chat(messages, model, request)
@@ -338,6 +478,8 @@ class DashScopeAdapter(BaseAdapter):
else:
img_url = ""
logger.info(f"[DashScope] Original image URL: {img_url}")
# Convert http URLs to file:// form (for local files)
if img_url.startswith(("http://", "https://")):
from urllib.parse import urlparse
@@ -350,42 +492,122 @@ class DashScopeAdapter(BaseAdapter):
img_url = f"file://{'/'.join(path_parts[uploads_idx:])}"
except ValueError:
pass
elif not img_url.startswith("file://"):
elif not img_url.startswith("file://") and not img_url.startswith(("http://", "https://")):
img_url = f"file://{img_url}"
logger.info(f"[DashScope] 转换后图片URL: {img_url}")
return img_url
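# For reference, the conversion above behaves roughly like this condensed,
# runnable sketch (host and paths are illustrative):
from urllib.parse import urlparse

def to_file_url(img_url: str) -> str:
    if img_url.startswith(("http://", "https://")):
        parts = urlparse(img_url).path.split("/")
        if "uploads" in parts:
            # Rewrite a served /uploads/ URL to a local relative file:// path
            return "file://" + "/".join(parts[parts.index("uploads"):])
        return img_url
    if not img_url.startswith("file://"):
        return f"file://{img_url}"
    return img_url

assert to_file_url("http://localhost:8000/uploads/2026/a.png") == "file://uploads/2026/a.png"
assert to_file_url("/tmp/a.png") == "file:///tmp/a.png"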
def _stream_multimodal_chat(
self, messages: List[Dict], model: str, request: ChatCompletionRequest
):
"""流式多模态聊天"""
logger.info(f"[DashScope] 开始流式多模态响应...")
logger.info(f" - model: {model}")
logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - temperature: {request.temperature}")
# Check whether deep thinking is enabled
thinking_enabled = request.deep_thinking and self._supports_thinking(model)
logger.info(f"[DashScope] Deep thinking: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(model)})")
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
from dashscope import MultiModalConversation
responses = MultiModalConversation.call(
model=model,
messages=messages,
stream=True,
max_tokens=request.max_tokens,
temperature=request.temperature,
)
full_content = ""
full_reasoning = ""
chunk_count = 0
error_occurred = False
# Build the API call parameters (logged below)
api_params = {
"model": model,
"messages": messages,
"stream": True,
"enable_thinking": False,
"max_tokens": request.max_tokens,
"temperature": request.temperature,
}
# Add the deep-thinking parameter
if thinking_enabled:
api_params["enable_thinking"] = True
logger.info(f"[DashScope] 流式多模态 API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
try:
responses = MultiModalConversation.call(**api_params)
except Exception as e:
error_occurred = True
logger.error(f"[DashScope] 多模态 API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
error_data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [{
"index": 0,
"delta": {"content": f"API 调用失败: {str(e)}"},
"finish_reason": "stop",
}],
}
yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
return
for resp in responses:
chunk_count += 1
if resp.status_code == 200:
try:
content_items = resp.output.choices[0]["message"]["content"]
choice = resp.output.choices[0]
message = choice["message"]
# Handle deep-thinking content (reasoning_content)
# The multimodal API also returns reasoning_content as independent fragments
reasoning_content = message.get("reasoning_content", "")
if reasoning_content:
delta_reasoning = reasoning_content
full_reasoning += reasoning_content
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [
{
"index": 0,
"delta": {"reasoning_content": delta_reasoning},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
continue
# Handle regular content
content_items = message.get("content", [])
text = ""
for item in content_items:
if isinstance(item, dict) and "text" in item:
text += item["text"]
if len(text) > len(full_content):
delta = text[len(full_content) :]
full_content = text
# The multimodal API returns content as independent fragments (not cumulative), so use each one directly as the delta
if text:
delta = text
full_content += text
data = {
"id": f"chatcmpl-{generate_unique_id()}",
@@ -401,8 +623,10 @@ class DashScopeAdapter(BaseAdapter):
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
except (KeyError, IndexError, TypeError):
pass
except (KeyError, IndexError, TypeError) as e:
logger.warning(f"[DashScope] Failed to parse multimodal response: {str(e)}")
else:
logger.warning(f"[DashScope] Non-200 response: status_code={resp.status_code}, code={resp.code}, message={resp.message}")
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
@@ -414,6 +638,19 @@ class DashScopeAdapter(BaseAdapter):
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
# Log the streaming result
logger.info(f"[DashScope] Streaming multimodal response finished:")
logger.info(f" - chunks: {chunk_count}")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} chars")
logger.info(f" - reasoning: {full_reasoning[:500]}..." if len(full_reasoning) > 500 else f" - reasoning: {full_reasoning}")
logger.info(f" - content_length: {len(full_content)} chars")
logger.info(
f" - content: {full_content[:500]}..."
if len(full_content) > 500
else f" - content: {full_content}"
)
return StreamingResponse(generator(), media_type="text/event-stream")
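# The generator above frames each delta as a server-sent event, keeping
# reasoning_content separate from content. A minimal client-side sketch
# (hypothetical consumer, not part of this codebase) that reassembles the stream:
import json

def collect_sse(lines):
    reasoning, content = "", ""
    for line in lines:
        if not line.startswith("data: "):
            continue
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        delta = json.loads(payload)["choices"][0]["delta"]
        reasoning += delta.get("reasoning_content", "")
        content += delta.get("content", "")
    return reasoning, content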
def _sync_multimodal_chat(
@@ -424,22 +661,64 @@ class DashScopeAdapter(BaseAdapter):
from dashscope import MultiModalConversation
resp = MultiModalConversation.call(
model=model,
messages=messages,
stream=False,
max_tokens=request.max_tokens,
temperature=request.temperature,
)
# Check whether deep thinking is enabled
thinking_enabled = request.deep_thinking and self._supports_thinking(model)
logger.info(f"[DashScope] Deep thinking: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(model)})")
logger.info(f"[DashScope] Starting non-streaming multimodal response...")
logger.info(f" - model: {model}")
logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - temperature: {request.temperature}")
# Build the API call parameters (logged below)
api_params = {
"model": model,
"messages": messages,
"stream": False,
"max_tokens": request.max_tokens,
"enable_thinking": False,
"temperature": request.temperature,
}
# Add the deep-thinking parameter
if thinking_enabled:
api_params["enable_thinking"] = True
logger.info(f"[DashScope] 非流式多模态 API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
try:
resp = MultiModalConversation.call(**api_params)
except Exception as e:
logger.error(f"[DashScope] 多模态 API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
return JSONResponse(
status_code=500,
content={"error": f"DashScope API 调用异常: {str(e)}"},
)
if resp.status_code == 200:
try:
content_items = resp.output.choices[0]["message"]["content"]
message = resp.output.choices[0]["message"]
content_items = message.get("content", [])
text = ""
for item in content_items:
if isinstance(item, dict) and "text" in item:
text += item["text"]
# Build the response message
response_message = {"role": "assistant", "content": text}
# Handle deep-thinking content
reasoning_content = message.get("reasoning_content")
if reasoning_content:
response_message["reasoning_content"] = reasoning_content
response = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion",
@@ -448,18 +727,38 @@ class DashScopeAdapter(BaseAdapter):
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": text},
"message": response_message,
"finish_reason": "stop",
}
],
}
# Log the response
logger.info(f"[DashScope] Multimodal response succeeded:")
logger.info(f" - status_code: {resp.status_code}")
logger.info(f" - content_length: {len(text)} 字符")
if reasoning_content:
logger.info(f" - reasoning_length: {len(reasoning_content)} 字符")
logger.info(
f" - content_preview: {text[:200]}..."
if len(text) > 200
else f" - content: {text}"
)
return JSONResponse(content=response)
except (KeyError, IndexError, TypeError) as e:
logger.error(f"[DashScope] 解析多模态响应异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
return JSONResponse(
status_code=500,
content={"error": f"Parse error: {str(e)}"},
)
logger.error(f"[DashScope] 多模态请求失败:")
logger.error(f" - status_code: {resp.status_code}")
logger.error(f" - code: {resp.code}")
logger.error(f" - message: {resp.message}")
return JSONResponse(
status_code=500,
content={"error": f"DashScope Error: {resp.code} - {resp.message}"},

View File

@@ -17,6 +17,17 @@ logger = get_logger()
# GLM model configuration
GLM_MODELS = [
ModelInfo(
id="glm-5",
name="GLM-5",
description="Coding与长程Agent能力SOTA",
max_tokens=128000,
provider="ZhipuAI",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="glm-4.6v",
name="GLM-4.6V(推荐)",

View File

@@ -11,15 +11,15 @@ from .base import BaseAdapter
# Map model-name prefixes to platform names
MODEL_PREFIX_MAP = {
# Zhipu GLM
"glm-": "glm",
"glm": "glm",
# Aliyun Bailian (Qwen series)
"qwen-": "dashscope",
"qwen": "dashscope",
# OpenAI
"gpt-": "openai",
"o1-": "openai",
"o3-": "openai",
"gpt": "openai",
"o1": "openai",
"o3": "openai",
# Deepseek
"deepseek-": "deepseek",
"deepseek": "deepseek",
}
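# Dropping the trailing hyphens is the core fix in this file: new model ids such
# as "qwen3-max" or "qwen3.5-plus" never matched the old "qwen-" prefix, so those
# requests fell through with no adapter (the "model lost" in the branch name).
# A sketch of the lookup (resolver name hypothetical; the actual registry code
# is outside this diff):
def resolve_platform(model: str) -> str | None:
    for prefix, platform in MODEL_PREFIX_MAP.items():
        if model.lower().startswith(prefix):
            return platform
    return None

assert resolve_platform("qwen3-max") == "dashscope"  # the old "qwen-" prefix missed this
assert resolve_platform("glm-5") == "glm"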
# Registered adapter instances

View File

@@ -42,14 +42,14 @@
</button>
<!-- Export conversation -->
<button
<!-- <button
class="header-btn"
title="导出对话"
:disabled="messageCount === 0"
@click="$emit('export')"
>
<Download :size="18" />
</button>
</button> -->
<!-- 更多操作 -->
<!-- <button

View File

@@ -223,15 +223,25 @@ async function handleSend(
await chatStore.createConversation();
}
// Resolve the system prompt: prefer options.systemPrompt, then the conversation settings
const systemPrompt = options?.systemPrompt || currentConversation.value?.settings?.systemPrompt;
// Check whether the conversation already contains a system message
const existingMessages = currentConversation.value?.messages || [];
const hasSystemMessage = existingMessages.some((m: any) => m.role === MessageRole.SYSTEM);
// If a system prompt is set and no system message exists yet, prepend one
if (systemPrompt && !hasSystemMessage) {
await chatStore.addMessage(MessageRole.SYSTEM, {
type: MessageType.TEXT,
text: systemPrompt,
});
}
// Re-read the messages so the newly added system message is included
const updatedMessages = currentConversation.value?.messages || [];
const MAX_HISTORY_ROUNDS = 20; // keep at most 20 rounds (40 messages)
const historyMessages = existingMessages
.filter(
(m: any) =>
m.role === MessageRole.USER || m.role === MessageRole.ASSISTANT,
)
.filter((m: any) => m.content?.text) //
const historyMessages = updatedMessages.filter((m: any) => m.content?.text) //
.slice(-(MAX_HISTORY_ROUNDS * 2))
.map((m: any) => ({ role: m.role, content: m.content.text }));

View File

@ -3,7 +3,7 @@
<div ref="containerRef" class="message-list" @scroll="handleScroll">
<!-- 欢迎界面 -->
<WelcomeScreen
v-if="messages.length === 0"
v-if="visibleMessages.length === 0"
@select="$emit('select-suggestion', $event)"
/>
@@ -12,12 +12,12 @@
<div class="messages-wrapper">
<TransitionGroup name="message">
<MessageBubble
v-for="(message, index) in messages"
v-for="(message, index) in visibleMessages"
:key="message.id"
:message="message"
:show-timestamp="showTimestamp"
:compact="compact"
:is-New="index === messages.length - 1"
:is-New="index === visibleMessages.length - 1"
@retry="$emit('retry', message.id)"
@regenerate="$emit('regenerate', message.id)"
@copy="handleCopy(message)"
@@ -62,12 +62,13 @@
</template>
<script setup lang="ts">
import { ref, watch, nextTick, onMounted } from "vue";
import { ref, watch, nextTick, onMounted, computed } from "vue";
import { useChatStore } from "@/stores/chat";
import MessageBubble from "@/components/message/MessageBubble.vue";
import WelcomeScreen from "./WelcomeScreen.vue";
import { Bot, ChevronDown } from "@/components/icons";
import type { Message, Attachment, VideoInfo, Suggestion } from "@/types/chat";
import { MessageRole } from "@/types/chat";
const props = withDefaults(
defineProps<{
@@ -83,6 +84,13 @@ const props = withDefaults(
},
);
// Hide system messages from the rendered list
const visibleMessages = computed(() => {
return props.messages.filter(
(message) => message.role !== MessageRole.SYSTEM
);
});
const emit = defineEmits<{
retry: [messageId: string];
regenerate: [messageId: string];
@@ -175,7 +183,7 @@ function handleDownloadFile(file: Attachment) {
// Scroll to the bottom when a new message is appended
watch(
() => props.messages.length,
() => visibleMessages.value.length,
(newLen, oldLen) => {
if (newLen > oldLen) {
if (isAutoScrolling.value) {
@@ -191,7 +199,7 @@ watch(
// Follow streaming updates to the last visible message
watch(
() => props.messages[props.messages.length - 1]?.content.text,
() => visibleMessages.value[visibleMessages.value.length - 1]?.content.text,
() => {
if (isAutoScrolling.value) {
nextTick(() => {

View File

@@ -8,9 +8,9 @@
</div>
<div class="logo-glow"></div>
</div>
<h1 class="title">Kexue AI Assistant</h1>
<h1 class="title">University Education Assistant</h1>
<p class="subtitle">
Are college students "learning themselves useless" with GPT? Study mode refuses to give answers directly and guides students to think
Awaiting input (placeholder text, repeated)
</p>
</div>

View File

@@ -251,7 +251,8 @@ function autoResize() {
textarea.style.height = "auto";
const maxHeight = isExpanded.value ? 400 : 160;
textarea.style.height = `${Math.min(textarea.scrollHeight, maxHeight)}px`;
// Add 1px to the measured height to avoid a rounding-induced scrollbar
textarea.style.height = `${Math.min(textarea.scrollHeight, maxHeight)+1}px`;
}
//
@@ -631,7 +632,7 @@ onMounted(() => {
textarea {
width: 100%;
min-height: 24px;
min-height: 25px;
max-height: 160px;
padding: 8px 0;
border: none;

View File

@@ -163,7 +163,7 @@ async function textCopy(data: any) {
/* Collapsible content */
.thinking-content {
max-height: 2000px;
overflow: hidden;
overflow: auto;
transition:
max-height 0.35s ease,
opacity 0.25s ease;

View File

@@ -202,6 +202,7 @@ import FormSelect from "@/components/ui/FormSelect.vue";
import { MessageSquare, X, Check, Trash2 } from "@/components/icons";
import { chatApi } from "@/services/api.ts";
import type { ConversationSettings } from "@/types/chat";
import { MessageRole, MessageType } from "@/types/chat";
const chatStore = useChatStore();
const settingsStore = useSettingsStore();
@@ -359,6 +360,25 @@ function handleSave() {
// Persist the conversation settings
chatStore.updateConversationSettings(conversation.value.id, convSettings);
// Sync the system prompt into the conversation's system message
if (localSettings.value.systemPrompt) {
const messages = conversation.value.messages || [];
const systemMsgIndex = messages.findIndex((m: any) => m.role === MessageRole.SYSTEM);
if (systemMsgIndex >= 0) {
// Update the existing system message in place
chatStore.updateMessage(messages[systemMsgIndex].id, {
content: {
type: MessageType.TEXT,
text: localSettings.value.systemPrompt,
},
});
} else {
// No system message yet; insert one
chatStore.addSystemMessage(conversation.value.id, localSettings.value.systemPrompt);
}
}
close();
//

View File

@@ -121,21 +121,36 @@ class ChatApi {
}
// Translate the simplified frontend request into an OpenAI-compatible request body
// Build the messages array (system + history + current user message)
const systemMessage = {
role: "system",
content:
request.systemPrompt ||
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。",
};
const currentUserMessage = {
role: "user",
content: userContent,
};
const allMessages =
request.history && request.history.length > 0
? [systemMessage, ...request.history, currentUserMessage]
: [systemMessage, currentUserMessage];
// Check whether the history already contains a system message
const historyHasSystem = request.history?.some((m) => m.role === "system");
// Build the messages array
let allMessages: Array<{ role: string; content: any }>;
if (request.history && request.history.length > 0) {
// If the history already has a system message, use the history as-is
if (historyHasSystem) {
allMessages = [...request.history, { role: "user", content: userContent }];
} else {
// Otherwise prepend a system message
const systemMessage = {
role: "system",
content:
request.systemPrompt ||
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。",
};
allMessages = [systemMessage, ...request.history, { role: "user", content: userContent }];
}
} else {
// No history; start with a system message
const systemMessage = {
role: "system",
content:
request.systemPrompt ||
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。",
};
allMessages = [systemMessage, { role: "user", content: userContent }];
}
const openAiRequest = {
model: request.model || "glm-4-flash",

View File

@@ -290,6 +290,38 @@ export const useChatStore = defineStore("chat", () => {
return message;
}
// Add a system message (prepended to the message list)
async function addSystemMessage(
conversationId: string,
systemPrompt: string
): Promise<Message> {
const conversation = conversations.value.find((c) => c.id === conversationId);
if (!conversation) {
throw new Error("Conversation not found");
}
const message: Message = {
id: generateId(),
role: MessageRole.SYSTEM,
content: { type: "text" as const, text: systemPrompt },
timestamp: Date.now(),
isStreaming: false,
} as Message;
// Insert the system message at the head of the list
conversation.messages.unshift(message);
conversation.updatedAt = Date.now();
// Persist asynchronously
try {
await conversationApi.addMessage(conversationId, message);
} catch (error) {
console.error("Failed to save system message:", error);
}
return message;
}
// Update a message
async function updateMessage(messageId: string, updates: Partial<Message>) {
const conversation = currentConversation.value;
@@ -479,6 +511,7 @@ export const useChatStore = defineStore("chat", () => {
renameConversation,
updateConversationSettings,
addMessage,
addSystemMessage,
updateMessage,
updateMessageContent,
saveConversation,