Merge branch 'fix/web-search-and-css'

This commit is contained in:
SuperManTouX 2026-03-12 14:13:22 +08:00
commit 3b7a831840
16 changed files with 510 additions and 180 deletions

8
.gitignore vendored
View File

@ -16,13 +16,10 @@ uploads
.venv
__pycache__
.claude
*.db
.trae
.agent
.agents
# Editor directories and files
.vscode/*
@ -34,7 +31,6 @@ __pycache__
*.njsproj
*.sln
*.sw?
# Skills
.skills
@ -42,9 +38,7 @@ __pycache__
.agents
.trae
skills-lock.json
*.db
server/data/*.db
tsconfig.tsbuildinfo

View File

@ -10,6 +10,7 @@ from typing import Dict, List
from fastapi.responses import JSONResponse, StreamingResponse
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode
from core import get_logger
logger = get_logger()
@ -29,7 +30,7 @@ DASHSCOPE_MODELS = [
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
),
@ -40,7 +41,7 @@ DASHSCOPE_MODELS = [
max_tokens=8192,
provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_web_search=True,
supports_vision=True,
supports_files=False,
),
@ -51,7 +52,7 @@ DASHSCOPE_MODELS = [
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
),
@ -188,7 +189,7 @@ class DashScopeAdapter(BaseAdapter):
chunk_count = 0
error_occurred = False
# 构建 API 调用参数
# 打印 API 调用参数
api_params = {
"model": request.model,
"messages": messages,
@ -197,6 +198,13 @@ class DashScopeAdapter(BaseAdapter):
"max_tokens": request.max_tokens,
"result_format": "message",
}
# 使用统一网络搜索配置
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# 添加深度思考参数
if thinking_enabled:
@ -330,6 +338,13 @@ class DashScopeAdapter(BaseAdapter):
"max_tokens": request.max_tokens,
"result_format": "message",
}
# 使用统一网络搜索配置
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# 添加深度思考参数
if thinking_enabled:
@ -531,6 +546,13 @@ class DashScopeAdapter(BaseAdapter):
"max_tokens": request.max_tokens,
"temperature": request.temperature,
}
# 使用统一网络搜索配置
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# 添加深度思考参数
if thinking_enabled:
@ -679,6 +701,13 @@ class DashScopeAdapter(BaseAdapter):
"enable_thinking": False,
"temperature": request.temperature,
}
# 使用统一网络搜索配置
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# 添加深度思考参数
if thinking_enabled:

View File

@ -11,6 +11,7 @@ from typing import Dict, List, Optional
from fastapi.responses import JSONResponse, StreamingResponse
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode, build_glm_search_tool
from core import get_logger
logger = get_logger()
@ -24,7 +25,7 @@ GLM_MODELS = [
max_tokens=128000,
provider="ZhipuAI",
supports_thinking=True,
supports_web_search=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
),
@ -64,11 +65,11 @@ GLM_MODELS = [
ModelInfo(
id="glm-z1-flash",
name="GLM-Z1 Flash",
description="深度思考推理模型",
description="深度思考推理模型,默认开启深度思考",
max_tokens=128000,
provider="ZhipuAI",
supports_thinking=True,
supports_web_search=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
),
@ -129,10 +130,10 @@ class GLMAdapter(BaseAdapter):
# 构建额外参数
extra_kwargs = {}
web_search = self._get_web_search_mode(request)
web_search_mode = get_web_search_mode(request)
if web_search:
extra_kwargs["tools"] = [self._build_web_search_tool(web_search)]
if web_search_mode:
extra_kwargs["tools"] = [build_glm_search_tool(web_search_mode)]
extra_kwargs["tool_choice"] = "auto"
# 深度思考正向选择True 时启用False 时禁用)
@ -260,46 +261,6 @@ class GLMAdapter(BaseAdapter):
"""检查模型是否支持深度思考"""
return model.lower() in THINKING_MODELS
def _get_web_search_mode(self, request: ChatCompletionRequest) -> str:
"""获取联网搜索模式"""
if request.deep_search:
return "deep"
elif request.web_search:
return "simple"
return ""
def _build_web_search_tool(self, mode: str) -> Dict:
"""构建联网搜索工具"""
from datetime import datetime
today = datetime.now().strftime("%Y年%m月%d")
if mode == "deep":
# 深度搜索:返回搜索结果详情
return {
"type": "web_search",
"web_search": {
"enable": True,
"search_engine": "search_pro",
"search_result": True,
"search_prompt": f"你是一位智能助手。请用简洁的语言总结网络搜索{{search_result}}中的关键信息,按重要性排序并引用来源日期。今天的日期是{today}",
"count": 5,
"search_recency_filter": "noLimit",
"content_size": "high",
},
}
else:
# 简单搜索
return {
"type": "web_search",
"web_search": {
"enable": True,
"search_engine": "search_pro",
"search_result": True,
"count": 5,
},
}
def _stream_chat(
self, client, messages, model, request, extra_kwargs
) -> StreamingResponse:

View File

@ -10,6 +10,7 @@ from typing import Dict, List, Optional
from fastapi.responses import JSONResponse, StreamingResponse
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode, build_openai_search_tool, execute_tavily_search, get_current_time_info
from core import get_logger
logger = get_logger()
@ -155,6 +156,21 @@ class OpenAIAdapter(BaseAdapter):
# 构建消息
messages = self._build_messages(request)
# 统一添加联网搜索插件参数
web_search_mode = get_web_search_mode(request)
if web_search_mode:
# 注入当前时间信息到 System Prompt 中,以便模型拥有时间感知能力
time_info = get_current_time_info()
has_system = False
for msg in messages:
if msg.get("role") == "system":
msg["content"] = f"当前系统时间:{time_info}\n" + str(msg.get("content", ""))
has_system = True
break
if not has_system:
messages.insert(0, {"role": "system", "content": f"当前系统时间:{time_info}"})
logger.info(
f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}"
)
@ -167,6 +183,10 @@ class OpenAIAdapter(BaseAdapter):
"max_tokens": request.max_tokens,
"stream": request.stream,
}
if web_search_mode:
search_tool = build_openai_search_tool(web_search_mode)
kwargs["tools"] = [search_tool]
# DeepSeek 深度思考支持
extra_body = None
@ -219,17 +239,27 @@ class OpenAIAdapter(BaseAdapter):
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
nonlocal kwargs
resp = client.chat.completions.create(**kwargs)
full_content = ""
full_reasoning = ""
chunk_count = 0
for chunk in resp:
if chunk.choices:
# 可能需要执行多轮对话(当发生工具调用时)
while True:
resp = client.chat.completions.create(**kwargs)
full_content = ""
full_reasoning = ""
chunk_count = 0
tool_calls = []
current_tool_call = None
for chunk in resp:
if not chunk.choices:
continue
chunk_count += 1
delta = chunk.choices[0].delta
# 1. 收集可能有内容/推理
delta_content = {}
if hasattr(delta, "content") and delta.content:
delta_content["content"] = delta.content
@ -238,7 +268,27 @@ class OpenAIAdapter(BaseAdapter):
delta_content["reasoning_content"] = delta.reasoning_content
full_reasoning += delta.reasoning_content
if delta_content:
# 2. 收集可能产生的 tool_calls (流式)
if hasattr(delta, "tool_calls") and delta.tool_calls:
for tool_call_chunk in delta.tool_calls:
idx = tool_call_chunk.index
# 确保 tool_calls 列表足够长
while len(tool_calls) <= idx:
tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
if tool_call_chunk.id:
tool_calls[idx]["id"] += tool_call_chunk.id
if tool_call_chunk.type:
# 对于 type, 因为 OpenAI 可能会传 chunks, 但通常只在第一块或者每块传, 为了避免 functionfunction, 使用赋值而非累加
tool_calls[idx]["type"] = tool_call_chunk.type
if tool_call_chunk.function:
if tool_call_chunk.function.name:
tool_calls[idx]["function"]["name"] += tool_call_chunk.function.name
if tool_call_chunk.function.arguments:
tool_calls[idx]["function"]["arguments"] += tool_call_chunk.function.arguments
# 3. 输出给前端普通文本
if delta_content and not tool_calls:
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
@ -253,28 +303,72 @@ class OpenAIAdapter(BaseAdapter):
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
# 检查此轮请求是否收到了完整工具调用,若是则执行搜索逻辑并追加继续请求,不再让外部函数退出
if tool_calls:
logger.info(f"[{provider_name}] 检测到流式中包含了工具调用进行拦截并处理: {json.dumps(tool_calls, ensure_ascii=False)}")
# 把大模型的工具调用请求也追加进去
assistant_msg = {
"role": "assistant",
"content": full_content or None, # 如果工具和普通内容同时存在也保留
"tool_calls": tool_calls
}
if full_reasoning:
assistant_msg["reasoning_content"] = full_reasoning
elif self._provider_type == "deepseek" and self._supports_thinking(kwargs["model"]):
# DeepSeek 推理模型在有工具调用时必须有 reasoning_content 字段
assistant_msg["reasoning_content"] = ""
kwargs["messages"].append(assistant_msg)
for tc in tool_calls:
if tc["function"]["name"] == "web_search":
try:
args = json.loads(tc["function"]["arguments"])
query = args.get("query", "")
mode = "deep" if "advanced" in str(kwargs.get("tools", [])) else "simple"
logger.info(f"[{provider_name}] 执行搜索插件: {query}")
search_result = execute_tavily_search(query, mode=mode)
except Exception as e:
search_result = f"获取搜索参数或执行搜索失败: {str(e)}"
logger.error(search_result)
# 把执行结果告诉大模型
kwargs["messages"].append({
"role": "tool",
"tool_call_id": tc["id"],
"name": "web_search",
"content": search_result
})
# 工具执行完毕,继续发起下一轮请求大模型归纳总结输出
continue
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": kwargs["model"],
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
}
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
# 如果没有工具调用或者全部分发完毕,正常结束给前端
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": kwargs["model"],
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
}
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
# 打印流式响应结果
logger.info(f"[{provider_name}] 流式响应完成:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
# 打印流式响应结果
logger.info(f"[{provider_name}] 流式响应完成:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
# 结束外层循环退出生成器
break
return StreamingResponse(generator(), media_type="text/event-stream")
@ -284,10 +378,58 @@ class OpenAIAdapter(BaseAdapter):
"""非流式聊天"""
from utils.helpers import generate_unique_id, get_current_timestamp
resp = client.chat.completions.create(**kwargs)
while True:
resp = client.chat.completions.create(**kwargs)
message = resp.choices[0].message
content = message.content or ""
message = resp.choices[0].message
# 判断是否涉及工具调用
if hasattr(message, "tool_calls") and message.tool_calls:
# 记录这轮的助手回复
assistant_msg = {"role": "assistant", "content": message.content or None}
# openai sdk 对象转 dict 存储 tool_calls
tool_calls_dict = []
for tc in message.tool_calls:
tc_dict = {
"id": tc.id,
"type": tc.type,
"function": {
"name": tc.function.name,
"arguments": tc.function.arguments
}
}
tool_calls_dict.append(tc_dict)
assistant_msg["tool_calls"] = tool_calls_dict
if hasattr(message, "reasoning_content") and message.reasoning_content:
assistant_msg["reasoning_content"] = message.reasoning_content
elif self._provider_type == "deepseek" and self._supports_thinking(kwargs["model"]):
# DeepSeek 推理模型在有工具调用时必须有 reasoning_content 字段
assistant_msg["reasoning_content"] = ""
kwargs["messages"].append(assistant_msg)
# 执行所有的工具调用
for tc in tool_calls_dict:
if tc["function"]["name"] == "web_search":
try:
args = json.loads(tc["function"]["arguments"])
query = args.get("query", "")
mode = "deep" if "advanced" in str(kwargs.get("tools", [])) else "simple"
search_result = execute_tavily_search(query, mode=mode)
except Exception as e:
search_result = f"执行搜索失败: {str(e)}"
# 把执行结果追加到消息中
kwargs["messages"].append({
"role": "tool",
"tool_call_id": tc["id"],
"name": "web_search",
"content": search_result
})
# 工具调用完成,发起下一轮请求获取归纳答案
continue
# 处理普通的文本回复
content = message.content or ""
response = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion",

119
server/adapters/plugins.py Normal file
View File

@ -0,0 +1,119 @@
import os
import urllib.request
import json
from typing import Dict
from datetime import datetime
from .base import ChatCompletionRequest
def get_current_time_info() -> str:
    """Return the current date/time plus the Chinese weekday name as one string."""
    _WEEKDAYS = ("星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日")
    now = datetime.now()
    # Format the timestamp first, then append the weekday looked up by index.
    stamp = now.strftime('%Y年%m月%d%H:%M:%S')
    return f"{stamp} {_WEEKDAYS[now.weekday()]}"
def get_web_search_mode(request: ChatCompletionRequest) -> str:
    """Resolve the unified web-search mode for a chat request.

    Returns "deep" when deep search is requested, "simple" for plain web
    search, and "" when search is disabled. Deep search takes precedence.
    """
    # Check flags in priority order; missing attributes count as disabled.
    for attr, mode in (("deep_search", "deep"), ("web_search", "simple")):
        if getattr(request, attr, False):
            return mode
    return ""
def execute_tavily_search(query: str, mode: str = "simple") -> str:
    """真实调用 Tavily 搜索 API

    Perform a real web search through the Tavily REST API.

    Args:
        query: Search keywords or phrase.
        mode: "deep" requests advanced search depth and more results;
            any other value uses basic depth.

    Returns:
        A formatted, numbered result list, or a human-readable error
        message string. This function never raises — failures are
        reported as strings so the tool-call loop can feed them back
        to the model.
    """
    api_key = os.getenv("TAVILY_API_KEY")
    if not api_key:
        return "本地环境变量 TAVILY_API_KEY 未配置,无法进行搜索。"
    url = "https://api.tavily.com/search"
    headers = {"Content-Type": "application/json"}
    data = {
        "api_key": api_key,
        "query": query,
        "search_depth": "advanced" if mode == "deep" else "basic",
        "include_answer": False,
        "max_results": 5 if mode == "deep" else 3,
    }
    req = urllib.request.Request(
        url,
        data=json.dumps(data).encode('utf-8'),
        headers=headers,
        method='POST',
    )
    try:
        # Fix: bound the request with an explicit timeout so a stalled
        # search API cannot hang the adapter's request handler forever.
        with urllib.request.urlopen(req, timeout=15) as response:
            result = json.loads(response.read().decode('utf-8'))
        results = result.get("results", [])
        if not results:
            return "搜索未返回结果。"
        formatted_res = [
            f"[{i+1}] {res.get('title')}\n{res.get('content')}\n链接: {res.get('url')}"
            for i, res in enumerate(results)
        ]
        return "\n\n".join(formatted_res)
    except Exception as e:
        return f"搜索请求失败,错误: {str(e)}"
def build_openai_search_tool(mode: str) -> Dict:
    """
    构建兼容型联网搜索插件工具结构 ( DeepSeek / OpenAI SDK 使用)
    注意此类提供标准的 Tool/Function Function calling 模板
    深度思考通常结合内置联网或者其他外挂流程实现

    Args:
        mode: "deep" for the deep-search variant, anything else for simple.

    Returns:
        An OpenAI-style function tool definition named "web_search".
    """
    if mode == "deep":
        return {
            "type": "function",
            "function": {
                "name": "web_search",
                "description": "深度互联网搜索插件(查找并阅读网页内容)",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            # Fix: document the argument so the model fills it reliably.
                            "description": "要搜索的准确关键词或短语"
                        }
                    },
                    # Fix: mark "query" required (was missing), consistent with the
                    # simple variant — otherwise the model may emit empty tool calls.
                    "required": ["query"]
                },
            }
        }
    else:
        return {
            "type": "function",
            "function": {
                "name": "web_search",
                "description": "进行互联网搜索并获取实时信息或资料以辅助回答。",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "要搜索的准确关键词或短语"
                        }
                    },
                    "required": ["query"]
                },
            }
        }
def build_glm_search_tool(mode: str) -> Dict:
    """Build the GLM built-in web_search tool payload.

    "deep" mode additionally returns result details with a summarization
    prompt that embeds the current date; any other mode builds the plain
    search configuration.
    """
    today = get_current_time_info()
    # Options shared by both variants.
    options: Dict = {
        "enable": True,
        "search_engine": "search_pro",
        "search_result": True,
        "count": 5,
    }
    if mode == "deep":
        # Deep search: ask GLM to summarize results, date-aware, full content.
        options.update(
            search_prompt=f"你是一位智能助手。请用简洁的语言总结网络搜索{{search_result}}中的关键信息,按重要性排序并引用来源日期。今天的日期是{today}",
            search_recency_filter="noLimit",
            content_size="high",
        )
    return {"type": "web_search", "web_search": options}

View File

@ -0,0 +1,46 @@
import json
import os
import sys
import urllib.error
import urllib.request
def test_tavily(api_key: str):
    """Run one test search against the Tavily API and print the outcome.

    Args:
        api_key: Tavily API key (normally starts with "tvly-").

    Prints either the returned results, an HTTP-error diagnosis (usually a
    bad key), or any other error. Never raises.
    """
    url = "https://api.tavily.com/search"
    headers = {
        "Content-Type": "application/json"
    }
    data = {
        "api_key": api_key,
        "query": "武汉明天的天气",
        "search_depth": "basic",
        "include_answer": False,
        "max_results": 3
    }
    # 模拟请求
    req = urllib.request.Request(
        url,
        data=json.dumps(data).encode('utf-8'),
        headers=headers,
        method='POST',
    )
    try:
        # Fix: explicit timeout so an unreachable endpoint cannot hang the script.
        with urllib.request.urlopen(req, timeout=15) as response:
            result = json.loads(response.read().decode('utf-8'))
        print("✅ Tavily API Key 测试成功!成功获取以下搜索结果:\n")
        for i, res in enumerate(result.get("results", [])):
            print(f"[{i+1}] 标题: {res.get('title')}")
            print(f" 内容: {res.get('content')}")
            print(f" 链接: {res.get('url')}\n")
    # Fix: `urllib.error` is now imported explicitly at the top of the file —
    # previously this relied on `import urllib.request` pulling it in as a side effect.
    except urllib.error.HTTPError as e:
        print(f"❌ 请求失败HTTP 错误代码: {e.code}")
        print("这通常意味着您的 API Key 错误或无效。详细信息:")
        error_msg = e.read().decode('utf-8')
        print(error_msg)
    except Exception as e:
        print(f"❌ 发生其他错误: {str(e)}")
# Script entry point: prompt interactively for a Tavily API key, then run a
# single test search against the live API via test_tavily().
if __name__ == "__main__":
    # Surrounding whitespace in the pasted key is ignored.
    key = input("请输入您的 Tavily API Key (以 tvly- 开头): ").strip()
    if not key:
        # Nothing entered — abort with a non-zero exit code.
        print("未输入 Key程序退出。")
        sys.exit(1)
    print("\n正在连接 Tavily 进行测试搜索...")
    test_tavily(key)

View File

@ -180,6 +180,8 @@ async function handleSend(
webSearch?: boolean;
deepThinking?: boolean;
systemPrompt?: string;
skipUserMessage?: boolean;
conversationTitle?: string;
},
) {
//
@ -220,7 +222,13 @@ async function handleSend(
//
if (!currentConversation.value) {
await chatStore.createConversation();
await chatStore.createConversation(options?.conversationTitle || text);
} else if (currentConversation.value.title === "新对话") {
// ""
chatStore.renameConversation(
currentConversation.value.id,
options?.conversationTitle || text
);
}
// 使使
@ -245,13 +253,15 @@ async function handleSend(
.slice(-(MAX_HISTORY_ROUNDS * 2))
.map((m: any) => ({ role: m.role, content: m.content.text }));
//
await chatStore.addMessage(MessageRole.USER, {
type: MessageType.TEXT,
text,
images: attachments.filter((a) => a.type === "image"),
files: attachments.filter((a) => a.type === "file"),
});
//
if (!options?.skipUserMessage) {
await chatStore.addMessage(MessageRole.USER, {
type: MessageType.TEXT,
text,
images: attachments.filter((a) => a.type === "image"),
files: attachments.filter((a) => a.type === "file"),
});
}
// AI
const aiMessage = await chatStore.addMessage(MessageRole.ASSISTANT, {
@ -279,7 +289,7 @@ async function handleSend(
const stream = chatApi.streamChat(
{
message: text,
message: options?.skipUserMessage ? "直接输出系统提示词要求你的回答" : text,
conversationId: currentConversation.value?.id || "",
images: imageUrls,
files: fileUrls,
@ -485,7 +495,11 @@ function handleRegenerate(messageId: string) {
}
function handleSuggestion(suggestion: Suggestion) {
handleSend(suggestion.text, [], { systemPrompt: suggestion.systemPrompt });
handleSend(suggestion.text, [], {
systemPrompt: suggestion.systemPrompt,
skipUserMessage: true,
conversationTitle: suggestion.text,
});
}
function focusInput() {

View File

@ -229,7 +229,10 @@ defineExpose({
});
onMounted(() => {
scrollToBottom(false);
//
if (visibleMessages.value.length > 0) {
scrollToBottom(false);
}
});
</script>

View File

@ -2,7 +2,7 @@
<div class="welcome-screen">
<!-- Logo 和标题 -->
<div class="welcome-header">
<h1 class="title">教研聊天助手</h1>
<h1 class="title">学习研聊天助手</h1>
</div>
<!-- 功能卡片 -->

View File

@ -54,6 +54,7 @@
v-model="inputText"
:placeholder="placeholder"
:rows="1"
@beforeinput="handleBeforeInput"
@input="autoResize"
@focus="isFocused = true"
@blur="isFocused = false"
@ -90,13 +91,18 @@
<!-- 底部工具栏 -->
<div class="input-toolbar">
<div class="toolbar-left">
<!-- 展开/收起 -->
<button class="toolbar-btn" title="展开输入框" @click="toggleExpand">
<Maximize2 v-if="!isExpanded" :size="16" />
<Minimize2 v-else :size="16" />
</button>
<!-- 深度思考开关 -->
<button
class="toolbar-btn"
:class="{ active: isDeepThinking, disabled: !supports_thinking }"
:disabled="!supports_thinking"
:title="supports_thinking ? '深度思考' : '当前模型不支持深度思考'"
@click="supports_thinking && toggleDeepThink()"
:class="{ active: isDeepThinking, disabled: isForceDeepThinkingModel || !supports_thinking }"
:disabled="isForceDeepThinkingModel || !supports_thinking"
:title="isForceDeepThinkingModel ? '当前模型强制开启深度思考' : (supports_thinking ? '深度思考' : '当前模型不支持深度思考')"
@click="!isForceDeepThinkingModel && supports_thinking && toggleDeepThink()"
>
<Brain :size="16" />
<span>深度思考</span>
@ -125,24 +131,6 @@
<Globe :size="16" />
<span>联网搜索</span>
</button>
<!-- 展开/收起 -->
<button class="toolbar-btn" title="展开输入框" @click="toggleExpand">
<Maximize2 v-if="!isExpanded" :size="16" />
<Minimize2 v-else :size="16" />
</button>
</div>
<div class="toolbar-right">
<span
class="char-count"
:class="{ warning: charCount > maxChars * 0.9 }"
>
{{ charCount }} / {{ maxChars }}
</span>
<span class="send-hint">
{{ sendOnEnter ? "Enter 发送, Shift+Enter 换行" : "Ctrl+Enter 发送" }}
</span>
</div>
</div>
</div>
@ -190,7 +178,7 @@ const props = withDefaults(
placeholder: "输入你的问题...",
isStreaming: false,
sendOnEnter: false,
maxChars: 4000,
maxChars: 10000,
disabled: false,
//
supports_thinking: true,
@ -200,6 +188,14 @@ const props = withDefaults(
},
);
//
const FORCE_DEEP_THINKING_MODELS = ["deepseek-reasoner", "glm-z1-flash"];
//
const isForceDeepThinkingModel = computed(() => {
return FORCE_DEEP_THINKING_MODELS.includes(modelName.value.toLowerCase());
});
const emit = defineEmits<{
send: [
text: string,
@ -212,6 +208,7 @@ const emit = defineEmits<{
//
const authStore = useAuthStore();
const settingsStore = useSettingsStore();
const modelName = computed(() => settingsStore.settings.defaultModel);
const inputText = ref("");
const attachments = ref<AttachmentWithProgress[]>([]);
@ -231,6 +228,18 @@ const textareaRef = ref<HTMLTextAreaElement | null>(null);
const fileInputRef = ref<HTMLInputElement | null>(null);
const imageInputRef = ref<HTMLInputElement | null>(null);
// toast
let lastToastTime = 0;
const toastThrottleMs = 2000;
function showThrottledToast(message: string, type: "error" = "error") {
const now = Date.now();
if (now - lastToastTime >= toastThrottleMs) {
lastToastTime = now;
window.$toast?.(message, type);
}
}
//
const charCount = computed(() => inputText.value.length);
const isUploading = computed(() => attachments.value.some((a) => a.uploading));
@ -251,7 +260,29 @@ function autoResize() {
textarea.style.height = "auto";
const maxHeight = isExpanded.value ? 400 : 160;
// 1px
textarea.style.height = `${Math.min(textarea.scrollHeight, maxHeight)+1}px`;
textarea.style.height = `${Math.min(textarea.scrollHeight, maxHeight) + 1}px`;
}
//
function handleBeforeInput(event: InputEvent) {
// 退
if (!event.data) return;
//
const currentLength = inputText.value.length;
const insertLength = event.data?.length || 0;
const selectionStart =
(event.target as HTMLTextAreaElement).selectionStart || 0;
const selectionEnd = (event.target as HTMLTextAreaElement).selectionEnd || 0;
const selectedLength = selectionEnd - selectionStart;
//
const newLength = currentLength - selectedLength + insertLength;
if (newLength > props.maxChars) {
event.preventDefault();
showThrottledToast(`已超${props.maxChars}字上限,请删除部分内容`);
}
}
//
@ -298,6 +329,22 @@ async function handlePaste(event: ClipboardEvent) {
const items = event.clipboardData?.items;
if (!items) return;
//
const text = event.clipboardData?.getData("text");
if (text) {
const textarea = event.target as HTMLTextAreaElement;
const selectionStart = textarea.selectionStart || 0;
const selectionEnd = textarea.selectionEnd || 0;
const selectedLength = selectionEnd - selectionStart;
const newLength = inputText.value.length - selectedLength + text.length;
if (newLength > props.maxChars) {
event.preventDefault();
showThrottledToast(`已超${props.maxChars}字上限,请删除部分内容`);
return;
}
}
for (const item of items) {
if (item.type.startsWith("image/")) {
event.preventDefault();
@ -350,7 +397,7 @@ async function addFileAsAttachment(
) {
//
if (!authStore.isAuthenticated) {
window.$toast?.('请先登录', 'error');
window.$toast?.("请先登录", "error");
return;
}
@ -414,17 +461,17 @@ async function removeAttachment(id: string) {
const attachment = attachments.value[index];
// OSS blob URL OSS
if (attachment.url && !attachment.url.startsWith('blob:')) {
if (attachment.url && !attachment.url.startsWith("blob:")) {
try {
await chatApi.deleteAttachment(attachment.url);
} catch (error) {
console.error('删除 OSS 文件失败:', error);
console.error("删除 OSS 文件失败:", error);
// 使
}
}
// blob URL
if (attachment.url.startsWith('blob:')) {
if (attachment.url.startsWith("blob:")) {
URL.revokeObjectURL(attachment.url);
}
@ -491,10 +538,20 @@ watch(inputText, () => {
//
watch(
() => settingsStore.settings.defaultModel,
() => {
(newModel) => {
//
if (FORCE_DEEP_THINKING_MODELS.includes(newModel.toLowerCase())) {
isDeepThinking.value = true;
localStorage.setItem("isDeepThinking", "true");
} else {
isDeepThinking.value = false;
localStorage.setItem("isDeepThinking", "false");
}
//
isDeepSearch.value = false;
isDeepThinking.value = false;
isWebSearch.value = false;
localStorage.setItem("isDeepSearch", "false");
localStorage.setItem("isWebSearch", "false");
},
);
@ -718,26 +775,6 @@ onMounted(() => {
}
}
.toolbar-right {
display: flex;
align-items: center;
gap: 16px;
}
.char-count {
font-size: 12px;
color: #9ca3af;
&.warning {
color: #f59e0b;
}
}
.send-hint {
font-size: 12px;
color: #9ca3af;
}
@keyframes pulse {
0%,
100% {

View File

@ -24,14 +24,14 @@
<!-- 消息内容区域 -->
<div class="message-content-wrapper">
<!-- 角色名称 -->
<div class="message-header">
<!-- <div class="message-header">
<span class="role-name">
{{ message.role === "assistant" ? "AI 助手" : "你" }}
</span>
<span v-if="showTimestamp" class="timestamp">
{{ formattedTime }}
</span>
</div>
</div> -->
<!-- 消息主体 -->
<div class="message-body">
@ -183,7 +183,7 @@
<script setup lang="ts">
import { useClipboard } from "@vueuse/core";
import { ref, computed } from "vue";
import { ref } from "vue";
// markstream-vue
import MarkdownRender from "markstream-vue";
import { setCustomComponents } from "markstream-vue";
@ -197,7 +197,7 @@ import {
Play,
} from "@/components/icons";
import MessageActions from "./MessageActions.vue";
import { formatTimestamp, formatFileSize, getFileIcon } from "@/utils/helpers";
import { formatFileSize, getFileIcon } from "@/utils/helpers";
import type { Message, Suggestion, Attachment, VideoInfo } from "@/types/chat";
import ThinkingNode from "./components/ThinkingNode.vue";
import EChartsContainerNode from "./components/EChartsContainerNode.vue";
@ -229,10 +229,6 @@ const emit = defineEmits<{
const isHovered = ref(false);
const formattedTime = computed(() => {
return formatTimestamp(props.message.timestamp);
});
function getFileEmoji(mimeType?: string) {
return getFileIcon(mimeType || "");
}

View File

@ -124,21 +124,21 @@
</div>
<!-- 底部操作 -->
<div class="sidebar-footer">
<!-- <div class="sidebar-footer">
<button class="footer-btn" @click="toggleTheme" title="切换主题">
<Sun v-if="currentTheme === 'light'" :size="18" />
<Moon v-else-if="currentTheme === 'dark'" :size="18" />
<Monitor v-else :size="18" />
</button>
<!-- 键盘快捷键 -->
<!-- <button class="footer-btn" @click="openShortcuts" title="快捷键">
键盘快捷键
<button class="footer-btn" @click="openShortcuts" title="快捷键">
<Keyboard :size="18" />
</button> -->
<!-- dev人员可用 -->
<!-- <button class="footer-btn" @click="openSettings" title="设置">
</button>
dev人员可用
<button class="footer-btn" @click="openSettings" title="设置">
<Settings :size="18" />
</button> -->
</div>
</button>
</div> -->
</div>
<!-- 拖拽调整宽度 -->
@ -157,9 +157,6 @@ import {
Plus,
Pin,
MessageSquare,
Sun,
Moon,
Monitor,
Sparkles,
ChevronDown,
Check,
@ -279,7 +276,6 @@ if (typeof window !== "undefined") {
position: relative;
height: 100vh;
background: #ffffff;
border-right: 1px solid #e2e8f0;
transition: width 0.3s ease;
overflow: hidden;
flex-shrink: 0;
@ -689,9 +685,5 @@ if (typeof window !== "undefined") {
height: 100%;
cursor: col-resize;
z-index: 10;
&:hover {
background: rgba(59, 130, 246, 0.3);
}
}
</style>

View File

@ -43,8 +43,6 @@ export const useAuthStore = defineStore('auth', () => {
if (data.success && data.data) {
window.$toast?.(`登录成功, 欢迎 ${data.data.nickname || data.data.username}`, 'success');
return data.data;
}else{
window.$toast?.('[Auth] Token 验证失败:Token无效');

View File

@ -79,10 +79,10 @@ export const useChatStore = defineStore("chat", () => {
}
// 创建对话
async function createConversation(): Promise<string> {
async function createConversation(title?: string): Promise<string> {
const newConversation: Conversation = {
id: generateId(),
title: "新对话",
title: title || "新对话",
messages: [],
createdAt: Date.now(),
updatedAt: Date.now(),

View File

@ -11,7 +11,7 @@ export const useSettingsStore = defineStore("settings", () => {
fontSize: "medium",
// 对话设置
sendOnEnter: false,
sendOnEnter: true,
showTimestamp: true,
compactMode: false,

View File

@ -1 +0,0 @@
{"root":["./src/main.ts","./src/components/icons/index.ts","./src/composables/useKeyboard.ts","./src/services/api.ts","./src/services/authService.ts","./src/services/conversationApi.ts","./src/services/request.ts","./src/stores/auth.ts","./src/stores/chat.ts","./src/stores/settings.ts","./src/types/chat.ts","./src/utils/helpers.ts","./src/utils/migrateData.ts","./src/App.vue","./src/components/chat/ChatHeader.vue","./src/components/chat/ChatMain.vue","./src/components/chat/MessageList.vue","./src/components/chat/WelcomeScreen.vue","./src/components/input/AttachmentPreview.vue","./src/components/input/ChatInput.vue","./src/components/message/CodeBlock.vue","./src/components/message/MessageActions.vue","./src/components/message/MessageBubble.vue","./src/components/message/components/EChartsContainerNode.vue","./src/components/message/components/Loading.vue","./src/components/message/components/ThinkingNode.vue","./src/components/modals/ConversationSettingsModal.vue","./src/components/modals/SearchModal.vue","./src/components/modals/SettingsModal.vue","./src/components/modals/ShortcutsModal.vue","./src/components/sidebar/ChatSidebar.vue","./src/components/sidebar/ConversationItem.vue","./src/components/ui/FormSelect.vue","./src/components/ui/FormSlider.vue","./src/components/ui/FormSwitch.vue"],"errors":true,"version":"5.9.3"}