Compare commits

..

No commits in common. "feat/kexue-ui" and "main" have entirely different histories.

66 changed files with 2496 additions and 3343 deletions

2
.gitignore vendored
View File

@ -41,6 +41,4 @@ skills-lock.json
*.db
server/data/*.db
scripts/deploy-frontend-standalone.sh
tsconfig.tsbuildinfo

188
package-lock.json generated
View File

@ -18,7 +18,6 @@
"lucide-vue-next": "^0.563.0",
"markstream-vue": "^0.0.7-beta.4",
"mermaid": "^11.12.2",
"naive-ui": "^2.44.1",
"pinia": "^3.0.4",
"shiki": "^3.22.0",
"stream-markdown": "^0.0.14",
@ -221,30 +220,6 @@
"integrity": "sha512-4mudFAQ6H+MqBTfqLmU7G1ZwRzCLfJEooL/fsF6rCX5eePMbGhoy5n4g+G4vlh2muDcsCTJtL+uKbOzWxs5LHA==",
"license": "Apache-2.0"
},
"node_modules/@css-render/plugin-bem": {
"version": "0.15.14",
"resolved": "https://mirrors.cloud.tencent.com/npm/@css-render/plugin-bem/-/plugin-bem-0.15.14.tgz",
"integrity": "sha512-QK513CJ7yEQxm/P3EwsI+d+ha8kSOcjGvD6SevM41neEMxdULE+18iuQK6tEChAWMOQNQPLG/Rw3Khb69r5neg==",
"license": "MIT",
"peerDependencies": {
"css-render": "~0.15.14"
}
},
"node_modules/@css-render/vue3-ssr": {
"version": "0.15.14",
"resolved": "https://mirrors.cloud.tencent.com/npm/@css-render/vue3-ssr/-/vue3-ssr-0.15.14.tgz",
"integrity": "sha512-//8027GSbxE9n3QlD73xFY6z4ZbHbvrOVB7AO6hsmrEzGbg+h2A09HboUyDgu+xsmj7JnvJD39Irt+2D0+iV8g==",
"license": "MIT",
"peerDependencies": {
"vue": "^3.0.11"
}
},
"node_modules/@emotion/hash": {
"version": "0.8.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/@emotion/hash/-/hash-0.8.0.tgz",
"integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==",
"license": "MIT"
},
"node_modules/@esbuild/aix-ppc64": {
"version": "0.27.2",
"resolved": "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz",
@ -766,12 +741,6 @@
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@juggle/resize-observer": {
"version": "3.4.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/@juggle/resize-observer/-/resize-observer-3.4.0.tgz",
"integrity": "sha512-dfLbk+PwWvFzSxwk3n5ySL0hfBog779o8h68wK/7/APo/7cgyWp5jcXockbxdk5kFRkbeXWm4Fbi9FrdN381sA==",
"license": "Apache-2.0"
},
"node_modules/@mermaid-js/parser": {
"version": "1.0.1",
"resolved": "https://mirrors.cloud.tencent.com/npm/@mermaid-js/parser/-/parser-1.0.1.tgz",
@ -1850,21 +1819,6 @@
"@types/unist": "*"
}
},
"node_modules/@types/lodash": {
"version": "4.17.24",
"resolved": "https://mirrors.cloud.tencent.com/npm/@types/lodash/-/lodash-4.17.24.tgz",
"integrity": "sha512-gIW7lQLZbue7lRSWEFql49QJJWThrTFFeIMJdp3eH4tKoxm1OvEPg02rm4wCCSHS0cL3/Fizimb35b7k8atwsQ==",
"license": "MIT"
},
"node_modules/@types/lodash-es": {
"version": "4.17.12",
"resolved": "https://mirrors.cloud.tencent.com/npm/@types/lodash-es/-/lodash-es-4.17.12.tgz",
"integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==",
"license": "MIT",
"dependencies": {
"@types/lodash": "*"
}
},
"node_modules/@types/mdast": {
"version": "4.0.4",
"resolved": "https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz",
@ -2884,12 +2838,6 @@
"url": "https://github.com/sponsors/sxzz"
}
},
"node_modules/async-validator": {
"version": "4.2.5",
"resolved": "https://mirrors.cloud.tencent.com/npm/async-validator/-/async-validator-4.2.5.tgz",
"integrity": "sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==",
"license": "MIT"
},
"node_modules/autoprefixer": {
"version": "10.4.24",
"resolved": "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.24.tgz",
@ -3225,22 +3173,6 @@
"node": ">= 8"
}
},
"node_modules/css-render": {
"version": "0.15.14",
"resolved": "https://mirrors.cloud.tencent.com/npm/css-render/-/css-render-0.15.14.tgz",
"integrity": "sha512-9nF4PdUle+5ta4W5SyZdLCCmFd37uVimSjg1evcTqKJCyvCEEj12WKzOSBNak6r4im4J4iYXKH1OWpUV5LBYFg==",
"license": "MIT",
"dependencies": {
"@emotion/hash": "~0.8.0",
"csstype": "~3.0.5"
}
},
"node_modules/css-render/node_modules/csstype": {
"version": "3.0.11",
"resolved": "https://mirrors.cloud.tencent.com/npm/csstype/-/csstype-3.0.11.tgz",
"integrity": "sha512-sa6P2wJ+CAbgyy4KFssIb/JNMLxFvKF1pCYCSXS8ZMuqZnMsrxqI2E5sPyoTpxoPU/gVZMzr2zjOfg8GIZOMsw==",
"license": "MIT"
},
"node_modules/css-tree": {
"version": "3.1.0",
"resolved": "https://registry.npmmirror.com/css-tree/-/css-tree-3.1.0.tgz",
@ -3768,25 +3700,6 @@
"lodash-es": "^4.17.21"
}
},
"node_modules/date-fns": {
"version": "4.1.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/date-fns/-/date-fns-4.1.0.tgz",
"integrity": "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==",
"license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/kossnocorp"
}
},
"node_modules/date-fns-tz": {
"version": "3.2.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/date-fns-tz/-/date-fns-tz-3.2.0.tgz",
"integrity": "sha512-sg8HqoTEulcbbbVXeg84u5UnlsQa8GS5QXMqjjYIhS4abEVVKIUwe0/l/UhrZdKaL/W5eWZNlbTeEIiOXTcsBQ==",
"license": "MIT",
"peerDependencies": {
"date-fns": "^3.0.0 || ^4.0.0"
}
},
"node_modules/dayjs": {
"version": "1.11.19",
"resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.19.tgz",
@ -4014,12 +3927,6 @@
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==",
"license": "MIT"
},
"node_modules/evtd": {
"version": "0.2.4",
"resolved": "https://mirrors.cloud.tencent.com/npm/evtd/-/evtd-0.2.4.tgz",
"integrity": "sha512-qaeGN5bx63s/AXgQo8gj6fBkxge+OoLddLniox5qtLAEY5HSnuSlISXVPxnSae1dWblvTh4/HoMIB+mbMsvZzw==",
"license": "MIT"
},
"node_modules/expect-type": {
"version": "1.3.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/expect-type/-/expect-type-1.3.0.tgz",
@ -4214,15 +4121,6 @@
"url": "https://opencollective.com/unified"
}
},
"node_modules/highlight.js": {
"version": "11.11.1",
"resolved": "https://mirrors.cloud.tencent.com/npm/highlight.js/-/highlight.js-11.11.1.tgz",
"integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==",
"license": "BSD-3-Clause",
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/hookable": {
"version": "5.5.3",
"resolved": "https://registry.npmmirror.com/hookable/-/hookable-5.5.3.tgz",
@ -4974,38 +4872,6 @@
"integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==",
"license": "MIT"
},
"node_modules/naive-ui": {
"version": "2.44.1",
"resolved": "https://mirrors.cloud.tencent.com/npm/naive-ui/-/naive-ui-2.44.1.tgz",
"integrity": "sha512-reo8Esw0p58liZwbUutC7meW24Xbn3EwNv91zReWKm2W4JPu+zfgJRn/F7aO0BFmvN+h2brA2M5lRvYqLq4kuA==",
"license": "MIT",
"dependencies": {
"@css-render/plugin-bem": "^0.15.14",
"@css-render/vue3-ssr": "^0.15.14",
"@types/lodash": "^4.17.20",
"@types/lodash-es": "^4.17.12",
"async-validator": "^4.2.5",
"css-render": "^0.15.14",
"csstype": "^3.1.3",
"date-fns": "^4.1.0",
"date-fns-tz": "^3.2.0",
"evtd": "^0.2.4",
"highlight.js": "^11.8.0",
"lodash": "^4.17.21",
"lodash-es": "^4.17.21",
"seemly": "^0.3.10",
"treemate": "^0.3.11",
"vdirs": "^0.1.8",
"vooks": "^0.2.12",
"vueuc": "^0.4.65"
},
"engines": {
"node": ">=20"
},
"peerDependencies": {
"vue": "^3.0.0"
}
},
"node_modules/nanoid": {
"version": "3.3.11",
"resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.11.tgz",
@ -5497,12 +5363,6 @@
"integrity": "sha512-6FtHJEvt+pVMIB9IBY+IcCJ6Z5f1iQnytgyfKMhDKgmzYG+TeH/wx1y3l27rshSbLiSanrR9ffZDrEsmjlQF2g==",
"license": "MIT"
},
"node_modules/seemly": {
"version": "0.3.10",
"resolved": "https://mirrors.cloud.tencent.com/npm/seemly/-/seemly-0.3.10.tgz",
"integrity": "sha512-2+SMxtG1PcsL0uyhkumlOU6Qo9TAQ/WyH7tthnPIOQB05/12jz9naq6GZ6iZ6ApVsO3rr2gsnTf3++OV63kE1Q==",
"license": "MIT"
},
"node_modules/semver": {
"version": "7.7.4",
"resolved": "https://mirrors.cloud.tencent.com/npm/semver/-/semver-7.7.4.tgz",
@ -5889,12 +5749,6 @@
"node": ">=6"
}
},
"node_modules/treemate": {
"version": "0.3.11",
"resolved": "https://mirrors.cloud.tencent.com/npm/treemate/-/treemate-0.3.11.tgz",
"integrity": "sha512-M8RGFoKtZ8dF+iwJfAJTOH/SM4KluKOKRJpjCMhI8bG3qB74zrFoArKZ62ll0Fr3mqkMJiQOmWYkdYgDeITYQg==",
"license": "MIT"
},
"node_modules/trim-lines": {
"version": "3.0.1",
"resolved": "https://registry.npmmirror.com/trim-lines/-/trim-lines-3.0.1.tgz",
@ -6201,18 +6055,6 @@
"uuid": "dist/esm/bin/uuid"
}
},
"node_modules/vdirs": {
"version": "0.1.8",
"resolved": "https://mirrors.cloud.tencent.com/npm/vdirs/-/vdirs-0.1.8.tgz",
"integrity": "sha512-H9V1zGRLQZg9b+GdMk8MXDN2Lva0zx72MPahDKc30v+DtwKjfyOSXWRIX4t2mhDubM1H09gPhWeth/BJWPHGUw==",
"license": "MIT",
"dependencies": {
"evtd": "^0.2.2"
},
"peerDependencies": {
"vue": "^3.0.11"
}
},
"node_modules/vfile": {
"version": "6.0.3",
"resolved": "https://registry.npmmirror.com/vfile/-/vfile-6.0.3.tgz",
@ -6397,18 +6239,6 @@
}
}
},
"node_modules/vooks": {
"version": "0.2.12",
"resolved": "https://mirrors.cloud.tencent.com/npm/vooks/-/vooks-0.2.12.tgz",
"integrity": "sha512-iox0I3RZzxtKlcgYaStQYKEzWWGAduMmq+jS7OrNdQo1FgGfPMubGL3uGHOU9n97NIvfFDBGnpSvkWyb/NSn/Q==",
"license": "MIT",
"dependencies": {
"evtd": "^0.2.2"
},
"peerDependencies": {
"vue": "^3.0.0"
}
},
"node_modules/vscode-jsonrpc": {
"version": "8.2.0",
"resolved": "https://mirrors.cloud.tencent.com/npm/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz",
@ -6593,24 +6423,6 @@
"typescript": ">=5.0.0"
}
},
"node_modules/vueuc": {
"version": "0.4.65",
"resolved": "https://mirrors.cloud.tencent.com/npm/vueuc/-/vueuc-0.4.65.tgz",
"integrity": "sha512-lXuMl+8gsBmruudfxnMF9HW4be8rFziylXFu1VHVNbLVhRTXXV4njvpRuJapD/8q+oFEMSfQMH16E/85VoWRyQ==",
"license": "MIT",
"dependencies": {
"@css-render/vue3-ssr": "^0.15.10",
"@juggle/resize-observer": "^3.3.1",
"css-render": "^0.15.10",
"evtd": "^0.2.4",
"seemly": "^0.3.6",
"vdirs": "^0.1.4",
"vooks": "^0.2.4"
},
"peerDependencies": {
"vue": "^3.0.11"
}
},
"node_modules/webpack-virtual-modules": {
"version": "0.6.2",
"resolved": "https://registry.npmmirror.com/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz",

View File

@ -22,7 +22,6 @@
"lucide-vue-next": "^0.563.0",
"markstream-vue": "^0.0.7-beta.4",
"mermaid": "^11.12.2",
"naive-ui": "^2.44.1",
"pinia": "^3.0.4",
"shiki": "^3.22.0",
"stream-markdown": "^0.0.14",

View File

@ -1,19 +1,26 @@
"""
阿里云百炼 DashScope 适配器
使用 OpenAI SDK 调用阿里云 OpenAI 兼容 API
基于 api/chat_routes.py 重构
"""
import json
import os
from typing import Any, Dict, List, Optional
from typing import Dict, List
from .base import ChatCompletionRequest, ModelInfo
from .unified_adapter import UnifiedOpenAIAdapter
from fastapi.responses import JSONResponse, StreamingResponse
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode
from core import get_logger
logger = get_logger()
# 支持深度思考的模型
THINKING_MODELS = {"qwen3-max", "qwen3.5-plus"}
# 需要使用多模态接口的模型qwen3.5 系列)
MULTIMODAL_API_MODELS = {"qwen3.5-plus", "qwen3.5-flash"}
# 百炼模型配置
DASHSCOPE_MODELS = [
ModelInfo(
@ -41,18 +48,7 @@ DASHSCOPE_MODELS = [
ModelInfo(
id="qwen3.5-flash",
name="Qwen3.5-Flash",
description="千问系列速度最快、成本极低的模型,适合简单任务。",
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="qwen-turbo",
name="Qwen-Turbo",
description="快速响应的通用模型",
description="千问系列速度最快、成本极低的模型适合简单任务。千问Flash采用灵活的阶梯定价相比千问Turbo计费更合理。",
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
@ -71,49 +67,397 @@ DASHSCOPE_MODELS = [
supports_vision=True,
supports_files=False,
),
ModelInfo(
id="qwen-vl-plus",
name="通义万相 VL-Plus",
description="支持视觉理解的多模态模型",
max_tokens=8192,
provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=True,
supports_files=False,
),
]
# 从 DASHSCOPE_MODELS 自动计算
THINKING_MODELS = {m.id.lower() for m in DASHSCOPE_MODELS if m.supports_thinking}
VISION_MODELS = {m.id.lower() for m in DASHSCOPE_MODELS if m.supports_vision}
class DashScopeAdapter(UnifiedOpenAIAdapter):
class DashScopeAdapter(BaseAdapter):
"""阿里云百炼 DashScope 平台适配器"""
_provider_type = "dashscope"
@property
def provider_name(self) -> str:
return "dashscope"
def list_models(self) -> List[ModelInfo]:
return DASHSCOPE_MODELS
def is_available(self) -> bool:
    """Return True when a DashScope API key is configured.

    Delegates to ``_get_api_key`` instead of re-reading the environment,
    so the key-lookup precedence lives in exactly one place.
    """
    return bool(self._get_api_key())
def _get_api_key(self) -> str:
    """Return the API key, preferring ALIYUN_API_KEY over DASHSCOPE_API_KEY.

    Yields the empty string when neither environment variable is set.
    """
    primary = os.getenv("ALIYUN_API_KEY")
    if primary:
        return primary
    return os.getenv("DASHSCOPE_API_KEY", "")
def _needs_multimodal_api(self, model: str) -> bool:
    """Whether this model (qwen3.5 family) must use the multimodal API."""
    normalized = model.lower()
    return normalized in MULTIMODAL_API_MODELS
def _supports_thinking(self, model: str) -> bool:
    """Whether the model id supports deep-thinking (reasoning) output."""
    normalized = model.lower()
    return normalized in THINKING_MODELS
def _is_vision_model(self, model: str) -> bool:
    """Whether the model id is a vision-capable (multimodal) model."""
    normalized = model.lower()
    return normalized in VISION_MODELS
def list_models(self) -> List[ModelInfo]:
return DASHSCOPE_MODELS
def _build_messages(self, request: ChatCompletionRequest) -> List[Dict]:
async def chat(self, request: ChatCompletionRequest):
"""
构建 DashScope 格式的消息
处理多模态内容
处理 DashScope 聊天请求
支持流式/非流式多模态
"""
# 打印请求参数
logger.info(f"[DashScope] 请求参数:")
logger.info(f" - model: {request.model}")
logger.info(f" - stream: {request.stream}")
logger.info(f" - temperature: {request.temperature}")
logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - files: {request.files}")
logger.info(f" - deep_thinking: {request.deep_thinking}")
logger.info(
f" - messages: {json.dumps(request.messages, ensure_ascii=False, indent=2)}"
)
# 检测是否包含多模态内容
has_multimodal = self._has_multimodal_content(request)
logger.info(f" - has_multimodal: {has_multimodal}")
# 检查是否需要使用多模态接口qwen3.5 系列)
needs_multimodal_api = self._needs_multimodal_api(request.model)
logger.info(f" - needs_multimodal_api: {needs_multimodal_api}")
if has_multimodal or needs_multimodal_api:
return await self._multimodal_chat(request)
else:
return await self._text_chat(request)
def _has_multimodal_content(self, request: ChatCompletionRequest) -> bool:
    """Return True when the request carries multimodal input.

    A request counts as multimodal if any message has list-style content
    containing an ``image_url`` part, or if it carries attached files.
    """
    for message in request.messages:
        parts = message.get("content", "")
        if isinstance(parts, list) and any(
            isinstance(part, dict) and part.get("type") == "image_url"
            for part in parts
        ):
            return True
    return bool(request.files)
async def _text_chat(self, request: ChatCompletionRequest):
    """Handle a text-only chat request.

    Sets the DashScope API key, flattens the OpenAI-style messages to the
    plain-text format, then dispatches to the streaming or synchronous
    implementation depending on ``request.stream``.

    Fix: removed the unused ``from dashscope import Generation`` import —
    both ``_stream_text_chat`` and ``_sync_text_chat`` import it themselves.
    """
    import dashscope

    dashscope.api_key = self._get_api_key()

    # Convert messages (non-text parts are dropped by the builder).
    messages = self._build_text_messages(request)
    logger.info(f"[DashScope] 文本聊天 - 转换后的消息:")
    logger.info(f" - messages_count: {len(messages)}")
    logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")

    if request.stream:
        return self._stream_text_chat(messages, request)
    return self._sync_text_chat(messages, request)
def _build_text_messages(self, request: ChatCompletionRequest) -> List[Dict]:
    """Flatten OpenAI-style messages into plain-text DashScope messages.

    String contents are kept as-is; list contents are reduced to the
    concatenation of their ``text`` parts. Messages whose resulting text
    is blank are dropped entirely.
    """
    result: List[Dict] = []
    for message in request.messages:
        role = message.get("role", "user")
        content = message.get("content", "")
        if isinstance(content, list):
            content = "".join(
                part.get("text", "")
                for part in content
                if isinstance(part, dict) and part.get("type") == "text"
            )
        if isinstance(content, str) and content.strip():
            result.append({"role": role, "content": content})
    return result
def _stream_text_chat(self, messages: List[Dict], request: ChatCompletionRequest):
"""Streaming text chat.

Calls DashScope ``Generation`` with ``stream=True`` and re-emits the
result as OpenAI-style ``chat.completion.chunk`` SSE events, finishing
with a stop chunk and ``[DONE]``. DashScope streams the cumulative
content on each chunk, so deltas are computed by slicing off what was
already sent. Returns a StreamingResponse wrapping the generator.
"""
logger.info(f"[DashScope] 开始流式文本响应...")
# Deep thinking is on only when requested AND supported by the model.
thinking_enabled = request.deep_thinking and self._supports_thinking(request.model)
logger.info(f"[DashScope] 深度思考: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(request.model)})")
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
from dashscope import Generation
full_content = ""
full_reasoning = ""
chunk_count = 0
# NOTE(review): error_occurred is assigned but never read — candidate for removal.
error_occurred = False
# Base parameters for Generation.call.
api_params = {
"model": request.model,
"messages": messages,
"stream": True,
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"result_format": "message",
}
# Apply the unified web-search configuration.
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# Request reasoning output when deep thinking is enabled.
if thinking_enabled:
api_params["enable_thinking"] = True
# Log the call parameters (messages omitted for brevity).
logger.info(f"[DashScope] API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - result_format: {api_params['result_format']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
try:
responses = Generation.call(**api_params)
except Exception as e:
error_occurred = True
logger.error(f"[DashScope] API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
# Emit a single error chunk followed by [DONE] so the client terminates cleanly.
error_data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [{
"index": 0,
"delta": {"content": f"API 调用失败: {str(e)}"},
"finish_reason": "stop",
}],
}
yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
return
for resp in responses:
if resp.status_code == 200:
chunk_count += 1
choice = resp.output.choices[0]
# Deep-thinking (reasoning_content): API returns the cumulative string.
reasoning_content = getattr(choice.message, "reasoning_content", None)
if reasoning_content:
# Forward only the newly appended suffix as the delta.
if len(reasoning_content) > len(full_reasoning):
delta_reasoning = reasoning_content[len(full_reasoning):]
full_reasoning = reasoning_content
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [
{
"index": 0,
"delta": {"reasoning_content": delta_reasoning},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
continue
# Regular content: also cumulative, so slice off the already-sent prefix.
content = choice.message.content
if content and len(content) > len(full_content):
# DashScope streams the full content so far; compute the increment.
delta = content[len(full_content) :]
full_content = content
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [
{
"index": 0,
"delta": {"content": delta},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
else:
# Log non-200 responses but keep consuming the stream.
logger.warning(f"[DashScope] 非200响应: status_code={resp.status_code}, code={resp.code}, message={resp.message}")
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": request.model,
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
}
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
# Summarize the completed stream.
logger.info(f"[DashScope] 流式文本响应完成:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_text_chat(self, messages: List[Dict], request: ChatCompletionRequest):
"""Non-streaming text chat.

Calls DashScope ``Generation`` synchronously and converts the result to
an OpenAI-style ``chat.completion`` JSON response, including usage and
optional ``reasoning_content``. Errors are returned as HTTP 500 JSON.
"""
from utils.helpers import generate_unique_id, get_current_timestamp
from dashscope import Generation
# Deep thinking is on only when requested AND supported by the model.
thinking_enabled = request.deep_thinking and self._supports_thinking(request.model)
logger.info(f"[DashScope] 深度思考: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(request.model)})")
# Base parameters for Generation.call.
api_params = {
"model": request.model,
"messages": messages,
"stream": False,
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"result_format": "message",
}
# Apply the unified web-search configuration.
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# Request reasoning output when deep thinking is enabled.
if thinking_enabled:
api_params["enable_thinking"] = True
# Log the call parameters (messages omitted for brevity).
logger.info(f"[DashScope] API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - result_format: {api_params['result_format']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
try:
resp = Generation.call(**api_params)
except Exception as e:
logger.error(f"[DashScope] API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
return JSONResponse(
status_code=500,
content={"error": f"DashScope API 调用异常: {str(e)}"},
)
if resp.status_code == 200:
message = resp.output.choices[0].message
content = message.content or ""
# Build the OpenAI-style assistant message.
response_message = {"role": "assistant", "content": content}
# Attach deep-thinking output when present.
reasoning_content = getattr(message, "reasoning_content", None)
if reasoning_content:
response_message["reasoning_content"] = reasoning_content
response = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion",
"created": get_current_timestamp(),
"model": request.model,
"choices": [
{
"index": 0,
"message": response_message,
"finish_reason": "stop",
}
],
}
if hasattr(resp, "usage") and resp.usage:
response["usage"] = {
"prompt_tokens": resp.usage.input_tokens,
"completion_tokens": resp.usage.output_tokens,
"total_tokens": resp.usage.total_tokens,
}
# Log the successful response summary.
logger.info(f"[DashScope] 响应成功:")
logger.info(f" - status_code: {resp.status_code}")
logger.info(f" - content_length: {len(content)} 字符")
if reasoning_content:
logger.info(f" - reasoning_length: {len(reasoning_content)} 字符")
logger.info(
f" - content_preview: {content[:200]}..."
if len(content) > 200
else f" - content: {content}"
)
if hasattr(resp, "usage") and resp.usage:
logger.info(f" - usage: {response['usage']}")
return JSONResponse(content=response)
logger.error(f"[DashScope] 请求失败:")
logger.error(f" - status_code: {resp.status_code}")
logger.error(f" - code: {resp.code}")
logger.error(f" - message: {resp.message}")
return JSONResponse(
status_code=500,
content={"error": f"DashScope Error: {resp.code} - {resp.message}"},
)
async def _multimodal_chat(self, request: ChatCompletionRequest):
    """Handle a multimodal (image + text) chat request.

    Converts messages to the DashScope multimodal format, rewrites a plain
    ``qwen-*`` model id to its ``qwen-vl-*`` counterpart when the id has no
    VL variant marker, then dispatches to the streaming or synchronous path.

    Fix: removed the unused ``from dashscope import MultiModalConversation``
    import — the stream/sync implementations import it themselves.
    """
    import dashscope

    dashscope.api_key = self._get_api_key()
    logger.info(f"[DashScope] 开始多模态聊天...")

    # Convert messages to the multimodal wire format.
    messages = self._build_multimodal_messages(request)
    logger.info(f"[DashScope] 多模态消息转换完成:")
    logger.info(f" - messages_count: {len(messages)}")
    logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")

    # A non-VL qwen-* model cannot accept images; switch to its VL variant.
    model = request.model
    if "qwen-" in model and "vl" not in model:
        original_model = model
        model = model.replace("qwen-", "qwen-vl-")
        logger.info(f"[DashScope] 模型自动切换: {original_model} -> {model}")

    if request.stream:
        return self._stream_multimodal_chat(messages, model, request)
    return self._sync_multimodal_chat(messages, model, request)
def _build_multimodal_messages(self, request: ChatCompletionRequest) -> List[Dict]:
"""构建多模态消息"""
messages = []
for msg in request.messages:
@ -122,43 +466,39 @@ class DashScopeAdapter(UnifiedOpenAIAdapter):
if isinstance(content, str):
if content.strip():
messages.append({"role": role, "content": content})
messages.append({"role": role, "content": [{"text": content}]})
elif isinstance(content, list):
# 多模态内容 - OpenAI 格式兼容
ds_content = []
for item in content:
if isinstance(item, dict):
if item.get("type") == "text":
ds_content.append({"type": "text", "text": item.get("text", "")})
ds_content.append({"text": item.get("text", "")})
elif item.get("type") == "image_url":
img_url = self._extract_image_url(item)
if img_url:
ds_content.append({
"type": "image_url",
"image_url": {"url": img_url}
})
ds_content.append({"image": img_url})
if ds_content:
messages.append({"role": role, "content": ds_content})
return messages
def _extract_image_url(self, item: Dict) -> Optional[str]:
"""提取图片 URL"""
def _extract_image_url(self, item: Dict) -> str:
"""提取并转换图片 URL"""
img_val = item.get("image_url", "")
if isinstance(img_val, str):
img_url = img_val
elif isinstance(img_val, dict):
img_url = img_val.get("url", "")
else:
return None
img_url = ""
# 记录图片 URL 转换
logger.info(f"[DashScope] 图片URL: {img_url}")
logger.info(f"[DashScope] 原始图片URL: {img_url}")
# 处理本地文件 URL
# 转换 http URL 为 file:// 格式(如果是本地文件)
if img_url.startswith(("http://", "https://")):
from urllib.parse import urlparse
parsed = urlparse(img_url)
if "localhost" in parsed.netloc or "127.0.0.1" in parsed.netloc:
path_parts = parsed.path.split("/")
@ -170,38 +510,285 @@ class DashScopeAdapter(UnifiedOpenAIAdapter):
elif not img_url.startswith("file://") and not img_url.startswith(("http://", "https://")):
img_url = f"file://{img_url}"
logger.info(f"[DashScope] 转换后图片URL: {img_url}")
return img_url
def _get_extra_params(self, request: ChatCompletionRequest) -> Dict[str, Any]:
"""
获取 DashScope 特殊参数
- 深度思考: extra_body={"enable_thinking": True/False}
- 联网搜索: extra_body={"enable_search": True}
"""
extra_params = {}
extra_body = {}
def _stream_multimodal_chat(
self, messages: List[Dict], model: str, request: ChatCompletionRequest
):
"""流式多模态聊天"""
logger.info(f"[DashScope] 开始流式多模态响应...")
logger.info(f" - model: {model}")
logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - temperature: {request.temperature}")
model = request.model
# 检查是否启用深度思考
thinking_enabled = request.deep_thinking and self._supports_thinking(model)
logger.info(f"[DashScope] 深度思考: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(model)})")
# 深度思考 - 始终传递,明确启用或禁用
logger.info(f"[DashScope] 深度思考请求: deep_thinking={request.deep_thinking}, model={model}")
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
supports_thinking = self._supports_thinking(model)
logger.info(f"[DashScope] 模型 {model} 支持深度思考: {supports_thinking}")
from dashscope import MultiModalConversation
thinking_enabled = request.deep_thinking and supports_thinking
extra_body["enable_thinking"] = thinking_enabled
logger.info(f"[DashScope] 深度思考最终状态: {thinking_enabled}")
full_content = ""
full_reasoning = ""
chunk_count = 0
error_occurred = False
# 联网搜索
# 打印 API 调用参数
api_params = {
"model": model,
"messages": messages,
"stream": True,
"enable_thinking": False,
"max_tokens": request.max_tokens,
"temperature": request.temperature,
}
# 使用统一网络搜索配置
web_search_mode = get_web_search_mode(request)
if web_search_mode:
api_params["enable_search"] = True
if web_search_mode == "deep":
api_params["search_options"] = {"enable_search_extension": True}
# 添加深度思考参数
if thinking_enabled:
api_params["enable_thinking"] = True
logger.info(f"[DashScope] 流式多模态 API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - temperature: {api_params['temperature']}")
logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
try:
responses = MultiModalConversation.call(**api_params)
except Exception as e:
error_occurred = True
logger.error(f"[DashScope] 多模态 API 调用异常: {str(e)}")
import traceback
logger.error(traceback.format_exc())
error_data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [{
"index": 0,
"delta": {"content": f"API 调用失败: {str(e)}"},
"finish_reason": "stop",
}],
}
yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
return
for resp in responses:
chunk_count += 1
if resp.status_code == 200:
try:
choice = resp.output.choices[0]
message = choice["message"]
# 处理深度思考内容reasoning_content
# 多模态 API 返回的 reasoning_content 也是独立的片段
reasoning_content = message.get("reasoning_content", "")
if reasoning_content:
delta_reasoning = reasoning_content
full_reasoning += reasoning_content
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [
{
"index": 0,
"delta": {"reasoning_content": delta_reasoning},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
continue
# 处理普通内容
content_items = message.get("content", [])
text = ""
for item in content_items:
if isinstance(item, dict) and "text" in item:
text += item["text"]
# 多模态 API 返回的 content 是独立的片段(不是累积的),直接作为 delta
if text:
delta = text
full_content += text
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [
{
"index": 0,
"delta": {"content": delta},
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
except (KeyError, IndexError, TypeError) as e:
logger.warning(f"[DashScope] 解析多模态响应异常: {str(e)}")
else:
logger.warning(f"[DashScope] 非200响应: status_code={resp.status_code}, code={resp.code}, message={resp.message}")
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": model,
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
}
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
# 打印流式响应结果
logger.info(f"[DashScope] 流式多模态响应完成:")
logger.info(f" - chunks: {chunk_count}")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(f" - reasoning: {full_reasoning[:500]}..." if len(full_reasoning) > 500 else f" - reasoning: {full_reasoning}")
logger.info(f" - content_length: {len(full_content)} 字符")
logger.info(
f" - content: {full_content[:500]}..."
if len(full_content) > 500
else f" - content: {full_content}"
)
return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_multimodal_chat(
    self, messages: List[Dict], model: str, request: ChatCompletionRequest
):
    """Non-streaming multimodal chat via DashScope.

    Calls MultiModalConversation.call() synchronously, converts the
    DashScope response into an OpenAI-style ``chat.completion`` payload
    and returns it as a JSONResponse. SDK and parse failures are
    reported as HTTP 500 JSON errors instead of propagating.

    Fix: removed merge remnants that referenced the undefined names
    ``extra_body``/``extra_params`` and returned early (NameError /
    dead code in the original).
    """
    from utils.helpers import generate_unique_id, get_current_timestamp
    from dashscope import MultiModalConversation

    # Deep thinking is honoured only when the request asks for it AND
    # the model supports it.
    thinking_enabled = request.deep_thinking and self._supports_thinking(model)
    logger.info(f"[DashScope] 深度思考: {thinking_enabled} (request={request.deep_thinking}, supports={self._supports_thinking(model)})")
    logger.info(f"[DashScope] 开始非流式多模态响应...")
    logger.info(f" - model: {model}")
    logger.info(f" - max_tokens: {request.max_tokens}")
    logger.info(f" - temperature: {request.temperature}")

    # Base API parameters; enable_thinking defaults to off and is
    # switched on below only for supporting models.
    api_params = {
        "model": model,
        "messages": messages,
        "stream": False,
        "max_tokens": request.max_tokens,
        "enable_thinking": False,
        "temperature": request.temperature,
    }

    # Unified web-search configuration ("simple" or "deep").
    web_search_mode = get_web_search_mode(request)
    if web_search_mode:
        api_params["enable_search"] = True
        if web_search_mode == "deep":
            api_params["search_options"] = {"enable_search_extension": True}
        logger.info(f"[DashScope] 联网搜索已启用: mode={web_search_mode}")

    # Deep-thinking flag.
    if thinking_enabled:
        api_params["enable_thinking"] = True

    logger.info(f"[DashScope] 非流式多模态 API 调用参数:")
    logger.info(f" - model: {api_params['model']}")
    logger.info(f" - stream: {api_params['stream']}")
    logger.info(f" - max_tokens: {api_params['max_tokens']}")
    logger.info(f" - temperature: {api_params['temperature']}")
    logger.info(f" - enable_thinking: {api_params['enable_thinking']}")

    try:
        resp = MultiModalConversation.call(**api_params)
    except Exception as e:
        logger.error(f"[DashScope] 多模态 API 调用异常: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return JSONResponse(
            status_code=500,
            content={"error": f"DashScope API 调用异常: {str(e)}"},
        )

    if resp.status_code == 200:
        try:
            message = resp.output.choices[0]["message"]
            content_items = message.get("content", [])
            # DashScope returns a list of content items; concatenate the
            # text parts into a single string.
            text = ""
            for item in content_items:
                if isinstance(item, dict) and "text" in item:
                    text += item["text"]

            # Build the OpenAI-style response message.
            response_message = {"role": "assistant", "content": text}
            # Forward reasoning output when deep thinking produced any.
            reasoning_content = message.get("reasoning_content")
            if reasoning_content:
                response_message["reasoning_content"] = reasoning_content

            response = {
                "id": f"chatcmpl-{generate_unique_id()}",
                "object": "chat.completion",
                "created": get_current_timestamp(),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": response_message,
                        "finish_reason": "stop",
                    }
                ],
            }

            # Log a summary of the successful response.
            logger.info(f"[DashScope] 多模态响应成功:")
            logger.info(f" - status_code: {resp.status_code}")
            logger.info(f" - content_length: {len(text)} 字符")
            if reasoning_content:
                logger.info(f" - reasoning_length: {len(reasoning_content)} 字符")
            logger.info(
                f" - content_preview: {text[:200]}..."
                if len(text) > 200
                else f" - content: {text}"
            )
            return JSONResponse(content=response)
        except (KeyError, IndexError, TypeError) as e:
            logger.error(f"[DashScope] 解析多模态响应异常: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            return JSONResponse(
                status_code=500,
                content={"error": f"Parse error: {str(e)}"},
            )

    # Non-200 status from DashScope: surface the provider error code.
    logger.error(f"[DashScope] 多模态请求失败:")
    logger.error(f" - status_code: {resp.status_code}")
    logger.error(f" - code: {resp.code}")
    logger.error(f" - message: {resp.message}")
    return JSONResponse(
        status_code=500,
        content={"error": f"DashScope Error: {resp.code} - {resp.message}"},
    )

View File

@ -1,14 +1,16 @@
"""
智谱 GLM 适配器
使用 OpenAI SDK 调用智谱 OpenAI 兼容 API
基于 utils/glm_adapter.py 重构
使用zai-sdk因为已经完成这一部分的整套逻辑如果更换OpenAI-SDK会花很多时间调试
"""
import json
import os
from typing import Any, Dict, List, Optional
from typing import Dict, List, Optional
from .base import ChatCompletionRequest, ModelInfo
from .unified_adapter import UnifiedOpenAIAdapter
from fastapi.responses import JSONResponse, StreamingResponse
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode, build_glm_search_tool
from core import get_logger
@ -29,7 +31,7 @@ GLM_MODELS = [
),
ModelInfo(
id="glm-4.6v",
name="GLM-4.6V",
name="GLM-4.6V(推荐)",
description="最新旗舰模型,支持文本/图像/文档/深度思考",
max_tokens=128000,
provider="ZhipuAI",
@ -73,44 +75,122 @@ GLM_MODELS = [
),
]
# 从 GLM_MODELS 自动计算
VISION_MODELS = {m.id.lower() for m in GLM_MODELS if m.supports_vision}
THINKING_MODELS = {m.id.lower() for m in GLM_MODELS if m.supports_thinking}
# 视觉模型列表(用于自动切换)
VISION_MODELS = {"glm-4v", "glm-4v-plus", "glm-4v-plus-0111", "glm-4.6v"}
# 支持深度思考的模型
THINKING_MODELS = {"glm-z1-flash", "glm-z1-air", "glm-4.6v", "glm-4.6"}
class GLMAdapter(BaseAdapter):
    """ZhipuAI GLM platform adapter (智谱 GLM 平台适配器).

    Fix: removed the duplicated ``class GLMAdapter(...)`` header and the
    stale ``_provider_type = "zhipu"`` attribute left over from the old
    UnifiedOpenAIAdapter-based implementation (two consecutive class
    statements are a syntax error).
    """

    # Lazily-created ZhipuAI SDK client (see _get_client).
    _client = None

    @property
    def provider_name(self) -> str:
        return "glm"

    def is_available(self) -> bool:
        """Return True when a GLM API key is configured in the environment."""
        return bool(os.getenv("ZHIPU_API_KEY") or os.getenv("GLM_API_KEY"))

    def _get_client(self):
        """Create (on first use) and return the ZhipuAI SDK client."""
        if self._client is None:
            from zhipuai import ZhipuAI
            api_key = os.getenv("ZHIPU_API_KEY") or os.getenv("GLM_API_KEY")
            self._client = ZhipuAI(api_key=api_key)
        return self._client

    def list_models(self) -> List[ModelInfo]:
        """Return the static list of supported GLM models."""
        return GLM_MODELS

    def _supports_thinking(self, model: str) -> bool:
        """Return True when the given model id supports deep thinking."""
        return model.lower() in THINKING_MODELS
async def chat(self, request: ChatCompletionRequest):
    """Handle a GLM chat request.

    Supports streaming and non-streaming responses, images, documents,
    web search and deep thinking; dispatches to _stream_chat or
    _sync_chat depending on ``request.stream``.

    Fix: removed a stray old ``def _build_messages(...)`` header that a
    bad merge had left embedded inside this method body.
    """
    client = self._get_client()

    # Build GLM-format messages; flags report image / file attachments.
    glm_messages, has_vision, has_files = self._build_messages(request)
    actual_model = self._resolve_model(request.model, has_vision, has_files)

    # Debug: dump the raw request parameters.
    logger.info(f"[GLM] 原始请求参数:")
    logger.info(
        f" - request.deep_thinking: {request.deep_thinking} (type: {type(request.deep_thinking)})"
    )
    logger.info(f" - request.web_search: {request.web_search}")
    logger.info(f" - request.deep_search: {request.deep_search}")
    logger.info(f" - actual_model: {actual_model}")
    logger.info(f" - supports_thinking: {self._supports_thinking(actual_model)}")

    # Extra keyword arguments forwarded to the SDK call.
    extra_kwargs = {}
    web_search_mode = get_web_search_mode(request)
    if web_search_mode:
        extra_kwargs["tools"] = [build_glm_search_tool(web_search_mode)]
        extra_kwargs["tool_choice"] = "auto"

    # Deep thinking is an explicit opt-in (True enables, False disables)
    # and only honoured on supporting models (e.g. glm-z1-flash).
    thinking_enabled = request.deep_thinking and self._supports_thinking(
        actual_model
    )
    logger.info(
        f"[GLM] 深度思考判断: {request.deep_thinking} and {self._supports_thinking(actual_model)} = {thinking_enabled}"
    )
    if thinking_enabled:
        extra_kwargs["thinking"] = {"type": "enabled"}
        logger.info(
            f"[GLM] 深度思考已启用: extra_kwargs['thinking'] = {extra_kwargs['thinking']}"
        )
    else:
        extra_kwargs["thinking"] = {"type": "disabled"}
        logger.info(
            f"[GLM] 深度思考已禁用: extra_kwargs['thinking'] = {extra_kwargs['thinking']}"
        )

    if extra_kwargs:
        logger.info(
            f"[GLM] 最终 extra_kwargs: {json.dumps(extra_kwargs, ensure_ascii=False)}"
        )

    if request.stream:
        return self._stream_chat(
            client, glm_messages, actual_model, request, extra_kwargs
        )
    else:
        return self._sync_chat(
            client, glm_messages, actual_model, request, extra_kwargs
        )
def _build_messages(
self, request: ChatCompletionRequest
) -> tuple[List[Dict], bool, bool]:
"""
构建 GLM 格式的消息
处理文件附件和多模态内容
返回(消息列表, 是否包含图片, 是否包含文件附件)
"""
messages = []
has_vision = False
has_files = bool(request.files)
has_files = bool(request.files) # 检查是否有文件附件
for msg in request.messages:
role = msg.get("role", "user")
content = msg.get("content", "")
if isinstance(content, str):
# 纯文本
if content.strip():
messages.append({"role": role, "content": content})
elif isinstance(content, list):
# 多模态内容
glm_content = []
for item in content:
if isinstance(item, dict):
@ -134,6 +214,7 @@ class GLMAdapter(UnifiedOpenAIAdapter):
if request.files:
file_content = self._build_file_content(request.files)
if messages and messages[-1]["role"] == "user":
# 追加到最后一个用户消息
if isinstance(messages[-1]["content"], list):
messages[-1]["content"].extend(file_content)
else:
@ -144,7 +225,7 @@ class GLMAdapter(UnifiedOpenAIAdapter):
else:
messages.append({"role": "user", "content": file_content})
return messages
return messages, has_vision, has_files
def _extract_image_url(self, item: Dict) -> Optional[str]:
"""提取图片 URL"""
@ -163,54 +244,232 @@ class GLMAdapter(UnifiedOpenAIAdapter):
content.append({"type": "file_url", "file_url": {"url": file_url}})
return content
def _resolve_model(self, model: str, has_vision: bool, has_files: bool = False) -> str:
def _resolve_model(
self, model: str, has_vision: bool, has_files: bool = False
) -> str:
"""解析实际使用的模型"""
model_lower = model.lower()
# 如果有图片或文件附件,强制使用 glm-4.6v(支持多模态)
if (has_vision or has_files) and model_lower not in VISION_MODELS:
logger.info(f"[GLM] 检测到图片或文件附件,切换模型: {model} -> glm-4.6v")
logger.info(
f"[GLM] 检测到图片或文件附件,强制切换模型: {model} -> glm-4.6v"
)
return "glm-4.6v"
return model
def _stream_chat(
    self, client, messages, model, request, extra_kwargs
) -> StreamingResponse:
    """Streaming chat: relay GLM SSE chunks as OpenAI-style chunks.

    Reasoning (deep thinking) deltas are forwarded as
    ``reasoning_content``; normal deltas as ``content``; a final chunk
    with ``finish_reason: stop`` and the ``[DONE]`` sentinel close the
    stream.

    Fix: stripped merge remnants of the old ``_get_extra_params``
    (including an unterminated ``any(`` expression) that preceded this
    method and broke the file syntactically.
    """
    logger.info(f"[GLM] 开始流式响应...")

    # Per-request options computed by chat().
    thinking_config = extra_kwargs.get("thinking")
    tools_config = extra_kwargs.get("tools")

    def generator():
        from utils.helpers import generate_unique_id, get_current_timestamp

        full_content = ""

        # Build the SDK call parameters.
        api_params = {
            "model": model,
            "messages": messages,
            "stream": True,
            "temperature": request.temperature,
            "max_tokens": request.max_tokens,
        }
        # Deep thinking travels via extra_body.
        if thinking_config:
            api_params["extra_body"] = {"thinking": thinking_config}
        # Web search travels via the tools parameter.
        if tools_config:
            api_params["tools"] = tools_config
            api_params["tool_choice"] = "auto"

        # Log the outgoing request.
        logger.info(f"[GLM] API 调用参数:")
        logger.info(f" - model: {model}")
        logger.info(f" - stream: True")
        logger.info(f" - temperature: {request.temperature}")
        logger.info(f" - max_tokens: {request.max_tokens}")
        if thinking_config:
            logger.info(f" - extra_body: {{'thinking': {thinking_config}}}")
        if tools_config:
            logger.info(
                f" - tools: {json.dumps(tools_config, ensure_ascii=False)}"
            )
            logger.info(f" - tool_choice: auto")
        logger.info(
            f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}"
        )

        chunk_count = 0
        resp = client.chat.completions.create(**api_params)
        for chunk in resp:
            chunk_count += 1
            # Skip chunks that carry no delta.
            if not hasattr(chunk.choices[0], "delta"):
                continue
            delta = chunk.choices[0].delta

            # Deep-thinking tokens are forwarded in their own chunks.
            reasoning_content = getattr(delta, "reasoning_content", None)
            if reasoning_content:
                data = {
                    "id": f"chatcmpl-{generate_unique_id()}",
                    "object": "chat.completion.chunk",
                    "created": get_current_timestamp(),
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {"reasoning_content": reasoning_content},
                            "finish_reason": None,
                        }
                    ],
                }
                yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
                continue

            # Plain content tokens.
            content = getattr(delta, "content", None)
            if content:
                full_content += content
                data = {
                    "id": f"chatcmpl-{generate_unique_id()}",
                    "object": "chat.completion.chunk",
                    "created": get_current_timestamp(),
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {"content": content},
                            "finish_reason": None,
                        }
                    ],
                }
                yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"

        # Final chunk with finish_reason, then the SSE terminator.
        finish = {
            "id": f"chatcmpl-{generate_unique_id()}",
            "object": "chat.completion.chunk",
            "created": get_current_timestamp(),
            "model": model,
            "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
        }
        yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"

        # Log a summary of the finished stream.
        logger.info(f"[GLM] 流式响应完成:")
        logger.info(f" - chunks: {chunk_count}")
        logger.info(f" - content_length: {len(full_content)} 字符")
        logger.info(
            f" - content_preview: {full_content[:200]}..."
            if len(full_content) > 200
            else f" - content: {full_content}"
        )

    return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_chat(
    self, client, messages, model, request, extra_kwargs
) -> JSONResponse:
    """Non-streaming GLM chat: one SDK call, OpenAI-style JSON response.

    Fix: removed interleaved old-version lines that referenced the
    undefined names ``has_vision``/``extra_params`` and a stray
    ``return extra_params`` that aborted the method before the JSON
    response was returned.
    """
    from utils.helpers import generate_unique_id, get_current_timestamp

    # Per-request options computed by chat().
    thinking_config = extra_kwargs.get("thinking")
    tools_config = extra_kwargs.get("tools")

    # Build the SDK call parameters.
    api_params = {
        "model": model,
        "messages": messages,
        "stream": False,
        "temperature": request.temperature,
        "max_tokens": request.max_tokens,
    }
    # Deep thinking travels via extra_body.
    if thinking_config:
        api_params["extra_body"] = {"thinking": thinking_config}
    # Web search travels via the tools parameter.
    if tools_config:
        api_params["tools"] = tools_config
        api_params["tool_choice"] = "auto"

    # Log the outgoing request.
    logger.info(f"[GLM] API 调用参数:")
    logger.info(f" - model: {model}")
    logger.info(f" - stream: {request.stream}")
    logger.info(f" - temperature: {request.temperature}")
    logger.info(f" - max_tokens: {request.max_tokens}")
    if thinking_config:
        logger.info(f" - extra_body: {{'thinking': {thinking_config}}}")
    if tools_config:
        logger.info(f" - tools: {json.dumps(tools_config, ensure_ascii=False)}")
        logger.info(f" - tool_choice: auto")
    logger.info(
        f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}"
    )

    resp = client.chat.completions.create(**api_params)

    message = resp.choices[0].message
    content = message.content or ""

    # Build the OpenAI-style response message.
    response_message = {"role": "assistant", "content": content}
    # Forward reasoning output when deep thinking produced any.
    reasoning_content = getattr(message, "reasoning_content", None)
    if reasoning_content:
        response_message["reasoning_content"] = reasoning_content

    response = {
        "id": f"chatcmpl-{generate_unique_id()}",
        "object": "chat.completion",
        "created": get_current_timestamp(),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": response_message,
                "finish_reason": "stop",
            }
        ],
    }

    # Attach token usage when the SDK reports it.
    if hasattr(resp, "usage") and resp.usage:
        response["usage"] = {
            "prompt_tokens": resp.usage.prompt_tokens,
            "completion_tokens": resp.usage.completion_tokens,
            "total_tokens": resp.usage.total_tokens,
        }

    # Log a summary of the response.
    logger.info(f"[GLM] 响应结果:")
    logger.info(f" - content_length: {len(content)} 字符")
    logger.info(
        f" - content_preview: {content[:200]}..."
        if len(content) > 200
        else f" - content: {content}"
    )
    if hasattr(resp, "usage") and resp.usage:
        logger.info(f" - usage: {response['usage']}")

    return JSONResponse(content=response)

View File

@ -5,18 +5,12 @@ OpenAI 适配器
import json
import os
from typing import Any, Dict, List, Optional
from typing import Dict, List, Optional
from fastapi.responses import StreamingResponse
from fastapi.responses import JSONResponse, StreamingResponse
from .base import ChatCompletionRequest, ModelInfo
from .unified_adapter import UnifiedOpenAIAdapter
from .plugins import (
get_web_search_mode,
build_openai_search_tool,
execute_tavily_search,
get_current_time_info,
)
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from .plugins import get_web_search_mode, build_openai_search_tool, execute_tavily_search, get_current_time_info
from core import get_logger
logger = get_logger()
@ -89,67 +83,183 @@ DEEPSEEK_MODELS = [
max_tokens=64000,
provider="Deepseek",
supports_thinking=True,
supports_web_search=True,
supports_web_search=True, # 注:通过内置检索增强实现
supports_vision=False,
supports_files=False,
),
]
# DeepSeek models that support deep thinking.
# Fix: removed the duplicated derived-set assignment that was
# immediately overwritten by this literal (dead code from a bad merge).
DEEPSEEK_THINKING_MODELS = {"deepseek-reasoner"}
class OpenAIAdapter(BaseAdapter):
    """OpenAI platform adapter; also serves DeepSeek via ``provider_type``.

    Fixes: removed the duplicated ``class OpenAIAdapter(...)`` header and
    the duplicated ``_provider_type`` attribute, and removed the stray
    ``return "openai"`` that made ``return self._provider_type``
    unreachable in ``provider_name`` (breaking the DeepSeek case).
    """

    # Lazily-created OpenAI SDK client (see _get_client).
    _client = None
    # Either "openai" or "deepseek"; selects API key and base URL.
    _provider_type: str = "openai"  # openai 或 deepseek

    def __init__(self, provider_type: str = "openai"):
        self._provider_type = provider_type

    @property
    def provider_name(self) -> str:
        return self._provider_type

    def is_available(self) -> bool:
        """Return True when the active provider's API key is configured."""
        if self._provider_type == "deepseek":
            return bool(os.getenv("DEEPSEEK_API_KEY"))
        return bool(os.getenv("OPENAI_API_KEY"))

    def _get_client(self):
        """Create (on first use) and return the OpenAI SDK client."""
        if self._client is None:
            from openai import OpenAI
            if self._provider_type == "deepseek":
                api_key = os.getenv("DEEPSEEK_API_KEY", "")
                base_url = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
            else:
                api_key = os.getenv("OPENAI_API_KEY", "")
                base_url = os.getenv("OPENAI_BASE_URL")  # optional custom endpoint
            kwargs = {"api_key": api_key}
            if base_url:
                kwargs["base_url"] = base_url
            self._client = OpenAI(**kwargs)
        return self._client

    def list_models(self) -> List[ModelInfo]:
        """Return the model list for the active provider."""
        if self._provider_type == "deepseek":
            return DEEPSEEK_MODELS
        return OPENAI_MODELS
async def chat(self, request: ChatCompletionRequest):
    """Handle an OpenAI/DeepSeek chat request with the OpenAI SDK.

    Builds messages, injects the current time into the system prompt,
    attaches the web-search tool and DeepSeek deep-thinking flag where
    applicable, then dispatches to streaming or synchronous handling.

    Fix: removed interleaved remnants of the old ``_get_extra_params``
    and an old ``_stream_chat`` header that a bad merge had embedded in
    this method body (they referenced the undefined ``extra_params``).
    """
    client = self._get_client()

    # Log the incoming request parameters.
    provider_name = self._provider_type.upper()
    logger.info(f"[{provider_name}] 请求参数:")
    logger.info(f" - model: {request.model}")
    logger.info(f" - stream: {request.stream}")
    logger.info(f" - temperature: {request.temperature}")
    logger.info(f" - max_tokens: {request.max_tokens}")
    logger.info(f" - provider_type: {self._provider_type}")
    if self._provider_type == "deepseek":
        logger.info(f" - deep_thinking: {request.deep_thinking}")

    # Build OpenAI-format messages.
    messages = self._build_messages(request)

    # Unified web-search plugin configuration.
    web_search_mode = get_web_search_mode(request)

    # Inject the current time into the system prompt so the model has
    # time awareness.
    time_info = get_current_time_info()
    has_system = False
    for msg in messages:
        if msg.get("role") == "system":
            msg["content"] = f"当前系统时间:{time_info}\n" + str(msg.get("content", ""))
            has_system = True
            break
    if not has_system:
        messages.insert(0, {"role": "system", "content": f"当前系统时间:{time_info}"})

    logger.info(
        f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}"
    )

    # Build the SDK call parameters.
    kwargs = {
        "model": request.model,
        "messages": messages,
        "temperature": request.temperature,
        "max_tokens": request.max_tokens,
        "stream": request.stream,
    }
    if web_search_mode:
        search_tool = build_openai_search_tool(web_search_mode)
        kwargs["tools"] = [search_tool]

    # DeepSeek deep-thinking support.
    extra_body = None
    if self._provider_type == "deepseek" and request.deep_thinking:
        if self._supports_thinking(request.model):
            extra_body = {"thinking": {"type": "enabled"}}
            kwargs["extra_body"] = extra_body
            logger.info(
                f"[{provider_name}] 深度思考已启用: extra_body = {extra_body}"
            )

    if request.stream:
        return self._stream_chat(client, kwargs, extra_body)
    else:
        return self._sync_chat(client, kwargs, extra_body)
def _supports_thinking(self, model: str) -> bool:
    """Return True when the given model id supports deep thinking."""
    normalized_id = model.lower()
    return normalized_id in DEEPSEEK_THINKING_MODELS
def _build_messages(self, request: ChatCompletionRequest) -> List[Dict]:
    """Convert request messages into OpenAI-format messages.

    Blank (whitespace-only) text messages are dropped. For multimodal
    list content, only dict items are forwarded; a list left empty after
    filtering is dropped as well. Missing roles default to "user".
    """
    result = []
    for raw in request.messages:
        role = raw.get("role", "user")
        body = raw.get("content", "")
        if isinstance(body, str):
            # Plain text: keep only non-blank messages.
            if body.strip():
                result.append({"role": role, "content": body})
        elif isinstance(body, list):
            # Multimodal content: pass dict parts through unchanged.
            parts = [part for part in body if isinstance(part, dict)]
            if parts:
                result.append({"role": role, "content": parts})
    return result
def _stream_chat(
self, client, kwargs: Dict, extra_body: Optional[Dict] = None
) -> StreamingResponse:
"""流式聊天"""
provider_name = self._provider_type.upper()
logger.info(f"[{provider_name}] 开始流式响应...")
def generator():
from utils.helpers import generate_unique_id, get_current_timestamp
nonlocal kwargs
# 可能需要多轮对话(当发生工具调用时)
# 可能需要执行多轮对话(当发生工具调用时)
while True:
resp = client.chat.completions.create(**kwargs)
full_content = ""
full_reasoning = ""
chunk_count = 0
tool_calls = []
current_tool_call = None
for chunk in resp:
if not chunk.choices:
continue
chunk_count += 1
delta = chunk.choices[0].delta
# 收集内容
# 1. 收集可能有内容/推理
delta_content = {}
if hasattr(delta, "content") and delta.content:
delta_content["content"] = delta.content
@ -158,20 +268,18 @@ class OpenAIAdapter(UnifiedOpenAIAdapter):
delta_content["reasoning_content"] = delta.reasoning_content
full_reasoning += delta.reasoning_content
# 收集 tool_calls流式
# 2. 收集可能产生的 tool_calls (流式)
if hasattr(delta, "tool_calls") and delta.tool_calls:
for tool_call_chunk in delta.tool_calls:
idx = tool_call_chunk.index
# 确保 tool_calls 列表足够长
while len(tool_calls) <= idx:
tool_calls.append({
"id": "",
"type": "function",
"function": {"name": "", "arguments": ""}
})
tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
if tool_call_chunk.id:
tool_calls[idx]["id"] += tool_call_chunk.id
if tool_call_chunk.type:
# 对于 type, 因为 OpenAI 可能会传 chunks, 但通常只在第一块或者每块传, 为了避免 functionfunction, 使用赋值而非累加
tool_calls[idx]["type"] = tool_call_chunk.type
if tool_call_chunk.function:
if tool_call_chunk.function.name:
@ -179,59 +287,64 @@ class OpenAIAdapter(UnifiedOpenAIAdapter):
if tool_call_chunk.function.arguments:
tool_calls[idx]["function"]["arguments"] += tool_call_chunk.function.arguments
# 输出普通内容
# 3. 输出给前端普通文本
if delta_content and not tool_calls:
data = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
"created": get_current_timestamp(),
"model": kwargs["model"],
"choices": [{
"index": 0,
"delta": delta_content,
"finish_reason": None,
}],
"choices": [
{
"index": 0,
"delta": delta_content,
"finish_reason": None,
}
],
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
# 检查是否有完整的工具调用
# 检查此轮请求是否收到了完整工具调用,若是则执行搜索逻辑并追加继续请求,不再让外部函数退出
if tool_calls:
logger.info(f"[OpenAI] 检测到工具调用: {json.dumps(tool_calls, ensure_ascii=False)}")
# 添加助手消息
logger.info(f"[{provider_name}] 检测到流式中包含了工具调用进行拦截并处理: {json.dumps(tool_calls, ensure_ascii=False)}")
# 把大模型的工具调用请求也追加进去
assistant_msg = {
"role": "assistant",
"content": full_content or None,
"content": full_content or None, # 如果工具和普通内容同时存在也保留
"tool_calls": tool_calls
}
if full_reasoning:
assistant_msg["reasoning_content"] = full_reasoning
elif self._provider_type == "deepseek" and self._supports_thinking(kwargs["model"]):
# DeepSeek 推理模型在有工具调用时必须有 reasoning_content 字段
assistant_msg["reasoning_content"] = ""
kwargs["messages"].append(assistant_msg)
# 执行搜索工具
for tc in tool_calls:
if tc["function"]["name"] == "web_search":
try:
args = json.loads(tc["function"]["arguments"])
query = args.get("query", "")
mode = "deep" if "advanced" in str(kwargs.get("tools", [])) else "simple"
logger.info(f"[OpenAI] 执行搜索: {query}")
logger.info(f"[{provider_name}] 执行搜索插件: {query}")
search_result = execute_tavily_search(query, mode=mode)
except Exception as e:
search_result = f"搜索失败: {str(e)}"
search_result = f"获取搜索参数或执行搜索失败: {str(e)}"
logger.error(search_result)
# 把执行结果告诉大模型
kwargs["messages"].append({
"role": "tool",
"tool_call_id": tc["id"],
"name": "web_search",
"content": search_result
})
# 继续请求归纳答案
# 工具执行完毕,继续发起下一轮请求大模型归纳总结输出
continue
# 没有工具调用,结束
# 如果没有工具调用或者全部分发完毕正常结束给前端
finish = {
"id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk",
@ -242,52 +355,130 @@ class OpenAIAdapter(UnifiedOpenAIAdapter):
yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
logger.info(f"[OpenAI] 流式响应完成: chunks={chunk_count}, content_len={len(full_content)}")
# 打印流式响应结果
logger.info(f"[{provider_name}] 流式响应完成:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
# 结束外层循环退出生成器
break
return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_chat(
    self, client, kwargs: Dict, extra_body: Optional[Dict] = None
) -> JSONResponse:
    """Non-streaming chat with web-search tool-call handling.

    Loops: when the model responds with ``web_search`` tool calls, the
    search is executed (Tavily), its result appended as a ``tool``
    message, and the request re-issued so the model can summarize;
    otherwise an OpenAI-style ``chat.completion`` JSON is returned.

    Fix: removed interleaved old ``DeepseekAdapter`` class lines
    (class header, provider_name, list_models, _supports_thinking) that
    a bad merge had scattered through this method body.
    """
    from utils.helpers import generate_unique_id, get_current_timestamp

    while True:
        resp = client.chat.completions.create(**kwargs)
        message = resp.choices[0].message

        # Did this round request any tool calls?
        if hasattr(message, "tool_calls") and message.tool_calls:
            # Record the assistant turn that asked for the tools.
            assistant_msg = {"role": "assistant", "content": message.content or None}
            # Convert SDK tool-call objects to plain dicts for storage.
            tool_calls_dict = []
            for tc in message.tool_calls:
                tc_dict = {
                    "id": tc.id,
                    "type": tc.type,
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments
                    }
                }
                tool_calls_dict.append(tc_dict)
            assistant_msg["tool_calls"] = tool_calls_dict
            if hasattr(message, "reasoning_content") and message.reasoning_content:
                assistant_msg["reasoning_content"] = message.reasoning_content
            elif self._provider_type == "deepseek" and self._supports_thinking(kwargs["model"]):
                # DeepSeek reasoner models require a reasoning_content
                # field on assistant messages that carry tool calls.
                assistant_msg["reasoning_content"] = ""
            kwargs["messages"].append(assistant_msg)

            # Execute every requested tool call.
            for tc in tool_calls_dict:
                if tc["function"]["name"] == "web_search":
                    try:
                        args = json.loads(tc["function"]["arguments"])
                        query = args.get("query", "")
                        mode = "deep" if "advanced" in str(kwargs.get("tools", [])) else "simple"
                        search_result = execute_tavily_search(query, mode=mode)
                    except Exception as e:
                        search_result = f"执行搜索失败: {str(e)}"
                    # Feed the tool result back to the model.
                    kwargs["messages"].append({
                        "role": "tool",
                        "tool_call_id": tc["id"],
                        "name": "web_search",
                        "content": search_result
                    })
            # Tools done: issue the next round so the model can summarize.
            continue

        # Plain text reply: build the OpenAI-style response.
        content = message.content or ""
        response = {
            "id": f"chatcmpl-{generate_unique_id()}",
            "object": "chat.completion",
            "created": get_current_timestamp(),
            "model": kwargs["model"],
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": message.role,
                        "content": content,
                    },
                    "finish_reason": resp.choices[0].finish_reason,
                }
            ],
        }

        # Attach reasoning output when present.
        if hasattr(message, "reasoning_content") and message.reasoning_content:
            response["choices"][0]["message"][
                "reasoning_content"
            ] = message.reasoning_content

        # Attach token usage when the SDK reports it.
        if resp.usage:
            response["usage"] = {
                "prompt_tokens": resp.usage.prompt_tokens,
                "completion_tokens": resp.usage.completion_tokens,
                "total_tokens": resp.usage.total_tokens,
            }

        # Log a summary of the response.
        provider_name = self._provider_type.upper()
        logger.info(f"[{provider_name}] 响应结果:")
        logger.info(f" - content_length: {len(content)} 字符")
        if hasattr(message, "reasoning_content") and message.reasoning_content:
            logger.info(f" - reasoning_length: {len(message.reasoning_content)} 字符")
        logger.info(
            f" - content_preview: {content[:200]}..."
            if len(content) > 200
            else f" - content: {content}"
        )
        if resp.usage:
            logger.info(f" - usage: {response['usage']}")

        return JSONResponse(content=response)
class DeepseekAdapter(OpenAIAdapter):
    """DeepSeek platform adapter (inherits the OpenAI adapter).

    Fix: removed merge remnants of the old ``_get_extra_params`` /
    ``_stream_chat`` implementations that polluted the class body and
    referenced undefined names (``request``, ``extra_params``).
    """

    def __init__(self):
        # All behavior lives in OpenAIAdapter; only the provider differs.
        super().__init__(provider_type="deepseek")

View File

@ -1,382 +0,0 @@
"""
统一 OpenAI SDK 适配器基类
所有平台适配器继承此类通过配置区分不同平台
MCP (Model Context Protocol) 支持
- 子类可覆盖 _get_mcp_tools() 返回 MCP 工具定义
- 子类可覆盖 _handle_mcp_tool_call() 处理 MCP 工具调用
"""
import json
import os
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from fastapi.responses import JSONResponse, StreamingResponse
from openai import OpenAI
from .base import BaseAdapter, ChatCompletionRequest, ModelInfo
from core import get_logger
logger = get_logger()
# Per-provider connection configuration: the OpenAI-compatible base URL
# and the environment variable(s) that hold the API key.
PROVIDER_CONFIGS = {
    "zhipu": {
        "base_url": "https://open.bigmodel.cn/api/paas/v4/",
        "api_key_env": "ZHIPU_API_KEY",
        "alias_env": ["GLM_API_KEY"],  # fallback environment variables
    },
    "dashscope": {
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "api_key_env": "DASHSCOPE_API_KEY",
        "alias_env": ["ALIYUN_API_KEY"],
    },
    "deepseek": {
        "base_url": "https://api.deepseek.com/v1",
        "api_key_env": "DEEPSEEK_API_KEY",
        "alias_env": [],
    },
    "openai": {
        "base_url": None,  # None means: use the OpenAI SDK default endpoint
        "api_key_env": "OPENAI_API_KEY",
        "alias_env": [],
    },
}
class UnifiedOpenAIAdapter(BaseAdapter):
"""
基于 OpenAI SDK 的统一适配器基类
子类只需提供:
- provider_name: 平台名称
- list_models(): 支持的模型列表
- _get_extra_params(): 特殊参数可选
MCP 扩展点:
- _get_mcp_tools(): 返回 MCP 工具定义
- _handle_mcp_tool_call(): 处理 MCP 工具调用
"""
_client: Optional[OpenAI] = None
_provider_type: str = "openai"
def _get_api_key(self) -> Optional[str]:
    """Resolve the API key for this provider from the environment.

    The primary environment variable is consulted first, then any alias
    variables, in order; returns None when nothing is set.
    """
    config = PROVIDER_CONFIGS.get(self._provider_type, {})
    env_names = [config.get("api_key_env", "")]
    env_names.extend(config.get("alias_env", []))
    for env_name in env_names:
        value = os.getenv(env_name)
        if value:
            return value
    return None
def _get_base_url(self) -> Optional[str]:
    """Return this provider's base URL, or None to use the SDK default."""
    provider_config = PROVIDER_CONFIGS.get(self._provider_type, {})
    return provider_config.get("base_url")
def _get_client(self) -> OpenAI:
    """Lazily build and cache the OpenAI SDK client for this provider."""
    if self._client is not None:
        return self._client
    api_key = self._get_api_key()
    base_url = self._get_base_url()
    client_kwargs = {"api_key": api_key or ""}
    if base_url:
        client_kwargs["base_url"] = base_url
    self._client = OpenAI(**client_kwargs)
    logger.info(f"[{self.provider_name}] 创建 OpenAI 客户端: base_url={base_url or 'default'}")
    return self._client
def is_available(self) -> bool:
    """Report whether an API key can be resolved for this provider."""
    api_key = self._get_api_key()
    return bool(api_key)
def _get_extra_params(self, request: ChatCompletionRequest) -> Dict[str, Any]:
    """Hook: extra API parameters (subclasses may override).

    Returns:
        Extra parameters merged into the OpenAI API call; an
        ``extra_body`` key is split out and passed separately by chat().
        The base implementation adds nothing.
    """
    return {}
# ============================================================
# MCP 扩展点(子类可覆盖)
# ============================================================
def _get_mcp_tools(self, request: ChatCompletionRequest) -> List[Dict]:
    """Hook: MCP tool definitions (subclasses may override).

    Returns:
        A list of MCP tools in the same format as OpenAI ``tools``,
        e.g. ``[{"type": "function", "function": {...}}]``. The base
        implementation exposes no tools.

    Example:
        return [{
            "type": "function",
            "function": {
                "name": "mcp_search",
                "description": "Search via the MCP protocol",
                "parameters": {...}
            }
        }]
    """
    return []
def _handle_mcp_tool_call(
    self,
    tool_name: str,
    tool_args: Dict,
    request: ChatCompletionRequest
) -> Optional[str]:
    """Hook: execute an MCP tool call (subclasses may override).

    Args:
        tool_name: Name of the tool requested by the model.
        tool_args: Parsed tool arguments.
        request: The original chat request.

    Returns:
        The tool result string, or None to signal "not an MCP tool".

    Example:
        if tool_name == "mcp_search":
            # Invoke the MCP client
            result = await mcp_client.call(tool_name, tool_args)
            return result
        return None
    """
    return None
# ============================================================
# 聊天处理
# ============================================================
async def chat(self, request: ChatCompletionRequest):
    """Handle a chat request — unified flow for OpenAI-compatible providers.

    Builds messages, merges subclass-provided extra parameters
    (splitting out ``extra_body`` for the SDK) and MCP tools, then
    dispatches to streaming or synchronous handling.
    """
    client = self._get_client()

    # Log the incoming request parameters.
    logger.info(f"[{self.provider_name}] 请求参数:")
    logger.info(f" - model: {request.model}")
    logger.info(f" - stream: {request.stream}")
    logger.info(f" - temperature: {request.temperature}")
    logger.info(f" - max_tokens: {request.max_tokens}")
    logger.info(f" - deep_thinking: {request.deep_thinking}")
    logger.info(f" - web_search: {request.web_search}")
    logger.info(f" - deep_search: {request.deep_search}")

    # Build OpenAI-format messages.
    messages = self._build_messages(request)

    # Base request parameters.
    kwargs: Dict[str, Any] = {
        "model": request.model,
        "messages": messages,
        "temperature": request.temperature,
        "max_tokens": request.max_tokens,
        "stream": request.stream,
    }

    # Provider-specific parameters supplied by the subclass.
    extra_params = self._get_extra_params(request)
    # Split out extra_body: the OpenAI SDK takes it as a separate
    # keyword argument rather than a top-level request field.
    extra_body = None
    if extra_params:
        if "extra_body" in extra_params:
            extra_body = extra_params.pop("extra_body")
        kwargs.update(extra_params)
        logger.info(f" - extra_params: {json.dumps(extra_params, ensure_ascii=False)}")
        if extra_body:
            logger.info(f" - extra_body: {json.dumps(extra_body, ensure_ascii=False)}")

    # MCP tools supplied by the subclass (appended to any search tools).
    mcp_tools = self._get_mcp_tools(request)
    if mcp_tools:
        if "tools" not in kwargs:
            kwargs["tools"] = []
        kwargs["tools"].extend(mcp_tools)
        logger.info(f" - mcp_tools: {len(mcp_tools)} 个工具")

    # Pass extra_body as its own SDK argument.
    if extra_body:
        kwargs["extra_body"] = extra_body

    logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")

    if request.stream:
        return self._stream_chat(client, kwargs)
    else:
        return self._sync_chat(client, kwargs)
def _build_messages(self, request: ChatCompletionRequest) -> List[Dict]:
"""
构建 OpenAI 格式消息
子类可覆盖以处理特殊格式如多模态
"""
messages = []
for msg in request.messages:
role = msg.get("role", "user")
content = msg.get("content", "")
if isinstance(content, str):
if content.strip():
messages.append({"role": role, "content": content})
elif isinstance(content, list):
# 多模态内容
openai_content = []
for item in content:
if isinstance(item, dict):
openai_content.append(item)
if openai_content:
messages.append({"role": role, "content": openai_content})
return messages
def _stream_chat(self, client: OpenAI, kwargs: Dict) -> StreamingResponse:
    """Stream a chat completion back to the caller as Server-Sent Events.

    Relays `reasoning_content` (deep-thinking) deltas and normal content
    deltas as OpenAI-compatible `chat.completion.chunk` events, followed
    by a terminal chunk with finish_reason="stop" and the `[DONE]` sentinel.

    Fix: previously every chunk (including the finish chunk) got a fresh
    `id` and `created` value; the OpenAI streaming format specifies that
    all chunks of one completion share the same id and timestamp, and some
    clients group chunks by id.  They are now generated once per stream.
    """
    logger.info(f"[{self.provider_name}] 开始流式响应...")
    # Debug: dump the final arguments passed to the API.
    logger.info(f"[{self.provider_name}] API 调用参数:")
    for key, value in kwargs.items():
        if key == "messages":
            logger.info(f" - {key}: [{len(value)} 条消息]")
        elif key == "extra_body":
            logger.info(f" - {key}: {json.dumps(value, ensure_ascii=False)}")
        elif key == "tools":
            logger.info(f" - {key}: {json.dumps(value, ensure_ascii=False)}")
        else:
            logger.info(f" - {key}: {value}")

    def generator():
        from utils.helpers import generate_unique_id, get_current_timestamp

        # One id/timestamp for the whole stream (OpenAI chunk format).
        stream_id = f"chatcmpl-{generate_unique_id()}"
        created = get_current_timestamp()

        def chunk_payload(delta: Dict, finish_reason=None) -> Dict:
            # Build one OpenAI-compatible chat.completion.chunk payload.
            return {
                "id": stream_id,
                "object": "chat.completion.chunk",
                "created": created,
                "model": kwargs["model"],
                "choices": [{
                    "index": 0,
                    "delta": delta,
                    "finish_reason": finish_reason,
                }],
            }

        full_content = ""
        full_reasoning = ""
        chunk_count = 0
        resp = client.chat.completions.create(**kwargs)
        for chunk in resp:
            if not chunk.choices:
                continue
            chunk_count += 1
            delta = chunk.choices[0].delta

            # Deep-thinking (reasoning) deltas are forwarded in a
            # dedicated `reasoning_content` field, never mixed with text.
            reasoning_content = getattr(delta, "reasoning_content", None)
            if reasoning_content:
                full_reasoning += reasoning_content
                data = chunk_payload({"reasoning_content": reasoning_content})
                yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
                continue

            # Ordinary assistant text delta.
            content = getattr(delta, "content", None)
            if content:
                full_content += content
                data = chunk_payload({"content": content})
                yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"

        # Terminal chunk followed by the SSE done sentinel.
        finish = chunk_payload({}, finish_reason="stop")
        yield f"data: {json.dumps(finish, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"
        logger.info(f"[{self.provider_name}] 流式响应完成: chunks={chunk_count}, content_len={len(full_content)}")

    return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_chat(self, client: OpenAI, kwargs: Dict) -> JSONResponse:
    """Execute a non-streaming chat completion and return it as JSON.

    Mirrors the OpenAI chat.completion response shape, forwarding
    `reasoning_content` (deep thinking) and token usage when the
    upstream response provides them.
    """
    from utils.helpers import generate_unique_id, get_current_timestamp

    resp = client.chat.completions.create(**kwargs)
    choice = resp.choices[0]
    message = choice.message
    content = message.content or ""

    # Assemble the response message, carrying reasoning text through
    # when the provider returned it.
    payload = {"role": message.role, "content": content}
    reasoning = getattr(message, "reasoning_content", None)
    if reasoning:
        payload["reasoning_content"] = reasoning

    response = {
        "id": f"chatcmpl-{generate_unique_id()}",
        "object": "chat.completion",
        "created": get_current_timestamp(),
        "model": kwargs["model"],
        "choices": [{
            "index": 0,
            "message": payload,
            "finish_reason": choice.finish_reason,
        }],
    }

    # Token accounting is optional on some providers.
    usage = resp.usage
    if usage:
        response["usage"] = {
            "prompt_tokens": usage.prompt_tokens,
            "completion_tokens": usage.completion_tokens,
            "total_tokens": usage.total_tokens,
        }

    logger.info(f"[{self.provider_name}] 响应完成: content_len={len(content)}")
    if reasoning:
        logger.info(f"[{self.provider_name}] reasoning_len={len(reasoning)}")
    return JSONResponse(content=response)

View File

@ -20,6 +20,7 @@ def get_current_user_id(request) -> str:
Returns:
用户 ID 字符串
"""
# TODO: 实现 token 验证逻辑
# 示例:
# auth_header = request.headers.get("Authorization")
# if auth_header and auth_header.startswith("Bearer "):

View File

@ -1,65 +1,103 @@
# ============================================================
# 核心依赖
# ============================================================
openai==2.26.0
fastapi==0.115.4
uvicorn==0.32.0
pydantic==2.12.5
python-dotenv==1.0.1
# ============================================================
# 数据库
# ============================================================
SQLAlchemy==2.0.48
aiosqlite==0.22.1
# ============================================================
# 文件上传
# ============================================================
python-multipart==0.0.18
# ============================================================
# 阿里云 OSS
# ============================================================
alibabacloud-oss-v2==1.2.4
oss2==2.19.1
# ============================================================
# Token 计算
# ============================================================
tiktoken==0.12.0
# ============================================================
# 间接依赖(由上述包自动安装,但显式声明版本)
# ============================================================
starlette==0.41.3
httpx==0.28.1
httpcore==1.0.9
h11==0.16.0
anyio==4.12.1
sniffio==1.3.1
certifi==2026.2.25
idna==3.11
charset-normalizer==3.4.4
urllib3==2.6.3
requests==2.32.5
jiter==0.13.0
distro==1.9.0
pydantic_core==2.41.5
annotated-types==0.7.0
typing_extensions==4.15.0
typing-inspect==0.9.0
tenacity==9.1.4
# ============================================================
# 异步/网络
# ============================================================
aiohttp==3.13.3
aiofiles==24.1.0
# ============================================================
# 其他工具
# ============================================================
aiohappyeyeballs==2.6.1
aiohttp==3.13.3
aiosignal==1.4.0
aiosqlite==0.22.1
alibabacloud-oss-v2==1.2.4
aliyun-python-sdk-core==2.16.0
aliyun-python-sdk-kms==2.16.5
annotated-types==0.7.0
anyio==4.12.1
argcomplete==3.6.3
attrs==25.4.0
banks==2.4.1
black==26.1.0
cachetools==7.0.2
certifi==2026.2.25
cffi==2.0.0
charset-normalizer==3.4.4
click==8.3.1
colorama==0.4.6
colorlog==6.10.1
crcmod==1.7
crcmod-plus==2.3.1
cryptography==46.0.5
dashscope==1.20.12
dataclasses-json==0.6.7
dependency-groups==1.3.1
Deprecated==1.3.1
dirtyjson==1.0.8
distlib==0.4.0
distro==1.9.0
fastapi==0.115.4
filelock==3.25.0
filetype==1.2.0
frozenlist==1.8.0
fsspec==2026.2.0
greenlet==3.3.2
griffe==2.0.0
griffecli==2.0.0
griffelib==2.0.0
h11==0.16.0
httpcore==1.0.9
httpx==0.28.1
humanize==4.15.0
idna==3.11
isort==8.0.1
Jinja2==3.1.6
jiter==0.13.0
jmespath==0.10.0
joblib==1.5.3
llama-index-core==0.14.15
llama-index-instrumentation==0.4.2
llama-index-readers-dashscope==0.4.1
llama-index-workflows==2.15.0
MarkupSafe==3.0.3
marshmallow==3.26.2
multidict==6.7.1
mypy_extensions==1.1.0
nest-asyncio==1.6.0
networkx==3.6.1
nltk==3.9.3
nox==2026.2.9
numpy==2.4.2
openai==2.26.0
oss2==2.19.1
packaging==26.0
pathspec==1.0.4
pillow==12.1.1
platformdirs==4.9.2
propcache==0.4.1
pycparser==3.0
pycryptodome==3.23.0
pydantic==2.12.5
pydantic_core==2.41.5
PyJWT==2.11.0
python-discovery==1.1.0
python-dotenv==1.0.1
python-multipart==0.0.18
pytokens==0.4.1
PyYAML==6.0.3
pillow==12.1.1
regex==2026.2.28
requests==2.32.5
retrying==1.4.2
setuptools==82.0.0
six==1.17.0
sniffio==1.3.1
SQLAlchemy==2.0.48
starlette==0.41.3
tenacity==9.1.4
tiktoken==0.12.0
tinytag==2.2.0
tqdm==4.67.3
typing-inspect==0.9.0
typing-inspection==0.4.2
typing_extensions==4.15.0
urllib3==2.6.3
uvicorn==0.32.0
virtualenv==21.1.0
websocket-client==1.9.0
wrapt==2.1.1
yarl==1.23.0
# zai-sdk==0.2.2
zhipuai==2.1.5.20250825

View File

@ -61,6 +61,7 @@ def _generate_object_key(filename: str, prefix: str = "chat-ui") -> str:
根据文件名生成唯一的 OSS 对象 Key
格式: {prefix}/{日期}/{uuid}_{原始文件名}
"""
# TODO: 需要按用户ID分目录
date_str = datetime.now().strftime("%Y%m%d")
unique_id = uuid.uuid4().hex[:8]
safe_name = Path(filename).name # 只取文件名,去掉路径

View File

@ -1,28 +1,24 @@
<template>
<n-config-provider>
<n-message-provider>
<div class="app" :class="{ dark: isDark }">
<router-view />
<div class="app" :class="{ dark: isDark }">
<router-view />
<!-- Toast 通知 -->
<Teleport to="body">
<TransitionGroup name="toast" tag="div" class="toast-container">
<div
v-for="toast in toasts"
:key="toast.id"
class="toast"
:class="toast.type"
>
<Check v-if="toast.type === 'success'" :size="18" />
<AlertCircle v-else-if="toast.type === 'error'" :size="18" />
<Info v-else :size="18" />
<span>{{ toast.message }}</span>
</div>
</TransitionGroup>
</Teleport>
</div>
</n-message-provider>
</n-config-provider>
<!-- Toast 通知 -->
<Teleport to="body">
<TransitionGroup name="toast" tag="div" class="toast-container">
<div
v-for="toast in toasts"
:key="toast.id"
class="toast"
:class="toast.type"
>
<Check v-if="toast.type === 'success'" :size="18" />
<AlertCircle v-else-if="toast.type === 'error'" :size="18" />
<Info v-else :size="18" />
<span>{{ toast.message }}</span>
</div>
</TransitionGroup>
</Teleport>
</div>
</template>
<script setup lang="ts">
@ -30,7 +26,6 @@ import { ref, computed } from "vue";
import { storeToRefs } from "pinia";
import { useSettingsStore } from "@/stores/settings";
import { Check, AlertCircle, Info } from "@/components/icons";
import { NConfigProvider, NMessageProvider } from "naive-ui";
const settingsStore = useSettingsStore();
const { settings } = storeToRefs(settingsStore);
@ -72,7 +67,7 @@ window.$toast = showToast;
<style lang="scss">
.app {
display: flex;
width: 100%;
width: 100vw;
height: 100vh;
overflow: hidden;
background: #f5f5f5;
@ -152,4 +147,4 @@ window.$toast = showToast;
.toast-move {
transition: transform 0.3s ease;
}
</style>
</style>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 21 KiB

View File

@ -2,12 +2,13 @@
<header class="chat-header">
<!-- 左侧侧边栏切换和标题 -->
<div class="header-left">
<button class="toggle-sidebar-btn" title="切换侧边栏 (Ctrl+B)" @click="$emit('toggle-sidebar')">
<!-- TODO: 侧边栏图标 -->
<SidebarExpandIcon v-if="showSidebarToggle" />
<SidebarCollapseIcon v-else />
<button
class="toggle-sidebar-btn"
title="切换侧边栏 (Ctrl+B)"
@click="$emit('toggle-sidebar')"
>
<Menu v-if="showSidebarToggle" :size="20" />
<ChevronLeft v-else :size="18" />
</button>
<!-- <div class="conversation-info">
@ -31,14 +32,14 @@
</button> -->
<!-- 清空对话 -->
<!-- <button
<button
class="header-btn"
title="清空对话"
:disabled="messageCount === 0"
@click="handleClear"
>
<Trash2 :size="18" />
</button> -->
</button>
<!-- 导出对话 -->
<!-- <button
@ -80,31 +81,22 @@
</button>
</div>
</Transition>
<div class="learning-mode-toggle">
<span class="learning-mode-label">学习模式</span>
<FormSwitch
:model-value="settings.learningModeEnabled"
@update:model-value="settingsStore.setLearningModeEnabled($event)"
/>
</div>
</div>
</header>
</template>
<script setup lang="ts">
import { ref } from "vue";
import { storeToRefs } from "pinia";
import {
Menu,
Trash2,
ChevronLeft,
ExternalLink,
Pin,
Archive,
Settings,
} from "@/components/icons";
import SidebarExpandIcon from "@/components/icons/custom/SidebarExpandIcon.vue";
import SidebarCollapseIcon from "@/components/icons/custom/SidebarCollapseIcon.vue";
import { useSettingsStore } from "@/stores/settings.ts";
import FormSwitch from "@/components/ui/FormSwitch.vue";
const props = withDefaults(
defineProps<{
@ -137,7 +129,12 @@ const emit = defineEmits<{
const showMoreMenu = ref(false);
const settingsStore = useSettingsStore();
const { settings } = storeToRefs(settingsStore);
function handleClear() {
if (confirm("确定要清空当前对话吗?此操作不可恢复。")) {
emit("clear");
}
}
function handleShare() {
showMoreMenu.value = false;
@ -247,24 +244,6 @@ if (typeof window !== "undefined") {
position: relative;
}
.learning-mode-toggle {
display: flex;
align-items: center;
gap: 10px;
margin-left: 8px;
}
.learning-mode-label {
font-size: 14px;
color: #6b7280;
user-select: none;
white-space: nowrap;
.dark & {
color: #9ca3af;
}
}
.header-btn {
display: flex;
align-items: center;

View File

@ -28,6 +28,13 @@
<!-- 输入区域 -->
<div class="input-wrapper">
<!-- 附件预览区 -->
<div v-if="hasAttachments" class="attachments-preview-container">
<AttachmentPreview
:attachments="currentAttachments"
@remove="handleRemoveAttachment"
/>
</div>
<div class="input-container" :class="{ wide: isWideMode }">
<ChatInput
ref="chatInputRef"
@ -56,6 +63,7 @@ import { useAuthStore } from "@/stores/auth";
import ChatHeader from "./ChatHeader.vue";
import MessageList from "./MessageList.vue";
import ChatInput from "@/components/input/ChatInput.vue";
import AttachmentPreview from "@/components/input/AttachmentPreview.vue";
import { MessageType, MessageRole } from "@/types/chat";
import type { Attachment, Suggestion } from "@/types/chat";
import { chatApi, type ModelInfo } from "@/services/api";
@ -119,6 +127,14 @@ const inputPlaceholder = computed(() => {
return "输入你的问题,按 Ctrl+Enter 发送";
});
//
const currentAttachments = computed(() => chatInputRef.value?.attachments || []);
const hasAttachments = computed(() => currentAttachments.value.length > 0);
function handleRemoveAttachment(id: string) {
chatInputRef.value?.removeAttachment(id);
}
function toggleWideMode() {
isWideMode.value = !isWideMode.value;
}
@ -216,10 +232,7 @@ async function handleSend(
}
// 使使
const systemPrompt =
options?.systemPrompt ||
currentConversation.value?.settings?.systemPrompt ||
settings.value.defaultSystemPrompt;
const systemPrompt = options?.systemPrompt || currentConversation.value?.settings?.systemPrompt;
//
const existingMessages = currentConversation.value?.messages || [];
@ -286,7 +299,7 @@ async function handleSend(
deepSearch: options?.deepSearch,
webSearch: options?.webSearch,
deepThinking: options?.deepThinking,
systemPrompt,
systemPrompt: options?.systemPrompt,
},
abortController.value.signal,
);
@ -407,9 +420,6 @@ async function handleRetry(messageId: string) {
currentStreamingMessageId.value = messageId;
chatStore.startStreaming();
abortController.value = new AbortController();
const systemPrompt =
currentConversation.value?.settings?.systemPrompt ||
settings.value.defaultSystemPrompt;
try {
const stream = chatApi.streamChat(
@ -419,7 +429,6 @@ async function handleRetry(messageId: string) {
model: settings.value.defaultModel,
stream: true,
history: priorMessages,
systemPrompt,
},
abortController.value.signal,
);
@ -516,7 +525,6 @@ watch(
.chat-main {
display: flex;
flex-direction: column;
min-width: 900px;
flex: 1;
height: 100vh;
background: #ffffff;
@ -537,7 +545,7 @@ watch(
.input-wrapper {
flex-shrink: 0;
padding: 16px 0px 24px;
padding: 16px 10% 24px;
background: linear-gradient(to top, white 80%, transparent);
.dark & {
@ -545,8 +553,19 @@ watch(
}
}
.attachments-preview-container {
margin-bottom: 12px;
background: #f3f4f5;
border-radius: 16px;
overflow: hidden;
.dark & {
background: #1e1e2e;
}
}
.input-container {
margin: 0 22%;
width: 100%;
// min-width: 1000px;
// margin: 0 auto;
transition: max-width 0.3s ease;

View File

@ -13,7 +13,11 @@
<button class="action-btn cancel" @click="handleCancelSelect">
取消
</button>
<button class="action-btn confirm" :disabled="selectedMessageCount === 0" @click="handleConfirmShare">
<button
class="action-btn confirm"
:disabled="selectedMessageCount === 0"
@click="handleConfirmShare"
>
确认分享
</button>
</div>
@ -22,21 +26,36 @@
<div ref="containerRef" class="message-list" @scroll="handleScroll">
<!-- 欢迎界面 -->
<WelcomeScreen v-if="visibleMessages.length === 0" @select="$emit('select-suggestion', $event)" />
<WelcomeScreen
v-if="visibleMessages.length === 0"
@select="$emit('select-suggestion', $event)"
/>
<!-- 消息列表 -->
<template v-else>
<div class="messages-wrapper">
<TransitionGroup name="message">
<MessageBubble v-for="(message, index) in visibleMessages" :key="message.id" :message="message"
:show-timestamp="showTimestamp" :compact="compact" :is-New="index === visibleMessages.length - 1"
:is-message-select-mode="isMessageSelectMode" :is-selected="isMessageSelected(message.id)"
@retry="$emit('retry', message.id)" @regenerate="$emit('regenerate', message.id)"
@copy="handleCopy(message)" @like="handleLike(message)" @dislike="handleDislike(message)"
@select-suggestion="$emit('select-suggestion', $event)" @preview-image="handlePreviewImage"
@play-video="handlePlayVideo" @download-file="handleDownloadFile"
<MessageBubble
v-for="(message, index) in visibleMessages"
:key="message.id"
:message="message"
:show-timestamp="showTimestamp"
:compact="compact"
:is-New="index === visibleMessages.length - 1"
:is-message-select-mode="isMessageSelectMode"
:is-selected="isMessageSelected(message.id)"
@retry="$emit('retry', message.id)"
@regenerate="$emit('regenerate', message.id)"
@copy="handleCopy(message)"
@like="handleLike(message)"
@dislike="handleDislike(message)"
@select-suggestion="$emit('select-suggestion', $event)"
@preview-image="handlePreviewImage"
@play-video="handlePlayVideo"
@download-file="handleDownloadFile"
@toggle-select="handleToggleMessageSelect(message.id)"
@enter-select-mode="handleEnterSelectMode(message.id)" />
@enter-select-mode="handleEnterSelectMode(message.id)"
/>
</TransitionGroup>
<!-- 正在输入指示器 -->
@ -56,7 +75,11 @@
</div>
<!-- 回到底部按钮 -->
<Transition name="fade">
<button v-if="showScrollButton" class="scroll-bottom-btn" @click="handleScrollToBottom">
<button
v-if="showScrollButton"
class="scroll-bottom-btn"
@click="handleScrollToBottom"
>
<ChevronDown :size="20" />
<span v-if="newMessageCount > 0" class="new-count">
{{ newMessageCount }}
@ -336,7 +359,6 @@ onMounted(() => {
&:nth-child(1) {
animation-delay: -0.32s;
}
&:nth-child(2) {
animation-delay: -0.16s;
}
@ -344,7 +366,7 @@ onMounted(() => {
}
.typing-text {
font-size: 14px;
font-size: 13px;
color: #9ca3af;
}
@ -429,7 +451,6 @@ onMounted(() => {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
@ -437,14 +458,12 @@ onMounted(() => {
}
@keyframes typingBounce {
0%,
80%,
100% {
transform: scale(0.7);
opacity: 0.5;
}
40% {
transform: scale(1);
opacity: 1;
@ -493,7 +512,7 @@ onMounted(() => {
padding: 8px 16px;
border: none;
border-radius: 8px;
font-size: 14px;
font-size: 13px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;

View File

@ -6,34 +6,39 @@
</div>
<!-- 功能卡片 -->
<!-- <div class="feature-cards">
<div v-for="feature in features" :key="feature.title" class="feature-card">
<div class="feature-cards">
<div
v-for="feature in features"
:key="feature.title"
class="feature-card"
>
<div class="feature-icon" :style="{ background: feature.gradient }">
<component :is="feature.icon" :size="22" />
</div>
<h3>{{ feature.title }}</h3>
<p>{{ feature.description }}</p>
</div>
</div> -->
</div>
<!-- 快速开始建议 -->
<div class="quick-start">
<n-divider title-placement="center">
试试这些问题
</n-divider>
<h4>试试这些问题</h4>
<div class="suggestions-grid">
<button v-for="suggestion in suggestions" :key="suggestion.text" class="suggestion-card"
@click="$emit('select', { id: suggestion.id, text: suggestion.text, systemPrompt: suggestion.systemPrompt })">
<button
v-for="suggestion in suggestions"
:key="suggestion.text"
class="suggestion-card"
@click="$emit('select', { id: suggestion.id, text: suggestion.text, systemPrompt: suggestion.systemPrompt })"
>
<component :is="suggestion.iconComponent" :size="18" class="suggestion-icon" />
<span>{{ suggestion.text }}</span>
<!-- <ChevronRight :size="16" class="arrow-icon" /> -->
<ChevronRight :size="16" class="arrow-icon" />
</button>
</div>
</div>
<!-- 底部提示 -->
<!-- <div class="welcome-footer">
<div class="welcome-footer">
<div class="tip">
<Keyboard :size="14" />
<span> <kbd>Ctrl</kbd> + <kbd>/</kbd> 聚焦输入框</span>
@ -42,72 +47,69 @@
<Zap :size="14" />
<span>支持 Markdown代码高亮LaTeX 公式</span>
</div>
</div> -->
</div>
</div>
</template>
<script setup lang="ts">
import { computed } from "vue";
import {
MessageSquare,
Code,
Image,
FileText,
ChevronRight,
Keyboard,
Zap,
Globe,
Lightbulb,
PenTool,
} from "@/components/icons";
import promptData from "@/assets/prompt.json";
import type { Suggestion } from "@/types/chat";
import StudyIcon from "../icons/custom/StudyIcon.vue";
import CodeIcon from "../icons/custom/CodeIcon.vue";
import WritingIcon from "../icons/custom/WritingIcon.vue";
import GuideIcon from "../icons/custom/GuideIcon.vue";
import ThesisIcon from "../icons/custom/ThesisIcon.vue";
import ChatIcon from "../icons/custom/ChatIcon.vue";
import { NDivider } from "naive-ui";
defineEmits<{
select: [suggestion: Suggestion];
}>();
// const features = computed(() => [
// {
// icon: MessageSquare,
// title: "",
// description: "",
// gradient: "linear-gradient(135deg, #3b82f6 0%, #2563eb 100%)",
// },
// {
// icon: Code,
// title: "",
// description: "",
// gradient: "linear-gradient(135deg, #8b5cf6 0%, #6366f1 100%)",
// },
// {
// icon: Image,
// title: "",
// description: "",
// gradient: "linear-gradient(135deg, #ec4899 0%, #d946ef 100%)",
// },
// {
// icon: FileText,
// title: "",
// description: "",
// gradient: "linear-gradient(135deg, #f59e0b 0%, #f97316 100%)",
// },
// ]);
const features = computed(() => [
{
icon: MessageSquare,
title: "智能对话",
description: "自然流畅的对话体验,理解上下文",
gradient: "linear-gradient(135deg, #3b82f6 0%, #2563eb 100%)",
},
{
icon: Code,
title: "代码助手",
description: "编写、解释、优化各种编程语言代码",
gradient: "linear-gradient(135deg, #8b5cf6 0%, #6366f1 100%)",
},
{
icon: Image,
title: "图像理解",
description: "分析图片内容,提取关键信息",
gradient: "linear-gradient(135deg, #ec4899 0%, #d946ef 100%)",
},
{
icon: FileText,
title: "文档处理",
description: "阅读、总结、翻译各类文档",
gradient: "linear-gradient(135deg, #f59e0b 0%, #f97316 100%)",
},
]);
//
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const iconMap: Record<string, any> = {
学习: StudyIcon,
调试: CodeIcon,
写作: WritingIcon,
语言: ChatIcon,
职业: GuideIcon,
学术: ThesisIcon,
学习: Lightbulb,
调试: Code,
写作: PenTool,
语言: Globe,
职业: Globe,
学术: FileText,
};
const excludedSuggestionTexts = new Set([
"让可学 AI 成为我的全科学习导师?",
"让 AI 扮演一位严谨的学术论文写作导师?",
]);
const suggestions = computed(() => {
// icon
type SuggestionWithIcon = Suggestion & { iconComponent: typeof Code };
@ -117,8 +119,6 @@ const suggestions = computed(() => {
// prompt.json
for (const category of Object.values(promptData)) {
for (const [text, systemPrompt] of Object.entries(category)) {
if (excludedSuggestionTexts.has(text)) continue;
//
let iconComponent = Code; //
for (const [keyword, icon] of Object.entries(iconMap)) {
@ -145,9 +145,9 @@ const suggestions = computed(() => {
display: flex;
flex-direction: column;
align-items: center;
justify-content: space-between;
min-height: 70%;
padding: 25px 22%;
justify-content: center;
min-height: 100%;
padding: 40px 24px;
animation: fadeIn 0.5s ease;
}
@ -177,20 +177,19 @@ const suggestions = computed(() => {
.logo-glow {
position: absolute;
inset: -20px;
background: radial-gradient(circle,
rgba(59, 130, 246, 0.2) 0%,
transparent 70%);
background: radial-gradient(
circle,
rgba(59, 130, 246, 0.2) 0%,
transparent 70%
);
pointer-events: none;
}
.title {
color: #333;
text-align: center;
font-family: "Microsoft YaHei";
font-size: 35px;
font-style: normal;
margin: 0 0 12px;
font-size: 32px;
font-weight: 700;
line-height: normal;
color: #1f2937;
.dark & {
color: #f3f4f6;
@ -270,7 +269,7 @@ const suggestions = computed(() => {
p {
margin: 0;
font-size: 14px;
font-size: 13px;
color: #6b7280;
line-height: 1.5;
@ -281,7 +280,7 @@ const suggestions = computed(() => {
}
.quick-start {
max-width: 650px;
max-width: 710px;
width: 100%;
margin-bottom: 40px;
@ -301,7 +300,7 @@ const suggestions = computed(() => {
.suggestions-grid {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 10px;
gap: 12px;
@media (max-width: 600px) {
grid-template-columns: 1fr;
@ -312,11 +311,12 @@ const suggestions = computed(() => {
display: flex;
align-items: center;
gap: 12px;
padding: 12px 20px;
background: #F8F9FA;
border: 1px solid transparent;
border-radius: 15px;
font-size: 12px;
padding: 16px 20px;
background: white;
border: 1px solid #e2e8f0;
border-radius: 14px;
color: #374151;
font-size: 14px;
text-align: left;
cursor: pointer;
transition: all 0.2s ease;
@ -326,12 +326,14 @@ const suggestions = computed(() => {
border-color: #2d2d3d;
color: #e5e7eb;
}
// TODO:
&:hover {
background: #E9EAEB;
border-color: #3b82f6;
background: rgba(59, 130, 246, 0.05);
.arrow-icon {
transform: translateX(4px);
color: #3b82f6;
}
}
@ -363,7 +365,7 @@ const suggestions = computed(() => {
display: flex;
align-items: center;
gap: 8px;
font-size: 14px;
font-size: 13px;
color: #9ca3af;
kbd {
@ -383,7 +385,6 @@ const suggestions = computed(() => {
opacity: 0;
transform: translateY(20px);
}
to {
opacity: 1;
transform: translateY(0);

View File

@ -1,23 +0,0 @@
<template>
  <!-- 16x16 globe glyph; rendered size follows the `size` prop. -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <g clip-path="url(#clip0_56_267)">
      <path d="M14.2902 8.35H10.7972C10.7468 10.863 10.1672 13.0267 9.3321 14.1593C10.6804 13.867 11.8955 13.1404 12.7911 12.091C13.6867 11.0417 14.2134 9.72746 14.2902 8.35ZM14.2902 7.65C14.2134 6.27254 13.6867 4.95833 12.7911 3.90896C11.8955 2.85959 10.6804 2.13302 9.3321 1.8407C10.1679 2.9733 10.7468 5.1377 10.7965 7.65H14.2902ZM1.7098 7.65H5.2028C5.2532 5.137 5.8328 2.9733 6.6679 1.8407C5.31962 2.13302 4.10448 2.85959 3.20887 3.90896C2.31325 4.95833 1.78664 6.27254 1.7098 7.65ZM1.7098 8.35C1.78664 9.72746 2.31325 11.0417 3.20887 12.091C4.10448 13.1404 5.31962 13.867 6.6679 14.1593C5.8321 13.0267 5.2532 10.8623 5.2035 8.35H1.7098ZM10.0958 8.35H5.9042C5.9742 11.6946 7.0487 14.3 8 14.3C8.952 14.3 10.0258 11.6946 10.0965 8.35H10.0958ZM10.0958 7.65C10.0265 4.3054 8.952 1.7 8 1.7C7.048 1.7 5.9742 4.3054 5.9035 7.65H10.0965H10.0958ZM8 15C4.1339 15 1 11.8661 1 8C1 4.1339 4.1339 1 8 1C11.8661 1 15 4.1339 15 8C15 11.8661 11.8661 15 8 15Z" fill="#333333" stroke="#333333" stroke-width="0.2"/>
    </g>
    <defs>
      <clipPath id="clip0_56_267">
        <rect width="16" height="16" fill="white"/>
      </clipPath>
    </defs>
  </svg>
</template>
<script setup lang="ts">
// Fix: the `size` prop was declared but never used — the svg hardcoded
// width/height 16, so callers passing `:size` had no effect.  It is now
// bound to the svg's width/height (viewBox stays 16x16 so the drawing scales).
withDefaults(
  defineProps<{
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,16 +0,0 @@
<template>
  <!-- 16x16 code (angle brackets + slash) glyph; rendered size follows the `size` prop. -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <path d="M4.76252 4.16667C4.54848 3.94444 4.22741 3.94444 4.01336 4.16667L0.160534 8.05556C-0.0535115 8.27778 -0.0535115 8.61111 0.160534 8.83333L3.90634 12.7222C4.12038 12.9444 4.44145 12.9444 4.6555 12.7222C4.86954 12.5 4.86954 12.1667 4.6555 11.9444L1.33779 8.5L4.76252 4.94444C4.86954 4.72222 4.86954 4.38889 4.76252 4.16667ZM8.61535 4.16667C8.4013 4.16667 8.08023 4.27778 7.97321 4.61111L6.0468 12.1667C5.93977 12.5 6.15382 12.7222 6.47489 12.8333C6.79596 12.9444 7.01 12.7222 7.11703 12.3889L9.04344 4.83333C9.15046 4.61111 8.93642 4.27778 8.61535 4.16667ZM14.9297 8.05556L11.1839 4.16667C10.9699 3.94444 10.6488 3.94444 10.4347 4.16667C10.2207 4.38889 10.2207 4.72222 10.4347 4.94444L13.7524 8.5L10.3277 12.0556C10.1137 12.2778 10.1137 12.6111 10.3277 12.8333C10.5418 13.0556 10.8628 13.0556 11.0769 12.8333L14.8227 8.94444C15.0367 8.72222 15.0367 8.27778 14.9297 8.05556Z" fill="#333333"/>
  </svg>
</template>
<script setup lang="ts">
// Fix: the `size` prop was declared but never used — the svg hardcoded
// width/height 16, so callers passing `:size` had no effect.  It is now
// bound to the svg's width/height (viewBox stays 16x16 so the drawing scales).
withDefaults(
  defineProps<{
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,16 +0,0 @@
<template>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M11.208 1.5L8.54883 7.08496L8.20801 7.7998H9.57324L4.79883 13.3125L5.70605 8.08496L5.80762 7.5H4.60449L5.74707 1.5H11.208Z" stroke="currentColor" />
</svg>
</template>
<script setup lang="ts">
withDefaults(
defineProps<{
size?: number;
}>(),
{
size: 18,
},
);
</script>

View File

@ -1,21 +0,0 @@
<template>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<circle cx="8" cy="8" r="6.5" stroke="currentColor" />
<path d="M12.5 3.36212C11.2832 4.38433 9.71352 5.00001 8 5.00001C6.28648 5.00001 4.71677 4.38433 3.5 3.36212" stroke="currentColor" />
<path d="M12.5 12.6379C11.2832 11.6157 9.71352 11 8 11C6.28648 11 4.71677 11.6157 3.5 12.6379" stroke="currentColor" />
<path d="M8 1.5C8.675 1.5 9.42059 2.06016 10.0273 3.27344C10.6192 4.45713 11 6.12779 11 8C11 9.87221 10.6192 11.5429 10.0273 12.7266C9.42059 13.9398 8.675 14.5 8 14.5C7.325 14.5 6.57941 13.9398 5.97266 12.7266C5.38081 11.5429 5 9.87221 5 8C5 6.12779 5.38081 4.45713 5.97266 3.27344C6.57941 2.06016 7.325 1.5 8 1.5Z" stroke="currentColor" />
<path d="M1.5 8L14.5 8" stroke="currentColor" />
<path d="M8 14L8 2" stroke="currentColor" />
</svg>
</template>
<script setup lang="ts">
withDefaults(
defineProps<{
size?: number;
}>(),
{
size: 18,
},
);
</script>

View File

@ -1,17 +0,0 @@
<template>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M1.6001 3.19999H4.0001M14.4001 3.19999H12.0001M12.0001 3.19999V2.79999C12.0001 1.69542 11.1047 0.799988 10.0001 0.799988H6.0001C4.89553 0.799988 4.0001 1.69542 4.0001 2.79999V3.19999M12.0001 3.19999H4.0001" stroke="currentColor" stroke-linecap="round" />
<path d="M3.19995 5.59998V12.4C3.19995 13.5045 4.09538 14.4 5.19995 14.4H10.8C11.9045 14.4 12.8 13.5045 12.8 12.4V5.59998M6.39995 6.39998V12.8M9.59995 6.39998V12.8" stroke="currentColor" stroke-linecap="round" />
</svg>
</template>
<script setup lang="ts">
withDefaults(
defineProps<{
size?: number;
}>(),
{
size: 18,
},
);
</script>

View File

@ -1,17 +0,0 @@
<template>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M11.7854 6.40509L3.71021 14.4783H1.51099V12.2791L9.58423 4.20392L11.7854 6.40509ZM12.3801 1.52032C12.4497 1.52043 12.5204 1.54923 12.5735 1.60236L14.387 3.41486C14.4383 3.46624 14.467 3.53597 14.467 3.60822C14.467 3.66397 14.4509 3.71704 14.4211 3.76154L14.387 3.80255L13.3225 4.867L11.1213 2.66583L12.1858 1.60236L12.1887 1.6004C12.2399 1.54872 12.3071 1.52032 12.3801 1.52032Z" stroke="currentColor" stroke-linejoin="round" />
<path d="M6 14.5H14.5" stroke="currentColor" stroke-linecap="round" />
</svg>
</template>
<script setup lang="ts">
withDefaults(
defineProps<{
size?: number;
}>(),
{
size: 18,
},
);
</script>

View File

@ -1,16 +0,0 @@
<template>
  <!-- Ringed-planet icon. Fix: bind the previously-unused `size` prop so the
       icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <path d="M6.92074 6.24724C6.1667 5.75404 5.09746 6.01283 4.5325 6.82521C3.9675 7.63761 4.12076 8.69595 4.87478 9.18891C5.62882 9.68211 6.69803 9.4231 7.26299 8.61094C7.82794 7.79851 7.67474 6.74 6.92076 6.24724H6.92074ZM6.65622 8.21403C6.31722 8.70148 5.70288 8.87435 5.28391 8.60053C4.86504 8.3267 4.80026 7.70955 5.13926 7.2221C5.47823 6.73469 6.09265 6.56157 6.51152 6.8354C6.93041 7.10946 6.99519 7.72659 6.65622 8.21403ZM15.9786 7.01976C15.8075 6.31794 14.6329 5.92201 12.906 5.85822C12.6518 5.40916 12.3351 4.99803 11.9669 4.63632C11.9671 4.6361 11.9674 4.63588 11.9677 4.63568C11.9528 4.61806 11.936 4.6026 11.9178 4.58868C10.8933 3.60657 9.48509 3 7.93064 3C4.86828 3 2.37312 5.35359 2.26296 8.29706L2.2626 8.29632C0.274154 9.21706 -0.130379 10.0066 0.0325382 10.6751C0.214203 11.4204 1.52517 11.8222 3.42953 11.8463C4.46632 13.1556 6.09668 14 7.93066 14C10.6995 14 13.0045 12.0759 13.5025 9.53162C15.1816 8.68608 16.1595 7.76201 15.9786 7.01976H15.9786ZM11.4376 5.27741C12.0731 5.92742 12.5185 6.75387 12.6873 7.67319C11.878 7.48883 11.2768 6.80172 11.2768 5.98228C11.2768 5.73029 11.3345 5.49214 11.4376 5.27743V5.27741ZM0.998344 10.3334C0.919114 10.0083 1.55274 9.70124 2.31897 9.30181C2.40924 9.90092 2.59893 10.4687 2.8716 10.9888C1.78816 10.9357 1.09133 10.7147 0.998375 10.3334H0.998344ZM7.93067 13.1852C7.74953 13.1852 7.57083 13.1752 7.39486 13.1564C7.48472 12.8522 7.6351 12.7247 8.1408 12.7247C8.46709 12.7247 8.7978 12.858 9.04986 13.0586C8.69061 13.1412 8.31596 13.1852 7.93067 13.1852ZM9.79456 12.8236C9.43627 12.2934 8.71219 11.9628 8.1408 11.9628C7.37193 11.9628 6.68161 12.2818 6.4956 12.975C5.74273 12.7483 5.06889 12.3476 4.52365 11.8217C5.74519 11.7552 7.13415 11.559 8.58553 11.2264C10.0463 10.8915 11.3867 10.4604 12.5142 9.98588C12.072 11.2691 11.0726 12.305 9.7946 12.8236L9.79456 12.8236ZM12.7389 8.96319C11.6621 9.46339 10.2302 9.95074 8.56092 10.3334C6.79961 10.737 5.14847 10.9548 3.84027 10.9947C3.37075 10.2723 3.09902 9.41698 3.09902 8.50005C3.09902 5.91254 5.26225 3.81493 7.93064 3.81493C9.07144 3.81493 10.1197 4.19852 10.9463 4.83969C10.736 5.15487 10.6616 5.51834 10.6616 5.98231C10.6616 7.105 11.5613 8.03877 12.7556 8.25274C12.76 8.33466 12.7623 8.41709 12.7623 8.50008C12.7623 8.65638 12.7543 8.81087 12.7389 8.96323L12.7389 8.96319ZM13.6024 8.51584C13.6024 8.51056 13.6026 8.50531 13.6026 8.50004C13.6026 7.86315 13.4905 7.25173 13.2851 6.68285C14.2559 6.74069 14.821 6.94078 14.863 7.07401C14.9856 7.46309 14.5173 7.98118 13.6024 8.51584Z" fill="#333333"/>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,40 +0,0 @@
<template>
  <!-- Loading arc (white gradient stroke). Fix: bind the previously-unused
       `size` prop; the default (14) equals the old hard-coded width/height,
       so default rendering is unchanged. -->
  <svg
    xmlns="http://www.w3.org/2000/svg"
    :width="size"
    :height="size"
    viewBox="0 0 14 14"
    fill="none"
  >
    <path
      d="M7 13C10.3137 13 13 10.3137 13 7C13 3.68629 10.3137 1 7 1C3.68629 1 1 3.68629 1 7"
      stroke="url(#paint0_linear_169_1705)"
      stroke-width="2"
      stroke-linecap="round"
    />
    <defs>
      <linearGradient
        id="paint0_linear_169_1705"
        x1="1"
        y1="7"
        x2="7"
        y2="13"
        gradientUnits="userSpaceOnUse"
      >
        <stop stop-color="white" />
        <stop offset="1" stop-color="white" />
      </linearGradient>
    </defs>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 14,
  },
);
</script>

View File

@ -1,21 +0,0 @@
<template>
  <!-- Speech-bubble/message icon. Fix: bind the previously-unused `size` prop
       so the icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <mask id="path-1-inside-1_67_407" fill="white">
      <path d="M8 1.00391C12.4 1.00391 16 3.83468 16 7.32129C15.9999 10.8079 12.4 13.6387 8 13.6387C7.84133 13.6444 7.68514 13.679 7.54004 13.7412C6.67608 14.0652 5.24423 15.0882 4.24023 15.8545C4.12667 15.9434 3.98598 15.9943 3.83984 16C3.74658 15.9983 3.6546 15.9789 3.56934 15.9424C3.48392 15.9058 3.40674 15.8528 3.34277 15.7871C3.21195 15.6526 3.14013 15.4744 3.14355 15.29C3.14355 14.0031 3.06787 12.3127 2.66797 12.0283C1.02798 10.8711 1.03933e-05 9.18904 0 7.31738C0 3.82692 3.6 1 8 1V1.00391Z"/>
    </mask>
    <path d="M8 1.00391H7V2.00391H8V1.00391ZM16 7.32129L17 7.3213V7.32129H16ZM8 13.6387V12.6387H7.98194L7.96389 12.6393L8 13.6387ZM7.54004 13.7412L7.89118 14.6775L7.91273 14.6695L7.93388 14.6604L7.54004 13.7412ZM4.24023 15.8545L3.63346 15.0595L3.62397 15.0669L4.24023 15.8545ZM3.83984 16L3.82128 16.9998L3.8502 17.0004L3.87911 16.9992L3.83984 16ZM3.56934 15.9424L3.17562 16.8616L3.1761 16.8618L3.56934 15.9424ZM3.34277 15.7871L2.62605 16.4845L2.62626 16.4847L3.34277 15.7871ZM3.14355 15.29L4.14338 15.3086L4.14355 15.2993V15.29H3.14355ZM2.66797 12.0283L3.24754 11.2134L3.2445 11.2112L2.66797 12.0283ZM0 7.31738H-1V7.31739L0 7.31738ZM8 1H9V0H8V1ZM8 1.00391V2.00391C12.082 2.00391 15 4.59522 15 7.32129H16H17C17 3.07414 12.718 0.00390625 8 0.00390625V1.00391ZM16 7.32129L15 7.32127C15 10.0473 12.082 12.6387 8 12.6387V13.6387V14.6387C12.718 14.6387 16.9999 11.5684 17 7.3213L16 7.32129ZM8 13.6387L7.96389 12.6393C7.68397 12.6494 7.40633 12.7106 7.14619 12.822L7.54004 13.7412L7.93388 14.6604C7.96395 14.6475 7.99869 14.6394 8.03611 14.638L8 13.6387ZM7.54004 13.7412L7.1889 12.8049C6.64275 13.0097 5.98828 13.402 5.39017 13.7985C4.77325 14.2074 4.14496 14.6692 3.63351 15.0596L4.24023 15.8545L4.84696 16.6494C5.3395 16.2735 5.92914 15.8407 6.49513 15.4655C7.07993 15.0779 7.57337 14.7967 7.89118 14.6775L7.54004 13.7412ZM4.24023 15.8545L3.62397 15.0669C3.67951 15.0235 3.74176 15.0031 3.80058 15.0008L3.83984 16L3.87911 16.9992C4.23019 16.9854 4.57382 16.8632 4.8565 16.642L4.24023 15.8545ZM3.83984 16L3.85841 15.0002C3.89139 15.0008 3.92687 15.0077 3.96257 15.0229L3.56934 15.9424L3.1761 16.8618C3.38234 16.95 3.60178 16.9958 3.82128 16.9998L3.83984 16ZM3.56934 15.9424L3.96305 15.0231C3.99791 15.0381 4.03065 15.0601 4.05929 15.0895L3.34277 15.7871L2.62626 16.4847C2.78284 16.6455 2.96994 16.7735 3.17562 16.8616L3.56934 15.9424ZM3.34277 15.7871L4.0595 15.0898C4.10963 15.1413 4.14505 15.2188 4.14338 15.3086L3.14355 15.29L2.14373 15.2714C2.1352 15.73 2.31426 16.164 2.62605 16.4845L3.34277 15.7871ZM3.14355 15.29H4.14355C4.14355 14.6352 4.12467 13.8471 4.04958 13.1726C4.01239 12.8385 3.9577 12.4962 3.87148 12.1987C3.82828 12.0496 3.76969 11.8851 3.68638 11.7283C3.60749 11.5797 3.47285 11.3736 3.24754 11.2134L2.66797 12.0283L2.0884 12.8432C2.02206 12.7961 1.97973 12.7493 1.95655 12.7201C1.93324 12.6908 1.92216 12.6704 1.92006 12.6664C1.91743 12.6615 1.93036 12.6858 1.95049 12.7553C1.99094 12.8949 2.03014 13.1089 2.06186 13.3939C2.12459 13.9574 2.14355 14.658 2.14355 15.29H3.14355ZM2.66797 12.0283L3.2445 11.2112C1.8111 10.1998 1.00001 8.79804 1 7.31738L0 7.31738L-1 7.31739C-0.999987 9.58004 0.244856 11.5424 2.09143 12.8454L2.66797 12.0283ZM0 7.31738H1C1 4.5878 3.91754 2 8 2V1V0C3.28246 0 -1 3.06604 -1 7.31738H0ZM8 1H7V1.00391H8H9V1H8Z" fill="#333333" mask="url(#path-1-inside-1_67_407)"/>
    <path d="M5 5.73633H11" stroke="#333333" stroke-linecap="round"/>
    <path d="M6 8.73633H10" stroke="#333333" stroke-linecap="round"/>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,26 +0,0 @@
<template>
  <!-- Pin (outline) icon. Fix: bind the previously-unused `size` prop so the
       icon actually scales (the svg was hard-coded to 16px). -->
  <svg
    xmlns="http://www.w3.org/2000/svg"
    :width="size"
    :height="size"
    viewBox="0 0 16 16"
    fill="none"
  >
    <path
      d="M5.43994 0.5H5.44189L7.99268 0.507812H8.0083L10.5581 0.5H10.5601C10.6453 0.500024 10.7177 0.541283 10.7603 0.600586L10.7925 0.666016C10.8015 0.697293 10.8004 0.716064 10.7954 0.732422C10.7899 0.750047 10.767 0.804834 10.6704 0.886719L10.6411 0.914062L10.0366 1.51758C9.84509 1.70911 9.74857 1.9731 9.76904 2.23926L9.77002 2.24121L10.0864 6.15332V6.1543C10.1201 6.56503 10.2691 6.95616 10.5151 7.28418L10.6265 7.4209L11.5933 8.5127C11.6397 8.56497 11.6702 8.62659 11.6841 8.68945L11.6929 8.75293C11.6937 8.78175 11.6899 8.8027 11.686 8.81641C11.6827 8.82804 11.6786 8.83521 11.6733 8.8418C11.6651 8.85078 11.647 8.8623 11.6216 8.8623H11.6206L8.87158 8.84277H7.12842L4.37939 8.8623H4.37842C4.34895 8.8623 4.33235 8.84936 4.32764 8.84375L4.32666 8.84277C4.32122 8.83606 4.31665 8.82825 4.31299 8.81543C4.30892 8.80101 4.30578 8.77903 4.30615 8.74902C4.30865 8.66986 4.34102 8.58452 4.40479 8.5127H4.40576L5.37256 7.4209C5.68385 7.06918 5.8747 6.62516 5.91162 6.15332L6.229 2.24121V2.23926C6.24675 2.00853 6.1773 1.77409 6.02783 1.58984L5.95947 1.51465L5.34521 0.900391L5.32959 0.886719L5.27002 0.831055C5.22258 0.780136 5.2087 0.745665 5.20459 0.732422C5.19958 0.716074 5.19847 0.697262 5.20752 0.666016C5.23456 0.573903 5.32611 0.500071 5.43994 0.5Z"
      stroke="currentColor"
    />
    <path d="M8 9V15.5" stroke="currentColor" stroke-linecap="round" />
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,24 +0,0 @@
<template>
  <!-- Pin (gray stroke, clipped) icon. Fix: bind the previously-unused `size`
       prop so the icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <g clip-path="url(#clip0_75_633)">
      <path d="M5.43994 0.5H5.44189L7.99268 0.507812H8.0083L10.5581 0.5H10.5601C10.6453 0.500024 10.7177 0.541283 10.7603 0.600586L10.7925 0.666016C10.8015 0.697293 10.8004 0.716064 10.7954 0.732422C10.7899 0.750047 10.767 0.804834 10.6704 0.886719L10.6411 0.914062L10.0366 1.51758C9.84509 1.70911 9.74857 1.9731 9.76904 2.23926L9.77002 2.24121L10.0864 6.15332V6.1543C10.1201 6.56503 10.2691 6.95616 10.5151 7.28418L10.6265 7.4209L11.5933 8.5127C11.6397 8.56497 11.6702 8.62659 11.6841 8.68945L11.6929 8.75293C11.6937 8.78175 11.6899 8.8027 11.686 8.81641C11.6827 8.82804 11.6786 8.83521 11.6733 8.8418C11.6651 8.85078 11.647 8.8623 11.6216 8.8623H11.6206L8.87158 8.84277H7.12842L4.37939 8.8623H4.37842C4.34895 8.8623 4.33235 8.84936 4.32764 8.84375L4.32666 8.84277C4.32122 8.83606 4.31665 8.82825 4.31299 8.81543C4.30892 8.80101 4.30578 8.77903 4.30615 8.74902C4.30865 8.66986 4.34102 8.58452 4.40479 8.5127H4.40576L5.37256 7.4209C5.68385 7.06918 5.8747 6.62516 5.91162 6.15332L6.229 2.24121V2.23926C6.24675 2.00853 6.1773 1.77409 6.02783 1.58984L5.95947 1.51465L5.34521 0.900391L5.32959 0.886719L5.27002 0.831055C5.22258 0.780136 5.2087 0.745665 5.20459 0.732422C5.19958 0.716074 5.19847 0.697262 5.20752 0.666016C5.23456 0.573903 5.32611 0.500071 5.43994 0.5Z" stroke="#666666"/>
      <path d="M8 9V15.5" stroke="#666666" stroke-linecap="round"/>
    </g>
    <defs>
      <clipPath id="clip0_75_633">
        <rect width="16" height="16" fill="white"/>
      </clipPath>
    </defs>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,27 +0,0 @@
<template>
  <!-- Pin icon with diagonal accent stroke. Fix: bind the previously-unused
       `size` prop so the icon actually scales (the svg was hard-coded to 16px). -->
  <svg
    xmlns="http://www.w3.org/2000/svg"
    :width="size"
    :height="size"
    viewBox="0 0 16 16"
    fill="none"
  >
    <path
      d="M5.43994 0.5H5.44189L7.99268 0.507812H8.0083L10.5581 0.5H10.5601C10.6453 0.500024 10.7177 0.541283 10.7603 0.600586L10.7925 0.666016C10.8015 0.697293 10.8004 0.716064 10.7954 0.732422C10.7899 0.750047 10.767 0.804834 10.6704 0.886719L10.6411 0.914062L10.0366 1.51758C9.84509 1.70911 9.74857 1.9731 9.76904 2.23926L9.77002 2.24121L10.0864 6.15332V6.1543C10.1201 6.56503 10.2691 6.95616 10.5151 7.28418L10.6265 7.4209L11.5933 8.5127C11.6397 8.56497 11.6702 8.62659 11.6841 8.68945L11.6929 8.75293C11.6937 8.78175 11.6899 8.8027 11.686 8.81641C11.6827 8.82804 11.6786 8.83521 11.6733 8.8418C11.6651 8.85078 11.647 8.8623 11.6216 8.8623H11.6206L8.87158 8.84277H7.12842L4.37939 8.8623H4.37842C4.34895 8.8623 4.33235 8.84936 4.32764 8.84375L4.32666 8.84277C4.32122 8.83606 4.31665 8.82825 4.31299 8.81543C4.30892 8.80101 4.30578 8.77903 4.30615 8.74902C4.30865 8.66986 4.34102 8.58452 4.40479 8.5127H4.40576L5.37256 7.4209C5.68385 7.06918 5.8747 6.62516 5.91162 6.15332L6.229 2.24121V2.23926C6.24675 2.00853 6.1773 1.77409 6.02783 1.58984L5.95947 1.51465L5.34521 0.900391L5.32959 0.886719L5.27002 0.831055C5.22258 0.780136 5.2087 0.745665 5.20459 0.732422C5.19958 0.716074 5.19847 0.697262 5.20752 0.666016C5.23456 0.573903 5.32611 0.500071 5.43994 0.5Z"
      stroke="currentColor"
    />
    <path d="M8 9V15.5" stroke="currentColor" stroke-linecap="round" />
    <path d="M12 4L8 8L4 12" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" />
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,17 +0,0 @@
<template>
  <!-- Plus/cross icon (11x11 canvas).
       NOTE(review): the `size` prop below is declared but never bound, and its
       default (18) does not match the hard-coded 11px dimensions — confirm the
       intended rendered size before wiring it up. -->
  <svg xmlns="http://www.w3.org/2000/svg" width="11" height="11" viewBox="0 0 11 11" fill="none">
    <path d="M0.5 5.49469H10.5" stroke="#999999" stroke-linecap="round" />
    <path d="M5.49512 10.5L5.49512 0.5" stroke="#999999" stroke-linecap="round" />
  </svg>
</template>
<script setup lang="ts">
// Optional `size` prop (default 18); currently unused by the template above.
withDefaults(
  defineProps<{
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,16 +0,0 @@
<template>
  <!-- Upward arrow icon (non-square 12x14 canvas, white stroke).
       NOTE(review): the `size` prop below is declared but never bound; a square
       `size` binding would distort this 12x14 glyph — confirm intent. -->
  <svg xmlns="http://www.w3.org/2000/svg" width="12" height="14" viewBox="0 0 12 14" fill="none">
    <path d="M6 13L6 1M6 1L1 6M6 1L11 6" stroke="white" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
  </svg>
</template>
<script setup lang="ts">
// Optional `size` prop (default 18); currently unused by the template above.
withDefaults(
  defineProps<{
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,20 +0,0 @@
<template>
  <!-- Share/network icon (three linked nodes). Fix: bind the previously-unused
       `size` prop so the icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <circle cx="10.5" cy="2.5" r="2" stroke="currentColor" />
    <circle cx="3.5" cy="7.5" r="2" stroke="currentColor" />
    <path d="M8.79367 3.71881L5.19458 6.28959" stroke="currentColor" stroke-linecap="round" />
    <path d="M5 9L9.5 12" stroke="currentColor" stroke-linecap="round" />
    <circle cx="12" cy="13" r="2.5" stroke="currentColor" />
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,39 +0,0 @@
<template>
  <!-- Menu/sidebar icon with a left-pointing triangle on the middle row.
       Unlike the square icons in this set, the glyph is 14x13, so it exposes
       separate `width`/`height` props (both correctly bound below). -->
  <svg
    :width="width"
    :height="height"
    viewBox="0 0 14 13"
    fill="none"
    xmlns="http://www.w3.org/2000/svg"
  >
    <path
      d="M0.702819 11.5556C0.521256 11.5547 0.346487 11.6268 0.215434 11.7565C0.0843801 11.8862 0.00730705 12.0634 0.000494003 12.2507C-0.00631809 12.4379 0.0576639 12.6206 0.178926 12.7601C0.300187 12.8996 0.46923 12.985 0.650342 12.9982L0.702819 13H13.2972C13.4787 13.0008 13.6535 12.9288 13.7846 12.7991C13.9156 12.6694 13.9927 12.4922 13.9995 12.3049C14.0063 12.1176 13.9423 11.9349 13.8211 11.7954C13.6998 11.6559 13.5308 11.5706 13.3497 11.5574L13.2972 11.5556H0.702819Z"
      fill="currentColor"
    />
    <path
      d="M4.81103 4.13111C4.76599 4.04842 4.70562 3.9757 4.63335 3.9171C4.56108 3.8585 4.47833 3.81517 4.38984 3.78958C4.30134 3.764 4.20883 3.75666 4.11759 3.76798C4.02634 3.77931 3.93815 3.80908 3.85806 3.85559L0.38761 5.87058C0.279428 5.93342 0.189389 6.02483 0.12673 6.13544C0.0640707 6.24605 0.0310459 6.37187 0.0310459 6.5C0.0310459 6.62813 0.0640707 6.75395 0.12673 6.86456C0.189389 6.97517 0.279428 7.06658 0.38761 7.12942L3.85806 9.14441C3.96453 9.20627 4.08483 9.23827 4.207 9.23724C4.32917 9.2362 4.44895 9.20216 4.55442 9.13851C4.65989 9.07485 4.74738 8.9838 4.80817 8.87441C4.86896 8.76502 4.90095 8.64111 4.90094 8.515V4.485C4.90089 4.36103 4.86993 4.23916 4.81103 4.13111Z"
      fill="currentColor"
    />
    <path
      d="M7.06997 5.77778C6.64491 5.77778 6.30031 6.10097 6.30031 6.5C6.30031 6.88061 6.61412 7.19261 7.01259 7.22042L7.06997 7.22222H13.2272C13.6523 7.22222 13.9969 6.89903 13.9969 6.5C13.9969 6.11939 13.6831 5.80739 13.2846 5.77958L13.2272 5.77778H7.06997Z"
      fill="currentColor"
    />
    <path
      d="M0.702819 7.24048e-06C0.521255 -0.000831409 0.346487 0.0712134 0.215434 0.200922C0.0843801 0.33063 0.00730515 0.507843 0.00049305 0.695123C-0.00631905 0.882404 0.0576639 1.06508 0.178926 1.20457C0.300187 1.34406 0.46923 1.42943 0.650342 1.44264L0.702819 1.44445H13.2972C13.4787 1.44529 13.6535 1.37324 13.7846 1.24354C13.9156 1.11383 13.9927 0.936614 13.9995 0.749334C14.0063 0.562054 13.9423 0.379374 13.8211 0.239887C13.6998 0.1004 13.5308 0.0150303 13.3497 0.00181292L13.2972 7.24048e-06H0.702819Z"
      fill="currentColor"
    />
  </svg>
</template>
<script setup lang="ts">
// Rendered dimensions in px; defaults match the 14x13 viewBox exactly.
withDefaults(
  defineProps<{
    width?: number;
    height?: number;
  }>(),
  {
    width: 14,
    height: 13,
  },
);
</script>

View File

@ -1,39 +0,0 @@
<template>
  <!-- Menu/sidebar icon with a right-pointing triangle on the middle row
       (horizontal mirror of the left-pointing variant). Non-square 14x13
       glyph, so it exposes separate `width`/`height` props (bound below). -->
  <svg
    :width="width"
    :height="height"
    viewBox="0 0 14 13"
    fill="none"
    xmlns="http://www.w3.org/2000/svg"
  >
    <path
      d="M13.2972 11.5556C13.4787 11.5547 13.6535 11.6268 13.7846 11.7565C13.9156 11.8862 13.9927 12.0634 13.9995 12.2507C14.0063 12.4379 13.9423 12.6206 13.8211 12.7601C13.6998 12.8996 13.5308 12.985 13.3497 12.9982L13.2972 13H0.702818C0.521256 13.0008 0.346487 12.9288 0.215433 12.7991C0.0843798 12.6694 0.00730556 12.4922 0.000493166 12.3049C-0.00631923 12.1176 0.0576638 11.9349 0.178925 11.7954C0.300187 11.6559 0.469229 11.5706 0.650342 11.5574L0.702818 11.5556H13.2972Z"
      fill="currentColor"
    />
    <path
      d="M9.18897 4.13111C9.23401 4.04842 9.29438 3.9757 9.36665 3.9171C9.43892 3.8585 9.52167 3.81517 9.61016 3.78958C9.69866 3.764 9.79117 3.75666 9.88241 3.76798C9.97366 3.77931 10.0618 3.80908 10.1419 3.85559L13.6124 5.87058C13.7206 5.93342 13.8106 6.02483 13.8733 6.13544C13.9359 6.24605 13.969 6.37187 13.969 6.5C13.969 6.62813 13.9359 6.75395 13.8733 6.86456C13.8106 6.97517 13.7206 7.06658 13.6124 7.12942L10.1419 9.14441C10.0355 9.20627 9.91517 9.23827 9.793 9.23724C9.67083 9.2362 9.55105 9.20216 9.44558 9.13851C9.34011 9.07485 9.25262 8.9838 9.19183 8.87441C9.13104 8.76502 9.09905 8.64111 9.09906 8.515V4.485C9.09911 4.36103 9.13007 4.23916 9.18897 4.13111Z"
      fill="currentColor"
    />
    <path
      d="M6.93003 5.77778C7.35509 5.77778 7.69969 6.10097 7.69969 6.5C7.69969 6.88061 7.38588 7.19261 6.98741 7.22042L6.93003 7.22222H0.772787C0.347727 7.22222 0.00313156 6.89903 0.00313156 6.5C0.00313156 6.11939 0.316941 5.80739 0.715413 5.77958L0.772787 5.77778H6.93003Z"
      fill="currentColor"
    />
    <path
      d="M13.2972 7.24048e-06C13.4787 -0.000831409 13.6535 0.0712134 13.7846 0.200922C13.9156 0.33063 13.9927 0.507843 13.9995 0.695123C14.0063 0.882404 13.9423 1.06508 13.8211 1.20457C13.6998 1.34406 13.5308 1.42943 13.3497 1.44264L13.2972 1.44445H0.702818C0.521256 1.44529 0.346487 1.37324 0.215433 1.24354C0.08438 1.11383 0.00730581 0.936614 0.000493416 0.749334C-0.00631898 0.562054 0.057664 0.379374 0.178925 0.239887C0.300187 0.1004 0.469229 0.0150303 0.650342 0.00181292L0.702818 7.24048e-06H13.2972Z"
      fill="currentColor"
    />
  </svg>
</template>
<script setup lang="ts">
// Rendered dimensions in px; defaults match the 14x13 viewBox exactly.
withDefaults(
  defineProps<{
    width?: number;
    height?: number;
  }>(),
  {
    width: 14,
    height: 13,
  },
);
</script>

View File

@ -1,22 +0,0 @@
<template>
  <!-- Solid rounded square (stop-generation glyph). Fix: bind the previously-
       unused `size` prop; the default (12) equals the old hard-coded
       width/height, so default rendering is unchanged. -->
  <svg
    xmlns="http://www.w3.org/2000/svg"
    :width="size"
    :height="size"
    viewBox="0 0 12 12"
    fill="none"
  >
    <rect width="12" height="12" rx="2" fill="white" />
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 12,
  },
);
</script>

View File

@ -1,16 +0,0 @@
<template>
  <!-- Light-bulb icon. Fix: bind the previously-unused `size` prop so the
       icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <path d="M6.2 15H9.8C9.90609 15 10.0078 14.9566 10.0828 14.8794C10.1579 14.8022 10.2 14.6974 10.2 14.5882C10.2 14.479 10.1579 14.3743 10.0828 14.2971C10.0078 14.2199 9.90609 14.1765 9.8 14.1765H6.2C6.09391 14.1765 5.99217 14.2199 5.91716 14.2971C5.84214 14.3743 5.8 14.479 5.8 14.5882C5.8 14.6974 5.84214 14.8022 5.91716 14.8794C5.99217 14.9566 6.09391 15 6.2 15ZM6.6024 12.5294V10.8799C6.6024 10.527 6.386 10.2482 6.0408 9.98182C5.89518 9.86912 5.74033 9.76965 5.578 9.68453L5.6256 9.71335C5.06234 9.31673 4.60164 8.78457 4.2835 8.16308C3.96535 7.54159 3.79939 6.84956 3.8 6.14706C3.8 3.75924 5.6804 1.82353 8 1.82353C10.32 1.82353 12.2 3.75882 12.2 6.14706C12.2 7.59071 11.508 8.91288 10.3736 9.71418L10.4216 9.68535C10.3016 9.74712 10.132 9.84841 9.9588 9.98182C9.614 10.2482 9.3976 10.5266 9.3976 10.8799C9.3976 11.0289 9.3976 11.2929 9.3984 11.6384L9.3988 11.8607L9.3996 12.3305L9.4 12.5302C9.4 12.7567 9.2212 12.9412 9 12.9412H7C6.89391 12.9412 6.79217 12.8978 6.71716 12.8206C6.64214 12.7434 6.6 12.6386 6.6 12.5294H5.8C5.8 12.857 5.92643 13.1712 6.15147 13.4029C6.37651 13.6346 6.68174 13.7647 7 13.7647H9C9.15758 13.7647 9.31363 13.7328 9.45922 13.6707C9.60481 13.6086 9.7371 13.5176 9.84853 13.4029C9.95996 13.2882 10.0483 13.152 10.1087 13.0021C10.169 12.8523 10.2 12.6916 10.2 12.5294V12.3293L10.1988 11.8595V11.6371L10.1976 10.8799C10.1976 10.8704 10.282 10.7617 10.4388 10.6411C10.5462 10.5589 10.6599 10.4857 10.7788 10.4224L10.8264 10.3936C11.497 9.92125 12.0455 9.28757 12.4243 8.54755C12.8031 7.80752 13.0007 6.98352 13 6.14706C13 3.30424 10.7616 1 8 1C5.2388 1 3 3.30465 3 6.14706C3 7.86412 3.8236 9.43912 5.1728 10.3924L5.2208 10.4212C5.33982 10.4848 5.45365 10.5582 5.5612 10.6406C5.7176 10.7617 5.8024 10.8708 5.8024 10.8799V12.5294H6.6024ZM6.6 12.1176H9.4V11.2941H6.6V12.1176Z" fill="#333333"/>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,18 +0,0 @@
<template>
  <!-- Pencil-with-sparkle icon. Fix: bind the previously-unused `size` prop so
       the icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <path d="M9.54077 4.86591L10.4935 3.91315L13.0714 6.57146L12.1202 7.52504L9.54077 4.86591Z" fill="#333333"/>
    <path d="M11.6073 3.51805L4.23558 10.893L6.11103 12.7701L13.4828 5.39431L11.6073 3.51805ZM6.78902 13.9676L2.66173 15L2 14.3716L3.03894 10.215L10.9952 2.25393C11.0756 2.17343 11.1711 2.10957 11.2762 2.066C11.3813 2.02243 11.4939 2 11.6077 2C11.7215 2 11.8342 2.02243 11.9393 2.066C12.0444 2.10957 12.1399 2.17343 12.2203 2.25393L14.7461 4.78217C15.0834 5.12035 15.0834 5.66828 14.7461 6.00646L6.78821 13.9676H6.78902Z" fill="#333333"/>
    <path d="M14.1697 12.323L13.2023 12.718L12.8064 13.6863L12.4113 12.7189L11.4431 12.323L12.4105 11.9279L12.8064 10.9597L13.2015 11.9271L14.1697 12.323Z" fill="#333333"/>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,18 +0,0 @@
<template>
  <!-- Globe/wireframe icon. Fix: bind the previously-unused `size` prop so the
       icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <circle cx="8" cy="8" r="6.5" stroke="currentColor" />
    <path d="M8 5C11.3137 5 14 6.34315 14 8C14 9.65685 11.3137 11 8 11C4.68629 11 2 9.65685 2 8C2 7.23165 2.57771 6.53076 3.52779 6" stroke="currentColor" stroke-linecap="round" />
    <path d="M5 8C5 4.68629 6.34315 2 8 2C9.65685 2 11 4.68629 11 8C11 11.3137 9.65685 14 8 14C7.1115 14 6.31321 13.2275 5.76389 12" stroke="currentColor" stroke-linecap="round" />
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -1,16 +0,0 @@
<template>
  <!-- Paper-plane/send icon. Fix: bind the previously-unused `size` prop so
       the icon actually scales (the svg was hard-coded to 16px). -->
  <svg xmlns="http://www.w3.org/2000/svg" :width="size" :height="size" viewBox="0 0 16 16" fill="none">
    <path d="M1.42196 1.00146C1.46856 0.997573 1.51672 1.00146 1.56565 1.01311L11.0513 3.30269C11.1362 3.32322 11.2135 3.36761 11.2741 3.43064C11.3346 3.49366 11.3759 3.57269 11.393 3.6584L12.2062 7.72497L12.2559 7.67682C12.3418 7.59098 12.4583 7.54276 12.5798 7.54276C12.7012 7.54276 12.8177 7.59098 12.9036 7.67682L14.8663 9.63943C14.9519 9.72532 15 9.84164 15 9.96291C15 10.0842 14.9519 10.2005 14.8663 10.2864L10.2862 14.8663C10.2003 14.9519 10.084 15 9.96274 15C9.84147 15 9.72515 14.9519 9.63925 14.8663L7.67658 12.9037C7.59073 12.8178 7.54251 12.7013 7.54251 12.5798C7.54251 12.4584 7.59073 12.3419 7.67658 12.256L7.72473 12.2063L3.65803 11.3939C3.58501 11.3793 3.51665 11.3471 3.45887 11.3002C3.40108 11.2532 3.35561 11.1929 3.32639 11.1244L3.30309 11.0522L1.01187 1.56609C1.00093 1.51876 0.997518 1.47001 1.00178 1.42163V1.40998C1.00337 1.39061 1.00649 1.37139 1.0111 1.3525L1.01731 1.33076C1.02141 1.31489 1.0266 1.29932 1.03285 1.28416L1.04372 1.26086C1.05287 1.24118 1.06352 1.22222 1.07556 1.20416L1.08022 1.19795L1.09187 1.18242C1.1362 1.12404 1.19368 1.07694 1.25964 1.04495C1.282 1.03385 1.30542 1.02501 1.32954 1.01854L1.35206 1.01233C1.37067 1.00748 1.38962 1.0041 1.40876 1.00223H1.41886L1.42196 1.00146ZM12.5798 8.64764L8.64743 12.5798L9.96313 13.8947L13.8947 9.96329L12.5798 8.64764ZM2.93183 2.28449L6.46263 5.81595C6.83373 5.60181 7.27002 5.52986 7.69023 5.61351C8.11044 5.69716 8.48591 5.93071 8.7467 6.27064C9.00749 6.61058 9.13582 7.03372 9.10778 7.46124C9.07973 7.88876 8.89723 8.29152 8.59426 8.59447C8.2913 8.89743 7.88853 9.07992 7.461 9.10797C7.03346 9.13601 6.6103 9.00769 6.27036 8.74691C5.93041 8.48612 5.69686 8.11066 5.61321 7.69047C5.52955 7.27027 5.6015 6.83399 5.81565 6.4629L2.28408 2.93222L4.12481 10.5528L8.50375 11.4281L11.428 8.50396L10.5526 4.12439L2.93183 2.28449ZM7.34571 6.49552C7.12016 6.49552 6.90384 6.58512 6.74434 6.74461C6.58485 6.9041 6.49525 7.12041 6.49525 7.34596C6.49525 7.57151 6.58485 7.78783 6.74434 7.94731C6.90384 8.1068 7.12016 8.1964 7.34571 8.1964C7.57127 8.1964 7.78759 8.1068 7.94708 7.94731C8.10658 7.78783 8.19618 7.57151 8.19618 7.34596C8.19618 7.12041 8.10658 6.9041 7.94708 6.74461C7.78759 6.58512 7.57127 6.49552 7.34571 6.49552Z" fill="#333333"/>
  </svg>
</template>
<script setup lang="ts">
withDefaults(
  defineProps<{
    /** Rendered width/height in px (viewBox keeps the glyph proportional). */
    size?: number;
  }>(),
  {
    size: 18,
  },
);
</script>

View File

@ -0,0 +1,269 @@
<template>
  <!-- Removable list of pending attachments (images / videos / files) shown
       above the chat input, with a per-item upload progress bar. -->
  <div class="attachment-preview">
    <TransitionGroup name="attachment">
      <div
        v-for="attachment in attachments"
        :key="attachment.id"
        class="attachment-item"
        :class="attachment.type"
      >
        <!-- Image preview -->
        <template v-if="attachment.type === 'image'">
          <img
            :src="attachment.url"
            :alt="attachment.name"
            class="preview-image"
          />
        </template>
        <!-- Video preview: poster thumbnail when available, else a placeholder -->
        <template v-else-if="attachment.type === 'video'">
          <div class="preview-video">
            <img
              v-if="attachment.thumbnail"
              :src="attachment.thumbnail"
              :alt="attachment.name"
            />
            <div v-else class="video-placeholder">
              <Video :size="24" />
            </div>
            <div class="video-badge">
              <Play :size="12" />
            </div>
          </div>
        </template>
        <!-- Generic file preview: emoji + truncated name + formatted size -->
        <template v-else>
          <div class="preview-file">
            <span class="file-emoji">{{
              getFileEmoji(attachment.mimeType)
            }}</span>
            <div class="file-details">
              <span class="file-name">{{ truncateName(attachment.name) }}</span>
              <span class="file-size">{{ formatSize(attachment.size) }}</span>
            </div>
          </div>
        </template>
        <!-- Remove button (revealed on hover, see .remove-btn styles) -->
        <button class="remove-btn" @click="$emit('remove', attachment.id)">
          <X :size="14" />
        </button>
        <!-- Upload progress bar along the bottom edge -->
        <div v-if="attachment.uploading" class="upload-progress">
          <div
            class="progress-bar"
            :style="{ width: `${attachment.progress || 0}%` }"
          />
        </div>
      </div>
    </TransitionGroup>
  </div>
</template>
<script setup lang="ts">
import { X, Video, Play } from "@/components/icons";
import { formatFileSize, getFileIcon, truncateText } from "@/utils/helpers";
// Local attachment shape; `uploading`/`progress` drive the progress bar.
interface AttachmentWithProgress {
  id: string;
  name: string;
  type: "image" | "file" | "video";
  url: string;
  size?: number; // presumably bytes — TODO confirm against the uploader
  mimeType?: string;
  thumbnail?: string; // poster image used for video previews
  uploading?: boolean;
  progress?: number; // 0-100, rendered as a percentage width
}
defineProps<{
  attachments: AttachmentWithProgress[];
}>();
// `remove` carries the id of the attachment whose X button was clicked.
defineEmits<{
  remove: [id: string];
}>();
// Map a MIME type to a display emoji; empty string when the type is missing.
function getFileEmoji(mimeType?: string) {
  return getFileIcon(mimeType || "");
}
// Human-readable size; empty string when size is absent (or zero).
function formatSize(size?: number) {
  return size ? formatFileSize(size) : "";
}
// Keep file names short enough for the card layout (max 20 chars).
function truncateName(name: string) {
  return truncateText(name, 20);
}
</script>
<style lang="scss" scoped>
.attachment-preview {
  display: flex;
  flex-wrap: wrap;
  gap: 10px;
  padding: 12px 16px;
  border-bottom: 1px solid #e2e8f0;
  .dark & {
    border-bottom-color: #374151;
  }
}
.attachment-item {
  position: relative;
  border-radius: 12px;
  overflow: hidden;
  background: #f3f4f6;
  .dark & {
    background: #374151;
  }
  &.image,
  &.video {
    width: 80px;
    height: 80px;
  }
  &.file {
    padding: 10px 40px 10px 12px;
  }
}
.preview-image {
  width: 100%;
  height: 100%;
  object-fit: cover;
}
.preview-video {
  position: relative;
  width: 100%;
  height: 100%;
  img {
    width: 100%;
    height: 100%;
    object-fit: cover;
  }
  .video-placeholder {
    width: 100%;
    height: 100%;
    display: flex;
    align-items: center;
    justify-content: center;
    background: #e5e7eb;
    color: #9ca3af;
    .dark & {
      background: #4b5563;
    }
  }
  .video-badge {
    position: absolute;
    bottom: 6px;
    left: 6px;
    display: flex;
    align-items: center;
    justify-content: center;
    width: 22px;
    height: 22px;
    background: rgba(0, 0, 0, 0.7);
    border-radius: 50%;
    color: white;
  }
}
.preview-file {
  display: flex;
  align-items: center;
  gap: 10px;
}
.file-emoji {
  font-size: 24px;
}
.file-details {
  display: flex;
  flex-direction: column;
}
.file-name {
  font-size: 13px;
  font-weight: 500;
  color: #374151;
  .dark & {
    color: #e5e7eb;
  }
}
.file-size {
  font-size: 11px;
  color: #9ca3af;
}
.remove-btn {
  position: absolute;
  top: 4px;
  right: 4px;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 22px;
  height: 22px;
  border: none;
  border-radius: 50%;
  background: rgba(0, 0, 0, 0.6);
  color: white;
  cursor: pointer;
  opacity: 0;
  transition: all 0.2s ease;
  .attachment-item:hover & {
    opacity: 1;
  }
  &:hover {
    background: rgba(239, 68, 68, 0.9);
  }
}
.upload-progress {
  position: absolute;
  bottom: 0;
  left: 0;
  right: 0;
  height: 3px;
  background: rgba(0, 0, 0, 0.2);
  .progress-bar {
    height: 100%;
    background: #3b82f6;
    transition: width 0.3s ease;
  }
}
// Enter/leave animation for <TransitionGroup name="attachment">
.attachment-enter-active,
.attachment-leave-active {
  transition: all 0.3s ease;
}
.attachment-enter-from {
  opacity: 0;
  transform: scale(0.8);
}
.attachment-leave-to {
  opacity: 0;
  transform: scale(0.8);
}
</style>

View File

@ -1,102 +1,164 @@
<template>
<div class="chat-input-container" :class="{ 'is-focused': isFocused, 'is-expanded': isExpanded }">
<div
class="chat-input-container"
:class="{ 'is-focused': isFocused, 'is-expanded': isExpanded }"
>
<!-- 输入区域 -->
<div class="input-area">
<!-- 左侧功能按钮 -->
<div class="input-actions left">
<StackedCards :cards="attachments" :supports-files="supports_files" :supports-vision="supports_vision"
@remove="removeAttachment" @add-upload="triggerUploadInput" />
<!-- 附件按钮 -->
<button
class="action-btn"
:class="{ disabled: !supports_files }"
:disabled="!supports_files"
:title="supports_files ? '添加附件' : '当前模型不支持文件附件'"
@click="supports_files && triggerFileInput()"
>
<Paperclip :size="20" />
</button>
<!-- 图片按钮 -->
<button
class="action-btn"
:class="{ disabled: !supports_vision }"
:disabled="!supports_vision"
:title="supports_vision ? '添加图片' : '当前模型不支持图片识别'"
@click="supports_vision && triggerImageInput()"
>
<Image :size="20" />
</button>
<!-- 隐藏的文件输入框 -->
<input
ref="uploadInputRef"
type="file"
:accept="uploadAccept"
multiple
hidden
@change="handleUploadSelect"
/>
<input
ref="fileInputRef"
type="file"
multiple
hidden
@change="handleFileSelect"
/>
<input
ref="imageInputRef"
type="file"
accept="image/*"
multiple
hidden
@change="handleImageSelect"
/>
</div>
<!-- 文本输入框 -->
<div class="textarea-wrapper">
<textarea ref="textareaRef" v-model="inputText" :placeholder="placeholder" :rows="1"
@beforeinput="handleBeforeInput" @input="autoResize" @focus="isFocused = true" @blur="isFocused = false"
@keydown="handleKeydown" @paste="handlePaste" />
<textarea
ref="textareaRef"
v-model="inputText"
:placeholder="placeholder"
:rows="1"
@beforeinput="handleBeforeInput"
@input="autoResize"
@focus="isFocused = true"
@blur="isFocused = false"
@keydown="handleKeydown"
@paste="handlePaste"
/>
</div>
<!-- 右侧功能按钮 -->
<div class="input-actions right">
<!-- 发送/停止按钮 -->
<button
v-if="isStreaming"
class="action-btn stop"
title="停止生成"
@click="$emit('stop')"
>
<StopCircle :size="20" />
</button>
<button
v-else
class="action-btn send"
:class="{ active: canSend, loading: isUploading }"
:disabled="!canSend"
:title="isUploading ? '上传中...' : '发送消息 (Ctrl+Enter)'"
@click="handleSend"
>
<Loader2 v-if="isUploading" :size="20" class="animate-spin" />
<Send v-else :size="20" />
</button>
</div>
</div>
<!-- 底部工具栏 -->
<div class="input-toolbar">
<div class="toolbar-left">
<!-- 展开/收起 -->
<!-- <button class="toolbar-btn" title="展开输入框" @click="toggleExpand">
<button class="toolbar-btn" title="展开输入框" @click="toggleExpand">
<Maximize2 v-if="!isExpanded" :size="16" />
<Minimize2 v-else :size="16" />
</button> -->
</button>
<!-- 深度思考开关 -->
<button class="toolbar-btn"
<button
class="toolbar-btn"
:class="{ active: isDeepThinking, disabled: isForceDeepThinkingModel || !supports_thinking }"
:disabled="isForceDeepThinkingModel || !supports_thinking"
:title="isForceDeepThinkingModel ? '当前模型强制开启深度思考' : (supports_thinking ? '深度思考' : '当前模型不支持深度思考')"
@click="!isForceDeepThinkingModel && supports_thinking && toggleDeepThink()">
<DeepThinkingIcon :size="16" />
@click="!isForceDeepThinkingModel && supports_thinking && toggleDeepThink()"
>
<Brain :size="16" />
<span>深度思考</span>
</button>
<!-- 深度搜索开关 -->
<button class="toolbar-btn" :class="{ active: isDeepSearch, disabled: !supports_web_search }"
:disabled="!supports_web_search" :title="supports_web_search ? '深度搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleDeepSearch()">
<DeepSearchIcon :size="16" />
<button
class="toolbar-btn"
:class="{ active: isDeepSearch, disabled: !supports_web_search }"
:disabled="!supports_web_search"
:title="supports_web_search ? '深度搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleDeepSearch()"
>
<Sparkles :size="16" />
<span>深度搜索</span>
</button>
<!-- 联网搜索开关 -->
<button class="toolbar-btn" :class="{ active: isWebSearch, disabled: !supports_web_search }"
:disabled="!supports_web_search" :title="supports_web_search ? '联网搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleWebSearch()">
<WebSearchIcon :size="16" />
<button
class="toolbar-btn"
:class="{ active: isWebSearch, disabled: !supports_web_search }"
:disabled="!supports_web_search"
:title="supports_web_search ? '联网搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleWebSearch()"
>
<Globe :size="16" />
<span>联网搜索</span>
</button>
</div>
<!-- 右侧功能按钮 -->
<div class="input-actions right">
<!-- 发送/停止按钮 -->
<button v-if="isStreaming" class="action-btn stop" title="停止生成" @click="$emit('stop')">
<StopIcon />
</button>
<button v-else class="action-btn send" :class="{ active: canSend, loading: isProcessingAttachments }"
:disabled="!canSend" :title="isProcessingAttachments ? '附件处理中...' : '发送消息 (Ctrl+Enter)'" @click="handleSend">
<LoadingIcon v-if="isProcessingAttachments" class="animate-spin" />
<SendIcon v-else :size="20" />
</button>
</div>
</div>
</div>
</template>
<script setup lang="ts">
import { ref, computed, watch, nextTick, onMounted } from "vue";
import { generateId } from "@/utils/helpers";
import type { Attachment } from "@/types/chat";
import { chatApi } from "@/services/api";
import { useAuthStore } from "@/stores/auth";
import { useSettingsStore } from "@/stores/settings";
import StackedCards from "@/components/ui/StackedCards.vue";
import DeepThinkingIcon from "../icons/custom/DeepThinkingIcon.vue";
import DeepSearchIcon from "../icons/custom/DeepSearchIcon.vue";
import WebSearchIcon from "../icons/custom/WebSearchIcon.vue";
import SendIcon from "../icons/custom/SendIcon.vue";
import StopIcon from "../icons/custom/StopIcon.vue";
import LoadingIcon from "../icons/custom/LoadingIcon.vue";
import { ref, computed, watch, nextTick, onMounted } from "vue";
import {
Paperclip,
Image,
Send,
StopCircle,
Sparkles,
Globe,
Maximize2,
Minimize2,
Brain,
Loader2,
} from "@/components/icons";
import { generateId } from "@/utils/helpers";
import type { Attachment } from "@/types/chat";
import { chatApi } from "@/services/api";
import { useAuthStore } from "@/stores/auth";
import { useSettingsStore } from "@/stores/settings";
interface AttachmentWithProgress extends Attachment {
uploading?: boolean;
progress?: number;
deleting?: boolean;
}
const props = withDefaults(
@ -163,7 +225,8 @@ const isWebSearch = ref(
);
// DOM
const textareaRef = ref<HTMLTextAreaElement | null>(null);
const uploadInputRef = ref<HTMLInputElement | null>(null);
const fileInputRef = ref<HTMLInputElement | null>(null);
const imageInputRef = ref<HTMLInputElement | null>(null);
// toast
let lastToastTime = 0;
@ -178,50 +241,14 @@ function showThrottledToast(message: string, type: "error" = "error") {
}
//
const charCount = computed(() => inputText.value.length);
const isProcessingAttachments = computed(() =>
attachments.value.some((a) => a.uploading || a.deleting),
);
const textFileAccept =
".txt,.md,.markdown,.pdf,.doc,.docx,.rtf,.csv,.tsv,.json,.xml,.html,.htm,.yaml,.yml,.log,.ini,.conf,.sql,.js,.ts,.jsx,.tsx,.py,.java,.c,.cpp,.h,.hpp,.go,.rs,.sh";
// `accept` attribute for the unified upload <input>: image types and/or the
// text-file whitelist, depending on what the current model supports.
// Empty string when the model supports neither kind of attachment.
const uploadAccept = computed(() => {
  const acceptParts: string[] = [];
  if (props.supports_vision) {
    acceptParts.push("image/*");
  }
  if (props.supports_files) {
    acceptParts.push(textFileAccept);
  }
  return acceptParts.join(",");
});
// Lower-cased extension of `fileName`, dot included (e.g. ".png").
// Returns "" when the name contains no dot at all.
function getFileExt(fileName: string) {
  const dotPos = fileName.lastIndexOf(".");
  return dotPos < 0 ? "" : fileName.slice(dotPos).toLowerCase();
}
// Classify a picked/pasted file as an "image" or generic "file" attachment,
// gated by the current model's capabilities. Returns null when the model
// cannot accept that kind of attachment (caller shows a toast in that case).
function getUploadTypeByModel(file: File): "image" | "file" | null {
  const imageExtensions = [
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".webp",
    ".bmp",
    ".svg",
    ".heic",
  ];
  // Detect images by MIME type first, falling back to the extension for
  // files whose type the browser leaves empty.
  const looksLikeImage =
    file.type.startsWith("image/") ||
    imageExtensions.includes(getFileExt(file.name));
  if (looksLikeImage) {
    return props.supports_vision ? "image" : null;
  }
  return props.supports_files ? "file" : null;
}
const charCount = computed(() => inputText.value.length);
const isUploading = computed(() => attachments.value.some((a) => a.uploading));
const canSend = computed(() => {
return (
(inputText.value.trim().length > 0 || attachments.value.length > 0) &&
!props.disabled &&
charCount.value <= props.maxChars &&
!isProcessingAttachments.value
!isUploading.value
);
});
@ -231,7 +258,7 @@ function autoResize() {
if (!textarea) return;
textarea.style.height = "auto";
const maxHeight = isExpanded.value ? 400 : 116;
const maxHeight = isExpanded.value ? 400 : 160;
// 1px
textarea.style.height = `${Math.min(textarea.scrollHeight, maxHeight) + 1}px`;
}
@ -298,7 +325,7 @@ function handleSend() {
}
//
async function handlePaste(event: ClipboardEvent) {
async function handlePaste(event: ClipboardEvent) {
const items = event.clipboardData?.items;
if (!items) return;
@ -318,49 +345,47 @@ async function handlePaste(event: ClipboardEvent) {
}
}
for (const item of items) {
if (item.type.startsWith("image/")) {
const file = item.getAsFile();
if (file) {
const uploadType = getUploadTypeByModel(file);
if (uploadType === "image") {
event.preventDefault();
await addFileAsAttachment(file, uploadType);
} else {
event.preventDefault();
showThrottledToast("当前模型不支持上传图片");
}
}
}
}
}
// Open the hidden upload picker; refuse (with a throttled toast) when the
// active model accepts neither images nor files.
function triggerUploadInput() {
  const modelAcceptsUploads = props.supports_vision || props.supports_files;
  if (!modelAcceptsUploads) {
    showThrottledToast("当前模型不支持上传附件");
    return;
  }
  uploadInputRef.value?.click();
}
for (const item of items) {
if (item.type.startsWith("image/")) {
event.preventDefault();
const file = item.getAsFile();
if (file) {
await addFileAsAttachment(file, "image");
}
}
}
}
//
async function handleUploadSelect(event: Event) {
//
function triggerFileInput() {
fileInputRef.value?.click();
}
function triggerImageInput() {
imageInputRef.value?.click();
}
//
async function handleFileSelect(event: Event) {
const input = event.target as HTMLInputElement;
const files = input.files;
if (!files) return;
for (const file of files) {
const uploadType = getUploadTypeByModel(file);
if (!uploadType) {
showThrottledToast(
file.type.startsWith("image/")
? "当前模型不支持上传图片"
: "当前模型不支持上传附件文件",
);
continue;
}
await addFileAsAttachment(file, uploadType);
}
if (!files) return;
for (const file of files) {
await addFileAsAttachment(file, "file");
}
input.value = "";
}
// Handle the image <input type="file"> change event: upload each selected
// image sequentially, then clear the input so re-picking the same file
// fires a new change event.
async function handleImageSelect(event: Event) {
  const pickerEl = event.target as HTMLInputElement;
  const picked = pickerEl.files;
  if (!picked) return;
  for (const imageFile of Array.from(picked)) {
    await addFileAsAttachment(imageFile, "image");
  }
  pickerEl.value = "";
}
@ -429,26 +454,19 @@ async function uploadFileToServer(id: string, file: File) {
}
//
async function removeAttachment(id: string | number) {
const targetId = String(id);
const index = attachments.value.findIndex((a) => a.id === targetId);
async function removeAttachment(id: string) {
const index = attachments.value.findIndex((a) => a.id === id);
if (index === -1) return;
const attachment = attachments.value[index];
let deletedFromOss = false;
// OSS blob URL OSS
if (attachment.url && !attachment.url.startsWith("blob:")) {
try {
attachment.deleting = true;
await nextTick();
await chatApi.deleteAttachment(attachment.url);
deletedFromOss = true;
} catch (error) {
console.error("删除 OSS 文件失败:", error);
// 使
} finally {
attachment.deleting = false;
}
}
@ -458,10 +476,6 @@ async function removeAttachment(id: string | number) {
}
attachments.value.splice(index, 1);
if (deletedFromOss) {
window.$toast?.("OSS 文件删除成功", "success");
}
}
//
@ -488,6 +502,13 @@ function toggleWebSearch() {
localStorage.setItem("isWebSearch", String(isWebSearch.value));
}
// Flip the expanded/collapsed layout, then re-measure the textarea height
// once the DOM has updated to the new state.
function toggleExpand() {
  isExpanded.value = !isExpanded.value;
  void nextTick().then(() => autoResize());
}
//
function focus() {
textareaRef.value?.focus();
@ -541,15 +562,16 @@ onMounted(() => {
<style lang="scss" scoped>
.chat-input-container {
background: #F8F9FA;
background: #f3f4f5;
// border: 2px solid #e2e8f0;
height: 200px;
border-radius: 20px;
padding: 20px;
display: grid;
grid-template-rows: minmax(0, 1fr) auto;
gap: 12px;
border-color: #374151;
overflow: hidden;
transition: all 0.2s ease;
.dark & {
background: #1e1e2e;
border-color: #374151;
}
// &.is-focused {
// border-color: #3b82f6;
@ -564,12 +586,10 @@ onMounted(() => {
}
.input-area {
position: relative;
display: flex;
align-items: flex-start;
justify-content: flex-start;
min-height: 0;
align-items: flex-end;
gap: 8px;
padding: 12px 16px;
}
.input-actions {
@ -577,7 +597,6 @@ onMounted(() => {
align-items: center;
gap: 4px;
padding-bottom: 4px;
min-width: 0;
&.left {
flex-shrink: 0;
@ -595,7 +614,7 @@ onMounted(() => {
width: 38px;
height: 38px;
border: none;
border-radius: 50px;
border-radius: 12px;
background: transparent;
color: #6b7280;
cursor: pointer;
@ -616,14 +635,14 @@ onMounted(() => {
opacity: 0.4;
cursor: not-allowed;
// &:hover {
// background: transparent;
// color: #6b7280;
// }
&:hover {
background: transparent;
color: #6b7280;
}
}
&.send {
background: rgba(0, 15, 51, 0.20);
background: #e5e7eb;
color: #9ca3af;
.dark & {
@ -631,49 +650,46 @@ onMounted(() => {
}
&.active {
background: #000E32;
background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%);
color: white;
&:hover {
transform: scale(1.05);
// box-shadow: 0 4px 12px rgba(59, 130, 246, 0.4);
box-shadow: 0 4px 12px rgba(59, 130, 246, 0.4);
}
}
&.loading {
background: #000F33;
background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%);
color: white;
cursor: wait;
}
&:disabled {
background: rgba(0, 15, 51, 0.20);
cursor: not-allowed;
opacity: 0.6;
}
}
&.stop {
background: #000F33;
color: white;
&:hover {
transform: scale(1.05);
}
}
&.stop {
background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%);
color: white;
animation: pulse 2s infinite;
&:hover {
transform: scale(1.05);
}
}
}
.textarea-wrapper {
flex: 1;
min-width: 0;
min-height: 0;
textarea {
width: 100%;
min-height: 25px;
max-height: 100%;
overflow-y: auto;
scrollbar-gutter: stable;
max-height: 160px;
padding: 8px 0;
border: none;
outline: none;
@ -698,6 +714,8 @@ onMounted(() => {
display: flex;
align-items: center;
justify-content: space-between;
padding: 8px 16px;
border-top: 1px solid #f3f4f6;
// background: #fafbfc;
.dark & {
@ -716,13 +734,12 @@ onMounted(() => {
display: flex;
align-items: center;
gap: 6px;
height: 36px;
padding: 10px 15px;
border-radius: 50px;
background: var(---FFFFFF, #FFF);
padding: 6px 12px;
border: 1px solid transparent;
color: var(--6-666666, #666);
font-size: 14px;
border-radius: 8px;
background: transparent;
color: #6b7280;
font-size: 13px;
cursor: pointer;
transition: all 0.2s ease;
@ -737,12 +754,12 @@ onMounted(() => {
}
&.active {
background: #DFE2E6;
// border-color: rgba(59, 130, 246, 0.3);
color: #000F33;
background: rgba(59, 130, 246, 0.1);
border-color: rgba(59, 130, 246, 0.3);
color: #3b82f6;
svg {
color: #000F33;
color: #3b82f6;
}
}
@ -758,4 +775,13 @@ onMounted(() => {
}
}
</style>
@keyframes pulse {
0%,
100% {
box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.4);
}
50% {
box-shadow: 0 0 0 8px rgba(239, 68, 68, 0);
}
}
</style>

View File

@ -1,194 +0,0 @@
<script setup lang="ts">
import { computed } from "vue";
type UploadAction = "file" | "image";
interface ActionCard {
id: UploadAction;
title: string;
description: string;
icon: string;
color: string;
disabled: boolean;
}
// Capability flags for the two upload cards; both default to enabled so the
// cards render as active unless the parent explicitly restricts them.
const props = withDefaults(
  defineProps<{
    // whether the active model accepts document/archive attachments
    supportsFiles?: boolean;
    // whether the active model accepts image input
    supportsVision?: boolean;
  }>(),
  {
    supportsFiles: true,
    supportsVision: true,
  },
);
const emit = defineEmits<{
file: [];
image: [];
}>();
// Descriptors for the stacked upload entry cards (file / image). Recomputed
// when the capability props change so each card's disabled state and its
// description stay in sync with the current model.
const cards = computed<ActionCard[]>(() => [
  {
    id: "file",
    title: "附件",
    description: props.supportsFiles ? "上传文档 / 压缩包" : "当前模型不支持附件",
    icon: "📎",
    color: "#06b6d4",
    disabled: !props.supportsFiles,
  },
  {
    id: "image",
    title: "图片",
    description: props.supportsVision ? "上传图片 / 截图" : "当前模型不支持图片",
    icon: "🖼️",
    color: "#8b5cf6",
    disabled: !props.supportsVision,
  },
]);
// Emit the clicked card's action to the parent, but only when the matching
// capability is enabled (disabled cards also carry the `disabled` attribute).
function handleClick(action: UploadAction) {
  if (action === "file") {
    if (props.supportsFiles) emit("file");
    return;
  }
  if (props.supportsVision) emit("image");
}
</script>
<template>
<div class="upload-card-group" aria-label="上传入口">
<button
type="button"
v-for="(card, index) in cards"
:key="card.id"
class="upload-card"
:class="{ disabled: card.disabled, [`card-${card.id}`]: true }"
:style="{
'--card-color': card.color,
'--card-border': `${card.color}33`,
'--card-glow': `${card.color}40`,
'--card-glow-fade': `${card.color}14`,
'--card-offset': `${index * 14}px`,
zIndex: cards.length - index,
}"
:disabled="card.disabled"
:title="card.description"
@click="handleClick(card.id)"
>
<span class="card-glow" />
<span class="card-icon">{{ card.icon }}</span>
<span class="card-copy">
<span class="card-title">{{ card.title }}</span>
<span class="card-desc">{{ card.description }}</span>
</span>
</button>
</div>
</template>
<style scoped lang="scss">
.upload-card-group {
position: relative;
display: flex;
align-items: center;
justify-content: flex-start;
min-width: 164px;
height: 44px;
}
.upload-card {
position: absolute;
left: var(--card-offset);
top: 0;
display: flex;
align-items: center;
gap: 10px;
width: 138px;
height: 44px;
padding: 0 12px;
border: 1px solid var(--card-border);
border-radius: 999px;
background: linear-gradient(135deg, rgba(255, 255, 255, 0.96), rgba(245, 247, 250, 0.96));
box-shadow: 0 10px 20px rgba(15, 23, 42, 0.08);
color: #1f2937;
cursor: pointer;
transition: transform 0.2s ease, box-shadow 0.2s ease, opacity 0.2s ease, border-color 0.2s ease;
overflow: hidden;
}
.upload-card:hover:not(:disabled) {
transform: translateY(-2px);
box-shadow: 0 14px 24px rgba(15, 23, 42, 0.12);
}
.upload-card.disabled,
.upload-card:disabled {
opacity: 0.45;
cursor: not-allowed;
}
.card-glow {
position: absolute;
inset: -20%;
background: radial-gradient(circle at left center, var(--card-glow), transparent 58%);
opacity: 0.45;
pointer-events: none;
}
.card-icon {
position: relative;
z-index: 1;
flex-shrink: 0;
font-size: 18px;
}
.card-copy {
position: relative;
z-index: 1;
display: flex;
flex-direction: column;
min-width: 0;
text-align: left;
}
.card-title {
font-size: 14px;
font-weight: 700;
line-height: 1.1;
}
.card-desc {
margin-top: 2px;
font-size: 11px;
line-height: 1.2;
color: #6b7280;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.dark {
.upload-card {
background: linear-gradient(135deg, rgba(30, 30, 46, 0.98), rgba(24, 24, 37, 0.98));
color: #f3f4f6;
box-shadow: 0 12px 24px rgba(0, 0, 0, 0.22);
}
.card-desc {
color: #9ca3af;
}
}
@media (max-width: 640px) {
.upload-card-group {
min-width: 0;
height: 40px;
}
.upload-card {
width: 126px;
height: 40px;
}
}
</style>

View File

@ -120,7 +120,7 @@ function toggleExpand() {
display: flex;
align-items: center;
gap: 8px;
font-size: 14px;
font-size: 13px;
font-weight: 500;
color: #a6adc8;
@ -170,7 +170,7 @@ function toggleExpand() {
code {
font-family: "JetBrains Mono", "Fira Code", "Monaco", monospace;
font-size: 14px;
font-size: 13px;
line-height: 1.6;
color: #cdd6f4;
tab-size: 2;

View File

@ -276,7 +276,7 @@ if (typeof window !== "undefined") {
border-radius: 8px;
background: transparent;
color: #374151;
font-size: 14px;
font-size: 13px;
text-align: left;
cursor: pointer;
transition: all 0.15s ease;

View File

@ -1,15 +1,21 @@
<template>
<div class="message-bubble" :class="[
`role-${message.role}`,
{
'is-streaming': message.isStreaming,
'is-end': !message.isEnd && message.role !== 'user',
'is-error': message.isError,
compact: compact,
'message-select-mode': isMessageSelectMode,
'message-selected': isSelected,
},
]" @click="handleBubbleClick" @mouseenter="isHovered = true" @mouseleave="isHovered = false">
<div
class="message-bubble"
:class="[
`role-${message.role}`,
{
'is-streaming': message.isStreaming,
'is-end': !message.isEnd && message.role !== 'user',
'is-error': message.isError,
compact: compact,
'message-select-mode': isMessageSelectMode,
'message-selected': isSelected,
},
]"
@click="handleBubbleClick"
@mouseenter="isHovered = true"
@mouseleave="isHovered = false"
>
<!-- 消息选择模式复选框 -->
<div v-if="isMessageSelectMode" class="message-checkbox" @click.stop="handleToggleSelect">
<div class="checkbox" :class="{ checked: isSelected }">
@ -18,12 +24,12 @@
</div>
<!-- 头像 -->
<!-- <div class="avatar">
<div class="avatar">
<div class="avatar-inner" :class="message.role">
<Bot v-if="message.role === 'assistant'" :size="20" />
<User v-else :size="20" />
</div>
</div> -->
</div>
<!-- 消息内容区域 -->
<div class="message-content-wrapper">
@ -38,7 +44,7 @@
</div> -->
<!-- 消息主体 -->
<div :class="message.role === 'assistant' ? 'message-body assistant' : 'message-body user'">
<div class="message-body">
<!-- 错误状态 -->
<div v-if="message.isError" class="error-content">
<AlertCircle :size="18" />
@ -53,17 +59,30 @@
<template v-else>
<!-- 文本内容 - 使用 markstream-vue -->
<div v-if="message.content.text" class="text-content markstream-vue">
<MarkdownRender v-if="message.role !== 'user'" :content="message.content.text" :custom-html-tags="['think']"
custom-id="playground-demo" :escape-html-tags="['question', 'answer']" @copy="textCopy" />
<MarkdownRender
v-if="message.role !== 'user'"
:content="message.content.text"
:custom-html-tags="['think']"
custom-id="playground-demo"
:escape-html-tags="['question', 'answer']"
@copy="textCopy"
/>
<div v-else style="white-space: pre-wrap">
{{ message.content.text }}
</div>
</div>
<!-- 推荐选项 -->
<div v-if="message.content.suggestions?.length && isNew" class="suggestions">
<button v-for="suggestion in message.content.suggestions" :key="suggestion.id" class="suggestion-btn"
@click="$emit('select-suggestion', suggestion)">
<div
v-if="message.content.suggestions?.length && isNew"
class="suggestions"
>
<button
v-for="suggestion in message.content.suggestions"
:key="suggestion.id"
class="suggestion-btn"
@click="$emit('select-suggestion', suggestion)"
>
<Zap :size="14" />
{{ suggestion.text }}
</button>
@ -71,34 +90,40 @@
<!-- 图片展示 -->
<div v-if="message.content.images?.length" class="images-grid">
<n-image-group>
<div v-for="image in message.content.images" :key="image.id" class="image-item">
<n-image
class="message-image"
:src="image.url"
object-fit="cover"
:img-props="{
alt: image.name,
loading: 'lazy',
}"
/>
<div class="image-overlay">
<Maximize2 :size="18" />
</div>
<div
v-for="(image, index) in message.content.images"
:key="image.id"
class="image-item"
@click="$emit('preview-image', image, index)"
>
<img :src="image.url" :alt="image.name" loading="lazy" />
<div class="image-overlay">
<Maximize2 :size="18" />
</div>
</n-image-group>
</div>
</div>
<!-- 单个视频 -->
<div v-if="message.content.videos?.length === 1" class="single-video">
<video :src="message.content.videos[0].url" :poster="message.content.videos[0].poster" controls
preload="metadata" />
<video
:src="message.content.videos[0].url"
:poster="message.content.videos[0].poster"
controls
preload="metadata"
/>
</div>
<!-- 多个视频 -->
<div v-if="message.content.videos && message.content.videos.length > 1" class="videos-grid">
<div v-for="video in message.content.videos" :key="video.id" class="video-item"
@click="$emit('play-video', video)">
<div
v-if="message.content.videos && message.content.videos.length > 1"
class="videos-grid"
>
<div
v-for="video in message.content.videos"
:key="video.id"
class="video-item"
@click="$emit('play-video', video)"
>
<img :src="video.poster" :alt="video.title" />
<div class="video-overlay">
<Play :size="32" />
@ -111,7 +136,11 @@
<!-- 附件列表 -->
<div v-if="message.content.files?.length" class="files-list">
<div v-for="file in message.content.files" :key="file.id" class="file-item">
<div
v-for="file in message.content.files"
:key="file.id"
class="file-item"
>
<div class="file-icon">
{{ getFileEmoji(file.mimeType) }}
</div>
@ -131,24 +160,36 @@
<!-- 加载动画 -->
<div
v-if="message.role === 'assistant' && message.isStreaming"
class="loading-spinner-row"
aria-label="模型正在生成中"
v-if="message.isStreaming && !message.content.text"
class="loading-dots"
>
<span class="loading-spinner" aria-hidden="true"></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<!-- 操作栏 -->
<!-- <MessageActions v-if="
message.role === 'assistant' &&
!message.isStreaming &&
!message.isError &&
!readonly &&
!isMessageSelectMode
" :content="message.content.text || ''" :feedback="message.feedback" :show-regenerate="true"
:is-hovered="isHovered" :is-new="isNew" :is-break="message.isBreak" @copy="handleCopy" @like="handleLike"
@dislike="handleDislike" @regenerate="$emit('regenerate')" @share="handleShareClick" /> -->
<MessageActions
v-if="
message.role === 'assistant' &&
!message.isStreaming &&
!message.isError &&
!readonly &&
!isMessageSelectMode
"
:content="message.content.text || ''"
:feedback="message.feedback"
:show-regenerate="true"
:is-hovered="isHovered"
:is-new="isNew"
:is-break="message.isBreak"
@copy="handleCopy"
@like="handleLike"
@dislike="handleDislike"
@regenerate="$emit('regenerate')"
@share="handleShareClick"
/>
</div>
</div>
</template>
@ -160,6 +201,8 @@ import { ref } from "vue";
import MarkdownRender from "markstream-vue";
import { setCustomComponents } from "markstream-vue";
import {
Bot,
User,
AlertCircle,
RefreshCw,
Zap,
@ -167,7 +210,7 @@ import {
Play,
Check,
} from "@/components/icons";
import { NImage, NImageGroup } from "naive-ui";
import MessageActions from "./MessageActions.vue";
import { formatFileSize, getFileIcon } from "@/utils/helpers";
import type { Message, Suggestion, Attachment, VideoInfo } from "@/types/chat";
import ThinkingNode from "./components/ThinkingNode.vue";
@ -220,6 +263,11 @@ function handleToggleSelect() {
emit("toggle-select");
}
//
function handleShareClick() {
emit("enter-select-mode");
}
function getFileEmoji(mimeType?: string) {
return getFileIcon(mimeType || "");
}
@ -240,6 +288,18 @@ function textCopy(data: any) {
}
}
function handleCopy() {
emit("copy");
}
function handleLike() {
emit("like");
}
function handleDislike() {
emit("dislike");
}
setCustomComponents("playground-demo", {
think: ThinkingNode,
vmr_container: EChartsContainerNode,
@ -250,7 +310,7 @@ setCustomComponents("playground-demo", {
.message-bubble {
display: flex;
gap: 16px;
padding: 20px 22%;
padding: 20px 10%;
animation: fadeIn 0.3s ease;
//
@ -287,9 +347,9 @@ setCustomComponents("playground-demo", {
}
.message-body {
border-radius: 10px 2px 10px 10px;
background: #EEEFF0;
background: #f8fafc;
color: #1f2937;
border-radius: 20px 20px 4px 20px;
}
.text-content {
@ -306,10 +366,8 @@ setCustomComponents("playground-demo", {
&.role-assistant {
.message-body {
padding: 15px 20px;
gap: 15px;
border-radius: 2px 10px 10px 10px;
background: var(---F8F9FA, #F8F9FA);
background: #f8fafc;
border-radius: 20px 20px 20px 4px;
.dark & {
background: #2d2d3d;
@ -353,17 +411,17 @@ setCustomComponents("playground-demo", {
.message-body {
position: relative;
// &::after {
// content: "";
// position: absolute;
// bottom: 12px;
// right: 12px;
// width: 8px;
// height: 8px;
// background: #3b82f6;
// border-radius: 50%;
// animation: pulse 1.5s infinite;
// }
&::after {
content: "";
position: absolute;
bottom: 12px;
right: 12px;
width: 8px;
height: 8px;
background: #3b82f6;
border-radius: 50%;
animation: pulse 1.5s infinite;
}
}
}
}
@ -396,6 +454,7 @@ setCustomComponents("playground-demo", {
display: flex;
flex-direction: column;
gap: 8px;
max-width: 75%;
min-width: 0;
}
@ -432,10 +491,12 @@ setCustomComponents("playground-demo", {
// markstream-vue
.text-content {
:deep(p) {
margin: 0 0 16px;
:deep(p) {
margin: 0 0 12px;
&:last-child {
margin-bottom: 0;
}
}
:deep(ul),
@ -489,7 +550,6 @@ setCustomComponents("playground-demo", {
}
.dark & {
th,
td {
border-color: #374151;
@ -529,15 +589,12 @@ setCustomComponents("playground-demo", {
:deep(h1) {
font-size: 1.5em;
}
:deep(h2) {
font-size: 1.3em;
}
:deep(h3) {
font-size: 1.15em;
}
:deep(h4) {
font-size: 1em;
}
@ -563,7 +620,7 @@ setCustomComponents("playground-demo", {
border-radius: 6px;
background: #ef4444;
color: white;
font-size: 14px;
font-size: 13px;
cursor: pointer;
transition: background 0.2s ease;
@ -589,7 +646,7 @@ setCustomComponents("playground-demo", {
border-radius: 20px;
background: white;
color: #374151;
font-size: 14px;
font-size: 13px;
cursor: pointer;
transition: all 0.2s ease;
@ -625,12 +682,7 @@ setCustomComponents("playground-demo", {
overflow: hidden;
cursor: pointer;
:deep(.n-image) {
width: 100%;
height: 100%;
}
:deep(.n-image img) {
img {
width: 100%;
height: 100%;
object-fit: cover;
@ -647,11 +699,10 @@ setCustomComponents("playground-demo", {
color: white;
opacity: 0;
transition: opacity 0.2s ease;
pointer-events: none;
}
&:hover {
:deep(.n-image img) {
img {
transform: scale(1.05);
}
@ -660,13 +711,6 @@ setCustomComponents("playground-demo", {
}
}
}
.images-grid{
display: flex;
flex-wrap: wrap;
gap: 12px;
margin-top: 12px;
}
.single-video {
margin-top: 12px;
@ -737,7 +781,7 @@ display: flex;
align-items: center;
gap: 12px;
padding: 12px 16px;
background: rgba(0, 0, 0, 0.05);
background: rgba(0, 0, 0, 0.03);
border-radius: 10px;
.dark & {
@ -807,39 +851,17 @@ display: flex;
&:nth-child(1) {
animation-delay: -0.32s;
}
&:nth-child(2) {
animation-delay: -0.16s;
}
}
}
.loading-spinner-row {
display: flex;
align-items: center;
padding: 8px 0 0;
}
.loading-spinner {
width: 14px;
height: 14px;
border: 2px solid rgba(102, 102, 102, 0.25);
border-top-color: #666666;
border-radius: 50%;
animation: spin 0.8s linear infinite;
.dark & {
border-color: rgba(255, 255, 255, 0.25);
border-top-color: #f3f4f6;
}
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
@ -847,13 +869,11 @@ display: flex;
}
@keyframes pulse {
0%,
100% {
opacity: 1;
transform: scale(1);
}
50% {
opacity: 0.5;
transform: scale(0.8);
@ -861,26 +881,18 @@ display: flex;
}
@keyframes pulseDot {
0%,
80%,
100% {
transform: scale(0.6);
opacity: 0.5;
}
40% {
transform: scale(1);
opacity: 1;
}
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
//
.message-checkbox {
position: absolute;

View File

@ -37,11 +37,13 @@ async function textCopy(data: any) {
</script>
<template>
<div class="thinking-node ">
<div
class="thinking-node p-4 my-4 bg-blue-50 dark:bg-blue-900/40 rounded-md border-l-4 border-blue-400"
>
<!-- 可点击的标题栏 -->
<div class="thinking-header" @click="toggleCollapse">
<!-- 思考图标 -->
<!-- <div class="flex-shrink-0">
<div class="flex-shrink-0">
<!-- 思考图标 -->
<div
class="w-8 h-8 rounded-full bg-blue-200 dark:bg-blue-700 flex items-center justify-center text-blue-700 dark:text-blue-100"
>
@ -61,24 +63,38 @@ async function textCopy(data: any) {
/>
</svg>
</div>
</div> -->
</div>
<div class="thinking-title">
<!-- TODO: 深度思考样式 -->
<span > 深度思考</span>
<strong class="text-sm">💭 深度思考</strong>
<!-- 加载动画 -->
<span v-if="node.loading" class="thinking-dots visible" aria-hidden="true">
<span
v-if="node.loading"
class="thinking-dots visible"
aria-hidden="true"
>
<span class="dot dot-1" />
<span class="dot dot-2" />
<span class="dot dot-3" />
</span>
<span v-else class="thinking-status ">
<span
v-else
class="thinking-status text-xs text-slate-500 dark:text-slate-300"
>
已完成
</span>
</div>
<!-- 折叠/展开箭头 -->
<div class="collapse-arrow" :class="{ collapsed }">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"
stroke-linecap="round" stroke-linejoin="round">
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
stroke-width="2"
stroke-linecap="round"
stroke-linejoin="round"
>
<polyline points="6 9 12 15 18 9" />
</svg>
</div>
@ -86,7 +102,9 @@ async function textCopy(data: any) {
<!-- 可折叠的内容区域 -->
<div class="thinking-content" :class="{ collapsed }">
<div class="mt-3 text-[13px] leading-relaxed dark:text-slate-100">
<div
class="mt-3 text-sm leading-relaxed text-slate-800 dark:text-slate-100"
>
<MarkdownRender :content="node.content" @copy="textCopy" />
</div>
</div>
@ -96,9 +114,7 @@ async function textCopy(data: any) {
<style scoped>
.thinking-node {
color: #0f172a;
margin-bottom: 15px;
}
.dark .thinking-node {
color: #e6f0ff;
}
@ -107,9 +123,6 @@ async function textCopy(data: any) {
.thinking-header {
display: flex;
align-items: center;
justify-content: space-between;
padding: 0 0 15px 0;
border-bottom: 1px solid #e2e8f0;
gap: 12px;
cursor: pointer;
user-select: none;
@ -123,13 +136,7 @@ async function textCopy(data: any) {
}
.thinking-status {
/* font-style: italic; */
color: var(--6B-BBBBBB, #BBB);
font-family: "Microsoft YaHei";
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 21px;
font-style: italic;
}
/* 折叠箭头 */
@ -143,28 +150,18 @@ line-height: 21px;
transition: transform 0.25s ease;
border-radius: 4px;
}
.collapse-arrow:hover {
background: rgba(0, 0, 0, 0.06);
}
.dark .collapse-arrow:hover {
background: rgba(255, 255, 255, 0.08);
}
.collapse-arrow.collapsed {
transform: rotate(-90deg);
}
/* 可折叠内容 */
.thinking-content {
color: var(--9-999999, #999);
font-family: "Microsoft YaHei";
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px;
/* 153.846% */
max-height: 2000px;
overflow: auto;
transition:
@ -172,7 +169,6 @@ line-height: 21px;
opacity 0.25s ease;
opacity: 1;
}
.thinking-content.collapsed {
max-height: 0;
opacity: 0;
@ -185,7 +181,6 @@ line-height: 21px;
gap: 6px;
height: 12px;
}
.thinking-dots .dot {
width: 6px;
height: 6px;
@ -193,36 +188,30 @@ line-height: 21px;
background: #1e3a8a;
opacity: 0.25;
}
.thinking-dots.visible .dot-1 {
animation: think-bounce 1s infinite ease-in-out;
animation-delay: 0s;
}
.thinking-dots.visible .dot-2 {
animation: think-bounce 1s infinite ease-in-out;
animation-delay: 0.12s;
}
.thinking-dots.visible .dot-3 {
animation: think-bounce 1s infinite ease-in-out;
animation-delay: 0.24s;
}
.dark .thinking-dots .dot {
background: #bfdbfe;
opacity: 0.28;
}
@keyframes think-bounce {
0%,
80%,
100% {
transform: translateY(0);
opacity: 0.25;
}
40% {
transform: translateY(-6px);
opacity: 1;

View File

@ -203,8 +203,6 @@
</div>
<textarea
class="prompt-textarea"
:class="{ disabled: settings.learningModeEnabled }"
:disabled="settings.learningModeEnabled"
:value="settings.defaultSystemPrompt"
rows="4"
placeholder="输入系统提示词..."
@ -216,9 +214,6 @@
})
"
/>
<p v-if="settings.learningModeEnabled" class="setting-desc">
当前提示词已由学习模式接管关闭学习模式后可恢复编辑
</p>
</div>
</div>
@ -746,7 +741,7 @@ function handleClearData() {
border-radius: 10px;
background: white;
color: #6b7280;
font-size: 14px;
font-size: 13px;
cursor: pointer;
transition: all 0.2s ease;
@ -786,7 +781,7 @@ function handleClearData() {
border-radius: 8px;
background: transparent;
color: #6b7280;
font-size: 14px;
font-size: 13px;
cursor: pointer;
transition: all 0.2s ease;
@ -854,16 +849,6 @@ function handleClearData() {
&::placeholder {
color: #9ca3af;
}
&.disabled {
opacity: 0.7;
cursor: not-allowed;
background: #f3f4f6;
.dark & {
background: #252533;
}
}
}
.data-actions {

View File

@ -34,14 +34,18 @@
<!-- 对话分享预览 -->
<div v-else class="selected-preview">
<div class="preview-header">
<span class="preview-title">当前对话</span>
<span class="preview-hint">仅支持单个对话分享</span>
<span class="preview-title">已选择 {{ selectedCount }} 对话</span>
<span class="preview-hint">最多分享 10 个对话</span>
</div>
<div v-if="shareConversation" class="preview-list">
<div class="preview-item">
<div class="preview-list">
<div
v-for="conv in selectedConversations"
:key="conv.id"
class="preview-item"
>
<MessageSquare :size="14" />
<span class="item-title">{{ shareConversation.title }}</span>
<span class="item-count">{{ shareConversation.messages.length }} 条消息</span>
<span class="item-title">{{ conv.title }}</span>
<span class="item-count">{{ conv.messages.length }} 条消息</span>
</div>
</div>
</div>
@ -116,15 +120,10 @@ const settingsStore = useSettingsStore();
const chatStore = useChatStore();
const show = computed(() => settingsStore.showShareModal);
const { shareConversationId } = storeToRefs(settingsStore);
const { isMessageSelectMode, selectedMessages, selectedMessageCount } = storeToRefs(chatStore);
const { selectedConversations, selectedCount, isMessageSelectMode, selectedMessages, selectedMessageCount } = storeToRefs(chatStore);
//
const isMessageShare = computed(() => isMessageSelectMode.value);
const shareConversation = computed(() => {
if (!shareConversationId.value) return null;
return chatStore.conversations.find((c) => c.id === shareConversationId.value) || null;
});
const password = ref("");
const showPassword = ref(false);
@ -174,13 +173,16 @@ async function handleCreateShare() {
});
} else {
//
if (!shareConversation.value) {
window.$toast?.('请先选择要分享的对话', 'error');
//
if (selectedCount.value > SHARE_LIMITS.MAX_CONVERSATIONS) {
window.$toast?.(`最多分享 ${SHARE_LIMITS.MAX_CONVERSATIONS} 个对话`, 'error');
return;
}
const conversationIds = selectedConversations.value.map(c => c.id);
result = await shareApi.createShare({
conversationIds: [shareConversation.value.id],
conversationIds,
passwordHash,
expiresIn: SHARE_LIMITS.DEFAULT_EXPIRE_SECONDS,
});
@ -205,6 +207,8 @@ async function handleCreateShare() {
password.value = "";
if (isMessageShare.value) {
chatStore.exitMessageSelectMode();
} else {
chatStore.exitSelectMode();
}
} catch (error) {
@ -354,7 +358,7 @@ watch(show, (newVal: boolean) => {
display: flex;
align-items: center;
gap: 8px;
font-size: 14px;
font-size: 13px;
color: #4b5563;
.dark & {
@ -495,7 +499,7 @@ watch(show, (newVal: boolean) => {
padding: 12px;
background: rgba(59, 130, 246, 0.05);
border-radius: 10px;
font-size: 14px;
font-size: 13px;
color: #3b82f6;
.dark & {

View File

@ -262,7 +262,7 @@ watch(show, (newVal: boolean) => {
.share-section {
.share-label {
display: block;
font-size: 14px;
font-size: 13px;
font-weight: 500;
color: #374151;
margin-bottom: 8px;
@ -282,7 +282,7 @@ watch(show, (newVal: boolean) => {
padding: 12px 14px;
border: 1px solid #e5e7eb;
border-radius: 10px;
font-size: 14px;
font-size: 13px;
color: #1f2937;
background: #f9fafb;
@ -307,7 +307,7 @@ watch(show, (newVal: boolean) => {
border-radius: 10px;
background: #f3f4f6;
color: #374151;
font-size: 14px;
font-size: 13px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;
@ -336,7 +336,7 @@ watch(show, (newVal: boolean) => {
padding: 12px;
background: rgba(245, 158, 11, 0.1);
border-radius: 10px;
font-size: 14px;
font-size: 13px;
color: #f59e0b;
}

View File

@ -264,7 +264,7 @@ function close() {
}
.tip {
font-size: 14px;
font-size: 13px;
color: #9ca3af;
kbd {

View File

@ -1,6 +1,9 @@
<template>
<aside class="chat-sidebar" :class="{ collapsed: isCollapsed }"
:style="{ width: isCollapsed ? '0px' : `${sidebarWidth}px` }">
<aside
class="chat-sidebar"
:class="{ collapsed: isCollapsed }"
:style="{ width: isCollapsed ? '0px' : `${sidebarWidth}px` }"
>
<div class="sidebar-inner">
<!-- 头部 -->
<!-- <div class="sidebar-header">
@ -19,41 +22,45 @@
<!-- 模型选择 -->
<div v-show="!isCollapsed" class="model-selector-section">
<button class="model-selector" :class="{ 'is-open': showModelMenu }" @click="showModelMenu = !showModelMenu">
<button class="model-selector" @click="showModelMenu = !showModelMenu">
<Sparkles :size="16" />
<span class="model-name-display">{{ currentModel }}</span>
<ChevronDown v-if="!showModelMenu" :size="14" />
<ChevronUp v-else :size="14" />
<ChevronDown :size="14" />
</button>
<Transition name="dropdown">
<div v-if="showModelMenu" class="model-menu">
<button v-for="model in models" :key="model.id" class="model-option"
:class="{ active: model.id === currentModelId }" @click="selectModel(model.id, model.name)">
<button
v-for="model in models"
:key="model.id"
class="model-option"
:class="{ active: model.id === currentModelId }"
@click="selectModel(model.id, model.name)"
>
<div class="model-info">
<span class="model-name">{{ model.name }}</span>
<span class="model-desc">{{ model.description }}</span>
</div>
<!-- <Check v-if="model.id === currentModelId" :size="16" class="check-icon" /> -->
<Check
v-if="model.id === currentModelId"
:size="16"
class="check-icon"
/>
</button>
</div>
</Transition>
<button class="new-chat-btn" @click="handleNewChat">
<Plus :size="18" />
<span>新对话</span>
</button>
</div>
<!-- 新建对话按钮 -->
<!-- <div class="new-chat-section">
<div class="new-chat-section">
<button class="new-chat-btn" @click="handleNewChat">
<Plus :size="18" />
<span>新建对话</span>
</button>
</div> -->
</div>
<!-- 分享按钮 -->
<!-- <ShareButton /> -->
<ShareButton />
<!-- 搜索框 -->
<!-- <div class="search-section">
@ -73,11 +80,19 @@
<span>置顶</span>
</div>
<div class="group-list">
<ConversationItem v-for="conv in pinnedConversations" :key="conv.id" :conversation="conv"
:is-active="conv.id === currentConversationId" :is-select-mode="isSelectMode"
:is-selected="isConversationSelected(conv.id)" @select="selectConversation" @delete="deleteConversation"
@rename="renameConversation" @toggle-pin="togglePinConversation" @share="handleShareConversation"
@toggle-select="toggleConversationSelection" />
<ConversationItem
v-for="conv in pinnedConversations"
:key="conv.id"
:conversation="conv"
:is-active="conv.id === currentConversationId"
:is-select-mode="isSelectMode"
:is-selected="isConversationSelected(conv.id)"
@select="selectConversation"
@delete="deleteConversation"
@rename="renameConversation"
@toggle-pin="togglePinConversation"
@toggle-select="toggleConversationSelection"
/>
</div>
</div>
@ -88,20 +103,32 @@
<span>对话历史</span>
</div>
<div class="group-list">
<ConversationItem v-for="conv in recentConversations" :key="conv.id" :conversation="conv"
:is-active="conv.id === currentConversationId" :is-select-mode="isSelectMode"
:is-selected="isConversationSelected(conv.id)" @select="selectConversation" @delete="deleteConversation"
@rename="renameConversation" @toggle-pin="togglePinConversation" @share="handleShareConversation"
@toggle-select="toggleConversationSelection" />
<ConversationItem
v-for="conv in recentConversations"
:key="conv.id"
:conversation="conv"
:is-active="conv.id === currentConversationId"
:is-select-mode="isSelectMode"
:is-selected="isConversationSelected(conv.id)"
@select="selectConversation"
@delete="deleteConversation"
@rename="renameConversation"
@toggle-pin="togglePinConversation"
@toggle-select="toggleConversationSelection"
/>
</div>
</div>
<!-- 空状态 -->
<div v-if="
pinnedConversations.length === 0 && recentConversations.length === 0
" class="empty-state">
<img src="../../assets/无对话历史.png" alt="无对话历史" srcset="">
<div
v-if="
pinnedConversations.length === 0 && recentConversations.length === 0
"
class="empty-state"
>
<MessageSquare :size="40" class="empty-icon" />
<p>暂无对话</p>
<span>点击上方按钮开始新对话</span>
</div>
</div>
@ -129,17 +156,20 @@
</template>
<script setup lang="ts">
import { onBeforeUnmount, onMounted, ref } from "vue";
import { onMounted, ref } from "vue";
import { storeToRefs } from "pinia";
import { useChatStore } from "@/stores/chat";
import { useSettingsStore } from "@/stores/settings";
import { chatApi } from "@/services/api.ts";
import ConversationItem from "./ConversationItem.vue";
import ShareButton from "./ShareButton.vue";
import {
Plus,
Pin,
MessageSquare,
Sparkles,
ChevronDown,
ChevronUp,
Check,
} from "@/components/icons";
const chatStore = useChatStore();
@ -208,10 +238,6 @@ function togglePinConversation(id: string) {
chatStore.togglePinConversation(id);
}
function handleShareConversation(id: string) {
settingsStore.openConversationShareModal(id);
}
function toggleConversationSelection(id: string) {
chatStore.toggleConversationSelection(id);
}
@ -254,12 +280,6 @@ function handleClickOutside(event: MouseEvent) {
if (typeof window !== "undefined") {
document.addEventListener("click", handleClickOutside);
}
onBeforeUnmount(() => {
if (typeof window !== "undefined") {
document.removeEventListener("click", handleClickOutside);
}
});
</script>
<style lang="scss" scoped>
@ -359,10 +379,6 @@ onBeforeUnmount(() => {
}
.model-selector-section {
display: grid;
grid-template-columns: 6fr 4fr;
gap: 10px;
align-items: center;
position: relative;
padding: 12px 16px 6px;
}
@ -372,10 +388,12 @@ onBeforeUnmount(() => {
align-items: center;
gap: 8px;
width: 100%;
height: 36px;
padding: 10px 20px;
padding: 6px 12px;
border-radius: 10px;
background: #f3f4f5;
color: #374151;
font-size: 14px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;
@ -389,19 +407,12 @@ onBeforeUnmount(() => {
border-color: #3b82f6;
}
&.is-open {
background: var(--000-f-3310, rgba(0, 15, 51, 0.10));
}
svg:first-child {
color: #8b5cf6;
flex-shrink: 0;
}
.model-name-display {
text-align: left;
height: 100%;
font-size: 14px;
flex: 1;
overflow: hidden;
text-overflow: ellipsis;
@ -417,19 +428,13 @@ onBeforeUnmount(() => {
min-width: 200px;
max-height: 300px;
overflow-y: auto;
scrollbar-width: none;
-ms-overflow-style: none;
padding: 15px;
padding: 8px;
background: white;
border: 1px solid #e2e8f0;
border-radius: 14px;
box-shadow: 0 0 20px 0 rgba(0, 0, 0, 0.10);
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.1);
z-index: 100;
&::-webkit-scrollbar {
display: none;
}
.dark & {
background: #1e1e2e;
border-color: #2d2d3d;
@ -442,7 +447,7 @@ onBeforeUnmount(() => {
align-items: center;
justify-content: space-between;
width: 100%;
padding: 10px;
padding: 12px 14px;
border: none;
border-radius: 10px;
background: transparent;
@ -451,7 +456,7 @@ onBeforeUnmount(() => {
transition: all 0.15s ease;
&:hover {
background: #F8F9FA;
background: #f3f4f6;
.dark & {
background: #2d2d3d;
@ -459,13 +464,10 @@ onBeforeUnmount(() => {
}
&.active {
background: #F8F9FA;
background: rgba(59, 130, 246, 0.1);
.model-name {
color: var(---000F33, #000F33);
}
.model-desc{
color: var(--6-666666, #666);
color: #3b82f6;
}
}
}
@ -480,7 +482,7 @@ onBeforeUnmount(() => {
.model-name {
font-size: 14px;
font-weight: 500;
color: var(--6-666666, #666);
color: #1f2937;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
@ -492,7 +494,7 @@ onBeforeUnmount(() => {
.model-desc {
font-size: 12px;
color: var(--9-999999, #999);
color: #9ca3af;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
@ -525,9 +527,8 @@ onBeforeUnmount(() => {
justify-content: center;
gap: 8px;
width: 100%;
height: 36px;
padding: 10px 12px;
border-radius: 10px;
padding: 6px 12px;
border-radius: 12px;
background: #f3f4f5;
color: #374151;
font-size: 14px;
@ -538,7 +539,6 @@ onBeforeUnmount(() => {
.dark & {
background: #2d2d3d;
color: #e5e7eb;
&:hover {
background: #0475ed;
color: #e5e7eb;
@ -581,7 +581,7 @@ onBeforeUnmount(() => {
.search-placeholder {
flex: 1;
font-size: 14px;
font-size: 13px;
}
.search-kbd {
@ -597,8 +597,6 @@ onBeforeUnmount(() => {
.conversations-section {
flex: 1;
display: flex;
flex-direction: column;
overflow-y: auto;
padding-bottom: 12px;
}
@ -613,8 +611,8 @@ onBeforeUnmount(() => {
gap: 6px;
padding: 8px 20px;
font-size: 12px;
color: #000;
font-weight: 700;
font-weight: 600;
color: #9ca3af;
text-transform: uppercase;
letter-spacing: 0.5px;
@ -623,20 +621,12 @@ onBeforeUnmount(() => {
}
}
.group-list{
gap:5px;
display:flex;
flex-direction:column;
padding: 0 20px;
}
.empty-state {
margin: auto 0;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 20px 20px 160px;
padding: 40px 20px;
text-align: center;
.empty-icon {
@ -648,17 +638,11 @@ onBeforeUnmount(() => {
}
}
img {
width: 140px;
height: 80px;
margin-bottom: 16px;
}
p {
margin: 0 0 4px;
font-size: 14px;
font-weight: 500;
color: var(--BBBBBB, #BBB);
color: #6b7280;
}
span {

View File

@ -1,10 +1,15 @@
<template>
<div class="conversation-item group" :class="{
active: isActive,
pinned: conversation.pinned,
selected: isSelected,
'select-mode': isSelectMode,
}" @click="handleClick" @dblclick="handleRename">
<div
class="conversation-item group"
:class="{
active: isActive,
pinned: conversation.pinned,
selected: isSelected,
'select-mode': isSelectMode,
}"
@click="handleClick"
@dblclick="handleRename"
>
<!-- 选择模式复选框 -->
<div v-if="isSelectMode" class="item-checkbox" @click.stop="handleToggleSelect">
<div class="checkbox" :class="{ checked: isSelected }">
@ -14,7 +19,7 @@
<!-- 图标 -->
<div v-if="!isSelectMode" class="item-icon">
<MessageIcon :size="14" />
<MessageSquare :size="18" />
</div>
<!-- 内容 -->
@ -22,59 +27,43 @@
<div v-if="!isEditing" class="item-title">
{{ conversation.title }}
</div>
<input v-else ref="inputRef" v-model="editTitle" class="item-title-input" @blur="handleSaveRename"
@keydown.enter="handleSaveRename" @keydown.escape="handleCancelRename" @click.stop />
<input
v-else
ref="inputRef"
v-model="editTitle"
class="item-title-input"
@blur="handleSaveRename"
@keydown.enter="handleSaveRename"
@keydown.escape="handleCancelRename"
@click.stop
/>
<div class="item-meta">
<Clock :size="12" />
<span>{{ formattedTime }}</span>
</div>
</div>
<!-- 置顶标识 -->
<div v-if="conversation.pinned && !isSelectMode" class="pin-indicator">
<PinIcon :size="14" />
<Pin :size="12" />
</div>
<!-- 操作按钮 (非选择模式显示) -->
<div v-if="!isSelectMode" class="item-actions" @click.stop>
<n-tooltip :style="{ borderRadius: '5px', padding: '7px 15px' }">
<template #trigger>
<button
class="action-btn pin-toggle-btn"
@click="handleTogglePin"
>
<PinOffActionIcon v-if="conversation.pinned" :size="14" />
<PinActionIcon v-else :size="14" />
</button>
</template>
{{ conversation.pinned ? '取消置顶' : '置顶' }}
</n-tooltip>
<n-tooltip :style="{ borderRadius: '5px', padding: '7px 15px' }">
<template #trigger>
<button class="action-btn" @click="handleShare">
<ShareIcon :size="14" />
</button>
</template>
分享
</n-tooltip>
<n-tooltip :style="{ borderRadius: '5px', padding: '7px 15px' }">
<template #trigger>
<button class="action-btn" @click="handleRename">
<EditIcon :size="14" />
</button>
</template>
重命名
</n-tooltip>
<n-tooltip :style="{ borderRadius: '5px', padding: '7px 15px' }">
<template #trigger>
<button class="action-btn delete" @click="handleDelete">
<DeleteIcon :size="14" />
</button>
</template>
删除
</n-tooltip>
<button
class="action-btn"
:title="conversation.pinned ? '取消置顶' : '置顶'"
@click="handleTogglePin"
>
<PinOff v-if="conversation.pinned" :size="14" />
<Pin v-else :size="14" />
</button>
<button class="action-btn" title="重命名" @click="handleRename">
<Edit3 :size="14" />
</button>
<button class="action-btn delete" title="删除" @click="handleDelete">
<Trash2 :size="14" />
</button>
</div>
</div>
</template>
@ -82,22 +71,20 @@
<script setup lang="ts">
import { ref, computed, nextTick } from "vue";
import {
MessageSquare,
Pin,
PinOff,
Edit3,
Trash2,
Clock,
Check,
} from "@/components/icons";
import { formatTimestamp } from "@/utils/helpers";
import type { Conversation } from "@/types/chat";
import { NTooltip } from "naive-ui";
import MessageIcon from "../icons/custom/MessageIcon.vue";
import PinIcon from "../icons/custom/PinIcon.vue";
import PinActionIcon from "../icons/custom/PinActionIcon.vue";
import PinOffActionIcon from "../icons/custom/PinOffActionIcon.vue";
import EditIcon from "../icons/custom/EditIcon.vue";
import DeleteIcon from "../icons/custom/DeleteIcon.vue";
import ShareIcon from "../icons/custom/ShareIcon.vue";
const props = defineProps<{
conversation: Conversation;
isActive: boolean;
isActive: boolean;
isSelectMode?: boolean;
isSelected?: boolean;
}>();
@ -108,7 +95,6 @@ const emit = defineEmits<{
rename: [id: string, title: string];
togglePin: [id: string];
toggleSelect: [id: string];
share: [id: string];
}>();
const isEditing = ref(false);
@ -135,10 +121,6 @@ function handleTogglePin() {
emit("togglePin", props.conversation.id);
}
function handleShare() {
emit("share", props.conversation.id);
}
function handleRename() {
if (props.isSelectMode) return;
isEditing.value = true;
@ -170,15 +152,17 @@ function handleDelete() {
<style lang="scss" scoped>
.conversation-item {
position: relative;
display: flex;
align-items: center;
gap: 10px;
padding: 10px 12px;
margin: 2px 8px;
border-radius: 10px;
cursor: pointer;
transition: all 0.2s ease;
position: relative;
&:hover:not(.active) {
&:hover {
background: rgba(0, 0, 0, 0.05);
.dark & {
@ -193,38 +177,17 @@ function handleDelete() {
.pin-indicator {
opacity: 0;
}
.item-content {
flex: 0 1 clamp(72px, 28%, 96px);
max-width: clamp(72px, 28%, 96px);
}
}
&.active {
background: #e6e7eb;
background: rgba(59, 130, 246, 0.1);
.dark & {
background: rgba(59, 130, 246, 0.2);
}
.item-title {
font-weight: 700;
}
}
&.active:hover {
.item-actions {
opacity: 1;
pointer-events: auto;
}
.pin-indicator {
opacity: 0;
}
.item-content {
flex: 0 1 clamp(72px, 28%, 96px);
max-width: clamp(72px, 28%, 96px);
.item-icon {
color: #3b82f6;
}
}
}
@ -239,11 +202,9 @@ function handleDelete() {
}
.item-content {
flex: 1 1 auto;
flex: 1;
min-width: 0;
overflow: hidden;
max-width: 100%;
transition: flex-basis 0.2s ease, max-width 0.2s ease;
}
.item-title {
@ -297,8 +258,6 @@ function handleDelete() {
}
.item-actions {
position: absolute;
right: 12px;
display: flex;
align-items: center;
gap: 2px;
@ -307,10 +266,6 @@ function handleDelete() {
transition: opacity 0.2s ease;
}
.conversation-item.active .pin-indicator {
opacity: 0;
}
.action-btn {
display: flex;
align-items: center;
@ -326,7 +281,7 @@ function handleDelete() {
&:hover {
background: rgba(0, 0, 0, 0.1);
color: #000F33;
color: #374151;
.dark & {
background: rgba(255, 255, 255, 0.1);
@ -335,23 +290,8 @@ function handleDelete() {
}
&.delete:hover {
color: #f86361;
}
}
.pin-toggle-btn {
color: #666666;
.dark & {
color: #666666;
}
&:hover {
color: #000f33;
.dark & {
color: #000f33;
}
background: rgba(239, 68, 68, 0.1);
color: #ef4444;
}
}

View File

@ -1,9 +1,32 @@
<template>
<div class="share-button-wrapper">
<button class="share-btn" :disabled="!currentConversation" @click="handleShareCurrent">
<button
v-if="!isSelectMode"
class="share-btn"
:disabled="conversations.length === 0"
@click="handleStartSelect"
>
<Share2 :size="16" />
<span>分享对话</span>
</button>
<div v-else class="select-actions">
<span class="select-info">
已选择 {{ selectedCount }} 个对话
</span>
<div class="action-buttons">
<button class="action-btn cancel" @click="handleCancel">
取消
</button>
<button
class="action-btn confirm"
:disabled="selectedCount === 0"
@click="handleConfirm"
>
确认分享
</button>
</div>
</div>
</div>
</template>
@ -16,12 +39,21 @@ import { Share2 } from "@/components/icons";
const chatStore = useChatStore();
const settingsStore = useSettingsStore();
const { currentConversation } = storeToRefs(chatStore);
const { isSelectMode, selectedCount, conversations } = storeToRefs(chatStore);
function handleShareCurrent() {
const conversation = currentConversation.value;
if (!conversation) return;
settingsStore.openConversationShareModal(conversation.id);
function handleStartSelect() {
chatStore.enterSelectMode();
}
function handleCancel() {
chatStore.exitSelectMode();
}
function handleConfirm() {
if (chatStore.selectedCount > 0) {
//
settingsStore.openShareModal();
}
}
</script>
@ -74,7 +106,7 @@ function handleShareCurrent() {
}
.select-info {
font-size: 14px;
font-size: 13px;
color: #6b7280;
text-align: center;
@ -92,7 +124,7 @@ function handleShareCurrent() {
flex: 1;
padding: 8px 12px;
border-radius: 10px;
font-size: 14px;
font-size: 13px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;
@ -130,4 +162,4 @@ function handleShareCurrent() {
}
}
}
</style>
</style>

View File

@ -1,804 +0,0 @@
<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted, watch } from 'vue'
import { NImage, NTooltip } from 'naive-ui'
import PlusIcon from '../icons/custom/PlusIcon.vue'
export interface CardItem {
id: string | number
title?: string
name?: string
description?: string
icon?: string
color?: string
url?: string
thumbnail?: string
type?: 'image' | 'file' | 'video' | string
size?: number
mimeType?: string
uploading?: boolean
deleting?: boolean
}
interface Props {
cards: CardItem[]
maxVisible?: number
spreadGap?: number
supportsFiles?: boolean
supportsVision?: boolean
}
const props = withDefaults(defineProps<Props>(), {
maxVisible: 5,
spreadGap: 120,
supportsFiles: true,
supportsVision: true,
})
const emit = defineEmits<{
remove: [id: string | number]
'add-upload': []
}>()
const isExpanded = ref(false)
const isWrapperHovered = ref(false)
const hoveredCardId = ref<string | number | null>(null)
const containerRef = ref<HTMLElement | null>(null)
const canUpload = computed(() => props.supportsFiles || props.supportsVision)
const CARD_SCALE = 0.5
// -
const defaultColors = [
'#06b6d4', // cyan
'#8b5cf6', // purple
'#22c55e', // green
'#f59e0b', // amber
'#ef4444', // red
'#ec4899', // pink
]
function getCardTitle(card: CardItem, index: number) {
return card.title || card.name || `Card ${index + 1}`
}
function getCardIcon(card: CardItem) {
if (card.icon) return card.icon
const icons: Record<string, string> = {
image: '🖼️',
file: '📎',
video: '🎬',
}
return icons[card.type || ''] || '📎'
}
function getCardColor(card: CardItem, index: number) {
if (card.color) return card.color
const colors: Record<string, string> = {
image: 'white',
file: 'white',
video: 'white',
}
return colors[card.type || ''] || defaultColors[index % defaultColors.length]
}
function getCardImageUrl(card: CardItem) {
if (card.type === 'image') {
return card.url || card.thumbnail || ''
}
return card.thumbnail || ''
}
function removeCard(card: CardItem) {
emit('remove', card.id)
}
function expandCards() {
if (!isExpanded.value) {
isExpanded.value = true
}
}
function handleCardClick(card: CardItem) {
if (card.deleting) return
if (!isExpanded.value) {
expandCards()
return
}
}
const cardStyle = computed(() => (index: number, total: number) => {
const color = getCardColor(props.cards[index], index)
const isCardHovered = hoveredCardId.value === props.cards[index]?.id
const isCardDeleting = !!props.cards[index]?.deleting
const hoverBorderColor = isCardHovered ? '#000F33' : color
// - index 0
const stackOffset = 16 * CARD_SCALE //
const stackX = -index * stackOffset
const stackZIndex = total - index
if (!isExpanded.value) {
return {
transform: `translateX(${stackX}px)`,
zIndex: stackZIndex,
opacity: index < props.maxVisible ? 1 : 0,
'--card-border-color': color,
'--card-hover-border-color': hoverBorderColor,
cursor: isCardDeleting ? 'wait' : 'pointer',
}
}
// -
const spreadX = (total - 1 - index) * props.spreadGap * CARD_SCALE
const zIndexValue = isCardHovered ? 100 : total - index
return {
transform: `translateX(${spreadX}px) scale(${isCardHovered ? 1.05 : 1})`,
zIndex: isCardDeleting ? 101 : zIndexValue,
opacity: 1,
'--card-border-color': color,
'--card-hover-border-color': hoverBorderColor,
cursor: isCardDeleting ? 'wait' : 'pointer',
}
})
const handleDocumentClick = (event: MouseEvent) => {
if (!containerRef.value) return
const target = event.target as HTMLElement | null
// Naive UI body
if (
target?.closest(
'.n-image-preview-container, .n-image-preview-toolbar, .n-image-preview-overlay, .n-image-preview-close',
)
) {
return
}
if (!target) return
//
if (!containerRef.value.contains(target)) {
if (isExpanded.value) {
isExpanded.value = false
hoveredCardId.value = null
}
} else {
//
if (!isExpanded.value) {
isExpanded.value = true
}
}
}
onMounted(() => {
document.addEventListener('click', handleDocumentClick)
})
onUnmounted(() => {
document.removeEventListener('click', handleDocumentClick)
})
watch(
() => props.cards.length,
(length) => {
if (length === 0) {
isExpanded.value = false
hoveredCardId.value = null
}
},
)
</script>
<template>
<div v-if="cards.length === 0" class="stacked-cards-empty" aria-label="上传附件操作">
<button type="button" class="stacked-upload-btn" :class="{ disabled: !canUpload }" :disabled="!canUpload"
:title="canUpload ? '上传附件或图片' : '当前模型不支持上传'" @click.stop="emit('add-upload')">
<div class="upload-action-div" :class="{ disabled: !canUpload }">
<PlusIcon :size="13" />
</div>
</button>
</div>
<div v-else ref="containerRef" class="stacked-cards-container" :class="{ 'is-expanded': isExpanded }"
@click="expandCards">
<div class="cards-wrapper" @mouseenter="isWrapperHovered = true" @mouseleave="isWrapperHovered = false">
<TransitionGroup name="card-spread">
<NTooltip
v-for="(card, index) in cards"
:key="card.id"
trigger="hover"
placement="top"
>
<template #trigger>
<div class="card" :style="cardStyle(index, cards.length)"
@mouseenter="hoveredCardId = card.id" @mouseleave="hoveredCardId = null" @click="handleCardClick(card)">
<div class="card-glow" :style="{ background: getCardColor(card, index) }" />
<div v-if="card.type === 'image' && getCardImageUrl(card)" class="card-media">
<NImage
:src="getCardImageUrl(card)"
:alt="getCardTitle(card, index)"
object-fit="cover"
:preview-disabled="!isExpanded || card.uploading || card.deleting"
:img-props="{ loading: 'lazy' }"
/>
<div v-if="card.uploading" class="card-media-uploading">
<div class="card-media-uploading-spinner" aria-hidden="true" />
<span class="card-media-uploading-text">上传中</span>
</div>
<div v-else-if="card.deleting" class="card-media-deleting">
<div class="card-media-deleting-spinner" aria-hidden="true" />
<span class="card-media-deleting-text">删除中</span>
</div>
</div>
<div v-else-if="card.type === 'image'" class="card-preview">
<div v-if="getCardImageUrl(card)" class="card-preview-image">
<NImage
:src="getCardImageUrl(card)"
:alt="getCardTitle(card, index)"
object-fit="cover"
:preview-disabled="!isExpanded || card.uploading || card.deleting"
:img-props="{ loading: 'lazy' }"
/>
</div>
<div class="card-preview-fallback">
<span class="card-icon">
{{ getCardIcon(card) }}
</span>
</div>
<div v-if="card.deleting" class="card-preview-deleting">
<div class="card-preview-deleting-spinner" aria-hidden="true" />
<span class="card-preview-deleting-text">删除中</span>
</div>
</div>
<div v-else class="card-file-preview">
<div class="card-file-icon" aria-hidden="true">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<rect x="2.5" y="1.5" width="11" height="13" rx="1.5" stroke="#666666" />
<path d="M5 5H11" stroke="#666666" stroke-linecap="round" />
<path d="M5 8H11" stroke="#666666" stroke-linecap="round" />
<path d="M5 11H11" stroke="#666666" stroke-linecap="round" />
</svg>
</div>
<div class="card-file-name" :title="getCardTitle(card, index)">
{{ getCardTitle(card, index) }}
</div>
<div v-if="card.deleting" class="card-preview-deleting">
<div class="card-preview-deleting-spinner" aria-hidden="true" />
<span class="card-preview-deleting-text">删除中</span>
</div>
</div>
<button v-if="!card.deleting && isWrapperHovered && (hoveredCardId === card.id || (!hoveredCardId && index === 0))"
type="button" class="card-delete-btn" title="删除 OSS 文件" @click.stop="removeCard(card)">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<rect width="16.0008" height="16" rx="8" fill="#999999" />
<path d="M5.49512 5.33374L10.6692 10.6624" stroke="white" stroke-linecap="round" />
<path d="M10.5068 5.33813L5.33271 10.6668" stroke="white" stroke-linecap="round" />
</svg>
</button>
</div>
</template>
{{ getCardTitle(card, index) }}
</NTooltip>
</TransitionGroup>
</div>
<button
v-if="!isExpanded"
type="button"
class="cards-upload-fab"
:class="{ disabled: !canUpload }"
:disabled="!canUpload"
:title="canUpload ? '上传附件或图片' : '当前模型不支持上传'"
@click.stop="emit('add-upload')"
>
<PlusIcon :size="12" />
</button>
</div>
</template>
<style scoped>
/* TODO: 等待优化边框样式 */
.stacked-cards-container {
position: relative;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
perspective: 1000px;
}
.cards-upload-fab {
position: absolute;
right: 2px;
bottom: 2px;
z-index: 999;
display: inline-flex;
align-items: center;
justify-content: center;
width: 18px;
height: 18px;
border: 1px solid #e5e7eb;
border-radius: 999px;
background: #ffffff;
color: #999999;
box-shadow: 0 1px 4px rgba(15, 23, 42, 0.16);
cursor: pointer;
transition: transform 0.2s ease, opacity 0.2s ease, background 0.2s ease;
}
.cards-upload-fab:hover:not(:disabled) {
transform: scale(1.06);
}
.cards-upload-fab.disabled,
.cards-upload-fab:disabled {
opacity: 0.45;
cursor: not-allowed;
}
.stacked-cards-empty {
display: flex;
align-items: center;
justify-content: center;
}
/* 卡片大小 */
.cards-wrapper {
position: relative;
display: flex;
align-items: center;
justify-content: flex-end;
min-width: 70px;
height: 70px;
}
.card {
position: absolute;
width: 74%;
height: 100%;
border-radius: 10px;
border: 1px solid var(--card-border-color, var(--ffffff, #FFF));
background: url(<path-to-image>) lightgray 50% / cover no-repeat;
cursor: pointer;
/* overflow: hidden; */
transition: transform 0.35s ease-out, opacity 0.35s ease-out;
will-change: transform, opacity;
}
.card-media,
.card-preview,
.card-file-preview {
position: absolute;
inset: 0;
z-index: 0;
}
.card-media {
cursor: default;
border-radius: 10px;
overflow: hidden;
}
.card-media-uploading {
position: absolute;
inset: 0;
z-index: 3;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: 0.3rem;
background: rgba(2, 6, 23, 0.48);
backdrop-filter: blur(2px);
}
.card-media-deleting {
position: absolute;
inset: 0;
z-index: 3;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: 0.3rem;
background: rgba(127, 29, 29, 0.42);
backdrop-filter: blur(2px);
}
.card-media-uploading-spinner {
width: 14px;
height: 14px;
border-radius: 999px;
border: 1.5px solid rgba(255, 255, 255, 0.22);
border-top-color: #ffffff;
animation: card-spin 0.8s linear infinite;
}
.card-media-deleting-spinner {
width: 14px;
height: 14px;
border-radius: 999px;
border: 1.5px solid rgba(255, 255, 255, 0.22);
border-top-color: #fecaca;
animation: card-spin 0.8s linear infinite;
}
.card-media-uploading-text {
color: #fff;
font-size: 0.42rem;
letter-spacing: 0.05em;
font-family: 'JetBrains Mono', monospace;
}
.card-media-deleting-text {
color: #fff;
font-size: 0.42rem;
letter-spacing: 0.05em;
font-family: 'JetBrains Mono', monospace;
}
.card-media-overlay {
position: absolute;
inset: 0;
z-index: 2;
display: flex;
align-items: flex-end;
justify-content: flex-end;
padding: 0.4rem;
border: 0;
background: linear-gradient(180deg, rgba(10, 10, 15, 0.02) 0%, rgba(10, 10, 15, 0.15) 40%, rgba(10, 10, 15, 0.68) 100%);
cursor: zoom-in;
}
.card-media-overlay-badge {
display: inline-flex;
align-items: center;
justify-content: center;
min-width: 24px;
height: 14px;
padding: 0 4px;
border-radius: 999px;
background: rgba(0, 0, 0, 0.38);
color: #f8fafc;
font-family: 'JetBrains Mono', monospace;
font-size: 0.35rem;
letter-spacing: 0.04em;
}
.card-delete-btn {
position: absolute;
top: -0.3rem;
right: -0.3rem;
z-index: 4;
display: inline-flex;
align-items: center;
justify-content: center;
width: 16.001px;
height: 16px;
aspect-ratio: 1 / 1;
padding: 0;
border: 0;
border-radius: 50%;
background: transparent;
cursor: pointer;
transition: transform 0.2s ease, opacity 0.2s ease;
}
.card-delete-btn:hover {
transform: scale(1.08);
}
.card-delete-btn svg {
width: 16.001px;
height: 16px;
display: block;
}
.card-media :deep(.n-image),
.card-preview-image :deep(.n-image) {
width: 100%;
height: 100%;
}
.card-media :deep(.n-image img),
.card-preview-image :deep(.n-image img) {
width: 100%;
height: 100%;
object-fit: cover;
display: block;
}
.card-preview {
display: flex;
align-items: flex-start;
justify-content: center;
padding: 0.9rem 0.9rem 0;
}
.card-preview-image {
position: relative;
width: 100%;
height: 44px;
border-radius: 6px;
overflow: hidden;
}
.card-preview-fallback {
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -42%);
display: flex;
align-items: center;
justify-content: center;
width: 44px;
height: 44px;
border-radius: 9px;
background: linear-gradient(145deg, rgba(255, 255, 255, 0.08), rgba(255, 255, 255, 0.02));
border: 1px solid rgba(255, 255, 255, 0.06);
}
.card-preview-deleting {
position: absolute;
inset: 0;
z-index: 3;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: 0.3rem;
background: rgba(127, 29, 29, 0.38);
backdrop-filter: blur(2px);
border-radius: 9px;
}
.card-preview-deleting-spinner {
width: 14px;
height: 14px;
border-radius: 999px;
border: 1.5px solid rgba(255, 255, 255, 0.22);
border-top-color: #fecaca;
animation: card-spin 0.8s linear infinite;
}
.card-preview-deleting-text {
color: #fff;
font-size: 0.42rem;
letter-spacing: 0.05em;
font-family: 'JetBrains Mono', monospace;
}
.card-preview-fallback .card-icon {
margin: 0;
font-size: 0.95rem;
}
.card-file-preview {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: 6px;
padding: 8px 6px;
background: #fff;
}
.card-file-icon {
width: 16px;
height: 16px;
display: inline-flex;
align-items: center;
justify-content: center;
}
.card-file-name {
width: 100%;
color: var(--6-666666, #666);
font-family: "Microsoft YaHei";
font-size: 10px;
font-style: normal;
font-weight: 400;
line-height: normal;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
text-align: center;
}
.card:hover .card-glow {
opacity: 0.15;
}
.stacked-cards-container.is-expanded .card:hover {
border-color: var(--card-hover-border-color, "#000F33");
}
.stacked-cards-container.is-expanded .card:hover .card-glow {
opacity: 0.26;
}
/* 卡片底部强调线 */
.card-accent {
position: absolute;
bottom: 0;
left: 0;
right: 0;
height: 2px;
opacity: 0.8;
}
.card-icon {
font-size: 1rem;
margin-bottom: 0.375rem;
}
/* 展开动画 */
.card-spread-move,
.card-spread-enter-active,
.card-spread-leave-active {
transition: all 0.35s ease-out;
}
.card-spread-enter-from,
.card-spread-leave-to {
opacity: 0;
transform: scale(0.9);
}
/* Hover hint pinned below the card stack, horizontally centered */
.hover-hint {
  position: absolute;
  left: 50%;
  bottom: -0.75rem;
  transform: translateX(-50%);
  display: flex;
  align-items: center;
  gap: 0.5rem;
  font-family: 'JetBrains Mono', monospace;
  font-size: 0.4rem;
  color: #52525b;
  letter-spacing: 0.05em;
}
.hint-icon {
  color: #06b6d4;
  animation: pulse-glow 2s ease-in-out infinite;
}
/* Soft pulse used by .hint-icon */
@keyframes pulse-glow {
  0%,
  100% {
    opacity: 0.6;
    transform: scale(1);
  }
  50% {
    opacity: 1;
    transform: scale(1.2);
  }
}
/* Full rotation used by .card-preview-deleting-spinner */
@keyframes card-spin {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}
/* Vue transition classes: fade the hover hint in/out */
.hint-fade-enter-active,
.hint-fade-leave-active {
  transition: opacity 0.25s ease-out;
}
.hint-fade-enter-from,
.hint-fade-leave-to {
  opacity: 0;
}
/* Chrome-less button wrapper for the upload action */
.stacked-upload-btn {
  display: inline-flex;
  align-items: center;
  justify-content: center;
  padding: 0;
  border: 0;
  background: transparent;
  cursor: pointer;
  transition: transform 0.2s ease, opacity 0.2s ease;
}
/* Lift slightly on hover, but only while enabled */
.stacked-upload-btn:hover:not(:disabled) {
  transform: translateY(-2px);
}
/* ".disabled" class mirrors the native :disabled attribute state */
.stacked-upload-btn.disabled,
.stacked-upload-btn:disabled {
  opacity: 0.42;
  cursor: not-allowed;
}
/* Upload action tile: fixed-size white panel with its content centered */
.upload-action-div {
  width: 50px;
  height: 70px;
  border-radius: 0;
  background-color: #fff;
  display: grid;
  align-items: center;
  justify-items: center;
}
/* Disabled look: greyed out, dashed outline, no pointer interaction */
.upload-action-div.disabled {
  border: 1px dashed #cbd5e1;
  background: #f3f4f6;
  filter: grayscale(100%);
  opacity: 0.42;
  cursor: not-allowed;
}
/* Dim the icon inside a disabled tile (scoped-style deep selector) */
.upload-action-div.disabled :deep(svg) {
  opacity: 0.5;
}
/* Caption under the upload action icon */
.upload-action-text {
  font-family: 'JetBrains Mono', monospace;
  font-size: 0.4rem;
  letter-spacing: 0.04em;
}
/* Vue transition classes: fade + slide for the upload actions */
.upload-actions-fade-enter-active,
.upload-actions-fade-leave-active {
  transition: opacity 0.2s ease, transform 0.2s ease;
}
.upload-actions-fade-enter-from,
.upload-actions-fade-leave-to {
  opacity: 0;
  /* translateX(-50%) preserves horizontal centering while sliding vertically */
  transform: translateX(-50%) translateY(6px);
}
/* Responsive: compact card sizing on narrow (mobile) viewports */
@media (max-width: 640px) {
  .stacked-cards-container {
    min-height: 120px;
    padding: 0.75rem;
  }
  .cards-wrapper {
    width: 80px;
    height: 110px;
  }
  .card {
    width: 75px;
    height: 100px;
  }
  .card-preview-image {
    height: 36px;
  }
  /* Match the upload tile and empty state to the mobile card footprint */
  .upload-action-div {
    width: 75px;
    height: 100px;
  }
  .stacked-cards-empty {
    width: 75px;
    height: 100px;
  }
  .upload-action-btn {
    height: 18px;
    padding: 0 6px;
  }
}
</style>

1
src/env.d.ts vendored
View File

@ -1 +0,0 @@
/// <reference types="vite/client" />

View File

@ -13,9 +13,8 @@ const router = createRouter({
path: '/share/:id',
name: 'share',
component: () => import('@/views/ShareView.vue'),
alias: ['/chat-ui/share/:id'],
},
],
})
export default router
export default router

View File

@ -3,7 +3,6 @@
*
*/
import { getAuthHeaders } from './request';
import { useSettingsStore } from "@/stores/settings";
// API 端点定义(固定)
const API_ENDPOINTS = {
@ -25,9 +24,6 @@ const API_ENDPOINTS = {
STOP: "/api/chat-ui/stop",
};
const DEFAULT_SYSTEM_PROMPT =
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。";
// 请求类型定义
export interface ChatMessage {
role: "user" | "assistant" | "system";
@ -100,24 +96,6 @@ class ChatApi {
this.baseUrl = baseUrl;
}
private resolveSystemPrompt(explicit?: string): string {
if (explicit?.trim()) {
return explicit.trim();
}
try {
const settingsStore = useSettingsStore();
const fallbackPrompt = settingsStore.settings.defaultSystemPrompt;
if (fallbackPrompt?.trim()) {
return fallbackPrompt.trim();
}
} catch (error) {
console.warn("读取全局默认系统提示词失败,使用内置兜底提示词", error);
}
return DEFAULT_SYSTEM_PROMPT;
}
/**
*
*/
@ -157,7 +135,9 @@ class ChatApi {
// 否则添加系统消息
const systemMessage = {
role: "system",
content: this.resolveSystemPrompt(request.systemPrompt),
content:
request.systemPrompt ||
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。",
};
allMessages = [systemMessage, ...request.history, { role: "user", content: userContent }];
}
@ -165,7 +145,9 @@ class ChatApi {
// 没有历史消息,添加系统消息
const systemMessage = {
role: "system",
content: this.resolveSystemPrompt(request.systemPrompt),
content:
request.systemPrompt ||
"你是一个智能助手,可以分析用户发送的文字,文件或图片内容,并进行回答。",
};
allMessages = [systemMessage, { role: "user", content: userContent }];
}
@ -275,7 +257,6 @@ class ChatApi {
const requestBody = {
...request,
message: userContent,
systemPrompt: this.resolveSystemPrompt(request.systemPrompt),
};
const response = await fetch(`${this.baseUrl}${API_ENDPOINTS.CHAT}`, {

View File

@ -18,6 +18,7 @@ export const authService = {
*
*/
getCurrentUser(): AuthUser | null {
// TODO: 从 token 解析用户信息
return { id: 'default' };
},
@ -46,6 +47,7 @@ export const authService = {
* true
*/
isAuthenticated(): boolean {
// TODO: 实现真实的认证检查
return true;
},

View File

@ -31,12 +31,11 @@ function getToken(): string | null {
* body: JSON.stringify({ name: 'John' })
* });
*/
export async function apiRequest(
url: string,
options: RequestInit = {}
): Promise<Response> {
const authStore = useAuthStore();
const token = getToken();
export async function apiRequest(
url: string,
options: RequestInit = {}
): Promise<Response> {
const token = getToken();
// 判断是否为 FormData不设置 Content-Type 让浏览器自动处理
const isFormData = options.body instanceof FormData;
@ -51,12 +50,12 @@ export async function apiRequest(
},
};
const response = await fetch(url, config);
// 401 认证失败提示
if (response.status === 401 && !(import.meta.env.DEV && authStore.isAuthenticated)) {
window.$toast?.('认证失败,请重新登录', 'error');
}
const response = await fetch(url, config);
// 401 认证失败提示
if (response.status === 401) {
window.$toast?.('认证失败,请重新登录', 'error');
}
return response;
}
@ -98,4 +97,4 @@ export function getAuthHeaders(): Record<string, string> {
headers['Authorization'] = `Bearer ${token}`;
}
return headers;
}
}

View File

@ -1,20 +1,12 @@
/**
*
*/
import { defineStore } from 'pinia';
import { ref, computed } from 'vue';
import type { UserInfo } from '@/types/chat';
// 开发环境默认跳过登录校验,避免频繁登录打断调试
const DEV_AUTH_BYPASS = import.meta.env.DEV;
// MARK: dev 默认 token当 URL 无 token 参数时使用)
const DEV_DEFAULT_TOKEN = '';
const DEV_BYPASS_USER: UserInfo = {
id: 'dev-user',
username: 'dev-user',
nickname: '开发环境用户',
};
import { defineStore } from 'pinia';
import { ref, computed } from 'vue';
import type { UserInfo } from '@/types/chat';
// MARK: dev 默认 token当 URL 无 token 参数时使用)
const DEV_DEFAULT_TOKEN = '';
// 认证接口返回格式
interface AuthResponse {
@ -26,18 +18,17 @@ interface AuthResponse {
}
// 认证接口
const AUTH_CHECK_URL = '/api/auth/check/checkTokenRn';
const AUTH_TOKEN_STORAGE_KEY = 'DEV_DEFAULT_TOKEN';
const AUTH_CHECK_URL = '/api/auth/check/checkTokenRn';
export const useAuthStore = defineStore('auth', () => {
// 状态
const token = ref<string | null>(null);
const user = ref<UserInfo | null>(null);
const isInitialized = ref(false);
// 计算属性
const isAuthenticated = computed(() => DEV_AUTH_BYPASS || !!token.value);
const userId = computed(() => user.value?.username || null); // username 用于 OSS 路径和数据库 user_id
export const useAuthStore = defineStore('auth', () => {
// 状态
const token = ref<string | null>(null);
const user = ref<UserInfo | null>(null);
const isInitialized = ref(false);
// 计算属性
const isAuthenticated = computed(() => !!token.value);
const userId = computed(() => user.value?.username || null); // username 用于 OSS 路径和数据库 user_id
/**
* token
@ -67,27 +58,20 @@ export const useAuthStore = defineStore('auth', () => {
/**
* - URL token
*/
async function init() {
if (DEV_AUTH_BYPASS) {
token.value = null;
user.value = DEV_BYPASS_USER;
isInitialized.value = true;
return;
}
const searchParams = new URLSearchParams(window.location.search);
const urlToken = searchParams.get('token');
async function init() {
const searchParams = new URLSearchParams(window.location.search);
const urlToken = searchParams.get('token');
// 获取 tokenURL > localStorage > 默认值
const tokenValue = urlToken
|| localStorage.getItem(AUTH_TOKEN_STORAGE_KEY)
|| DEV_DEFAULT_TOKEN;
if (!tokenValue) {
isInitialized.value = true;
window.$toast?.('未登录,请先登录', 'error');
return;
}
const tokenValue = urlToken
|| localStorage.getItem('DEV_DEFAULT_TOKEN')
|| DEV_DEFAULT_TOKEN;
if (!tokenValue) {
isInitialized.value = true;
window.$toast?.('未登录,请先登录', 'error');
return;
}
// 验证 token
const userInfo = await checkToken(tokenValue);
@ -138,4 +122,4 @@ export const useAuthStore = defineStore('auth', () => {
getAuthHeader,
init,
};
});
});

View File

@ -1,7 +1,6 @@
import { defineStore } from "pinia";
import { ref } from "vue";
import type { AppSettings, AIModel } from "@/types/chat";
import promptData from "@/assets/prompt.json";
// 分享结果类型
export interface ShareResult {
@ -12,13 +11,6 @@ export interface ShareResult {
}
export const useSettingsStore = defineStore("settings", () => {
const MIN_SIDEBAR_WIDTH = 310;
const MAX_SIDEBAR_WIDTH = 400;
const LEARNING_MODE_PROMPT_TITLE = "让可学 AI 成为我的全科学习导师?";
const LEARNING_MODE_SYSTEM_PROMPT =
promptData["分析与实践"]?.[LEARNING_MODE_PROMPT_TITLE] ||
"你是一位“学习模式”引导员,通过严格的苏格拉底式提问法,引导用户自己思考并逐步得出答案。";
// 默认设置
const defaultSettings: AppSettings = {
// 外观设置
@ -36,8 +28,6 @@ export const useSettingsStore = defineStore("settings", () => {
defaultTemperature: 0.7,
defaultMaxTokens: 4096,
defaultSystemPrompt: "你是一个有帮助的 AI 助手。",
learningModeEnabled: false,
learningModePrevDefaultSystemPrompt: "",
// 功能设置
enableSound: true,
@ -98,7 +88,7 @@ export const useSettingsStore = defineStore("settings", () => {
// 状态
const settings = ref<AppSettings>({ ...defaultSettings });
const sidebarCollapsed = ref(false);
const sidebarWidth = ref(MIN_SIDEBAR_WIDTH);
const sidebarWidth = ref(280);
const showShortcutsModal = ref(false);
const showSearchModal = ref(false);
const showSettingsModal = ref(false);
@ -108,7 +98,6 @@ export const useSettingsStore = defineStore("settings", () => {
const showShareModal = ref(false);
const showShareResultModal = ref(false);
const shareResult = ref<ShareResult | null>(null);
const shareConversationId = ref<string | null>(null);
// 主题相关
function applyTheme(theme: AppSettings["theme"]) {
@ -162,7 +151,7 @@ export const useSettingsStore = defineStore("settings", () => {
}
function setSidebarWidth(width: number) {
sidebarWidth.value = Math.max(MIN_SIDEBAR_WIDTH, Math.min(MAX_SIDEBAR_WIDTH, width));
sidebarWidth.value = Math.max(200, Math.min(400, width));
saveToStorage();
}
@ -204,14 +193,8 @@ export const useSettingsStore = defineStore("settings", () => {
showShareModal.value = true;
}
function openConversationShareModal(conversationId: string) {
shareConversationId.value = conversationId;
showShareModal.value = true;
}
function closeShareModal() {
showShareModal.value = false;
shareConversationId.value = null;
}
function openShareResultModal() {
@ -230,42 +213,9 @@ export const useSettingsStore = defineStore("settings", () => {
shareResult.value = null;
}
function normalizeLearningModeState() {
if (!settings.value.learningModeEnabled) return;
const currentPrompt = settings.value.defaultSystemPrompt || "";
const isUsingLearningPrompt = currentPrompt === LEARNING_MODE_SYSTEM_PROMPT;
if (!isUsingLearningPrompt && !settings.value.learningModePrevDefaultSystemPrompt) {
settings.value.learningModePrevDefaultSystemPrompt = currentPrompt;
}
settings.value.defaultSystemPrompt = LEARNING_MODE_SYSTEM_PROMPT;
}
function setLearningModeEnabled(enabled: boolean) {
if (enabled) {
if (!settings.value.learningModePrevDefaultSystemPrompt) {
settings.value.learningModePrevDefaultSystemPrompt =
settings.value.defaultSystemPrompt || defaultSettings.defaultSystemPrompt;
}
settings.value.learningModeEnabled = true;
settings.value.defaultSystemPrompt = LEARNING_MODE_SYSTEM_PROMPT;
} else {
settings.value.learningModeEnabled = false;
if (settings.value.learningModePrevDefaultSystemPrompt) {
settings.value.defaultSystemPrompt =
settings.value.learningModePrevDefaultSystemPrompt;
}
settings.value.learningModePrevDefaultSystemPrompt = "";
}
saveToStorage();
}
// 更新设置
function updateSettings(updates: Partial<AppSettings>) {
Object.assign(settings.value, updates);
normalizeLearningModeState();
if (updates.theme) {
applyTheme(updates.theme);
@ -296,7 +246,6 @@ export const useSettingsStore = defineStore("settings", () => {
try {
const imported = JSON.parse(json);
settings.value = { ...defaultSettings, ...imported };
normalizeLearningModeState();
applyTheme(settings.value.theme);
applyFontSize(settings.value.fontSize);
saveToStorage();
@ -345,7 +294,6 @@ export const useSettingsStore = defineStore("settings", () => {
if (stored) {
settings.value = { ...defaultSettings, ...JSON.parse(stored) };
}
normalizeLearningModeState();
const collapsedStored = localStorage.getItem("chat-sidebar-collapsed");
if (collapsedStored) {
@ -354,7 +302,7 @@ export const useSettingsStore = defineStore("settings", () => {
const widthStored = localStorage.getItem("chat-sidebar-width");
if (widthStored) {
setSidebarWidth(JSON.parse(widthStored));
sidebarWidth.value = JSON.parse(widthStored);
}
// 应用主题和字体
@ -392,7 +340,6 @@ export const useSettingsStore = defineStore("settings", () => {
showShareModal,
showShareResultModal,
shareResult,
shareConversationId,
// 方法
toggleTheme,
@ -410,7 +357,6 @@ export const useSettingsStore = defineStore("settings", () => {
closeConversationSettingsModal,
// 分享模态框方法
openShareModal,
openConversationShareModal,
closeShareModal,
openShareResultModal,
closeShareResultModal,
@ -423,6 +369,5 @@ export const useSettingsStore = defineStore("settings", () => {
loadFromStorage,
getSelectedModelId,
setSelectedModelId,
setLearningModeEnabled,
};
});

View File

@ -37,12 +37,6 @@
--chat-sidebar-width: 280px;
--chat-input-height: 140px;
--header-height: 60px;
--app-text-color: #333;
--app-font-family: "Microsoft YaHei", sans-serif;
--app-font-size: 14px;
--app-font-style: normal;
--app-font-weight: 400;
--app-line-height: normal;
}
// 基础样式重置
@ -50,18 +44,15 @@
box-sizing: border-box;
}
html,
body,
#app {
body {
margin: 0;
min-width: 320px;
min-height: 100vh;
color: var(--app-text-color);
font-family: var(--app-font-family);
font-size: var(--app-font-size);
font-style: var(--app-font-style);
font-weight: var(--app-font-weight);
line-height: var(--app-line-height);
font-family:
"Inter",
-apple-system,
BlinkMacSystemFont,
"Segoe UI",
Roboto,
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
@ -91,7 +82,3 @@ body,
opacity: 0;
transform: translateX(-20px);
}
hr.hr-node[custom-id="playground-demo"]{
margin: 1rem 0;
}

View File

@ -126,8 +126,6 @@ export interface AppSettings {
defaultTemperature: number;
defaultMaxTokens: number;
defaultSystemPrompt: string;
learningModeEnabled: boolean;
learningModePrevDefaultSystemPrompt: string;
// 功能设置
enableSound: boolean;

View File

@ -10,36 +10,21 @@ export function formatTimestamp(timestamp: number): string {
const date = new Date(timestamp);
const now = new Date();
const diff = now.getTime() - date.getTime();
const absDiff = Math.abs(diff);
if (diff >= 0 && diff < 60 * 1000) {
if (diff < 60 * 1000) {
return "刚刚";
}
if (diff < 0 && absDiff < 60 * 1000) {
return "即将";
}
if (diff >= 0 && diff < 60 * 60 * 1000) {
if (diff < 60 * 60 * 1000) {
const minutes = Math.floor(diff / (60 * 1000));
return `${minutes}分钟前`;
}
if (diff < 0 && absDiff < 60 * 60 * 1000) {
const minutes = Math.floor(absDiff / (60 * 1000));
return `${minutes}分钟后`;
}
if (diff >= 0 && diff < 24 * 60 * 60 * 1000) {
if (diff < 24 * 60 * 60 * 1000) {
const hours = Math.floor(diff / (60 * 60 * 1000));
return `${hours}小时前`;
}
if (diff < 0 && absDiff < 24 * 60 * 60 * 1000) {
const hours = Math.floor(absDiff / (60 * 60 * 1000));
return `${hours}小时后`;
}
if (date.getFullYear() === now.getFullYear()) {
return `${date.getMonth() + 1}${date.getDate()}${padZero(date.getHours())}:${padZero(date.getMinutes())}`;
}

View File

@ -85,7 +85,6 @@ authStore.init()
display: flex;
width: 100%;
height: 100%;
overflow-x: auto;
overflow-y: hidden;
overflow: hidden;
}
</style>
</style>

View File

@ -347,7 +347,7 @@ onMounted(() => {
display: flex;
align-items: center;
gap: 6px;
font-size: 14px;
font-size: 13px;
color: #6b7280;
&.expired {
@ -411,7 +411,7 @@ onMounted(() => {
}
.verify-error {
font-size: 14px;
font-size: 13px;
color: #ef4444;
margin: 0;
}
@ -482,7 +482,7 @@ onMounted(() => {
}
.conversation-count {
font-size: 14px;
font-size: 13px;
color: #6b7280;
}

View File

@ -20,7 +20,7 @@ trap cleanup SIGINT SIGTERM EXIT
# 启动后端
echo "[系统] 正在启动后端服务器..."
cd /home/mt/Project/ai-chat-ui/server
cd /home/mt/project/ai-chat-ui/server
if [ -d ".venv" ]; then
source .venv/bin/activate
# 使用 -u 参数强制不缓冲输出,实时显示日志
@ -34,7 +34,7 @@ sleep 2
# 启动前端
echo "[系统] 正在启动前端服务器..."
cd /home/mt/Project/ai-chat-ui
cd /home/mt/project/ai-chat-ui
# 启动 vite 开发服务器
npm run dev &