feat(model): 从后端获取模型列表并动态渲染对应按钮逻辑,提升用户交互一致性 [实现:按钮根据模型支持的模式动态生成]

This commit is contained in:
SuperManTouX 2026-03-06 17:00:56 +08:00
parent 5e81c903cf
commit bfec192158
8 changed files with 332 additions and 68 deletions

View File

@ -168,11 +168,74 @@ def _supports_thinking(self, model: str) -> bool:
--- ---
## 功能:模型列表从后端动态获取
### 需求
前端模型选择器中的模型列表改为从后端 API 获取,而非硬编码。
### 解决方案
#### 前端 `src/services/api.ts`
修改 `getModels` 方法,调用后端 API
```typescript
async getModels(): Promise<ModelInfo[]> {
try {
const response = await fetch(`${this.baseUrl}${API_ENDPOINTS.MODELS}`, {
method: "GET",
headers: {
"Content-Type": "application/json",
},
});
if (!response.ok) {
throw new Error(`获取模型列表失败: HTTP ${response.status}`);
}
const data = await response.json();
// 后端返回格式: { object: "list", data: [...] }
return data.data || [];
} catch (error) {
console.error("获取模型列表失败:", error);
// 返回默认模型列表作为降级
return [...];
}
}
```
#### 后端 `server/main.py`
已有 `/api/chat-ui/models` 端点:
```python
@app.get("/api/chat-ui/models")
async def get_models():
    """Model list endpoint: aggregate models from every available platform adapter."""
    from adapters import get_all_adapters

    # Only adapters with a configured API key (is_available) contribute models.
    aggregated = [
        model.to_dict()
        for adapter in get_all_adapters().values()
        if adapter.is_available()
        for model in adapter.list_models()
    ]
    return {"object": "list", "data": aggregated}
```
#### 特点
- **动态聚合**:自动聚合所有已配置 API Key 的平台模型
- **按需显示**:只有配置了 API Key 的平台才会显示模型
- **降级处理**:API 获取失败时返回默认模型列表
---
## 涉及文件 ## 涉及文件
| 文件 | 修改类型 | | 文件 | 修改类型 |
|------|----------| |------|----------|
| `src/services/api.ts` | 新增 `StreamChunk` 接口,修改 `streamChat` 方法 | | `src/services/api.ts` | 新增 `StreamChunk` 接口,修改 `streamChat` 方法,修改 `getModels` 方法 |
| `src/components/chat/ChatMain.vue` | 修改流式处理逻辑,支持 `reasoning` 类型 | | `src/components/chat/ChatMain.vue` | 修改流式处理逻辑,支持 `reasoning` 类型 |
| `server/adapters/glm_adapter.py` | 修改 `_build_messages` 和 `_resolve_model` 方法 | | `server/adapters/glm_adapter.py` | 修改 `_build_messages` 和 `_resolve_model` 方法 |
| `server/adapters/openai_adapter.py` | 添加 DeepSeek 深度思考支持 | | `server/adapters/openai_adapter.py` | 添加 DeepSeek 深度思考支持 |
@ -197,3 +260,8 @@ def _supports_thinking(self, model: str) -> bool:
- 上传图片或 PDF 文件 - 上传图片或 PDF 文件
- 确认后端日志显示模型切换为 glm-4.6v - 确认后端日志显示模型切换为 glm-4.6v
- 确认多模态内容正确处理 - 确认多模态内容正确处理
4. **模型列表动态获取测试**
- 检查前端控制台,确认调用了 `/api/chat-ui/models`
- 确认模型选择器显示后端返回的模型列表
- 停止后端服务,确认降级模型列表正常显示

View File

@ -16,6 +16,11 @@ class ModelInfo:
description: str description: str
max_tokens: int = 4096 max_tokens: int = 4096
provider: str = "unknown" provider: str = "unknown"
# 能力标志
supports_thinking: bool = False # 是否支持深度思考
supports_web_search: bool = False # 是否支持在线搜索
supports_vision: bool = False # 是否支持图片识别
supports_files: bool = False # 是否支持文件附件(PDF、DOCX等)
def to_dict(self) -> Dict[str, Any]: def to_dict(self) -> Dict[str, Any]:
return { return {
@ -24,6 +29,10 @@ class ModelInfo:
"description": self.description, "description": self.description,
"maxTokens": self.max_tokens, "maxTokens": self.max_tokens,
"provider": self.provider, "provider": self.provider,
"supports_thinking": self.supports_thinking,
"supports_web_search": self.supports_web_search,
"supports_vision": self.supports_vision,
"supports_files": self.supports_files,
} }

View File

@ -22,6 +22,10 @@ DASHSCOPE_MODELS = [
description="最强大的模型", description="最强大的模型",
max_tokens=8192, max_tokens=8192,
provider="Aliyun", provider="Aliyun",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="qwen-plus", id="qwen-plus",
@ -29,6 +33,10 @@ DASHSCOPE_MODELS = [
description="能力均衡", description="能力均衡",
max_tokens=8192, max_tokens=8192,
provider="Aliyun", provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=False,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="qwen-turbo", id="qwen-turbo",
@ -36,6 +44,10 @@ DASHSCOPE_MODELS = [
description="速度更快、成本更低", description="速度更快、成本更低",
max_tokens=8192, max_tokens=8192,
provider="Aliyun", provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=False,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="qwen-vl-max", id="qwen-vl-max",
@ -43,6 +55,10 @@ DASHSCOPE_MODELS = [
description="支持视觉理解的多模态模型", description="支持视觉理解的多模态模型",
max_tokens=8192, max_tokens=8192,
provider="Aliyun", provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=True,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="qwen-vl-plus", id="qwen-vl-plus",
@ -50,6 +66,10 @@ DASHSCOPE_MODELS = [
description="支持视觉理解的多模态模型", description="支持视觉理解的多模态模型",
max_tokens=8192, max_tokens=8192,
provider="Aliyun", provider="Aliyun",
supports_thinking=False,
supports_web_search=False,
supports_vision=True,
supports_files=False,
), ),
] ]
@ -84,7 +104,9 @@ class DashScopeAdapter(BaseAdapter):
logger.info(f" - temperature: {request.temperature}") logger.info(f" - temperature: {request.temperature}")
logger.info(f" - max_tokens: {request.max_tokens}") logger.info(f" - max_tokens: {request.max_tokens}")
logger.info(f" - files: {request.files}") logger.info(f" - files: {request.files}")
logger.info(f" - messages: {json.dumps(request.messages, ensure_ascii=False, indent=2)}") logger.info(
f" - messages: {json.dumps(request.messages, ensure_ascii=False, indent=2)}"
)
# 检测是否包含多模态内容 # 检测是否包含多模态内容
has_multimodal = self._has_multimodal_content(request) has_multimodal = self._has_multimodal_content(request)
@ -161,8 +183,10 @@ class DashScopeAdapter(BaseAdapter):
if resp.status_code == 200: if resp.status_code == 200:
chunk_count += 1 chunk_count += 1
content = resp.output.choices[0].message.content content = resp.output.choices[0].message.content
if content: if content and len(content) > len(full_content):
full_content += content # DashScope 流式响应返回完整内容,计算增量
delta = content[len(full_content) :]
full_content = content
data = { data = {
"id": f"chatcmpl-{generate_unique_id()}", "id": f"chatcmpl-{generate_unique_id()}",
"object": "chat.completion.chunk", "object": "chat.completion.chunk",
@ -171,7 +195,7 @@ class DashScopeAdapter(BaseAdapter):
"choices": [ "choices": [
{ {
"index": 0, "index": 0,
"delta": {"content": content}, "delta": {"content": delta},
"finish_reason": None, "finish_reason": None,
} }
], ],
@ -192,7 +216,11 @@ class DashScopeAdapter(BaseAdapter):
logger.info(f"[DashScope] 流式文本响应完成:") logger.info(f"[DashScope] 流式文本响应完成:")
logger.info(f" - chunks: {chunk_count}") logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符") logger.info(f" - content_length: {len(full_content)} 字符")
logger.info(f" - content_preview: {full_content[:200]}..." if len(full_content) > 200 else f" - content: {full_content}") logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
return StreamingResponse(generator(), media_type="text/event-stream") return StreamingResponse(generator(), media_type="text/event-stream")
@ -237,7 +265,11 @@ class DashScopeAdapter(BaseAdapter):
# 打印响应结果 # 打印响应结果
logger.info(f"[DashScope] 响应结果:") logger.info(f"[DashScope] 响应结果:")
logger.info(f" - content_length: {len(content)} 字符") logger.info(f" - content_length: {len(content)} 字符")
logger.info(f" - content_preview: {content[:200]}..." if len(content) > 200 else f" - content: {content}") logger.info(
f" - content_preview: {content[:200]}..."
if len(content) > 200
else f" - content: {content}"
)
if hasattr(resp, "usage") and resp.usage: if hasattr(resp, "usage") and resp.usage:
logger.info(f" - usage: {response['usage']}") logger.info(f" - usage: {response['usage']}")

View File

@ -23,6 +23,10 @@ GLM_MODELS = [
description="最新旗舰模型,支持文本/图像/文档/深度思考", description="最新旗舰模型,支持文本/图像/文档/深度思考",
max_tokens=128000, max_tokens=128000,
provider="ZhipuAI", provider="ZhipuAI",
supports_thinking=True,
supports_web_search=True,
supports_vision=True,
supports_files=True,
), ),
ModelInfo( ModelInfo(
id="glm-4-flash", id="glm-4-flash",
@ -30,6 +34,10 @@ GLM_MODELS = [
description="高性价比文本模型", description="高性价比文本模型",
max_tokens=128000, max_tokens=128000,
provider="ZhipuAI", provider="ZhipuAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="glm-4v-plus-0111", id="glm-4v-plus-0111",
@ -37,6 +45,10 @@ GLM_MODELS = [
description="图像 + PDF/DOCX 原生多模态", description="图像 + PDF/DOCX 原生多模态",
max_tokens=128000, max_tokens=128000,
provider="ZhipuAI", provider="ZhipuAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=True,
supports_files=True,
), ),
ModelInfo( ModelInfo(
id="glm-z1-flash", id="glm-z1-flash",
@ -44,6 +56,10 @@ GLM_MODELS = [
description="深度思考推理模型", description="深度思考推理模型",
max_tokens=128000, max_tokens=128000,
provider="ZhipuAI", provider="ZhipuAI",
supports_thinking=True,
supports_web_search=False,
supports_vision=False,
supports_files=False,
), ),
] ]

View File

@ -22,6 +22,10 @@ OPENAI_MODELS = [
description="最新旗舰多模态模型", description="最新旗舰多模态模型",
max_tokens=128000, max_tokens=128000,
provider="OpenAI", provider="OpenAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=True,
supports_files=True,
), ),
ModelInfo( ModelInfo(
id="gpt-4o-mini", id="gpt-4o-mini",
@ -29,6 +33,10 @@ OPENAI_MODELS = [
description="高性价比多模态模型", description="高性价比多模态模型",
max_tokens=128000, max_tokens=128000,
provider="OpenAI", provider="OpenAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=True,
supports_files=True,
), ),
ModelInfo( ModelInfo(
id="gpt-4-turbo", id="gpt-4-turbo",
@ -36,6 +44,10 @@ OPENAI_MODELS = [
description="GPT-4 增强版", description="GPT-4 增强版",
max_tokens=128000, max_tokens=128000,
provider="OpenAI", provider="OpenAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=True,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="gpt-3.5-turbo", id="gpt-3.5-turbo",
@ -43,6 +55,10 @@ OPENAI_MODELS = [
description="快速经济的选择", description="快速经济的选择",
max_tokens=16385, max_tokens=16385,
provider="OpenAI", provider="OpenAI",
supports_thinking=False,
supports_web_search=True,
supports_vision=False,
supports_files=False,
), ),
] ]
@ -54,6 +70,10 @@ DEEPSEEK_MODELS = [
description="Deepseek 对话模型", description="Deepseek 对话模型",
max_tokens=64000, max_tokens=64000,
provider="Deepseek", provider="Deepseek",
supports_thinking=False,
supports_web_search=False,
supports_vision=False,
supports_files=False,
), ),
ModelInfo( ModelInfo(
id="deepseek-reasoner", id="deepseek-reasoner",
@ -61,6 +81,10 @@ DEEPSEEK_MODELS = [
description="Deepseek 推理模型(支持深度思考)", description="Deepseek 推理模型(支持深度思考)",
max_tokens=64000, max_tokens=64000,
provider="Deepseek", provider="Deepseek",
supports_thinking=True,
supports_web_search=True, # 注:通过内置检索增强实现
supports_vision=False,
supports_files=False,
), ),
] ]
@ -94,9 +118,7 @@ class OpenAIAdapter(BaseAdapter):
if self._provider_type == "deepseek": if self._provider_type == "deepseek":
api_key = os.getenv("DEEPSEEK_API_KEY", "") api_key = os.getenv("DEEPSEEK_API_KEY", "")
base_url = os.getenv( base_url = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
"DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1"
)
else: else:
api_key = os.getenv("OPENAI_API_KEY", "") api_key = os.getenv("OPENAI_API_KEY", "")
base_url = os.getenv("OPENAI_BASE_URL") # 可选自定义端点 base_url = os.getenv("OPENAI_BASE_URL") # 可选自定义端点
@ -133,7 +155,9 @@ class OpenAIAdapter(BaseAdapter):
# 构建消息 # 构建消息
messages = self._build_messages(request) messages = self._build_messages(request)
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}") logger.info(
f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}"
)
# 构建请求参数 # 构建请求参数
kwargs = { kwargs = {
@ -150,7 +174,9 @@ class OpenAIAdapter(BaseAdapter):
if self._supports_thinking(request.model): if self._supports_thinking(request.model):
extra_body = {"thinking": {"type": "enabled"}} extra_body = {"thinking": {"type": "enabled"}}
kwargs["extra_body"] = extra_body kwargs["extra_body"] = extra_body
logger.info(f"[{provider_name}] 深度思考已启用: extra_body = {extra_body}") logger.info(
f"[{provider_name}] 深度思考已启用: extra_body = {extra_body}"
)
if request.stream: if request.stream:
return self._stream_chat(client, kwargs, extra_body) return self._stream_chat(client, kwargs, extra_body)
@ -184,7 +210,9 @@ class OpenAIAdapter(BaseAdapter):
return messages return messages
def _stream_chat(self, client, kwargs: Dict, extra_body: Optional[Dict] = None) -> StreamingResponse: def _stream_chat(
self, client, kwargs: Dict, extra_body: Optional[Dict] = None
) -> StreamingResponse:
"""流式聊天""" """流式聊天"""
provider_name = self._provider_type.upper() provider_name = self._provider_type.upper()
logger.info(f"[{provider_name}] 开始流式响应...") logger.info(f"[{provider_name}] 开始流式响应...")
@ -242,11 +270,17 @@ class OpenAIAdapter(BaseAdapter):
logger.info(f" - content_length: {len(full_content)} 字符") logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning: if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符") logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(f" - content_preview: {full_content[:200]}..." if len(full_content) > 200 else f" - content: {full_content}") logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
else f" - content: {full_content}"
)
return StreamingResponse(generator(), media_type="text/event-stream") return StreamingResponse(generator(), media_type="text/event-stream")
def _sync_chat(self, client, kwargs: Dict, extra_body: Optional[Dict] = None) -> JSONResponse: def _sync_chat(
self, client, kwargs: Dict, extra_body: Optional[Dict] = None
) -> JSONResponse:
"""非流式聊天""" """非流式聊天"""
from utils.helpers import generate_unique_id, get_current_timestamp from utils.helpers import generate_unique_id, get_current_timestamp
@ -273,9 +307,9 @@ class OpenAIAdapter(BaseAdapter):
# 添加推理内容(如有) # 添加推理内容(如有)
if hasattr(message, "reasoning_content") and message.reasoning_content: if hasattr(message, "reasoning_content") and message.reasoning_content:
response["choices"][0]["message"]["reasoning_content"] = ( response["choices"][0]["message"][
message.reasoning_content "reasoning_content"
) ] = message.reasoning_content
if resp.usage: if resp.usage:
response["usage"] = { response["usage"] = {
@ -290,7 +324,11 @@ class OpenAIAdapter(BaseAdapter):
logger.info(f" - content_length: {len(content)} 字符") logger.info(f" - content_length: {len(content)} 字符")
if hasattr(message, "reasoning_content") and message.reasoning_content: if hasattr(message, "reasoning_content") and message.reasoning_content:
logger.info(f" - reasoning_length: {len(message.reasoning_content)} 字符") logger.info(f" - reasoning_length: {len(message.reasoning_content)} 字符")
logger.info(f" - content_preview: {content[:200]}..." if len(content) > 200 else f" - content: {content}") logger.info(
f" - content_preview: {content[:200]}..."
if len(content) > 200
else f" - content: {content}"
)
if resp.usage: if resp.usage:
logger.info(f" - usage: {response['usage']}") logger.info(f" - usage: {response['usage']}")

View File

@ -35,6 +35,10 @@
:is-streaming="isStreaming" :is-streaming="isStreaming"
:send-on-enter="settings.sendOnEnter" :send-on-enter="settings.sendOnEnter"
:disabled="false" :disabled="false"
:supports_thinking="currentModelCapabilities.supports_thinking"
:supports_web_search="currentModelCapabilities.supports_web_search"
:supports_vision="currentModelCapabilities.supports_vision"
:supports_files="currentModelCapabilities.supports_files"
@send="handleSend" @send="handleSend"
@stop="handleStop" @stop="handleStop"
/> />
@ -44,7 +48,7 @@
</template> </template>
<script setup lang="ts"> <script setup lang="ts">
import { ref, computed, watch, nextTick } from "vue"; import { ref, computed, watch, nextTick, onMounted } from "vue";
import { storeToRefs } from "pinia"; import { storeToRefs } from "pinia";
import { useChatStore } from "@/stores/chat"; import { useChatStore } from "@/stores/chat";
import { useSettingsStore } from "@/stores/settings"; import { useSettingsStore } from "@/stores/settings";
@ -53,7 +57,7 @@ import MessageList from "./MessageList.vue";
import ChatInput from "@/components/input/ChatInput.vue"; import ChatInput from "@/components/input/ChatInput.vue";
import { MessageType, MessageRole } from "@/types/chat"; import { MessageType, MessageRole } from "@/types/chat";
import type { Attachment, Suggestion } from "@/types/chat"; import type { Attachment, Suggestion } from "@/types/chat";
import { chatApi } from "@/services/api"; import { chatApi, type ModelInfo } from "@/services/api";
defineEmits<{ defineEmits<{
"toggle-sidebar": []; "toggle-sidebar": [];
@ -72,6 +76,40 @@ const isTyping = ref(false);
const currentStreamingMessageId = ref<string | null>(null); const currentStreamingMessageId = ref<string | null>(null);
const abortController: any = ref<AbortController | null>(null); const abortController: any = ref<AbortController | null>(null);
//
const availableModels = ref<ModelInfo[]>([]);
//
onMounted(async () => {
try {
availableModels.value = await chatApi.getModels();
} catch (error) {
console.error("获取模型列表失败:", error);
}
});
//
const currentModelCapabilities = computed(() => {
const modelId = settings.value.defaultModel;
const model = availableModels.value.find((m) => m.id === modelId);
console.log(model);
if (model) {
return {
supports_thinking: model.supports_thinking ?? false,
supports_web_search: model.supports_web_search ?? false,
supports_vision: model.supports_vision ?? false,
supports_files: model.supports_files ?? false,
};
}
//
return {
supports_thinking: true,
supports_web_search: true,
supports_vision: true,
supports_files: true,
};
});
const messages: any = computed(() => currentConversation.value?.messages || []); const messages: any = computed(() => currentConversation.value?.messages || []);
const inputPlaceholder = computed(() => { const inputPlaceholder = computed(() => {

View File

@ -15,12 +15,24 @@
<!-- 左侧功能按钮 --> <!-- 左侧功能按钮 -->
<div class="input-actions left"> <div class="input-actions left">
<!-- 附件按钮 --> <!-- 附件按钮 -->
<button class="action-btn" title="添加附件" @click="triggerFileInput"> <button
class="action-btn"
:class="{ disabled: !supports_files }"
:disabled="!supports_files"
:title="supports_files ? '添加附件' : '当前模型不支持文件附件'"
@click="supports_files && triggerFileInput()"
>
<Paperclip :size="20" /> <Paperclip :size="20" />
</button> </button>
<!-- 图片按钮 --> <!-- 图片按钮 -->
<button class="action-btn" title="添加图片" @click="triggerImageInput"> <button
class="action-btn"
:class="{ disabled: !supports_vision }"
:disabled="!supports_vision"
:title="supports_vision ? '添加图片' : '当前模型不支持图片识别'"
@click="supports_vision && triggerImageInput()"
>
<Image :size="20" /> <Image :size="20" />
</button> </button>
@ -88,9 +100,10 @@
<!-- 深度思考开关 --> <!-- 深度思考开关 -->
<button <button
class="toolbar-btn" class="toolbar-btn"
:class="{ active: isDeepThinking }" :class="{ active: isDeepThinking, disabled: !supports_thinking }"
title="深度思考" :disabled="!supports_thinking"
@click="toggleDeepThink" :title="supports_thinking ? '深度思考' : '当前模型不支持深度思考'"
@click="supports_thinking && toggleDeepThink()"
> >
<Brain :size="16" /> <Brain :size="16" />
<span>深度思考</span> <span>深度思考</span>
@ -99,9 +112,10 @@
<!-- 深度搜索开关 --> <!-- 深度搜索开关 -->
<button <button
class="toolbar-btn" class="toolbar-btn"
:class="{ active: isDeepSearch }" :class="{ active: isDeepSearch, disabled: !supports_web_search }"
title="深度搜索" :disabled="!supports_web_search"
@click="toggleDeepSearch" :title="supports_web_search ? '深度搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleDeepSearch()"
> >
<Sparkles :size="16" /> <Sparkles :size="16" />
<span>深度搜索</span> <span>深度搜索</span>
@ -110,9 +124,10 @@
<!-- 联网搜索开关 --> <!-- 联网搜索开关 -->
<button <button
class="toolbar-btn" class="toolbar-btn"
:class="{ active: isWebSearch }" :class="{ active: isWebSearch, disabled: !supports_web_search }"
title="联网搜索" :disabled="!supports_web_search"
@click="toggleWebSearch" :title="supports_web_search ? '联网搜索' : '当前模型不支持联网搜索'"
@click="supports_web_search && toggleWebSearch()"
> >
<Globe :size="16" /> <Globe :size="16" />
<span>联网搜索</span> <span>联网搜索</span>
@ -171,6 +186,11 @@ const props = withDefaults(
sendOnEnter?: boolean; sendOnEnter?: boolean;
maxChars?: number; maxChars?: number;
disabled?: boolean; disabled?: boolean;
//
supports_thinking?: boolean;
supports_web_search?: boolean;
supports_vision?: boolean;
supports_files?: boolean;
}>(), }>(),
{ {
placeholder: "输入你的问题...", placeholder: "输入你的问题...",
@ -178,6 +198,11 @@ const props = withDefaults(
sendOnEnter: false, sendOnEnter: false,
maxChars: 4000, maxChars: 4000,
disabled: false, disabled: false,
//
supports_thinking: true,
supports_web_search: true,
supports_vision: true,
supports_files: true,
}, },
); );
@ -518,6 +543,17 @@ onMounted(() => {
} }
} }
&.disabled,
&:disabled {
opacity: 0.4;
cursor: not-allowed;
&:hover {
background: transparent;
color: #6b7280;
}
}
&.send { &.send {
background: #e5e7eb; background: #e5e7eb;
color: #9ca3af; color: #9ca3af;
@ -639,6 +675,17 @@ onMounted(() => {
color: #3b82f6; color: #3b82f6;
} }
} }
&.disabled,
&:disabled {
opacity: 0.4;
cursor: not-allowed;
&:hover {
background: transparent;
color: #6b7280;
}
}
} }
.toolbar-right { .toolbar-right {

View File

@ -67,6 +67,10 @@ export interface ModelInfo {
description: string; description: string;
maxTokens: number; maxTokens: number;
provider: string; provider: string;
supports_thinking: boolean;
supports_web_search: boolean;
supports_vision: boolean;
supports_files: boolean;
} }
export interface UploadResult { export interface UploadResult {
@ -270,37 +274,49 @@ class ChatApi {
* *
*/ */
async getModels(): Promise<ModelInfo[]> { async getModels(): Promise<ModelInfo[]> {
return [ try {
{ const response = await fetch(`${this.baseUrl}${API_ENDPOINTS.MODELS}`, {
id: "glm-4.6", method: "GET",
name: "智普 GLM-4.6", headers: {
description: "最强大的模型", "Content-Type": "application/json",
maxTokens: 8192, },
provider: "Zhipu", });
},
// GLM-4.5,联网搜索功能有问题 if (!response.ok) {
// { throw new Error(`获取模型列表失败: HTTP ${response.status}`);
// id: "glm-4.5", }
// name: "智普 GLM-4.5",
// description: "能力均衡", const data = await response.json();
// maxTokens: 8192, // 后端返回格式: { object: "list", data: [...] }
// provider: "Zhipu", return data.data || [];
// }, } catch (error) {
{ console.error("获取模型列表失败:", error);
id: "deepseek-chat", // 返回默认模型列表作为降级
name: "DeepSeek Chat", return [
description: "DeepSeek 对话模型", {
maxTokens: 8192, id: "glm-4.6",
provider: "DeepSeek", name: "智普 GLM-4.6",
}, description: "最强大的模型",
{ maxTokens: 200000,
id: "deepseek-reasoner", provider: "Zhipu",
name: "DeepSeek Reasoner", supports_thinking: true,
description: "DeepSeek 深度思考模型", supports_web_search: true,
maxTokens: 8192, supports_vision: false,
provider: "DeepSeek", supports_files: false,
}, },
]; {
id: "deepseek-chat",
name: "DeepSeek Chat",
description: "DeepSeek 对话模型",
maxTokens: 64000,
provider: "DeepSeek",
supports_thinking: false,
supports_web_search: false,
supports_vision: false,
supports_files: false,
},
];
}
} }
/** /**