From 354c6f4cc7f2e9b6caa699bbf26551c5a0bd875b Mon Sep 17 00:00:00 2001
From: SuperManTouX <93423476+SuperManTouX@users.noreply.github.com>
Date: Mon, 9 Mar 2026 15:30:52 +0800
Subject: [PATCH] debug: fix models incorrectly flagged as supporting web
 search
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 server/adapters/glm_adapter.py  |  4 +-
 server/utils/test_glm_search.py | 79 +++++++++++++++++++++++++++++++--------
 2 files changed, 65 insertions(+), 18 deletions(-)

diff --git a/server/adapters/glm_adapter.py b/server/adapters/glm_adapter.py
index f7e6897..a9427d2 100644
--- a/server/adapters/glm_adapter.py
+++ b/server/adapters/glm_adapter.py
@@ -24,7 +24,7 @@ GLM_MODELS = [
         max_tokens=128000,
         provider="ZhipuAI",
         supports_thinking=True,
-        supports_web_search=True,
+        supports_web_search=False,
         supports_vision=True,
         supports_files=True,
     ),
@@ -46,7 +46,7 @@ GLM_MODELS = [
         max_tokens=128000,
         provider="ZhipuAI",
         supports_thinking=False,
-        supports_web_search=True,
+        supports_web_search=False,
         supports_vision=True,
         supports_files=True,
     ),
diff --git a/server/utils/test_glm_search.py b/server/utils/test_glm_search.py
index 7acb47b..8ff920c 100644
--- a/server/utils/test_glm_search.py
+++ b/server/utils/test_glm_search.py
@@ -1,37 +1,84 @@
+"""
+GLM adapter test script.
+Tests streaming and non-streaming calls through GLMAdapter, including web search.
+"""
+
 import asyncio
 import os
 import sys
 from pathlib import Path
 
 # Add project root to sys.path
-root_dir = Path(__file__).parent
+root_dir = Path(__file__).parent.parent
 sys.path.insert(0, str(root_dir))
 
-# Set API key from .env if needed
 from dotenv import load_dotenv
-from utils.glm_adapter import _ensure_venv, glm_chat_sync, glm_stream_generator
+
+from adapters.glm_adapter import GLMAdapter
+from adapters.base import ChatCompletionRequest
 
 load_dotenv()
 
 
 async def test_stream():
-    msgs = [{"role": "user", "content": "What is the weather in Beijing today?"}]
-    print("Testing stream...")
-    async for chunk in glm_stream_generator(
-        msgs, "glm-4.5-air", 0.7, 1024, web_search=True
-    ):
+    """Test the streaming call (with web search)."""
+    adapter = GLMAdapter()
+
+    if not adapter.is_available():
+        print("Error: ZHIPU_API_KEY or GLM_API_KEY is not configured")
+        return
+
+    request = ChatCompletionRequest(
+        model="glm-4.6v",
+        messages=[{"role": "user", "content": "What is the weather in Beijing today?"}],
+        stream=True,
+        temperature=0.7,
+        max_tokens=1024,
+        web_search=True,
+    )
+
+    print("Testing stream with web_search...")
+    response = await adapter.chat(request)
+
+    # The streaming call returns a StreamingResponse; read it manually
+    async for chunk in response.body_iterator:
+        # body_iterator already yields strings
         print(chunk, end="")
 
 
-def test_sync():
-    msgs = [{"role": "user", "content": "What is the date today? How is the weather in Wuhan?"}]
-    print("Testing sync...")
-    res = glm_chat_sync(msgs, "glm-4.5-air", 0.7, 1024, web_search=True)
-    print(res)
+async def test_sync():
+    """Test the non-streaming call (with web search)."""
+    adapter = GLMAdapter()
+
+    if not adapter.is_available():
+        print("Error: ZHIPU_API_KEY or GLM_API_KEY is not configured")
+        return
+
+    request = ChatCompletionRequest(
+        model="glm-4-flash",
+        messages=[{"role": "user", "content": "What is the date today? How is the weather in Wuhan?"}],
+        stream=False,
+        temperature=0.7,
+        max_tokens=1024,
+        web_search=True,
+    )
+
+    print("Testing sync with web_search...")
+    response = await adapter.chat(request)
+
+    # The non-streaming call returns a JSONResponse
+    if hasattr(response, "body"):
+        import json
+
+        data = json.loads(response.body)
+        content = data.get("choices", [{}])[0].get("message", {}).get("content", "")
+        print(f"Response: {content}")
+    else:
+        print(f"Response: {response}")
 
 
 if __name__ == "__main__":
-    _ensure_venv()
-    # test_sync()
-    asyncio.run(test_stream())
+    # Run the streaming test
+    # asyncio.run(test_stream())
+    # Run the non-streaming test
+    asyncio.run(test_sync())
\ No newline at end of file