fix: 修正将模型错误标识为支持联网搜索的问题

This commit is contained in:
SuperManTouX 2026-03-09 15:30:52 +08:00
parent 315b3776cf
commit 354c6f4cc7
2 changed files with 65 additions and 18 deletions

View File

@ -24,7 +24,7 @@ GLM_MODELS = [
max_tokens=128000,
provider="ZhipuAI",
supports_thinking=True,
supports_web_search=True,
supports_web_search=False,
supports_vision=True,
supports_files=True,
),
@ -46,7 +46,7 @@ GLM_MODELS = [
max_tokens=128000,
provider="ZhipuAI",
supports_thinking=False,
supports_web_search=True,
supports_web_search=False,
supports_vision=True,
supports_files=True,
),

View File

@ -1,37 +1,84 @@
"""
GLM 适配器测试脚本
测试 GLMAdapter 的流式和非流式调用,包括联网搜索功能
"""
import asyncio
import os
import sys
from pathlib import Path
# Add project root to sys.path
root_dir = Path(__file__).parent
root_dir = Path(__file__).parent.parent
sys.path.insert(0, str(root_dir))
# Set API key from .env if needed
from dotenv import load_dotenv
from utils.glm_adapter import _ensure_venv, glm_chat_sync, glm_stream_generator
from adapters.glm_adapter import GLMAdapter
from adapters.base import ChatCompletionRequest
load_dotenv()
async def test_stream():
msgs = [{"role": "user", "content": "今天北京天气怎样?"}]
print("Testing stream...")
async for chunk in glm_stream_generator(
msgs, "glm-4.5-air", 0.7, 1024, web_search=True
):
"""测试流式调用(联网搜索)"""
adapter = GLMAdapter()
if not adapter.is_available():
print("错误:未配置 ZHIPU_API_KEY 或 GLM_API_KEY")
return
request = ChatCompletionRequest(
model="glm-4.6v",
messages=[{"role": "user", "content": "今天北京天气怎样?"}],
stream=True,
temperature=0.7,
max_tokens=1024,
web_search=True,
)
print("Testing stream with web_search...")
response = await adapter.chat(request)
# 流式响应是 StreamingResponse需要手动读取
async for chunk in response.body_iterator:
# body_iterator 已经返回字符串
print(chunk, end="")
def test_sync():
msgs = [{"role": "user", "content": "今天几号?武汉天气怎样?"}]
print("Testing sync...")
res = glm_chat_sync(msgs, "glm-4.5-air", 0.7, 1024, web_search=True)
print(res)
async def test_sync():
"""测试非流式调用(联网搜索)"""
adapter = GLMAdapter()
if not adapter.is_available():
print("错误:未配置 ZHIPU_API_KEY 或 GLM_API_KEY")
return
request = ChatCompletionRequest(
model="glm-4-flash",
messages=[{"role": "user", "content": "今天几号?武汉天气怎样?"}],
stream=False,
temperature=0.7,
max_tokens=1024,
web_search=True,
)
print("Testing sync with web_search...")
response = await adapter.chat(request)
# 非流式响应返回 JSONResponse
if hasattr(response, "body"):
import json
data = json.loads(response.body)
content = data.get("choices", [{}])[0].get("message", {}).get("content", "")
print(f"Response: {content}")
else:
print(f"Response: {response}")
if __name__ == "__main__":
_ensure_venv()
# test_sync()
asyncio.run(test_stream())
# 运行流式测试
# asyncio.run(test_stream())
# 运行非流式测试
asyncio.run(test_sync())