fix: 修复qwen3.5系列模型思考模式输出内容错乱的问题;修复错误显示GLM5能识别图片和文件的问题。

This commit is contained in:
肖应宇 2026-03-11 13:38:51 +08:00
parent 3c53e89b43
commit 38faeeb46d
2 changed files with 12 additions and 22 deletions

View File

@ -527,6 +527,7 @@ class DashScopeAdapter(BaseAdapter):
"model": model,
"messages": messages,
"stream": True,
"enable_thinking": False,
"max_tokens": request.max_tokens,
"temperature": request.temperature,
}
@ -535,13 +536,12 @@ class DashScopeAdapter(BaseAdapter):
if thinking_enabled:
api_params["enable_thinking"] = True
logger.info(f"[DashScope] 多模态 API 调用参数:")
logger.info(f"[DashScope] 流式多模态 API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - temperature: {api_params['temperature']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
try:
@ -568,14 +568,9 @@ class DashScopeAdapter(BaseAdapter):
for resp in responses:
chunk_count += 1
logger.info(f"[DashScope] === chunk {chunk_count} ===")
if resp.status_code == 200:
try:
# 打印原始响应结构
logger.info(f" - resp.status_code: {resp.status_code}")
logger.info(f" - resp.output: {resp.output}")
choice = resp.output.choices[0]
message = choice["message"]
@ -585,7 +580,6 @@ class DashScopeAdapter(BaseAdapter):
if reasoning_content:
delta_reasoning = reasoning_content
full_reasoning += reasoning_content
logger.info(f" - reasoning_delta: {delta_reasoning}")
data = {
"id": f"chatcmpl-{generate_unique_id()}",
@ -610,15 +604,10 @@ class DashScopeAdapter(BaseAdapter):
if isinstance(item, dict) and "text" in item:
text += item["text"]
# 打印每个 chunk 的内容
logger.info(f" - text_len: {len(text)}, full_len: {len(full_content)}")
logger.info(f" - text: {text}")
# 多模态 API 返回的 content 是独立的片段(不是累积的),直接作为 delta
if text:
delta = text
full_content += text
logger.info(f" - delta: {delta}")
data = {
"id": f"chatcmpl-{generate_unique_id()}",
@ -652,12 +641,13 @@ class DashScopeAdapter(BaseAdapter):
# 打印流式响应结果
logger.info(f"[DashScope] 流式多模态响应完成:")
logger.info(f" - chunks: {chunk_count}")
logger.info(f" - content_length: {len(full_content)} 字符")
if full_reasoning:
logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
logger.info(f" - reasoning: {full_reasoning[:500]}..." if len(full_reasoning) > 500 else f" - reasoning: {full_reasoning}")
logger.info(f" - content_length: {len(full_content)} 字符")
logger.info(
f" - content_preview: {full_content[:200]}..."
if len(full_content) > 200
f" - content: {full_content[:500]}..."
if len(full_content) > 500
else f" - content: {full_content}"
)
@ -686,6 +676,7 @@ class DashScopeAdapter(BaseAdapter):
"messages": messages,
"stream": False,
"max_tokens": request.max_tokens,
"enable_thinking": False,
"temperature": request.temperature,
}
@ -693,13 +684,12 @@ class DashScopeAdapter(BaseAdapter):
if thinking_enabled:
api_params["enable_thinking"] = True
logger.info(f"[DashScope] 多模态 API 调用参数:")
logger.info(f"[DashScope] 非流式多模态 API 调用参数:")
logger.info(f" - model: {api_params['model']}")
logger.info(f" - stream: {api_params['stream']}")
logger.info(f" - max_tokens: {api_params['max_tokens']}")
logger.info(f" - temperature: {api_params['temperature']}")
if thinking_enabled:
logger.info(f" - enable_thinking: True")
logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
try:
resp = MultiModalConversation.call(**api_params)

View File

@ -25,8 +25,8 @@ GLM_MODELS = [
provider="ZhipuAI",
supports_thinking=True,
supports_web_search=False,
supports_vision=True,
supports_files=True,
supports_vision=False,
supports_files=False,
),
ModelInfo(
id="glm-4.6v",