debug: Fix garbled thinking-mode output for the qwen3.5 model series; fix GLM5 being incorrectly shown as able to recognize images and files.
commit 38faeeb46d · parent 3c53e89b43
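For orientation, here is a minimal sketch of the pattern the DashScope changes below apply. It is not the committed code: the function name and the `model`, `messages`, `request`, `thinking_enabled`, and `logger` parameters stand in for values the adapter methods already hold. The point of the fix is that `enable_thinking` is now always sent explicitly, defaulting to False, so a non-thinking qwen3.5 request can no longer fall back to a provider-side default that leaks reasoning text into the normal output.

# Sketch only; mirrors the parameter handling shown in the hunks below.
from dashscope import MultiModalConversation

def call_qwen_multimodal_stream(model, messages, request, thinking_enabled, logger):
    api_params = {
        "model": model,
        "messages": messages,
        "stream": True,
        "enable_thinking": False,  # explicit default: thinking off
        "max_tokens": request.max_tokens,
        "temperature": request.temperature,
    }
    if thinking_enabled:
        api_params["enable_thinking"] = True  # opt in only when the request asks for it
    # log the effective value rather than a hard-coded "True"
    logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
    return MultiModalConversation.call(**api_params)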
@@ -527,6 +527,7 @@ class DashScopeAdapter(BaseAdapter):
             "model": model,
             "messages": messages,
             "stream": True,
+            "enable_thinking": False,
             "max_tokens": request.max_tokens,
             "temperature": request.temperature,
         }
@@ -535,13 +536,12 @@ class DashScopeAdapter(BaseAdapter):
         if thinking_enabled:
             api_params["enable_thinking"] = True

-        logger.info(f"[DashScope] 多模态 API 调用参数:")
+        logger.info(f"[DashScope] 流式多模态 API 调用参数:")
         logger.info(f" - model: {api_params['model']}")
         logger.info(f" - stream: {api_params['stream']}")
         logger.info(f" - max_tokens: {api_params['max_tokens']}")
         logger.info(f" - temperature: {api_params['temperature']}")
-        if thinking_enabled:
-            logger.info(f" - enable_thinking: True")
+        logger.info(f" - enable_thinking: {api_params['enable_thinking']}")
         logger.info(f" - messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")

         try:
@@ -568,14 +568,9 @@ class DashScopeAdapter(BaseAdapter):

             for resp in responses:
                 chunk_count += 1
-                logger.info(f"[DashScope] === chunk {chunk_count} ===")

                 if resp.status_code == 200:
                     try:
-                        # 打印原始响应结构
-                        logger.info(f" - resp.status_code: {resp.status_code}")
-                        logger.info(f" - resp.output: {resp.output}")
-
                         choice = resp.output.choices[0]
                         message = choice["message"]

@@ -585,7 +580,6 @@ class DashScopeAdapter(BaseAdapter):
                         if reasoning_content:
                             delta_reasoning = reasoning_content
                             full_reasoning += reasoning_content
-                            logger.info(f" - reasoning_delta: {delta_reasoning}")

                             data = {
                                 "id": f"chatcmpl-{generate_unique_id()}",
@@ -610,15 +604,10 @@ class DashScopeAdapter(BaseAdapter):
                                 if isinstance(item, dict) and "text" in item:
                                     text += item["text"]

-                        # 打印每个 chunk 的内容
-                        logger.info(f" - text_len: {len(text)}, full_len: {len(full_content)}")
-                        logger.info(f" - text: {text}")
-
                         # 多模态 API 返回的 content 是独立的片段(不是累积的),直接作为 delta
                         if text:
                             delta = text
                             full_content += text
-                            logger.info(f" - delta: {delta}")

                             data = {
                                 "id": f"chatcmpl-{generate_unique_id()}",
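The comment kept in the hunk above is the key assumption behind the streaming fix: the multimodal API emits independent text fragments rather than a cumulative string, so each fragment is forwarded as the delta and appended locally. A hedged sketch of that consumption loop follows, assuming the response shapes visible in this diff (`resp.status_code`, `resp.output.choices[0]["message"]`) and dict-style access to `reasoning_content` and `content`:

def collect_multimodal_stream(responses):
    """Sketch: rebuild full content/reasoning from independent stream fragments."""
    full_content, full_reasoning = "", ""
    for resp in responses:  # generator returned by MultiModalConversation.call(..., stream=True)
        if resp.status_code != 200:
            continue
        message = resp.output.choices[0]["message"]
        reasoning = message.get("reasoning_content") or ""
        if reasoning:
            full_reasoning += reasoning  # thinking text stays separate from the answer
        text = ""
        for item in message.get("content") or []:
            if isinstance(item, dict) and "text" in item:
                text += item["text"]
        if text:
            full_content += text  # the fragment is already the delta; no diffing needed
    return full_content, full_reasoning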
@@ -652,12 +641,13 @@ class DashScopeAdapter(BaseAdapter):
             # 打印流式响应结果
             logger.info(f"[DashScope] 流式多模态响应完成:")
             logger.info(f" - chunks: {chunk_count}")
-            logger.info(f" - content_length: {len(full_content)} 字符")
             if full_reasoning:
                 logger.info(f" - reasoning_length: {len(full_reasoning)} 字符")
+                logger.info(f" - reasoning: {full_reasoning[:500]}..." if len(full_reasoning) > 500 else f" - reasoning: {full_reasoning}")
+            logger.info(f" - content_length: {len(full_content)} 字符")
             logger.info(
-                f" - content_preview: {full_content[:200]}..."
-                if len(full_content) > 200
+                f" - content: {full_content[:500]}..."
+                if len(full_content) > 500
                 else f" - content: {full_content}"
             )

@@ -686,6 +676,7 @@ class DashScopeAdapter(BaseAdapter):
             "messages": messages,
             "stream": False,
             "max_tokens": request.max_tokens,
+            "enable_thinking": False,
             "temperature": request.temperature,
         }

@@ -693,13 +684,12 @@ class DashScopeAdapter(BaseAdapter):
         if thinking_enabled:
             api_params["enable_thinking"] = True

-        logger.info(f"[DashScope] 多模态 API 调用参数:")
+        logger.info(f"[DashScope] 非流式多模态 API 调用参数:")
         logger.info(f" - model: {api_params['model']}")
         logger.info(f" - stream: {api_params['stream']}")
         logger.info(f" - max_tokens: {api_params['max_tokens']}")
         logger.info(f" - temperature: {api_params['temperature']}")
-        if thinking_enabled:
-            logger.info(f" - enable_thinking: True")
+        logger.info(f" - enable_thinking: {api_params['enable_thinking']}")

         try:
             resp = MultiModalConversation.call(**api_params)
@@ -25,8 +25,8 @@ GLM_MODELS = [
         provider="ZhipuAI",
         supports_thinking=True,
         supports_web_search=False,
-        supports_vision=True,
-        supports_files=True,
+        supports_vision=False,
+        supports_files=False,
     ),
     ModelInfo(
         id="glm-4.6v",
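These capability flags are what gets surfaced as the model's image and file support, which is why the GLM5 entry had been displayed as vision/file capable. A sketch of the corrected entry follows; `ModelInfo` here is a stand-in dataclass with only the fields visible in this hunk, and the `glm-5` id is assumed from the commit message (the changed entry's id line sits outside the hunk):

from dataclasses import dataclass

@dataclass
class ModelInfo:  # stand-in; field names match this diff
    id: str
    provider: str
    supports_thinking: bool = False
    supports_web_search: bool = False
    supports_vision: bool = False
    supports_files: bool = False

GLM_MODELS = [
    ModelInfo(
        id="glm-5",  # assumed id
        provider="ZhipuAI",
        supports_thinking=True,
        supports_web_search=False,
        supports_vision=False,  # was True: the model cannot read images
        supports_files=False,   # was True: the model cannot read files
    ),
]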