From d80b17050d483bf866f40343224f02da5942f752 Mon Sep 17 00:00:00 2001
From: MT-Fire <798521692@qq.com>
Date: Thu, 5 Mar 2026 11:43:27 +0800
Subject: [PATCH] =?UTF-8?q?feat(ui):=20=E5=AE=9E=E7=8E=B0=E6=B7=B1?=
=?UTF-8?q?=E5=BA=A6=E6=80=9D=E8=80=83=E6=8A=98=E5=8F=A0=E5=8A=9F=E8=83=BD?=
=?UTF-8?q?=EF=BC=8C=E6=8F=90=E5=8D=87=E7=94=A8=E6=88=B7=E7=95=8C=E9=9D=A2?=
=?UTF-8?q?=E4=BF=A1=E6=81=AF=E5=AF=86=E5=BA=A6=E4=B8=8E=E4=BA=A4=E4=BA=92?=
=?UTF-8?q?=E4=BD=93=E9=AA=8C=20[=E4=BC=98=E5=8C=96=EF=BC=9A=E6=94=AF?=
=?UTF-8?q?=E6=8C=81=E7=94=A8=E6=88=B7=E5=B1=95=E5=BC=80/=E6=94=B6?=
=?UTF-8?q?=E8=B5=B7=E6=80=9D=E8=80=83=E8=BF=87=E7=A8=8B=EF=BC=8C=E5=87=8F?=
=?UTF-8?q?=E5=B0=91=E4=BF=A1=E6=81=AF=E5=B9=B2=E6=89=B0]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
server/api/chat_routes.py | 18 +-
server/utils/glm_adapter.py | 11 +-
.../message/components/ThinkingNode.vue | 219 ++++++++++--------
3 files changed, 135 insertions(+), 113 deletions(-)
diff --git a/server/api/chat_routes.py b/server/api/chat_routes.py
index 25dbc90..8145131 100644
--- a/server/api/chat_routes.py
+++ b/server/api/chat_routes.py
@@ -442,21 +442,16 @@ async def chat_endpoint_handler(body: dict):
# 处理思考过程片段
if reasoning_content:
if not full_reasoning_content:
- # 第一个思考片段,加标题前缀
- delta_str += (
- "> **💭 深度思考过程:**\n> \n> "
- )
+ # 第一个思考片段,添加 <think> 开始标签
+ delta_str += "<think>"
full_reasoning_content += reasoning_content
- # markdown 引用块内换行需加 >
- delta_str += reasoning_content.replace(
- "\n", "\n> "
- )
+ delta_str += reasoning_content
# 处理正式回复片段
if content:
if not full_content and full_reasoning_content:
- # 思考结束后首个正式回复,加分隔线
- delta_str += "\n\n---\n\n"
+ # 思考结束后首个正式回复,关闭 </think> 标签
+ delta_str += "</think>\n\n"
full_content += content
delta_str += content
@@ -581,8 +576,7 @@ async def chat_endpoint_handler(body: dict):
content = msg_dict.get("content", "")
rc = msg_dict.get("reasoning_content", "")
if rc:
- rc_formatted = rc.replace("\n", "\n> ")
- content = f"> **💭 深度思考过程:**\n> \n> {rc_formatted}\n\n---\n\n{content}"
+ content = f"<think>{rc}</think>\n\n{content}"
# 否则尝试从 output.text 获取内容(DashScope特定格式)
elif (
hasattr(response, "output")
diff --git a/server/utils/glm_adapter.py b/server/utils/glm_adapter.py
index a4270b7..68a614a 100644
--- a/server/utils/glm_adapter.py
+++ b/server/utils/glm_adapter.py
@@ -364,17 +364,16 @@ async def glm_stream_generator(
# ── 思考过程(reasoning_content)────────────────────────
if reasoning:
if not full_reasoning:
- # 首个思考片段:加 Markdown 引用块标题
- delta_str += "> **💭 深度思考过程:**\n> \n> "
+ # 首个思考片段:添加 <think> 开始标签
+ delta_str += "<think>"
full_reasoning += reasoning
- # 引用块内换行需在每行前加 `> `
- delta_str += reasoning.replace("\n", "\n> ")
+ delta_str += reasoning
# ── 正式回答(content)──────────────────────────────────
if text:
if not full_content and full_reasoning:
- # 思考结束后首次出现正式回答:加分隔线
- delta_str += "\n\n---\n\n"
+ # 思考结束后首次出现正式回答:关闭 </think> 标签
+ delta_str += "</think>\n\n"
full_content += text
delta_str += text
diff --git a/src/components/message/components/ThinkingNode.vue b/src/components/message/components/ThinkingNode.vue
index 5e7caca..ba6a68f 100644
--- a/src/components/message/components/ThinkingNode.vue
+++ b/src/components/message/components/ThinkingNode.vue
@@ -1,8 +1,9 @@