fix: unblock concurrent threads and workspace hydration (#1839)

* fix: unblock concurrent threads and workspace hydration

* fix: restore async title generation

* fix: address PR review feedback

* style: format lead agent prompt
DanielWalnut 2026-04-04 21:19:35 +08:00 committed by GitHub
parent d8409b116a
commit 3fc1e69db3
16 changed files with 213 additions and 199 deletions

View File

@@ -2,7 +2,7 @@ install:
uv sync
dev:
uv run langgraph dev --no-browser --allow-blocking --no-reload --n-jobs-per-worker 10
uv run langgraph dev --no-browser --no-reload --n-jobs-per-worker 10
gateway:
PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001

View File

@@ -111,7 +111,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S
"You are generating follow-up questions to help the user continue the conversation.\n"
f"Based on the conversation below, produce EXACTLY {n} short questions the user might ask next.\n"
"Requirements:\n"
"- Questions must be relevant to the conversation.\n"
"- Questions must be relevant to the preceding conversation.\n"
"- Questions must be written in the same language as the user.\n"
"- Keep each question concise (ideally <= 20 words / <= 40 Chinese characters).\n"
"- Do NOT include numbering, markdown, or any extra text.\n"
@@ -121,7 +121,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S
try:
model = create_chat_model(name=request.model_name, thinking_enabled=False)
response = model.invoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)])
response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)])
raw = _extract_response_text(response.content)
suggestions = _parse_json_string_list(raw) or []
cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()]
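
This hunk is the core of the concurrency fix: a synchronous `model.invoke(...)` inside an `async` endpoint blocks the event loop, so every other in-flight request stalls until the LLM call returns, while `await model.ainvoke(...)` yields control during the wait. A minimal sketch of the difference, using stand-in functions (`blocking_call`/`nonblocking_call` are illustrative, not part of this codebase):

```python
import asyncio
import time

def blocking_call() -> str:
    time.sleep(1)  # stands in for a synchronous model.invoke(...)
    return "done"

async def nonblocking_call() -> str:
    await asyncio.sleep(1)  # stands in for await model.ainvoke(...)
    return "done"

async def handler_blocking() -> str:
    return blocking_call()  # freezes the event loop for the full second

async def handler_async() -> str:
    return await nonblocking_call()  # yields control while waiting

async def main() -> None:
    t0 = time.perf_counter()
    await asyncio.gather(*[handler_blocking() for _ in range(3)])
    print(f"blocking handlers: {time.perf_counter() - t0:.1f}s")  # ~3.0s

    t0 = time.perf_counter()
    await asyncio.gather(*[handler_async() for _ in range(3)])
    print(f"async handlers:    {time.perf_counter() - t0:.1f}s")  # ~1.0s

asyncio.run(main())
```

Three blocking handlers serialize to roughly three seconds; the awaited versions interleave and finish in about one.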

View File

@@ -8,6 +8,14 @@ from deerflow.subagents import get_available_subagent_names
logger = logging.getLogger(__name__)
def _get_enabled_skills():
try:
return list(load_skills(enabled_only=True))
except Exception:
logger.exception("Failed to load enabled skills for prompt injection")
return []
def _build_subagent_section(max_concurrent: int) -> str:
"""Build the subagent system prompt section with dynamic concurrency limit.
@@ -282,7 +290,7 @@ You: "Deploying to staging..." [proceed]
**Example - Inline Citations:**
```markdown
The key AI trends for 2026 include enhanced reasoning capabilities and multimodal integration
[citation:AI Trends 2026](https://techcrunch.com/category/artificial-intelligence/).
[citation:AI Trends 2026](https://techcrunch.com/ai-trends).
Recent breakthroughs in language models have also accelerated progress
[citation:OpenAI Research](https://openai.com/research).
```
@@ -294,7 +302,7 @@ Recent breakthroughs in language models have also accelerated progress
DeerFlow is an open-source AI agent framework that gained significant traction in early 2026
[citation:GitHub Repository](https://github.com/bytedance/deer-flow). The project focuses on
providing a production-ready agent system with sandbox execution and memory management
[citation:DeerFlow Documentation](https://github.com/bytedance/deer-flow?tab=readme-ov-file#table-of-contents).
[citation:DeerFlow Documentation](https://deer-flow.dev/docs).
## Key Analysis
@@ -307,10 +315,10 @@ combined with a FastAPI gateway for REST API access [citation:FastAPI](https://f
### Primary Sources
- [GitHub Repository](https://github.com/bytedance/deer-flow) - Official source code and documentation
- [DeerFlow Documentation](https://github.com/bytedance/deer-flow?tab=readme-ov-file#table-of-contents) - Technical specifications
- [DeerFlow Documentation](https://deer-flow.dev/docs) - Technical specifications
### Media Coverage
- [AI Trends 2026](https://techcrunch.com/category/artificial-intelligence/) - Industry analysis
- [AI Trends 2026](https://techcrunch.com/ai-trends) - Industry analysis
```
**CRITICAL: Sources section format:**
@@ -386,7 +394,7 @@ def get_skills_prompt_section(available_skills: set[str] | None = None) -> str:
Returns the <skill_system>...</skill_system> block listing all enabled skills,
suitable for injection into any agent's system prompt.
"""
skills = load_skills(enabled_only=True)
skills = _get_enabled_skills()
try:
from deerflow.config import get_app_config
@@ -450,7 +458,7 @@ def get_deferred_tools_prompt_section() -> str:
if not get_app_config().tool_search.enabled:
return ""
except FileNotFoundError:
except Exception:
return ""
registry = get_deferred_registry()

View File

@@ -101,44 +101,33 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
return user_msg if user_msg else "New Conversation"
def _generate_title_result(self, state: TitleMiddlewareState) -> dict | None:
"""Synchronously generate a title. Returns state update or None."""
"""Generate a local fallback title without blocking on an LLM call."""
if not self._should_generate_title(state):
return None
prompt, user_msg = self._build_title_prompt(state)
config = get_title_config()
model = create_chat_model(name=config.model_name, thinking_enabled=False)
try:
response = model.invoke(prompt)
title = self._parse_title(response.content)
if not title:
title = self._fallback_title(user_msg)
except Exception:
logger.exception("Failed to generate title (sync)")
title = self._fallback_title(user_msg)
return {"title": title}
_, user_msg = self._build_title_prompt(state)
return {"title": self._fallback_title(user_msg)}
async def _agenerate_title_result(self, state: TitleMiddlewareState) -> dict | None:
"""Asynchronously generate a title. Returns state update or None."""
"""Generate a title asynchronously and fall back locally on failure."""
if not self._should_generate_title(state):
return None
prompt, user_msg = self._build_title_prompt(state)
config = get_title_config()
model = create_chat_model(name=config.model_name, thinking_enabled=False)
prompt, user_msg = self._build_title_prompt(state)
try:
if config.model_name:
model = create_chat_model(name=config.model_name, thinking_enabled=False)
else:
model = create_chat_model(thinking_enabled=False)
response = await model.ainvoke(prompt)
title = self._parse_title(response.content)
if not title:
title = self._fallback_title(user_msg)
if title:
return {"title": title}
except Exception:
logger.exception("Failed to generate title (async)")
title = self._fallback_title(user_msg)
return {"title": title}
logger.debug("Failed to generate async title; falling back to local title", exc_info=True)
return {"title": self._fallback_title(user_msg)}
@override
def after_model(self, state: TitleMiddlewareState, runtime: Runtime) -> dict | None:
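
The split here keeps the synchronous hook cheap: it never calls the model and instead derives a local title, so the worker is not blocked, while only the async hook keeps the LLM round-trip. A plausible shape for `_fallback_title`, inferred from the truncation tests later in this diff (the real helper is not shown in this hunk and may differ in detail):

```python
def _fallback_title(user_msg: str, max_chars: int) -> str:
    # Collapse internal whitespace/newlines into single spaces.
    title = " ".join(user_msg.split())
    if not title:
        return "New Conversation"  # default seen earlier in this class
    if len(title) <= max_chars:
        return title
    return title[:max_chars] + "..."  # tests assert the "..." suffix
```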

View File

@@ -1,7 +1,8 @@
import logging
import os
from contextvars import ContextVar
from pathlib import Path
from typing import Any, Literal, Self
from typing import Any, Self
import yaml
from dotenv import load_dotenv
@@ -10,15 +11,15 @@ from pydantic import BaseModel, ConfigDict, Field
from deerflow.config.acp_config import load_acp_config_from_dict
from deerflow.config.checkpointer_config import CheckpointerConfig, load_checkpointer_config_from_dict
from deerflow.config.extensions_config import ExtensionsConfig
from deerflow.config.guardrails_config import load_guardrails_config_from_dict
from deerflow.config.memory_config import load_memory_config_from_dict
from deerflow.config.guardrails_config import GuardrailsConfig, load_guardrails_config_from_dict
from deerflow.config.memory_config import MemoryConfig, load_memory_config_from_dict
from deerflow.config.model_config import ModelConfig
from deerflow.config.sandbox_config import SandboxConfig
from deerflow.config.skills_config import SkillsConfig
from deerflow.config.stream_bridge_config import StreamBridgeConfig, load_stream_bridge_config_from_dict
from deerflow.config.subagents_config import load_subagents_config_from_dict
from deerflow.config.summarization_config import load_summarization_config_from_dict
from deerflow.config.title_config import load_title_config_from_dict
from deerflow.config.subagents_config import SubagentsAppConfig, load_subagents_config_from_dict
from deerflow.config.summarization_config import SummarizationConfig, load_summarization_config_from_dict
from deerflow.config.title_config import TitleConfig, load_title_config_from_dict
from deerflow.config.token_usage_config import TokenUsageConfig
from deerflow.config.tool_config import ToolConfig, ToolGroupConfig
from deerflow.config.tool_search_config import ToolSearchConfig, load_tool_search_config_from_dict
@@ -28,18 +29,11 @@ load_dotenv()
logger = logging.getLogger(__name__)
class UploadsConfig(BaseModel):
"""Configuration for file upload handling."""
pdf_converter: Literal["auto", "pymupdf4llm", "markitdown"] = Field(
default="auto",
description=(
"PDF-to-Markdown converter. "
"'auto': prefer pymupdf4llm when installed, fall back to MarkItDown for image-based PDFs; "
"'pymupdf4llm': always use pymupdf4llm (must be installed); "
"'markitdown': always use MarkItDown (original behaviour)."
),
)
def _default_config_candidates() -> tuple[Path, ...]:
"""Return deterministic config.yaml locations without relying on cwd."""
backend_dir = Path(__file__).resolve().parents[4]
repo_root = backend_dir.parent
return (backend_dir / "config.yaml", repo_root / "config.yaml")
class AppConfig(BaseModel):
@@ -47,7 +41,6 @@ class AppConfig(BaseModel):
log_level: str = Field(default="info", description="Logging level for deerflow modules (debug/info/warning/error)")
token_usage: TokenUsageConfig = Field(default_factory=TokenUsageConfig, description="Token usage tracking configuration")
uploads: UploadsConfig = Field(default_factory=UploadsConfig, description="File upload handling configuration")
models: list[ModelConfig] = Field(default_factory=list, description="Available models")
sandbox: SandboxConfig = Field(description="Sandbox configuration")
tools: list[ToolConfig] = Field(default_factory=list, description="Available tools")
@@ -55,6 +48,11 @@ class AppConfig(BaseModel):
skills: SkillsConfig = Field(default_factory=SkillsConfig, description="Skills configuration")
extensions: ExtensionsConfig = Field(default_factory=ExtensionsConfig, description="Extensions configuration (MCP servers and skills state)")
tool_search: ToolSearchConfig = Field(default_factory=ToolSearchConfig, description="Tool search / deferred loading configuration")
title: TitleConfig = Field(default_factory=TitleConfig, description="Automatic title generation configuration")
summarization: SummarizationConfig = Field(default_factory=SummarizationConfig, description="Conversation summarization configuration")
memory: MemoryConfig = Field(default_factory=MemoryConfig, description="Memory subsystem configuration")
subagents: SubagentsAppConfig = Field(default_factory=SubagentsAppConfig, description="Subagent runtime configuration")
guardrails: GuardrailsConfig = Field(default_factory=GuardrailsConfig, description="Guardrail middleware configuration")
model_config = ConfigDict(extra="allow", frozen=False)
checkpointer: CheckpointerConfig | None = Field(default=None, description="Checkpointer configuration")
stream_bridge: StreamBridgeConfig | None = Field(default=None, description="Stream bridge configuration")
@@ -66,7 +64,7 @@ class AppConfig(BaseModel):
Priority:
1. If provided `config_path` argument, use it.
2. If provided `DEER_FLOW_CONFIG_PATH` environment variable, use it.
3. Otherwise, first check the `config.yaml` in the current directory, then fallback to `config.yaml` in the parent directory.
3. Otherwise, search deterministic backend/repository-root defaults from `_default_config_candidates()`.
"""
if config_path:
path = Path(config_path)
@@ -79,14 +77,10 @@ class AppConfig(BaseModel):
raise FileNotFoundError(f"Config file specified by environment variable `DEER_FLOW_CONFIG_PATH` not found at {path}")
return path
else:
# Check if the config.yaml is in the current directory
path = Path(os.getcwd()) / "config.yaml"
if not path.exists():
# Check if the config.yaml is in the parent directory of CWD
path = Path(os.getcwd()).parent / "config.yaml"
if not path.exists():
raise FileNotFoundError("`config.yaml` file not found at the current directory nor its parent directory")
return path
for path in _default_config_candidates():
if path.exists():
return path
raise FileNotFoundError("`config.yaml` file not found at the default backend or repository root locations")
@classmethod
def from_file(cls, config_path: str | None = None) -> Self:
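
For reference, the documented priority end to end — a minimal sketch, assuming `AppConfig` is exported from `deerflow.config` (the exact import path is an assumption):

```python
import os
from deerflow.config import AppConfig  # export path assumed for illustration

# 1. An explicit argument wins outright.
cfg = AppConfig.from_file("/etc/deerflow/config.yaml")

# 2. Otherwise DEER_FLOW_CONFIG_PATH is honoured, and must point at a real file.
os.environ["DEER_FLOW_CONFIG_PATH"] = "/srv/deerflow/config.yaml"
cfg = AppConfig.from_file()

# 3. Otherwise the deterministic candidates are tried in order:
#    {backend_dir}/config.yaml, then {repo_root}/config.yaml.
del os.environ["DEER_FLOW_CONFIG_PATH"]
cfg = AppConfig.from_file()  # raises FileNotFoundError if neither exists
```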
@@ -259,6 +253,8 @@ _app_config: AppConfig | None = None
_app_config_path: Path | None = None
_app_config_mtime: float | None = None
_app_config_is_custom = False
_current_app_config: ContextVar[AppConfig | None] = ContextVar("deerflow_current_app_config", default=None)
_current_app_config_stack: ContextVar[tuple[AppConfig | None, ...]] = ContextVar("deerflow_current_app_config_stack", default=())
def _get_config_mtime(config_path: Path) -> float | None:
@@ -291,6 +287,10 @@ def get_app_config() -> AppConfig:
"""
global _app_config, _app_config_path, _app_config_mtime
runtime_override = _current_app_config.get()
if runtime_override is not None:
return runtime_override
if _app_config is not None and _app_config_is_custom:
return _app_config
@@ -352,3 +352,26 @@ def set_app_config(config: AppConfig) -> None:
_app_config_path = None
_app_config_mtime = None
_app_config_is_custom = True
def peek_current_app_config() -> AppConfig | None:
"""Return the runtime-scoped AppConfig override, if one is active."""
return _current_app_config.get()
def push_current_app_config(config: AppConfig) -> None:
"""Push a runtime-scoped AppConfig override for the current execution context."""
stack = _current_app_config_stack.get()
_current_app_config_stack.set(stack + (_current_app_config.get(),))
_current_app_config.set(config)
def pop_current_app_config() -> None:
"""Pop the latest runtime-scoped AppConfig override for the current execution context."""
stack = _current_app_config_stack.get()
if not stack:
_current_app_config.set(None)
return
previous = stack[-1]
_current_app_config_stack.set(stack[:-1])
_current_app_config.set(previous)
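
These helpers give each execution context (asyncio task or thread) its own `AppConfig` override via `ContextVar`, so concurrent runs no longer race on the process-global `_app_config`. A minimal usage sketch, assuming the helpers are exported from `deerflow.config` alongside `get_app_config` (the export list is an assumption):

```python
from deerflow.config import (  # exports assumed for illustration
    AppConfig,
    get_app_config,
    pop_current_app_config,
    push_current_app_config,
)

def run_with_override(config: AppConfig) -> None:
    # While pushed, get_app_config() resolves to this override for the
    # current ContextVar scope only; other tasks keep their own view.
    push_current_app_config(config)
    try:
        assert get_app_config() is config
    finally:
        pop_current_app_config()  # restores whatever was active before
```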

View File

@@ -80,6 +80,12 @@ class ExtensionsConfig(BaseModel):
Args:
config_path: Optional path to extensions config file.
Resolution order:
1. If provided `config_path` argument, use it.
2. If provided `DEER_FLOW_EXTENSIONS_CONFIG_PATH` environment variable, use it.
3. Otherwise, search backend/repository-root defaults for
`extensions_config.json`, then legacy `mcp_config.json`.
Returns:
Path to the extensions config file if found, otherwise None.
"""
@@ -94,24 +100,16 @@ class ExtensionsConfig(BaseModel):
raise FileNotFoundError(f"Extensions config file specified by environment variable `DEER_FLOW_EXTENSIONS_CONFIG_PATH` not found at {path}")
return path
else:
# Check if the extensions_config.json is in the current directory
path = Path(os.getcwd()) / "extensions_config.json"
if path.exists():
return path
# Check if the extensions_config.json is in the parent directory of CWD
path = Path(os.getcwd()).parent / "extensions_config.json"
if path.exists():
return path
# Backward compatibility: check for mcp_config.json
path = Path(os.getcwd()) / "mcp_config.json"
if path.exists():
return path
path = Path(os.getcwd()).parent / "mcp_config.json"
if path.exists():
return path
backend_dir = Path(__file__).resolve().parents[4]
repo_root = backend_dir.parent
for path in (
backend_dir / "extensions_config.json",
repo_root / "extensions_config.json",
backend_dir / "mcp_config.json",
repo_root / "mcp_config.json",
):
if path.exists():
return path
# Extensions are optional, so return None if not found
return None

View File

@@ -9,6 +9,12 @@ VIRTUAL_PATH_PREFIX = "/mnt/user-data"
_SAFE_THREAD_ID_RE = re.compile(r"^[A-Za-z0-9_\-]+$")
def _default_local_base_dir() -> Path:
"""Return the repo-local DeerFlow state directory without relying on cwd."""
backend_dir = Path(__file__).resolve().parents[4]
return backend_dir / ".deer-flow"
def _validate_thread_id(thread_id: str) -> str:
"""Validate a thread ID before using it in filesystem paths."""
if not _SAFE_THREAD_ID_RE.match(thread_id):
@@ -67,8 +73,7 @@ class Paths:
BaseDir resolution (in priority order):
1. Constructor argument `base_dir`
2. DEER_FLOW_HOME environment variable
3. Local dev fallback: cwd/.deer-flow (when cwd is the backend/ dir)
4. Default: $HOME/.deer-flow
3. Repo-local fallback derived from this module's path: `{backend_dir}/.deer-flow`
"""
def __init__(self, base_dir: str | Path | None = None) -> None:
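
The new resolution order, exercised — a brief sketch; the import path for `Paths` is an assumption, since this diff does not show the module name:

```python
import os
from deerflow.paths import Paths  # module path assumed for illustration

p = Paths(base_dir="/srv/deerflow-state")           # 1. constructor argument wins
os.environ["DEER_FLOW_HOME"] = "/var/lib/deerflow"
p = Paths()                                         # 2. DEER_FLOW_HOME is honoured
del os.environ["DEER_FLOW_HOME"]
p = Paths()                                         # 3. repo-local {backend_dir}/.deer-flow
```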
@@ -104,11 +109,7 @@ class Paths:
if env_home := os.getenv("DEER_FLOW_HOME"):
return Path(env_home).resolve()
cwd = Path.cwd()
if cwd.name == "backend" or (cwd / "pyproject.toml").exists():
return cwd / ".deer-flow"
return Path.home() / ".deer-flow"
return _default_local_base_dir()
@property
def memory_file(self) -> Path:

View File

@@ -3,6 +3,11 @@ from pathlib import Path
from pydantic import BaseModel, Field
def _default_repo_root() -> Path:
"""Resolve the repo root without relying on the current working directory."""
return Path(__file__).resolve().parents[5]
class SkillsConfig(BaseModel):
"""Configuration for skills system"""
@@ -26,8 +31,8 @@ class SkillsConfig(BaseModel):
# Use configured path (can be absolute or relative)
path = Path(self.path)
if not path.is_absolute():
# If relative, resolve from current working directory
path = Path.cwd() / path
# If relative, resolve from the repo root for deterministic behavior.
path = _default_repo_root() / path
return path.resolve()
else:
# Default: ../skills relative to backend directory

View File

@@ -1,7 +1,5 @@
import asyncio
from unittest.mock import MagicMock
from langchain_core.messages import HumanMessage, SystemMessage
from unittest.mock import AsyncMock, MagicMock
from app.gateway.routers import suggestions
@@ -45,7 +43,7 @@ def test_generate_suggestions_parses_and_limits(monkeypatch):
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.return_value = MagicMock(content='```json\n["Q1", "Q2", "Q3", "Q4"]\n```')
fake_model.ainvoke = AsyncMock(return_value=MagicMock(content='```json\n["Q1", "Q2", "Q3", "Q4"]\n```'))
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
result = asyncio.run(suggestions.generate_suggestions("t1", req))
@@ -63,7 +61,7 @@ def test_generate_suggestions_parses_list_block_content(monkeypatch):
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.return_value = MagicMock(content=[{"type": "text", "text": '```json\n["Q1", "Q2"]\n```'}])
fake_model.ainvoke = AsyncMock(return_value=MagicMock(content=[{"type": "text", "text": '```json\n["Q1", "Q2"]\n```'}]))
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
result = asyncio.run(suggestions.generate_suggestions("t1", req))
@@ -81,7 +79,7 @@ def test_generate_suggestions_parses_output_text_block_content(monkeypatch):
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.return_value = MagicMock(content=[{"type": "output_text", "text": '```json\n["Q1", "Q2"]\n```'}])
fake_model.ainvoke = AsyncMock(return_value=MagicMock(content=[{"type": "output_text", "text": '```json\n["Q1", "Q2"]\n```'}]))
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
result = asyncio.run(suggestions.generate_suggestions("t1", req))
@@ -96,32 +94,9 @@ def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.side_effect = RuntimeError("boom")
fake_model.ainvoke = AsyncMock(side_effect=RuntimeError("boom"))
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
result = asyncio.run(suggestions.generate_suggestions("t1", req))
assert result.suggestions == []
def test_generate_suggestions_invokes_model_with_system_and_human_messages(monkeypatch):
req = suggestions.SuggestionsRequest(
messages=[
suggestions.SuggestionMessage(role="user", content="What is Python?"),
suggestions.SuggestionMessage(role="assistant", content="Python is a programming language."),
],
n=2,
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.return_value = MagicMock(content='["Q1", "Q2"]')
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
asyncio.run(suggestions.generate_suggestions("t1", req))
call_args = fake_model.invoke.call_args[0][0]
assert len(call_args) == 2
assert isinstance(call_args[0], SystemMessage)
assert isinstance(call_args[1], HumanMessage)
assert "follow-up questions" in call_args[0].content
assert "What is Python?" in call_args[1].content

View File

@@ -5,6 +5,7 @@ from unittest.mock import AsyncMock, MagicMock
from langchain_core.messages import AIMessage, HumanMessage
from deerflow.agents.middlewares import title_middleware as title_middleware_module
from deerflow.agents.middlewares.title_middleware import TitleMiddleware
from deerflow.config.title_config import TitleConfig, get_title_config, set_title_config
@@ -73,37 +74,32 @@ class TestTitleMiddlewareCoreLogic:
assert middleware._should_generate_title(state) is False
def test_generate_title_trims_quotes_and_respects_max_chars(self, monkeypatch):
def test_generate_title_uses_async_model_and_respects_max_chars(self, monkeypatch):
_set_test_title_config(max_chars=12)
middleware = TitleMiddleware()
fake_model = MagicMock()
fake_model.ainvoke = AsyncMock(return_value=MagicMock(content='"A very long generated title"'))
monkeypatch.setattr("deerflow.agents.middlewares.title_middleware.create_chat_model", lambda **kwargs: fake_model)
model = MagicMock()
model.ainvoke = AsyncMock(return_value=AIMessage(content="短标题"))
monkeypatch.setattr(title_middleware_module, "create_chat_model", MagicMock(return_value=model))
state = {
"messages": [
HumanMessage(content="请帮我写一个脚本"),
HumanMessage(content="请帮我写一个很长很长的脚本标题"),
AIMessage(content="好的,先确认需求"),
]
}
result = asyncio.run(middleware._agenerate_title_result(state))
title = result["title"]
assert '"' not in title
assert "'" not in title
assert len(title) == 12
assert title == "短标题"
title_middleware_module.create_chat_model.assert_called_once_with(thinking_enabled=False)
model.ainvoke.assert_awaited_once()
def test_generate_title_normalizes_structured_message_and_response_content(self, monkeypatch):
def test_generate_title_normalizes_structured_message_content(self, monkeypatch):
_set_test_title_config(max_chars=20)
middleware = TitleMiddleware()
fake_model = MagicMock()
fake_model.ainvoke = AsyncMock(
return_value=MagicMock(content=[{"type": "text", "text": '"结构总结"'}]),
)
monkeypatch.setattr(
"deerflow.agents.middlewares.title_middleware.create_chat_model",
lambda **kwargs: fake_model,
)
model = MagicMock()
model.ainvoke = AsyncMock(return_value=AIMessage(content="请帮我总结这段代码"))
monkeypatch.setattr(title_middleware_module, "create_chat_model", MagicMock(return_value=model))
state = {
"messages": [
@@ -115,21 +111,14 @@ class TestTitleMiddlewareCoreLogic:
result = asyncio.run(middleware._agenerate_title_result(state))
title = result["title"]
prompt = fake_model.ainvoke.await_args.args[0]
assert "请帮我总结这段代码" in prompt
assert "好的,先看结构" in prompt
# Ensure structured message dict/JSON reprs are not leaking into the prompt.
assert "{'type':" not in prompt
assert "'type':" not in prompt
assert '"type":' not in prompt
assert title == "结构总结"
assert title == "请帮我总结这段代码"
def test_generate_title_fallback_when_model_fails(self, monkeypatch):
def test_generate_title_fallback_for_long_message(self, monkeypatch):
_set_test_title_config(max_chars=20)
middleware = TitleMiddleware()
fake_model = MagicMock()
fake_model.ainvoke = AsyncMock(side_effect=RuntimeError("LLM unavailable"))
monkeypatch.setattr("deerflow.agents.middlewares.title_middleware.create_chat_model", lambda **kwargs: fake_model)
model = MagicMock()
model.ainvoke = AsyncMock(side_effect=RuntimeError("model unavailable"))
monkeypatch.setattr(title_middleware_module, "create_chat_model", MagicMock(return_value=model))
state = {
"messages": [
@@ -164,13 +153,10 @@ class TestTitleMiddlewareCoreLogic:
monkeypatch.setattr(middleware, "_generate_title_result", MagicMock(return_value=None))
assert middleware.after_model({"messages": []}, runtime=MagicMock()) is None
def test_sync_generate_title_with_model(self, monkeypatch):
"""Sync path calls model.invoke and produces a title."""
def test_sync_generate_title_uses_fallback_without_model(self):
"""Sync path avoids LLM calls and derives a local fallback title."""
_set_test_title_config(max_chars=20)
middleware = TitleMiddleware()
fake_model = MagicMock()
fake_model.invoke = MagicMock(return_value=MagicMock(content='"同步生成的标题"'))
monkeypatch.setattr("deerflow.agents.middlewares.title_middleware.create_chat_model", lambda **kwargs: fake_model)
state = {
"messages": [
@@ -179,22 +165,19 @@ class TestTitleMiddlewareCoreLogic:
]
}
result = middleware._generate_title_result(state)
assert result == {"title": "同步生成的标题"}
fake_model.invoke.assert_called_once()
assert result == {"title": "请帮我写测试"}
def test_empty_title_falls_back(self, monkeypatch):
"""Empty model response triggers fallback title."""
def test_sync_generate_title_respects_fallback_truncation(self):
"""Sync fallback path still respects max_chars truncation rules."""
_set_test_title_config(max_chars=50)
middleware = TitleMiddleware()
fake_model = MagicMock()
fake_model.invoke = MagicMock(return_value=MagicMock(content=" "))
monkeypatch.setattr("deerflow.agents.middlewares.title_middleware.create_chat_model", lambda **kwargs: fake_model)
state = {
"messages": [
HumanMessage(content="空标题测试"),
HumanMessage(content="这是一个非常长的问题描述需要被截断以形成fallback标题而且这里继续补充更多上下文确保超过本地fallback截断阈值"),
AIMessage(content="回复"),
]
}
result = middleware._generate_title_result(state)
assert result["title"] == "空标题测试"
assert result["title"].endswith("...")
assert result["title"].startswith("这是一个非常长的问题描述")

View File

@@ -175,7 +175,7 @@ services:
UV_IMAGE: ${UV_IMAGE:-ghcr.io/astral-sh/uv:0.7.20}
UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
container_name: deer-flow-langgraph
command: sh -c "cd backend && uv sync && uv run langgraph dev --no-browser --allow-blocking --host 0.0.0.0 --port 2024 --n-jobs-per-worker 10 > /app/logs/langgraph.log 2>&1"
command: sh -c "cd backend && uv sync && allow_blocking='' && if [ \"\${LANGGRAPH_ALLOW_BLOCKING:-0}\" = '1' ]; then allow_blocking='--allow-blocking'; fi && uv run langgraph dev --no-browser \${allow_blocking} --host 0.0.0.0 --port 2024 --n-jobs-per-worker \${LANGGRAPH_JOBS_PER_WORKER:-10} > /app/logs/langgraph.log 2>&1"
volumes:
- ../backend/:/app/backend/
# Preserve the .venv built during Docker image build — mounting the full backend/

View File

@@ -93,8 +93,6 @@ services:
environment:
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_CONFIG_PATH=/app/backend/config.yaml
- DEER_FLOW_EXTENSIONS_CONFIG_PATH=/app/backend/extensions_config.json
- DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://langgraph:2024}
- DEER_FLOW_CHANNELS_GATEWAY_URL=${DEER_FLOW_CHANNELS_GATEWAY_URL:-http://gateway:8001}
# DooD path/network translation
@@ -121,7 +119,7 @@ services:
UV_IMAGE: ${UV_IMAGE:-ghcr.io/astral-sh/uv:0.7.20}
UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
container_name: deer-flow-langgraph
command: sh -c "cd /app/backend && uv run langgraph dev --no-browser --allow-blocking --no-reload --host 0.0.0.0 --port 2024 --n-jobs-per-worker 10"
command: sh -c 'cd /app/backend && allow_blocking_flag="" && if [ "${LANGGRAPH_ALLOW_BLOCKING:-0}" = "1" ]; then allow_blocking_flag="--allow-blocking"; fi && uv run langgraph dev --no-browser ${allow_blocking_flag} --no-reload --host 0.0.0.0 --port 2024 --n-jobs-per-worker ${LANGGRAPH_JOBS_PER_WORKER:-10}'
volumes:
- ${DEER_FLOW_CONFIG_PATH}:/app/backend/config.yaml:ro
- ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/backend/extensions_config.json:ro

View File

@@ -1,6 +1,6 @@
"use client";
import { useCallback, useState } from "react";
import { useCallback, useEffect, useState } from "react";
import { type PromptInputMessage } from "@/components/ai-elements/prompt-input";
import { ArtifactTrigger } from "@/components/workspace/artifacts";
@@ -34,8 +34,13 @@ export default function ChatPage() {
const [showFollowups, setShowFollowups] = useState(false);
const { threadId, isNewThread, setIsNewThread, isMock } = useThreadChat();
const [settings, setSettings] = useThreadSettings(threadId);
const [mounted, setMounted] = useState(false);
useSpecificChatMode();
useEffect(() => {
setMounted(true);
}, []);
const { showNotification } = useNotification();
const [thread, sendMessage, isUploading] = useThreadStream({
@@ -131,31 +136,42 @@
/>
</div>
</div>
<InputBox
className={cn("bg-background/5 w-full -translate-y-4")}
isNewThread={isNewThread}
threadId={threadId}
autoFocus={isNewThread}
status={
thread.error
? "error"
: thread.isLoading
? "streaming"
: "ready"
}
context={settings.context}
extraHeader={
isNewThread && <Welcome mode={settings.context.mode} />
}
disabled={
env.NEXT_PUBLIC_STATIC_WEBSITE_ONLY === "true" ||
isUploading
}
onContextChange={(context) => setSettings("context", context)}
onFollowupsVisibilityChange={setShowFollowups}
onSubmit={handleSubmit}
onStop={handleStop}
/>
{mounted ? (
<InputBox
className={cn("bg-background/5 w-full -translate-y-4")}
isNewThread={isNewThread}
threadId={threadId}
autoFocus={isNewThread}
status={
thread.error
? "error"
: thread.isLoading
? "streaming"
: "ready"
}
context={settings.context}
extraHeader={
isNewThread && <Welcome mode={settings.context.mode} />
}
disabled={
env.NEXT_PUBLIC_STATIC_WEBSITE_ONLY === "true" ||
isUploading
}
onContextChange={(context) =>
setSettings("context", context)
}
onFollowupsVisibilityChange={setShowFollowups}
onSubmit={handleSubmit}
onStop={handleStop}
/>
) : (
<div
aria-hidden="true"
className={cn(
"bg-background/5 h-32 w-full -translate-y-4 rounded-2xl border",
)}
/>
)}
{env.NEXT_PUBLIC_STATIC_WEBSITE_ONLY === "true" && (
<div className="text-muted-foreground/67 w-full translate-y-12 text-center text-xs">
{t.common.notAvailableInDemoMode}

View File

@@ -6,7 +6,7 @@ import {
SettingsIcon,
} from "lucide-react";
import { useRouter } from "next/navigation";
import { useCallback, useMemo, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import {
CommandDialog,
@@ -32,10 +32,15 @@ import { SettingsDialog } from "./settings";
export function CommandPalette() {
const { t } = useI18n();
const router = useRouter();
const [mounted, setMounted] = useState(false);
const [open, setOpen] = useState(false);
const [shortcutsOpen, setShortcutsOpen] = useState(false);
const [settingsOpen, setSettingsOpen] = useState(false);
useEffect(() => {
setMounted(true);
}, []);
const handleNewChat = useCallback(() => {
router.push("/workspace/chats/new");
setOpen(false);
@@ -63,11 +68,14 @@
useGlobalShortcuts(shortcuts);
const isMac =
typeof navigator !== "undefined" && navigator.userAgent.includes("Mac");
const isMac = mounted && navigator.userAgent.includes("Mac");
const metaKey = isMac ? "⌘" : "Ctrl+";
const shiftKey = isMac ? "⇧" : "Shift+";
if (!mounted) {
return null;
}
return (
<>
<SettingsDialog open={settingsOpen} onOpenChange={setSettingsOpen} />

View File

@@ -28,9 +28,7 @@ for arg in "$@"; do
done
if $DEV_MODE; then
# Webpack dev mode is more stable for the workspace routes in this repo.
# Turbopack currently triggers hydration mismatch overlays on the chat page.
FRONTEND_CMD="pnpm exec next dev --webpack"
FRONTEND_CMD="pnpm run dev"
else
if command -v python3 >/dev/null 2>&1; then
PYTHON_BIN="python3"
@@ -146,7 +144,13 @@ if [ "${SKIP_LANGGRAPH_SERVER:-0}" != "1" ]; then
# Read log_level from config.yaml, fallback to env var, then to "info"
CONFIG_LOG_LEVEL=$(grep -m1 '^log_level:' config.yaml 2>/dev/null | awk '{print $2}' | tr -d ' ')
LANGGRAPH_LOG_LEVEL="${LANGGRAPH_LOG_LEVEL:-${CONFIG_LOG_LEVEL:-info}}"
(cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS > ../logs/langgraph.log 2>&1) &
LANGGRAPH_JOBS_PER_WORKER="${LANGGRAPH_JOBS_PER_WORKER:-10}"
LANGGRAPH_ALLOW_BLOCKING="${LANGGRAPH_ALLOW_BLOCKING:-0}"
LANGGRAPH_ALLOW_BLOCKING_FLAG=""
if [ "$LANGGRAPH_ALLOW_BLOCKING" = "1" ]; then
LANGGRAPH_ALLOW_BLOCKING_FLAG="--allow-blocking"
fi
(cd backend && NO_COLOR=1 uv run langgraph dev --no-browser $LANGGRAPH_ALLOW_BLOCKING_FLAG --n-jobs-per-worker "$LANGGRAPH_JOBS_PER_WORKER" --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS > ../logs/langgraph.log 2>&1) &
./scripts/wait-for-port.sh 2024 60 "LangGraph" || {
echo " See logs/langgraph.log for details"
tail -20 logs/langgraph.log

View File

@@ -74,7 +74,13 @@ mkdir -p logs
mkdir -p temp/client_body_temp temp/proxy_temp temp/fastcgi_temp temp/uwsgi_temp temp/scgi_temp
echo "Starting LangGraph server..."
nohup sh -c 'cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --no-reload > ../logs/langgraph.log 2>&1' &
LANGGRAPH_JOBS_PER_WORKER="${LANGGRAPH_JOBS_PER_WORKER:-10}"
LANGGRAPH_ALLOW_BLOCKING="${LANGGRAPH_ALLOW_BLOCKING:-0}"
LANGGRAPH_ALLOW_BLOCKING_FLAG=""
if [ "$LANGGRAPH_ALLOW_BLOCKING" = "1" ]; then
LANGGRAPH_ALLOW_BLOCKING_FLAG="--allow-blocking"
fi
nohup sh -c "cd backend && NO_COLOR=1 uv run langgraph dev --no-browser ${LANGGRAPH_ALLOW_BLOCKING_FLAG} --no-reload --n-jobs-per-worker ${LANGGRAPH_JOBS_PER_WORKER} > ../logs/langgraph.log 2>&1" &
./scripts/wait-for-port.sh 2024 60 "LangGraph" || {
echo "✗ LangGraph failed to start. Last log output:"
tail -60 logs/langgraph.log