Compare commits

...

11 Commits

Author SHA1 Message Date
肖应宇 8554423f4e chore(i18n): 更新中文模型推荐项映射 2026-04-24 17:08:15 +08:00
肖应宇 1248084650 fix(frontend-workspace): 修复引用滚动与产物路径解析 2026-04-24 17:08:15 +08:00
肖应宇 138b4a1f7d feat(frontend-messages): 支持摘要折叠与表格导出 2026-04-24 17:08:15 +08:00
肖应宇 612f1cdb9f feat(backend): 增加摘要标题与产物状态对账 2026-04-24 17:08:15 +08:00
肖应宇 0abf5aaff0 fix: 修复单个\n输入,渲染时不会换行的问题 2026-04-24 17:08:15 +08:00
肖应宇 c2464c3449 refactor(workspace): 将颜色 token 重命名为语义化命名 2026-04-24 17:08:15 +08:00
Titan 1ffe32fe00 feat(docs): add Skill Proxy Migration Guide for transitioning to gateway-based API calls 2026-04-23 17:29:27 +08:00
Titan f677c653bd feat(docker): set timezone to Asia/Shanghai and add restart command for services 2026-04-23 17:29:27 +08:00
Titan dabe529cc7 feat(proxy): add third-party proxy module with billing integration
- Introduced a new third-party proxy package for handling async task APIs.
- Implemented billing client with reserve and finalize functionalities.
- Created an in-memory ledger to track call states and ensure idempotency.
- Added route classification for submit and query requests.
- Configured third-party provider settings and routes in the application config.
- Updated local backend to support Docker networking for sandbox containers.
2026-04-23 17:29:27 +08:00
Titan 8d5b01a59b fix(billing_middleware): update model configuration retrieval to use 'model' instead of 'display_name' 2026-04-23 17:29:27 +08:00
Titan 77801c03ff feat(aio_sandbox): add extra_env parameter for thread_id injection in sandbox creation 2026-04-23 17:29:27 +08:00
53 changed files with 2581 additions and 202 deletions

View File

@ -1,4 +1,5 @@
import logging
import os
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
@ -17,21 +18,39 @@ from app.gateway.routers import (
runs,
skills,
suggestions,
third_party,
thread_runs,
threads,
uploads,
)
from deerflow.config.app_config import get_app_config
# Configure logging with env override
import os
log_level = os.environ.get("LOG_LEVEL", "INFO").upper()
# Configure logging (prefer config.yaml log_level, fallback to LOG_LEVEL env)
env_log_level = os.environ.get("LOG_LEVEL", "INFO").upper()
log_level = env_log_level
try:
configured_log_level = get_app_config().log_level.upper()
if configured_log_level:
log_level = configured_log_level
except Exception:
# Keep startup resilient even if config is temporarily invalid/unavailable.
log_level = env_log_level
resolved_log_level = getattr(logging, log_level, logging.INFO)
logging.basicConfig(
level=getattr(logging, log_level, logging.INFO),
level=resolved_log_level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
# Uvicorn installs logging handlers before app import; force reconfigure so
# config.yaml log_level reliably takes effect.
force=True,
)
# Ensure package loggers inherit the intended level even under custom handlers.
logging.getLogger().setLevel(resolved_log_level)
logging.getLogger("app").setLevel(resolved_log_level)
logging.getLogger("deerflow").setLevel(resolved_log_level)
logger = logging.getLogger(__name__)
@ -162,6 +181,10 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
"name": "health",
"description": "Health check and system status endpoints",
},
{
"name": "third-party-proxy",
"description": "Universal third-party API proxy with billing integration (/api/proxy/{provider}/...)",
},
],
)
@ -207,6 +230,9 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
# Stateless Runs API (stream/wait without a pre-existing thread)
app.include_router(runs.router)
# Third-party API proxy with billing integration
app.include_router(third_party.router)
@app.get("/health", tags=["health"])
async def health_check() -> dict:
"""Health check endpoint.

View File

@ -1,3 +1,3 @@
from . import artifacts, assistants_compat, mcp, models, skills, suggestions, thread_runs, threads, uploads
from . import artifacts, assistants_compat, mcp, models, skills, suggestions, third_party, thread_runs, threads, uploads
__all__ = ["artifacts", "assistants_compat", "mcp", "models", "skills", "suggestions", "threads", "thread_runs", "uploads"]
__all__ = ["artifacts", "assistants_compat", "mcp", "models", "skills", "suggestions", "third_party", "threads", "thread_runs", "uploads"]

View File

@ -0,0 +1,403 @@
"""Universal third-party API proxy router with integrated billing.
Endpoint: ANY /api/proxy/{provider}/{path...}
The caller (a sandbox skill script) should set:
X-Thread-Id: <thread_id> used for billing reservation (injected via THREAD_ID env var)
X-Idempotency-Key: <uuid> optional; deduplicates submit calls
The gateway automatically:
1. Injects the provider's API key from the configured env var.
2. For *submit* routes: reserves billing, forwards, records task state.
3. For *query* routes: forwards, detects terminal status, finalizes billing once.
4. For all other routes: transparent passthrough, no billing side-effects.
"""
from __future__ import annotations
import json
import logging
from typing import Any
from fastapi import APIRouter, HTTPException, Request
from fastapi.responses import JSONResponse, Response
from app.gateway.third_party_proxy import billing, proxy
from app.gateway.third_party_proxy.ledger import CallRecord, get_ledger
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/proxy", tags=["third-party-proxy"])
# ---------------------------------------------------------------------------
# Main entry point
# ---------------------------------------------------------------------------
@router.api_route("/{provider}/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
async def proxy_request(provider: str, path: str, request: Request) -> Response:
    """Universal proxy endpoint for third-party API calls with billing integration."""
    cfg = proxy.get_provider_config(provider)
    if cfg is None:
        raise HTTPException(
            status_code=404,
            detail=f"Provider '{provider}' is not configured or the proxy is disabled.",
        )

    method = request.method
    # Ensure exactly one leading slash so patterns like /openapi/v2/** match correctly.
    path = "/" + path.lstrip("/")

    payload = await request.body()
    parsed_payload: dict[str, Any] | None = _try_parse_json(payload)

    # Classify the route once; the classification decides billing semantics.
    submit = proxy.match_submit_route(cfg, method, path)
    query = proxy.match_query_route(cfg, method, path)
    route_kind = "submit" if submit else "query" if query else "passthrough"
    logger.info("[ThirdPartyProxy] route=%s provider=%s method=%s path=%s", route_kind, provider, method, path)

    if submit is not None:
        return await _handle_submit(
            provider=provider,
            provider_config=cfg,
            method=method,
            path=path,
            request=request,
            body=payload,
            thread_id=request.headers.get("x-thread-id"),
            idempotency_key=request.headers.get("x-idempotency-key"),
            task_id_jsonpath=submit.task_id_jsonpath,
            route_frozen_amount=submit.frozen_amount,
            route_frozen_type=submit.frozen_type,
        )

    if query is not None:
        return await _handle_query(
            provider=provider,
            provider_config=cfg,
            method=method,
            path=path,
            request=request,
            body=payload,
            request_json=parsed_payload,
            query_route=query,
        )

    # Neither submit nor query: pure passthrough — no billing, no state.
    return await _passthrough(
        provider_config=cfg,
        method=method,
        path=path,
        request=request,
        body=payload,
    )
# ---------------------------------------------------------------------------
# Submit handler
# ---------------------------------------------------------------------------
async def _handle_submit(
    *,
    provider: str,
    provider_config,
    method: str,
    path: str,
    request: Request,
    body: bytes,
    thread_id: str | None,
    idempotency_key: str | None,
    task_id_jsonpath: str,
    route_frozen_amount: float | None,
    route_frozen_type: int | None,
) -> Response:
    """Handle a billed *submit* call: reserve billing, forward, record task state.

    Flow:
      1. Idempotency replay: a repeated submit with the same key returns the
         cached response without contacting the provider again.
      2. Reserve billing funds before the provider is touched.
      3. Forward; on transport failure or HTTP/business-level rejection the
         reservation is released via a zero-amount finalize.
      4. On success, remember the provider task id so later query calls can
         finalize billing exactly once.
    """
    ledger = get_ledger()

    # 1. Idempotency: if we've already handled this exact submit, return the cached response.
    if idempotency_key:
        existing = ledger.get_by_idempotency_key(provider, idempotency_key)
        if existing is not None and existing.last_response is not None:
            logger.info("[ThirdPartyProxy] idempotent submit: proxy_call_id=%s", existing.proxy_call_id)
            return _proxy_response(existing.last_response, existing.proxy_call_id)

    record = ledger.create(provider, thread_id, idempotency_key)

    # 2. Reserve billing before touching the provider.
    # Route-level frozen amount/type override the provider-level defaults.
    reserve_frozen_amount = route_frozen_amount if route_frozen_amount is not None else provider_config.frozen_amount
    reserve_frozen_type = route_frozen_type if route_frozen_type is not None else provider_config.frozen_type
    frozen_id = await billing.reserve(
        thread_id=thread_id,
        call_id=record.call_id,
        provider=provider,
        operation=path,
        frozen_amount=reserve_frozen_amount,
        frozen_type=reserve_frozen_type,
    )
    if frozen_id:
        ledger.set_reserved(record.proxy_call_id, frozen_id)

    # 3. Forward to provider.
    try:
        status_code, resp_headers, resp_body = await proxy.forward_request(
            provider_config=provider_config,
            method=method,
            path=path,
            headers=dict(request.headers),
            body=body,
            query_params=str(request.query_params),
        )
    except Exception as exc:
        # FIX: reason was "error exception" (with a space), inconsistent with the
        # underscore style of the other machine-readable finalize reasons
        # ("error_http_<code>", "no_task_id").
        await _finalize_zero(frozen_id, record.proxy_call_id, "error_exception")
        raise HTTPException(status_code=502, detail=f"Provider unreachable: {exc}") from exc

    resp_json = _try_parse_json(resp_body)

    # HTTP-level failure: release the reservation and pass the response through.
    if status_code >= 400:
        reason = f"error_http_{status_code}"
        await _finalize_zero(frozen_id, record.proxy_call_id, reason)
        if resp_json is not None:
            ledger.update_response(record.proxy_call_id, resp_json)
        return Response(content=resp_body, status_code=status_code, headers=resp_headers, media_type="application/json")

    # 4. Extract task_id from response; no task_id means provider rejected at business level.
    provider_task_id: str | None = None
    if resp_json is not None:
        raw = proxy.jsonpath_get(resp_json, task_id_jsonpath)
        if raw is not None:
            provider_task_id = str(raw)

    if provider_task_id:
        ledger.set_running(record.proxy_call_id, provider_task_id)
    else:
        # No async task ID usually means provider-side business rejection.
        # Propagate errorCode (if present) into finalize_reason.
        error_code = None
        if resp_json is not None:
            raw_error_code = resp_json.get("errorCode")
            if raw_error_code is None:
                raw_error_code = resp_json.get("code")
            if raw_error_code is not None:
                error_code = str(raw_error_code)
        finalize_reason = error_code or "no_task_id"
        await _finalize_zero(frozen_id, record.proxy_call_id, finalize_reason)

    if resp_json is not None:
        ledger.update_response(record.proxy_call_id, resp_json)
    return _proxy_response(resp_json or {}, record.proxy_call_id, status_code, resp_headers)
# ---------------------------------------------------------------------------
# Query handler
# ---------------------------------------------------------------------------
async def _handle_query(
    *,
    provider: str,
    provider_config,
    method: str,
    path: str,
    request: Request,
    body: bytes,
    request_json: dict[str, Any] | None,
    query_route,
) -> Response:
    """Handle a *query* call: forward, detect terminal status, finalize billing once.

    The call record (and thus any billing reservation) is located via the
    provider task id embedded in the request body. When the query response
    shows a terminal status, exactly one concurrent query wins the right to
    finalize billing; repeat queries after finalization are served from cache.
    """
    ledger = get_ledger()
    # Locate the call record by provider_task_id embedded in the request body
    provider_task_id: str | None = None
    if request_json:
        raw = proxy.jsonpath_get(request_json, query_route.request_task_id_jsonpath)
        if raw is not None:
            provider_task_id = str(raw)
    record: CallRecord | None = None
    if provider_task_id:
        record = ledger.get_by_task_id(provider, provider_task_id)
    # Already at terminal state — return cached result without calling the provider again
    if record is not None and ledger.is_finalized(record.proxy_call_id) and record.last_response is not None:
        logger.info("[ThirdPartyProxy] query already finalized, returning cache: proxy_call_id=%s", record.proxy_call_id)
        return _proxy_response(record.last_response, record.proxy_call_id)
    # Forward query to provider
    try:
        status_code, resp_headers, resp_body = await proxy.forward_request(
            provider_config=provider_config,
            method=method,
            path=path,
            headers=dict(request.headers),
            body=body,
            query_params=str(request.query_params),
        )
    except Exception as exc:
        # No billing side-effect here: a failed query leaves the reservation
        # intact for a later retry.
        raise HTTPException(status_code=502, detail=f"Provider query failed: {exc}") from exc
    resp_json = _try_parse_json(resp_body)
    # Error or non-JSON responses are passed through untouched; terminal-state
    # detection requires a JSON body.
    if status_code >= 400 or resp_json is None:
        return Response(content=resp_body, status_code=status_code, headers=resp_headers, media_type="application/json")
    # Detect terminal status in the response
    status_value = proxy.jsonpath_get(resp_json, query_route.status_jsonpath)
    status_str = str(status_value) if status_value is not None else None
    is_success = status_str in query_route.success_values
    is_failure = status_str in query_route.failure_values
    logger.debug(
        "[ThirdPartyProxy] query terminal check: provider=%s task_id=%s status=%s is_success=%s is_failure=%s",
        provider,
        provider_task_id,
        status_str,
        is_success,
        is_failure,
    )
    if record is not None and (is_success or is_failure):
        logger.info(
            "[ThirdPartyProxy] finalize candidate: proxy_call_id=%s provider_task_id=%s terminal_status=%s",
            record.proxy_call_id,
            provider_task_id,
            status_str,
        )
        # Atomically claim finalize rights — only one concurrent query wins
        if ledger.try_claim_finalize(record.proxy_call_id):
            logger.info(
                "[ThirdPartyProxy] finalize claimed: proxy_call_id=%s",
                record.proxy_call_id,
            )
            # On success, charge the provider-reported usage amount; on
            # failure (or missing/unparseable usage), charge nothing.
            final_amount: float = 0.0
            if is_success and query_route.usage_jsonpath:
                raw_amount = proxy.jsonpath_get(resp_json, query_route.usage_jsonpath)
                try:
                    final_amount = float(raw_amount) if raw_amount is not None else 0.0
                except (TypeError, ValueError):
                    final_amount = 0.0
                logger.debug(
                    "[ThirdPartyProxy] finalize amount resolved: proxy_call_id=%s final_amount=%s usage_path=%s",
                    record.proxy_call_id,
                    final_amount,
                    query_route.usage_jsonpath,
                )
            task_state = "SUCCESS" if is_success else "FAILED"
            finalize_reason = "success" if is_success else "error"
            logger.info(
                "[ThirdPartyProxy] finalize start: proxy_call_id=%s reason=%s task_state=%s has_frozen_id=%s",
                record.proxy_call_id,
                finalize_reason,
                task_state,
                bool(record.frozen_id),
            )
            if record.frozen_id:
                ok = await billing.finalize(
                    frozen_id=record.frozen_id,
                    final_amount=final_amount,
                    finalize_reason=finalize_reason,
                )
                logger.info(
                    "[ThirdPartyProxy] finalize result: proxy_call_id=%s ok=%s",
                    record.proxy_call_id,
                    ok,
                )
                if ok:
                    ledger.set_finalized(record.proxy_call_id, task_state)
                else:
                    # Billing call failed after the claim was taken; the record
                    # is parked in FINALIZE_FAILED so the failure is observable.
                    ledger.set_finalize_failed(record.proxy_call_id, task_state)
            else:
                # No reservation was made (billing disabled or reserve failed),
                # so mark finalized without calling the billing API.
                logger.info(
                    "[ThirdPartyProxy] finalize skipped billing call (no frozen_id): proxy_call_id=%s",
                    record.proxy_call_id,
                )
                ledger.set_finalized(record.proxy_call_id, task_state)
            # Cache the terminal response so later queries hit the fast path.
            ledger.update_response(record.proxy_call_id, resp_json)
        else:
            logger.info(
                "[ThirdPartyProxy] finalize claim denied (already processed): proxy_call_id=%s",
                record.proxy_call_id,
            )
    proxy_call_id = record.proxy_call_id if record else None
    return _proxy_response(resp_json, proxy_call_id, status_code, resp_headers)
# ---------------------------------------------------------------------------
# Passthrough handler
# ---------------------------------------------------------------------------
async def _passthrough(*, provider_config, method: str, path: str, request: Request, body: bytes) -> Response:
    """Forward the request verbatim with no billing or ledger side-effects."""
    try:
        upstream = await proxy.forward_request(
            provider_config=provider_config,
            method=method,
            path=path,
            headers=dict(request.headers),
            body=body,
            query_params=str(request.query_params),
        )
    except Exception as exc:
        raise HTTPException(status_code=502, detail=f"Provider request failed: {exc}") from exc
    status_code, resp_headers, resp_body = upstream
    return Response(content=resp_body, status_code=status_code, headers=resp_headers)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
async def _finalize_zero(frozen_id: str | None, proxy_call_id: str, reason: str) -> None:
    """Finalize with amount=0 when billing was reserved but the call failed."""
    ledger = get_ledger()
    logger.info(
        "[ThirdPartyProxy] finalize_zero requested: proxy_call_id=%s reason=%s has_frozen_id=%s",
        proxy_call_id,
        reason,
        bool(frozen_id),
    )
    # Nothing was reserved: there is nothing to release.
    if not frozen_id:
        logger.debug("[ThirdPartyProxy] finalize_zero skipped: no frozen_id proxy_call_id=%s", proxy_call_id)
        return
    # Only one caller may finalize a given record.
    if not ledger.try_claim_finalize(proxy_call_id):
        logger.info("[ThirdPartyProxy] finalize_zero claim denied: proxy_call_id=%s", proxy_call_id)
        return
    logger.info("[ThirdPartyProxy] finalize_zero claimed: proxy_call_id=%s", proxy_call_id)
    ok = await billing.finalize(frozen_id=frozen_id, final_amount=0, finalize_reason=reason)
    logger.info("[ThirdPartyProxy] finalize_zero result: proxy_call_id=%s ok=%s", proxy_call_id, ok)
    outcome = "SUCCESS" if reason == "success" else "FAILED"
    if ok:
        ledger.set_finalized(proxy_call_id, outcome)
    else:
        ledger.set_finalize_failed(proxy_call_id, outcome)
def _try_parse_json(data: bytes) -> dict[str, Any] | None:
if not data:
return None
try:
parsed = json.loads(data)
return parsed if isinstance(parsed, dict) else None
except (json.JSONDecodeError, ValueError):
return None
def _proxy_response(
    data: dict[str, Any],
    proxy_call_id: str | None,
    status_code: int = 200,
    extra_headers: dict[str, str] | None = None,
) -> JSONResponse:
    """Build a JSON response, tagging it with X-Proxy-Call-Id when known."""
    headers: dict[str, str] = {**(extra_headers or {})}
    if proxy_call_id:
        headers["X-Proxy-Call-Id"] = proxy_call_id
    return JSONResponse(content=data, status_code=status_code, headers=headers)

View File

@ -0,0 +1 @@
"""Third-party proxy package."""

View File

@ -0,0 +1,190 @@
"""Thin async billing client for the third-party proxy.
Calls the same reserve/finalize HTTP endpoints as BillingMiddleware,
but with semantics appropriate for third-party task calls:
- estimatedTokens = 0 (not applicable)
- finalAmount = actual provider monetary charge (thirdPartyConsumeMoney)
"""
from __future__ import annotations
import logging
from datetime import datetime, timedelta
import httpx
from deerflow.config.app_config import get_app_config
logger = logging.getLogger(__name__)
_SUCCESS_STATUS_CODES = {200, 1000}
async def reserve(
    *,
    thread_id: str | None,
    call_id: str,
    provider: str,
    operation: str,
    frozen_amount: float,
    frozen_type: int | None,
) -> str | None:
    """Reserve billing before forwarding a submit call.

    Returns the frozen_id string on success, or None if billing is disabled
    or the reserve call fails (non-blocking proxy continues in that case).
    """
    cfg = get_app_config().billing
    # Billing disabled or not configured: skip the reservation entirely.
    if not cfg.enabled or not cfg.reserve_url:
        logger.info(
            "[ThirdPartyProxy][Billing] reserve skipped: enabled=%s reserve_url=%s call_id=%s",
            cfg.enabled,
            cfg.reserve_url,
            call_id,
        )
        return None
    # NOTE(review): naive local time is used here — presumably the billing
    # platform expects server-local timestamps; confirm if hosts may run in
    # different timezones.
    expire_at = datetime.now() + timedelta(seconds=cfg.default_expire_seconds)
    payload = {
        "sessionId": thread_id,
        "callId": call_id,
        "modelName": provider,
        "question": f"skill invokes {operation.split('/')[-1]}",
        "frozenAmount": frozen_amount,
        # Route-level frozen_type overrides the global billing default.
        "frozenType": frozen_type if frozen_type is not None else cfg.frozen_type,
        # Token estimates are not applicable to third-party task calls.
        "estimatedInputTokens": 0,
        "estimatedOutputTokens": 0,
        "expireAt": expire_at.strftime("%Y-%m-%d %H:%M:%S"),
    }
    logger.info(
        "[ThirdPartyProxy][Billing] reserve request: url=%s call_id=%s provider=%s thread_id=%s",
        cfg.reserve_url,
        call_id,
        provider,
        thread_id,
    )
    logger.debug("[ThirdPartyProxy][Billing] reserve payload: %s", payload)
    try:
        async with httpx.AsyncClient(timeout=cfg.timeout_seconds) as client:
            resp = await client.post(cfg.reserve_url, headers=cfg.headers, json=payload)
            resp.raise_for_status()
            data: dict = resp.json()
    except Exception as exc:
        # Billing must never block the proxy: log and continue unreserved.
        logger.warning("[ThirdPartyProxy][Billing] reserve HTTP error: %s", exc)
        return None
    logger.info(
        "[ThirdPartyProxy][Billing] reserve response: call_id=%s status_code=%s",
        call_id,
        resp.status_code,
    )
    logger.debug("[ThirdPartyProxy][Billing] reserve response body: %s", data)
    # Business-level rejection (HTTP 2xx but non-success status/code).
    if not _is_success(data):
        logger.warning(
            "[ThirdPartyProxy][Billing] reserve rejected: call_id=%s status=%s payload=%s",
            call_id,
            data.get("status") or data.get("code"),
            data,
        )
        return None
    frozen_id = (data.get("data") or {}).get("frozenId")
    if not isinstance(frozen_id, str) or not frozen_id:
        logger.warning(
            "[ThirdPartyProxy][Billing] reserve response missing frozenId: call_id=%s payload=%s",
            call_id,
            data,
        )
        return None
    logger.info("[ThirdPartyProxy][Billing] reserve ok: call_id=%s frozen_id=%s", call_id, frozen_id)
    logger.debug(
        "[ThirdPartyProxy][Billing] reserve success details: provider=%s operation=%s expire_at=%s",
        provider,
        operation,
        payload["expireAt"],
    )
    return frozen_id
async def finalize(
    *,
    frozen_id: str,
    final_amount: float,
    finalize_reason: str,
) -> bool:
    """Finalize billing after a third-party call reaches a terminal state.

    final_amount is the actual provider charge (e.g. thirdPartyConsumeMoney from RunningHub).
    Pass 0 for failed/cancelled calls.

    Returns True on success.
    """
    cfg = get_app_config().billing
    if not cfg.enabled or not cfg.finalize_url:
        # Billing not configured — treat as success so the caller marks the record finalized
        logger.info(
            "[ThirdPartyProxy][Billing] finalize skipped: enabled=%s finalize_url=%s frozen_id=%s",
            cfg.enabled,
            cfg.finalize_url,
            frozen_id,
        )
        return True
    payload = {
        "frozenId": frozen_id,
        "finalAmount": final_amount,
        # Token usage fields are not applicable to third-party task calls.
        "usageInputTokens": 0,
        "usageOutputTokens": 0,
        "usageTotalTokens": 0,
        "finalizeReason": finalize_reason,
    }
    logger.info(
        "[ThirdPartyProxy][Billing] finalize request: frozen_id=%s amount=%s reason=%s url=%s",
        frozen_id,
        final_amount,
        finalize_reason,
        cfg.finalize_url,
    )
    logger.debug("[ThirdPartyProxy][Billing] finalize payload: %s", payload)
    try:
        async with httpx.AsyncClient(timeout=cfg.timeout_seconds) as client:
            resp = await client.post(cfg.finalize_url, headers=cfg.headers, json=payload)
            resp.raise_for_status()
            data: dict = resp.json()
    except Exception as exc:
        # Returning False lets the caller park the record in FINALIZE_FAILED.
        logger.warning("[ThirdPartyProxy][Billing] finalize HTTP error: frozen_id=%s err=%s", frozen_id, exc)
        return False
    logger.info(
        "[ThirdPartyProxy][Billing] finalize response: frozen_id=%s status_code=%s",
        frozen_id,
        resp.status_code,
    )
    logger.debug("[ThirdPartyProxy][Billing] finalize response body: %s", data)
    # Business-level rejection (HTTP 2xx but non-success status/code).
    if not _is_success(data):
        logger.warning(
            "[ThirdPartyProxy][Billing] finalize rejected: frozen_id=%s status=%s payload=%s",
            frozen_id,
            data.get("status") or data.get("code"),
            data,
        )
        return False
    logger.info("[ThirdPartyProxy][Billing] finalize ok: frozen_id=%s", frozen_id)
    logger.debug(
        "[ThirdPartyProxy][Billing] finalize success details: amount=%s reason=%s",
        final_amount,
        finalize_reason,
    )
    return True
def _is_success(data: dict) -> bool:
    """Return True when the billing platform reports success.

    Success is signalled either by an integer status/code contained in
    _SUCCESS_STATUS_CODES or by a boolean ``success: true`` flag.
    """
    code = data.get("status") or data.get("code")
    int_ok = isinstance(code, int) and code in _SUCCESS_STATUS_CODES
    return int_ok or data.get("success") is True

View File

@ -0,0 +1,289 @@
"""In-memory call state ledger for the third-party proxy.
Tracks each proxied call through its lifecycle (reserve → submit → query → finalize),
enforcing idempotency and ensuring billing finalize runs exactly once.
"""
from __future__ import annotations
import logging
import threading
import time
from dataclasses import dataclass, field
from typing import Any, Literal
from uuid import uuid4
logger = logging.getLogger(__name__)
BillingState = Literal["UNRESERVED", "RESERVED", "FINALIZED", "FINALIZE_FAILED"]
TaskState = Literal["PENDING", "RUNNING", "SUCCESS", "FAILED", "UNKNOWN"]
@dataclass
class CallRecord:
    """State of one proxied third-party call, as tracked by the ledger."""

    # Internal UUID identifying this proxied call; returned to clients via
    # the X-Proxy-Call-Id response header.
    proxy_call_id: str
    provider: str
    thread_id: str | None
    # call_id is sent to the billing platform (callId in reserve payload)
    call_id: str
    # Reservation handle from billing.reserve(); None if never reserved.
    frozen_id: str | None = None
    # Async task id returned by the provider on submit; keys the task index.
    provider_task_id: str | None = None
    billing_state: BillingState = "UNRESERVED"
    task_state: TaskState = "PENDING"
    created_at: float = field(default_factory=time.time)
    # Set when the record reaches FINALIZED / FINALIZE_FAILED.
    finalized_at: float | None = None
    error: str | None = None
    idempotency_key: str | None = None
    # Cached last provider response — returned for repeat queries after finalization
    last_response: dict[str, Any] | None = None
class CallLedger:
    """Thread-safe in-memory ledger for third-party proxy call records.

    All reads and writes of the shared record/index dicts are performed
    under ``self._lock`` so concurrent requests observe consistent state.
    Records live for the process lifetime; there is no eviction.
    """

    def __init__(self) -> None:
        self._records: dict[str, CallRecord] = {}  # proxy_call_id → record
        self._task_index: dict[str, str] = {}  # "{provider}:{provider_task_id}" → proxy_call_id
        self._idem_index: dict[str, str] = {}  # "{provider}:{idem_key}" → proxy_call_id
        self._lock = threading.Lock()

    def create(
        self,
        provider: str,
        thread_id: str | None,
        idempotency_key: str | None = None,
    ) -> CallRecord:
        """Create a new call record, or return the existing one if idempotency key matches."""
        with self._lock:
            if idempotency_key:
                existing = self._get_by_idem_key_locked(provider, idempotency_key)
                if existing is not None:
                    logger.info(
                        "[ThirdPartyProxy][Ledger] idempotent hit: provider=%s proxy_call_id=%s idem_key=%s",
                        provider,
                        existing.proxy_call_id,
                        idempotency_key,
                    )
                    return existing
            record = CallRecord(
                proxy_call_id=str(uuid4()),
                provider=provider,
                thread_id=thread_id,
                call_id=str(uuid4()),
                idempotency_key=idempotency_key,
            )
            self._records[record.proxy_call_id] = record
            if idempotency_key:
                self._idem_index[f"{provider}:{idempotency_key}"] = record.proxy_call_id
            logger.info(
                "[ThirdPartyProxy][Ledger] created record: provider=%s proxy_call_id=%s call_id=%s thread_id=%s",
                provider,
                record.proxy_call_id,
                record.call_id,
                thread_id,
            )
            return record

    def get(self, proxy_call_id: str) -> CallRecord | None:
        """Return the record for *proxy_call_id*, or None if unknown."""
        # FIX: read under the lock — the class advertises thread-safety but
        # previously read the shared dict without synchronization.
        with self._lock:
            return self._records.get(proxy_call_id)

    def get_by_task_id(self, provider: str, provider_task_id: str) -> CallRecord | None:
        """Return the record registered for (provider, provider_task_id), or None."""
        with self._lock:  # FIX: previously an unsynchronized read
            proxy_call_id = self._task_index.get(f"{provider}:{provider_task_id}")
            return self._records.get(proxy_call_id) if proxy_call_id else None

    def get_by_idempotency_key(self, provider: str, idempotency_key: str) -> CallRecord | None:
        """Return the record created with *idempotency_key*, or None."""
        # FIX: this called the *_locked helper WITHOUT holding the lock,
        # violating the helper's contract; acquire the lock first.
        with self._lock:
            return self._get_by_idem_key_locked(provider, idempotency_key)

    def set_reserved(self, proxy_call_id: str, frozen_id: str) -> None:
        """Record a successful billing reservation (UNRESERVED → RESERVED)."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.frozen_id = frozen_id
                record.billing_state = "RESERVED"
                logger.info(
                    "[ThirdPartyProxy][Ledger] reserved: proxy_call_id=%s frozen_id=%s",
                    proxy_call_id,
                    frozen_id,
                )
            else:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] set_reserved ignored for missing record: proxy_call_id=%s",
                    proxy_call_id,
                )

    def set_running(self, proxy_call_id: str, provider_task_id: str) -> None:
        """Record the provider task id and index it for later query lookups."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.provider_task_id = provider_task_id
                record.task_state = "RUNNING"
                self._task_index[f"{record.provider}:{provider_task_id}"] = proxy_call_id
                logger.info(
                    "[ThirdPartyProxy][Ledger] running: proxy_call_id=%s provider_task_id=%s",
                    proxy_call_id,
                    provider_task_id,
                )
            else:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] set_running ignored for missing record: proxy_call_id=%s provider_task_id=%s",
                    proxy_call_id,
                    provider_task_id,
                )

    def try_claim_finalize(self, proxy_call_id: str) -> bool:
        """Atomically claim finalization rights. Returns True only once per record."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record is None:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] finalize claim denied: missing record proxy_call_id=%s",
                    proxy_call_id,
                )
                return False
            if record.billing_state in ("FINALIZED", "FINALIZE_FAILED"):
                logger.debug(
                    "[ThirdPartyProxy][Ledger] finalize claim denied: proxy_call_id=%s billing_state=%s",
                    proxy_call_id,
                    record.billing_state,
                )
                return False
            # Mark as finalized immediately to prevent concurrent finalize
            record.billing_state = "FINALIZED"
            logger.info(
                "[ThirdPartyProxy][Ledger] finalize claimed: proxy_call_id=%s",
                proxy_call_id,
            )
            logger.debug(
                "[ThirdPartyProxy][Ledger] finalize claim state: call_id=%s provider=%s task_state=%s frozen_id=%s",
                record.call_id,
                record.provider,
                record.task_state,
                record.frozen_id,
            )
            return True

    def set_finalized(self, proxy_call_id: str, task_state: TaskState) -> None:
        """Record that billing finalize succeeded; stamp the finalization time."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.task_state = task_state
                record.billing_state = "FINALIZED"
                record.finalized_at = time.time()
                logger.info(
                    "[ThirdPartyProxy][Ledger] finalized: proxy_call_id=%s task_state=%s",
                    proxy_call_id,
                    task_state,
                )
                logger.debug(
                    "[ThirdPartyProxy][Ledger] finalized state: provider=%s call_id=%s frozen_id=%s finalized_at=%s",
                    record.provider,
                    record.call_id,
                    record.frozen_id,
                    record.finalized_at,
                )
            else:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] set_finalized ignored for missing record: proxy_call_id=%s task_state=%s",
                    proxy_call_id,
                    task_state,
                )

    def set_finalize_failed(self, proxy_call_id: str, task_state: TaskState) -> None:
        """Record that billing finalize failed (terminal, not retried here)."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.task_state = task_state
                record.billing_state = "FINALIZE_FAILED"
                record.finalized_at = time.time()
                logger.info(
                    "[ThirdPartyProxy][Ledger] finalize failed: proxy_call_id=%s task_state=%s",
                    proxy_call_id,
                    task_state,
                )
                logger.debug(
                    "[ThirdPartyProxy][Ledger] finalize failure state: provider=%s call_id=%s frozen_id=%s finalized_at=%s",
                    record.provider,
                    record.call_id,
                    record.frozen_id,
                    record.finalized_at,
                )
            else:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] set_finalize_failed ignored for missing record: proxy_call_id=%s task_state=%s",
                    proxy_call_id,
                    task_state,
                )

    def update_response(self, proxy_call_id: str, response: dict[str, Any]) -> None:
        """Cache the latest provider response on the record (for idempotent replays)."""
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.last_response = response
                logger.debug(
                    "[ThirdPartyProxy][Ledger] cached response: proxy_call_id=%s keys=%s",
                    proxy_call_id,
                    sorted(response.keys()),
                )
            else:
                logger.debug(
                    "[ThirdPartyProxy][Ledger] update_response ignored for missing record: proxy_call_id=%s",
                    proxy_call_id,
                )

    def is_finalized(self, proxy_call_id: str) -> bool:
        """Return True when the record exists and billing reached a terminal state."""
        with self._lock:  # FIX: previously an unsynchronized read
            record = self._records.get(proxy_call_id)
            return record is not None and record.billing_state in ("FINALIZED", "FINALIZE_FAILED")

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------
    def _get_by_idem_key_locked(self, provider: str, idempotency_key: str) -> CallRecord | None:
        # Caller must hold self._lock.
        key = f"{provider}:{idempotency_key}"
        proxy_call_id = self._idem_index.get(key)
        return self._records.get(proxy_call_id) if proxy_call_id else None
# ---------------------------------------------------------------------------
# Module-level singleton
# ---------------------------------------------------------------------------
_ledger: CallLedger | None = None
_ledger_lock = threading.Lock()


def get_ledger() -> CallLedger:
    """Return the process-wide CallLedger, creating it lazily on first use."""
    global _ledger
    # Fast path: already initialized, no lock needed.
    ledger = _ledger
    if ledger is not None:
        return ledger
    with _ledger_lock:
        # Re-check under the lock: another thread may have won the race.
        if _ledger is None:
            _ledger = CallLedger()
            logger.info("[ThirdPartyProxy][Ledger] singleton initialized")
        return _ledger

View File

@ -0,0 +1,246 @@
"""HTTP forwarding, route classification, and JSONPath extraction for the third-party proxy."""
from __future__ import annotations
import logging
import os
from typing import Any
import httpx
from deerflow.config.app_config import get_app_config
from deerflow.config.third_party_proxy_config import (
QueryRouteConfig,
SubmitRouteConfig,
ThirdPartyProviderConfig,
)
logger = logging.getLogger(__name__)
# Lower-cased names of credential/cookie headers considered sensitive.
# NOTE(review): this set is not referenced anywhere in this chunk —
# presumably consumed by forward_request for header filtering/redaction;
# confirm before relying on it.
_SENSITIVE_HEADERS = frozenset(
    [
        "authorization",
        "proxy-authorization",
        "x-api-key",
        "api-key",
        "cookie",
        "set-cookie",
    ]
)
# ---------------------------------------------------------------------------
# Provider config lookup
# ---------------------------------------------------------------------------


def get_provider_config(provider: str) -> ThirdPartyProviderConfig | None:
    """Return the provider config for *provider*, or None if not configured/disabled."""
    proxy_config = get_app_config().third_party_proxy
    if proxy_config.enabled:
        return proxy_config.providers.get(provider)
    return None
# ---------------------------------------------------------------------------
# Route classification
# ---------------------------------------------------------------------------


def match_submit_route(
    config: ThirdPartyProviderConfig,
    method: str,
    path: str,
) -> SubmitRouteConfig | None:
    """Return the first submit route that matches (method, path), or None."""
    wanted_method = method.upper()
    for candidate in config.submit_routes:
        if candidate.method.upper() != wanted_method:
            continue
        if not _path_matches(path, candidate.path_pattern):
            continue
        # A configured exclude pattern carves paths back out of a broad glob.
        exclude = candidate.exclude_path_pattern
        if exclude and _path_matches(path, exclude):
            continue
        return candidate
    return None
def match_query_route(
    config: ThirdPartyProviderConfig,
    method: str,
    path: str,
) -> QueryRouteConfig | None:
    """Return the first query route that matches (method, path), or None."""
    wanted_method = method.upper()
    hits = (
        route
        for route in config.query_routes
        if route.method.upper() == wanted_method
        and _path_matches(path, route.path_pattern)
    )
    return next(hits, None)
def _path_matches(path: str, pattern: str) -> bool:
"""Match *path* against a glob-ish *pattern*.
Rules:
- Pattern ending in /** matches the prefix and any sub-path.
- Otherwise exact match.
"""
# Normalise trailing slashes
path = path.rstrip("/") or "/"
pattern = pattern.rstrip("/") or "/"
if pattern.endswith("/**"):
prefix = pattern[:-3]
return path == prefix or path.startswith(prefix + "/")
return path == pattern
# ---------------------------------------------------------------------------
# Minimal path evaluator (dot-notation shorthand only)
# ---------------------------------------------------------------------------


def jsonpath_get(data: Any, path: str) -> Any:
    """Extract a value from *data* using a simple dot-notation shorthand path.

    Supports paths like: taskId usage.thirdPartyConsumeMoney
    Paths with a leading '$' are intentionally not supported.
    Returns None if any segment is missing or the input is not a dict.
    """
    if not isinstance(path, str):
        return None
    cleaned = path.strip()
    if not cleaned or cleaned.startswith("$"):
        return None
    node: Any = data
    for segment in cleaned.split("."):
        # Empty segments ("a..b") and non-dict intermediates abort the walk.
        if not segment or not isinstance(node, dict):
            return None
        node = node.get(segment)
        if node is None:
            return None
    return node
# ---------------------------------------------------------------------------
# HTTP forwarding
# ---------------------------------------------------------------------------

# Request headers we never forward (hop-by-hop, sensitive, or proxy-internal)
_STRIP_REQUEST_HEADERS = frozenset(
    [
        "host",
        "content-length",
        "transfer-encoding",
        "connection",
        # Proxy-internal routing/idempotency headers; must not leak upstream.
        "x-thread-id",
        "x-idempotency-key",
    ]
)

# Response headers we strip before returning to the caller
# NOTE(review): content-encoding/content-length are presumably dropped because
# the body is re-serialized from the decoded response content — confirm.
_STRIP_RESPONSE_HEADERS = frozenset(
    [
        "transfer-encoding",
        "connection",
        "keep-alive",
        "content-encoding",
        "content-length",
    ]
)
def _sanitize_headers(headers: dict[str, str]) -> dict[str, str]:
    """Return a copy of headers with sensitive values redacted."""
    return {
        name: ("***" if name.lower() in _SENSITIVE_HEADERS else value)
        for name, value in headers.items()
    }
def _preview_body(data: bytes, limit: int = 2048) -> str:
"""Return a safe textual preview of body bytes for debugging logs."""
if not data:
return ""
chunk = data[:limit]
text = chunk.decode("utf-8", errors="replace")
if len(data) > limit:
text += f" ...<truncated {len(data) - limit} bytes>"
return text
async def forward_request(
    *,
    provider_config: ThirdPartyProviderConfig,
    method: str,
    path: str,
    headers: dict[str, str],
    body: bytes,
    query_params: str,
) -> tuple[int, dict[str, str], bytes]:
    """Forward *method* *path* to the provider and return (status_code, headers, body).

    The provider's API key (read from the environment variable named in
    ``provider_config.api_key_env``) is injected automatically, replacing
    any Authorization header the caller might have sent.

    Args:
        provider_config: Target provider settings (base_url, API-key config, timeout).
        method: HTTP method to use upstream, e.g. "POST".
        path: Request path relative to the provider's base_url.
        headers: Incoming request headers; entries in _STRIP_REQUEST_HEADERS
            are dropped before forwarding.
        body: Raw request body bytes, forwarded unchanged.
        query_params: Raw query string without the leading "?"; appended as-is.

    Returns:
        (status_code, response_headers, response_body) where response_headers
        has entries listed in _STRIP_RESPONSE_HEADERS removed.
    """
    # Join base_url and path with exactly one slash between them.
    target_url = provider_config.base_url.rstrip("/") + "/" + path.lstrip("/")
    if query_params:
        target_url += "?" + query_params
    # Build forwarded headers: drop internal/hop-by-hop, then inject API key
    forward_headers = {
        k: v for k, v in headers.items() if k.lower() not in _STRIP_REQUEST_HEADERS
    }
    if provider_config.api_key_env:
        api_key = os.getenv(provider_config.api_key_env)
        if api_key:
            forward_headers[provider_config.api_key_header] = provider_config.api_key_prefix + api_key
        else:
            # Proceed without credentials and surface the misconfiguration in
            # the logs; the provider will most likely reject the call.
            logger.warning(
                "[ThirdPartyProxy] api_key_env '%s' is not set for provider",
                provider_config.api_key_env,
            )
    logger.info("[ThirdPartyProxy] → %s %s", method, target_url)
    # Sensitive header values are redacted before any logging.
    logger.debug(
        "[ThirdPartyProxy] request headers=%s",
        _sanitize_headers(forward_headers)
    )
    logger.debug(
        "[ThirdPartyProxy] request body(%dB)=%s",
        len(body),
        _preview_body(body),
    )
    # One-shot client per call; timeout comes from the provider config.
    async with httpx.AsyncClient(timeout=provider_config.timeout_seconds) as client:
        response = await client.request(
            method=method,
            url=target_url,
            headers=forward_headers,
            content=body,
        )
    response_headers = {
        k: v
        for k, v in response.headers.items()
        if k.lower() not in _STRIP_RESPONSE_HEADERS
    }
    logger.info("[ThirdPartyProxy] ← %s %s %d", method, target_url, response.status_code)
    logger.debug(
        "[ThirdPartyProxy] response headers=%s",
        _sanitize_headers(response_headers)
    )
    logger.debug(
        "[ThirdPartyProxy] response body(%dB)=%s",
        len(response.content),
        _preview_body(response.content),
    )
    return response.status_code, response_headers, response.content

View File

@ -2,10 +2,12 @@ import logging
from langchain.agents import create_agent
from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware
from langchain_core.messages.human import HumanMessage
from langchain_core.runnables import RunnableConfig
from deerflow.agents.lead_agent.prompt import apply_prompt_template
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware
from deerflow.agents.middlewares.artifact_reconcile_middleware import ArtifactReconcileMiddleware
from deerflow.agents.middlewares.loop_detection_middleware import LoopDetectionMiddleware
from deerflow.agents.middlewares.message_timestamp_middleware import MessageTimestampMiddleware
from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware
@ -23,6 +25,15 @@ from deerflow.models import create_chat_model
logger = logging.getLogger(__name__)

# User-facing heading prepended to condensed history; translates to
# "Below is a summary of the conversation so far:".
SUMMARY_MESSAGE_TITLE = "以下是目前对话的摘要:"


class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
    """Summarization middleware with DeerFlow's user-facing summary heading."""

    def _build_new_messages(self, summary: str) -> list[HumanMessage]:
        # Override the library default so the summary message is framed with
        # DeerFlow's localized heading.
        return [HumanMessage(content=f"{SUMMARY_MESSAGE_TITLE}\n\n{summary}")]
def _resolve_model_name(requested_model_name: str | None = None) -> str:
"""Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
@ -78,7 +89,7 @@ def _create_summarization_middleware() -> SummarizationMiddleware | None:
if config.summary_prompt is not None:
kwargs["summary_prompt"] = config.summary_prompt
return SummarizationMiddleware(**kwargs)
return DeerFlowSummarizationMiddleware(**kwargs)
def _create_todo_list_middleware(is_plan_mode: bool) -> TodoMiddleware | None:
@ -234,6 +245,9 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
if get_app_config().token_usage.enabled:
middlewares.append(TokenUsageMiddleware())
# Reconcile stale artifact entries against real outputs files.
middlewares.append(ArtifactReconcileMiddleware())
# Stamp every conversation message with backend timestamp metadata.
middlewares.append(MessageTimestampMiddleware())

View File

@ -0,0 +1,114 @@
import logging
from pathlib import Path
from typing import NotRequired, override
from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime
from deerflow.agents.thread_state import (
ARTIFACTS_REPLACE_SENTINEL,
ThreadDataState,
)
from deerflow.config.paths import VIRTUAL_PATH_PREFIX
logger = logging.getLogger(__name__)

# Virtual path prefix under which user-facing output artifacts live
# (built from VIRTUAL_PATH_PREFIX, e.g. "/mnt/user-data/outputs/").
_OUTPUTS_VIRTUAL_PREFIX = f"{VIRTUAL_PATH_PREFIX}/outputs/"
# Same prefix without the leading slash, for startswith checks against
# paths that have already been lstrip("/")-ed.
_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH = _OUTPUTS_VIRTUAL_PREFIX.lstrip("/")


class ArtifactReconcileState(AgentState):
    """Compatible with the `ThreadState` schema."""

    # Virtual artifact paths currently tracked in thread state.
    artifacts: NotRequired[list[str] | None]
    # Per-thread data; this middleware reads thread_data["outputs_path"].
    thread_data: NotRequired[ThreadDataState | None]
class ArtifactReconcileMiddleware(AgentMiddleware[ArtifactReconcileState]):
    """Keep artifact state aligned with files currently in outputs.

    Before each model call this middleware (1) drops artifact entries whose
    backing file no longer exists under the thread's outputs directory, and
    (2) appends files found on disk that state does not yet track. Any change
    is returned as a replacement list headed by ARTIFACTS_REPLACE_SENTINEL so
    the artifacts reducer overwrites rather than merges.
    """

    state_schema = ArtifactReconcileState

    def _to_outputs_file(self, virtual_path: str, outputs_dir: Path) -> Path | None:
        """Map a virtual artifact path to its real file under *outputs_dir*.

        Returns None for paths outside the outputs namespace and for paths
        that would resolve outside outputs_dir (e.g. via "..").
        """
        stripped = virtual_path.lstrip("/")
        if not stripped.startswith(_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH):
            # Keep non-outputs paths untouched; this middleware is for outputs drift.
            return None
        relative = stripped[len(_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH) :]
        if not relative:
            return None
        candidate = (outputs_dir / relative).resolve()
        try:
            # Containment check: reject escapes from outputs_dir after resolution.
            candidate.relative_to(outputs_dir)
        except ValueError:
            return None
        return candidate

    def _to_virtual_artifact(self, actual_path: Path, outputs_dir: Path) -> str | None:
        """Inverse of _to_outputs_file: real file path -> virtual artifact path."""
        try:
            relative = actual_path.resolve().relative_to(outputs_dir)
        except ValueError:
            return None
        return f"{_OUTPUTS_VIRTUAL_PREFIX}{relative.as_posix()}"

    def _discover_outputs(self, outputs_dir: Path) -> list[str]:
        """Return virtual paths for every regular file under *outputs_dir*.

        The sorted recursive walk keeps discovery order deterministic.
        """
        if not outputs_dir.is_dir():
            return []
        discovered: list[str] = []
        for path in sorted(outputs_dir.rglob("*")):
            if not path.is_file():
                continue
            virtual_path = self._to_virtual_artifact(path, outputs_dir)
            if virtual_path:
                discovered.append(virtual_path)
        return discovered

    @override
    def before_model(
        self,
        state: ArtifactReconcileState,
        runtime: Runtime,  # noqa: ARG002
    ) -> dict | None:
        """Reconcile state["artifacts"] against the outputs directory.

        Returns None when nothing changed; otherwise a state update whose
        artifacts list starts with ARTIFACTS_REPLACE_SENTINEL.
        """
        artifacts = state.get("artifacts") or []
        thread_data = state.get("thread_data") or {}
        outputs_path = thread_data.get("outputs_path")
        if not outputs_path:
            # No outputs directory configured -> nothing to reconcile.
            return None
        outputs_dir = Path(outputs_path).resolve()
        kept: list[str] = []
        changed = False
        for artifact in artifacts:
            if not isinstance(artifact, str):
                # Malformed entry: drop it and mark state as changed.
                changed = True
                continue
            actual_path = self._to_outputs_file(artifact, outputs_dir)
            if actual_path is None:
                # Non-outputs artifact (or unsafe path): keep unchanged.
                kept.append(artifact)
                continue
            if actual_path.exists() and actual_path.is_file():
                kept.append(artifact)
            else:
                changed = True
                logger.info(
                    "Reconciled stale artifact from state: virtual=%s outputs_dir=%s",
                    artifact,
                    outputs_dir,
                )
        # Merge on-disk files missing from state, de-duplicating while
        # preserving order (kept entries first).
        discovered = self._discover_outputs(outputs_dir)
        merged = list(dict.fromkeys([*kept, *discovered]))
        if merged != kept:
            changed = True
        if not changed:
            return None
        return {"artifacts": [ARTIFACTS_REPLACE_SENTINEL, *merged]}

View File

@ -438,8 +438,8 @@ def _resolve_model_name(model_key: str | None) -> str | None:
if not model_key:
return None
model_cfg = get_app_config().get_model_config(model_key)
if model_cfg and model_cfg.display_name:
return model_cfg.display_name
if model_cfg and model_cfg.model:
return model_cfg.model
return model_key

View File

@ -2,6 +2,8 @@ from typing import Annotated, NotRequired, TypedDict
from langchain.agents import AgentState
ARTIFACTS_REPLACE_SENTINEL = "__deerflow_replace_artifacts__"
class SandboxState(TypedDict):
sandbox_id: NotRequired[str | None]
@ -20,6 +22,8 @@ class ViewedImageData(TypedDict):
def merge_artifacts(existing: list[str] | None, new: list[str] | None) -> list[str]:
"""Reducer for artifacts list - merges and deduplicates artifacts."""
if new and new[0] == ARTIFACTS_REPLACE_SENTINEL:
return list(dict.fromkeys(new[1:]))
if existing is None:
return new or []
if new is None:

View File

@ -514,7 +514,7 @@ class AioSandboxProvider(SandboxProvider):
# that is actively serving a thread.
logger.warning(f"All {replicas} replica slots are in active use; creating sandbox {sandbox_id} beyond the soft limit")
info = self._backend.create(thread_id, sandbox_id, extra_mounts=extra_mounts or None)
info = self._backend.create(thread_id, sandbox_id, extra_mounts=extra_mounts or None, extra_env={"THREAD_ID": thread_id} if thread_id else None)
# Wait for sandbox to be ready
if not wait_for_sandbox_ready(info.sandbox_url, timeout=60):

View File

@ -44,7 +44,7 @@ class SandboxBackend(ABC):
"""
@abstractmethod
def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None) -> SandboxInfo:
def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None, extra_env: dict[str, str] | None = None) -> SandboxInfo:
"""Create/provision a new sandbox.
Args:
@ -52,6 +52,9 @@ class SandboxBackend(ABC):
sandbox_id: Deterministic sandbox identifier.
extra_mounts: Additional volume mounts as (host_path, container_path, read_only) tuples.
Ignored by backends that don't manage containers (e.g., remote).
extra_env: Additional environment variables to inject at runtime (e.g. THREAD_ID).
These are merged after static config env vars, so runtime values override same-key static values.
Ignored by backends that don't manage containers (e.g., remote).
Returns:
SandboxInfo with connection details.

View File

@ -110,7 +110,7 @@ class LocalContainerBackend(SandboxBackend):
# ── SandboxBackend interface ──────────────────────────────────────────
def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None) -> SandboxInfo:
def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None, extra_env: dict[str, str] | None = None) -> SandboxInfo:
"""Start a new container and return its connection info.
Args:
@ -137,7 +137,7 @@ class LocalContainerBackend(SandboxBackend):
for _attempt in range(10):
port = get_free_port(start_port=_next_start)
try:
container_id = self._start_container(container_name, port, extra_mounts)
container_id = self._start_container(container_name, port, extra_mounts, extra_env=extra_env)
break
except RuntimeError as exc:
release_port(port)
@ -229,6 +229,7 @@ class LocalContainerBackend(SandboxBackend):
container_name: str,
port: int,
extra_mounts: list[tuple[str, str, bool]] | None = None,
extra_env: dict[str, str] | None = None,
) -> str:
"""Start a new container.
@ -260,9 +261,17 @@ class LocalContainerBackend(SandboxBackend):
]
)
# Environment variables
# On Linux, containers started via DooD (Docker-out-of-Docker) do not
# automatically resolve host.docker.internal. Add the mapping explicitly
# so sandbox containers can call back into the host-exposed gateway.
if self._runtime == "docker":
cmd.extend(["--add-host", "host.docker.internal:host-gateway"])
# Environment variables (static config first, runtime overrides last)
for key, value in self._environment.items():
cmd.extend(["-e", f"{key}={value}"])
for key, value in (extra_env or {}).items():
cmd.extend(["-e", f"{key}={value}"])
# Config-level volume mounts
for mount in self._config_mounts:

View File

@ -60,6 +60,7 @@ class RemoteSandboxBackend(SandboxBackend):
thread_id: str,
sandbox_id: str,
extra_mounts: list[tuple[str, str, bool]] | None = None,
extra_env: dict[str, str] | None = None,
) -> SandboxInfo:
"""Create a sandbox Pod + Service via the provisioner.

View File

@ -20,6 +20,7 @@ from deerflow.config.skills_config import SkillsConfig
from deerflow.config.stream_bridge_config import StreamBridgeConfig, load_stream_bridge_config_from_dict
from deerflow.config.subagents_config import SubagentsAppConfig, load_subagents_config_from_dict
from deerflow.config.summarization_config import SummarizationConfig, load_summarization_config_from_dict
from deerflow.config.third_party_proxy_config import ThirdPartyProxyConfig
from deerflow.config.title_config import TitleConfig, load_title_config_from_dict
from deerflow.config.token_usage_config import TokenUsageConfig
from deerflow.config.tool_config import ToolConfig, ToolGroupConfig
@ -42,6 +43,7 @@ class AppConfig(BaseModel):
log_level: str = Field(default="info", description="Logging level for deerflow modules (debug/info/warning/error)")
billing: BillingConfig = Field(default_factory=BillingConfig, description="External billing reservation/finalization configuration")
third_party_proxy: ThirdPartyProxyConfig = Field(default_factory=ThirdPartyProxyConfig, description="Third-party API proxy with billing integration")
token_usage: TokenUsageConfig = Field(default_factory=TokenUsageConfig, description="Token usage tracking configuration")
models: list[ModelConfig] = Field(default_factory=list, description="Available models")
sandbox: SandboxConfig = Field(description="Sandbox configuration")

View File

@ -0,0 +1,108 @@
"""Configuration for the third-party API proxy with billing integration."""
from __future__ import annotations
from pydantic import BaseModel, Field
class SubmitRouteConfig(BaseModel):
    """Identifies a submit request — triggers billing reserve + task state tracking."""

    method: str = Field(default="POST", description="HTTP method to match (case-insensitive)")
    path_pattern: str = Field(
        description="Glob-style path pattern. Use ** to match any sub-path, e.g. /openapi/v2/**"
    )
    # A path must match path_pattern and NOT match exclude_path_pattern, letting
    # a broad glob carve out specific endpoints (e.g. the query route).
    exclude_path_pattern: str | None = Field(
        default=None,
        description="If set, paths matching this pattern are excluded from submit handling",
    )
    task_id_jsonpath: str = Field(
        description="Dot-path into the *response* body to extract the provider task ID, e.g. taskId"
    )
    # Route-level billing overrides; when None the provider-level values apply.
    frozen_amount: float | None = Field(
        default=None,
        ge=0,
        description="Optional route-level override for billing reserve payload frozenAmount",
    )
    frozen_type: int | None = Field(
        default=None,
        description="Optional route-level override for billing reserve payload frozenType",
    )
class QueryRouteConfig(BaseModel):
    """Identifies a query/poll request — checks for terminal status + triggers billing finalize."""

    method: str = Field(default="POST", description="HTTP method to match (case-insensitive)")
    path_pattern: str = Field(description="Glob-style path pattern for the query endpoint")
    request_task_id_jsonpath: str = Field(
        description="Dot-path into the *request* body to extract the task ID being queried"
    )
    status_jsonpath: str = Field(
        description="Dot-path into the response body to read the task status value"
    )
    # Terminal-state classification: a status in success_values or failure_values
    # marks the task finished; any other status means it is still running.
    success_values: list[str] = Field(
        default_factory=list,
        description="Status string values that indicate successful terminal state, e.g. [\"SUCCESS\"]",
    )
    failure_values: list[str] = Field(
        default_factory=list,
        description="Status string values that indicate failed terminal state, e.g. [\"FAILED\", \"CANCELLED\"]",
    )
    usage_jsonpath: str | None = Field(
        default=None,
        description=(
            "Dot-path into the response body for the actual monetary cost to pass to billing finalize. "
            "E.g. usage.thirdPartyConsumeMoney"
        ),
    )
class ThirdPartyProviderConfig(BaseModel):
    """Configuration for a single third-party API platform."""

    base_url: str = Field(description="Base URL of the provider, e.g. https://www.runninghub.cn")
    # API-key injection: the key is read from the environment (never stored in
    # config) and sent as "{api_key_prefix}{key}" in the api_key_header header.
    api_key_env: str | None = Field(
        default=None,
        description="Name of the environment variable holding the API key",
    )
    api_key_header: str = Field(
        default="Authorization",
        description="Request header name for the API key",
    )
    api_key_prefix: str = Field(
        default="Bearer ",
        description="String prepended to the API key value in the header",
    )
    timeout_seconds: float = Field(
        default=30.0,
        gt=0,
        description="HTTP request timeout when forwarding to the provider",
    )
    # Provider-level billing defaults; individual submit routes may override.
    frozen_amount: float = Field(
        default=0.0,
        ge=0,
        description="Amount to reserve in billing reserve payload (frozenAmount)",
    )
    frozen_type: int | None = Field(
        default=None,
        description="Billing frozen type for this provider (frozenType). If omitted, falls back to billing.frozen_type",
    )
    submit_routes: list[SubmitRouteConfig] = Field(
        default_factory=list,
        description="Route patterns that identify submit (task-create) requests",
    )
    query_routes: list[QueryRouteConfig] = Field(
        default_factory=list,
        description="Route patterns that identify query/poll requests",
    )
class ThirdPartyProxyConfig(BaseModel):
    """Top-level configuration for the third-party API proxy."""

    enabled: bool = Field(default=False, description="Enable the proxy endpoint")
    # The provider name used as a key here doubles as the URL path segment:
    # /api/proxy/{provider}/...
    providers: dict[str, ThirdPartyProviderConfig] = Field(
        default_factory=dict,
        description="Keyed by provider name (used in the URL path /api/proxy/{provider}/...)",
    )

View File

@ -1,4 +1,6 @@
from deerflow.community.aio_sandbox.local_backend import _format_container_mount
from unittest.mock import MagicMock
from deerflow.community.aio_sandbox.local_backend import LocalContainerBackend, _format_container_mount
def test_format_container_mount_uses_mount_syntax_for_docker_windows_paths():
@ -26,3 +28,90 @@ def test_format_container_mount_keeps_volume_syntax_for_apple_container():
"-v",
"/host/path:/mnt/path:ro",
]
# ── extra_env injection ──────────────────────────────────────────────────────


def _make_backend(runtime: str = "docker") -> LocalContainerBackend:
    """Build a minimal LocalContainerBackend without real config.

    Bypasses __init__ via __new__ and sets only the private attributes that
    _start_container reads, so no config file or container runtime is needed.
    """
    backend = LocalContainerBackend.__new__(LocalContainerBackend)
    backend._runtime = runtime
    backend._container_prefix = "test"
    backend._environment = {}
    backend._config_mounts = []
    backend._base_port = 9000
    backend._image = "test-image:latest"
    return backend
def test_start_container_injects_extra_env(monkeypatch):
    """_start_container must append -e KEY=VALUE for each extra_env entry."""
    backend = _make_backend()
    captured: list[list[str]] = []

    # Stub subprocess.run so no container is started; record the command line.
    def fake_run(cmd, **_kwargs):
        captured.append(list(cmd))
        result = MagicMock()
        result.returncode = 0
        result.stdout = "fake-container-id\n"
        return result

    monkeypatch.setattr("deerflow.community.aio_sandbox.local_backend.subprocess.run", fake_run)
    backend._start_container("c", 9000, extra_env={"THREAD_ID": "thread-abc", "FOO": "bar"})
    cmd = captured[0]
    assert "-e" in cmd
    # Collect every KEY=VALUE argument that follows a "-e" flag.
    env_pairs = {cmd[i + 1] for i in range(len(cmd)) if cmd[i] == "-e"}
    assert "THREAD_ID=thread-abc" in env_pairs
    assert "FOO=bar" in env_pairs
def test_start_container_no_extra_env_does_not_inject(monkeypatch):
    """_start_container with no extra_env must not add unexpected -e flags."""
    backend = _make_backend()
    captured: list[list[str]] = []

    # Stub subprocess.run so no container is started; record the command line.
    def fake_run(cmd, **_kwargs):
        captured.append(list(cmd))
        result = MagicMock()
        result.returncode = 0
        result.stdout = "fake-container-id\n"
        return result

    monkeypatch.setattr("deerflow.community.aio_sandbox.local_backend.subprocess.run", fake_run)
    backend._start_container("c", 9000)
    cmd = captured[0]
    env_pairs = {cmd[i + 1] for i in range(len(cmd)) if cmd[i] == "-e"}
    # THREAD_ID must only ever come from extra_env, never implicitly.
    assert all("THREAD_ID" not in pair for pair in env_pairs)
def test_start_container_extra_env_overrides_static_env(monkeypatch):
    """Runtime extra_env values must appear after static env, effectively overriding same-key entries."""
    backend = _make_backend()
    backend._environment = {"MY_VAR": "static"}
    captured: list[list[str]] = []

    # Stub subprocess.run so no container is started; record the command line.
    def fake_run(cmd, **_kwargs):
        captured.append(list(cmd))
        result = MagicMock()
        result.returncode = 0
        result.stdout = "fake-container-id\n"
        return result

    monkeypatch.setattr("deerflow.community.aio_sandbox.local_backend.subprocess.run", fake_run)
    backend._start_container("c", 9000, extra_env={"MY_VAR": "runtime"})
    cmd = captured[0]
    # Ordered list (not a set): relative position of the flags matters here.
    env_pairs = [cmd[i + 1] for i in range(len(cmd)) if cmd[i] == "-e"]
    # Both entries should be present; the runtime one comes after, which Docker respects
    assert "MY_VAR=static" in env_pairs
    assert "MY_VAR=runtime" in env_pairs
    assert env_pairs.index("MY_VAR=runtime") > env_pairs.index("MY_VAR=static")

View File

@ -134,3 +134,68 @@ def test_discover_or_create_only_unlocks_when_lock_succeeds(tmp_path, monkeypatc
provider._discover_or_create_with_lock("thread-5", "sandbox-5")
assert unlock_calls == []
# ── THREAD_ID env injection ──────────────────────────────────────────────────


def test_create_sandbox_passes_thread_id_as_extra_env(tmp_path, monkeypatch):
    """_create_sandbox must pass extra_env={'THREAD_ID': thread_id} to backend.create."""
    aio_mod = importlib.import_module("deerflow.community.aio_sandbox.aio_sandbox_provider")
    # Neutralize path lookup and extra-mount discovery.
    monkeypatch.setattr(aio_mod, "get_paths", lambda: MagicMock())
    monkeypatch.setattr(aio_mod.AioSandboxProvider, "_get_extra_mounts", lambda self, tid: [])
    provider = _make_provider(tmp_path)
    # Reset provider bookkeeping so _create_sandbox starts from a clean slate.
    provider._config = {"replicas": 100}
    provider._warm_pool = {}
    provider._sandbox_infos = {}
    provider._thread_sandboxes = {}
    provider._thread_locks = {}
    provider._last_activity = {}
    fake_info = MagicMock()
    fake_info.sandbox_url = "http://localhost:9999"
    backend_mock = MagicMock()
    backend_mock.create.return_value = fake_info
    provider._backend = backend_mock
    # Skip the readiness poll so the test never touches the network.
    with patch.object(aio_mod, "wait_for_sandbox_ready", return_value=True):
        provider._create_sandbox("thread-xyz", "sandbox-1")
    backend_mock.create.assert_called_once_with(
        "thread-xyz",
        "sandbox-1",
        extra_mounts=None,
        extra_env={"THREAD_ID": "thread-xyz"},
    )
def test_create_sandbox_no_thread_id_passes_no_extra_env(tmp_path, monkeypatch):
    """_create_sandbox with thread_id=None must not inject THREAD_ID."""
    aio_mod = importlib.import_module("deerflow.community.aio_sandbox.aio_sandbox_provider")
    # Neutralize path lookup and extra-mount discovery.
    monkeypatch.setattr(aio_mod, "get_paths", lambda: MagicMock())
    monkeypatch.setattr(aio_mod.AioSandboxProvider, "_get_extra_mounts", lambda self, tid: [])
    provider = _make_provider(tmp_path)
    # Reset provider bookkeeping so _create_sandbox starts from a clean slate.
    provider._config = {"replicas": 100}
    provider._warm_pool = {}
    provider._sandbox_infos = {}
    provider._thread_sandboxes = {}
    provider._thread_locks = {}
    provider._last_activity = {}
    fake_info = MagicMock()
    fake_info.sandbox_url = "http://localhost:9999"
    backend_mock = MagicMock()
    backend_mock.create.return_value = fake_info
    provider._backend = backend_mock
    # Skip the readiness poll so the test never touches the network.
    with patch.object(aio_mod, "wait_for_sandbox_ready", return_value=True):
        provider._create_sandbox(None, "sandbox-2")
    backend_mock.create.assert_called_once_with(
        None,
        "sandbox-2",
        extra_mounts=None,
        extra_env=None,
    )

View File

@ -0,0 +1,89 @@
from types import SimpleNamespace
from deerflow.agents.middlewares.artifact_reconcile_middleware import (
ArtifactReconcileMiddleware,
)
from deerflow.agents.thread_state import ARTIFACTS_REPLACE_SENTINEL
def test_before_model_prunes_missing_outputs_artifacts(tmp_path):
    """Artifacts whose backing file is gone are dropped via the replace sentinel."""
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")
    middleware = ArtifactReconcileMiddleware()
    # "missing.md" is tracked in state but has no file on disk.
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": [
            "/mnt/user-data/outputs/keep.md",
            "/mnt/user-data/outputs/missing.md",
        ],
    }
    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))
    assert result == {
        "artifacts": [ARTIFACTS_REPLACE_SENTINEL, "/mnt/user-data/outputs/keep.md"]
    }
def test_before_model_returns_none_when_no_changes(tmp_path):
    """A fully consistent state produces no update (None) from before_model."""
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")
    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": ["/mnt/user-data/outputs/keep.md"],
    }
    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))
    assert result is None
def test_before_model_adds_unpresented_outputs_files(tmp_path):
    """Files on disk that state does not track are appended after kept entries."""
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")
    # "extra.md" exists on disk but is absent from the artifacts state.
    extra = outputs_dir / "extra.md"
    extra.write_text("ok", encoding="utf-8")
    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": ["/mnt/user-data/outputs/keep.md"],
    }
    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))
    assert result == {
        "artifacts": [
            ARTIFACTS_REPLACE_SENTINEL,
            "/mnt/user-data/outputs/keep.md",
            "/mnt/user-data/outputs/extra.md",
        ]
    }
def test_before_model_discovers_outputs_when_artifacts_empty(tmp_path):
    """With empty artifact state, every on-disk output file is discovered."""
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    report = outputs_dir / "report.md"
    report.write_text("ok", encoding="utf-8")
    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": [],
    }
    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))
    assert result == {
        "artifacts": [ARTIFACTS_REPLACE_SENTINEL, "/mnt/user-data/outputs/report.md"]
    }

View File

@ -147,7 +147,8 @@ def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch
)
captured: dict[str, object] = {}
fake_model = object()
fake_model = MagicMock()
fake_model._llm_type = "test-chat"
def _fake_create_chat_model(*, name=None, thinking_enabled, reasoning_effort=None):
captured["name"] = name
@ -156,10 +157,20 @@ def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch
return fake_model
monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
monkeypatch.setattr(lead_agent_module, "SummarizationMiddleware", lambda **kwargs: kwargs)
middleware = lead_agent_module._create_summarization_middleware()
assert captured["name"] == "model-masswork"
assert captured["thinking_enabled"] is False
assert middleware["model"] is fake_model
assert isinstance(middleware, lead_agent_module.DeerFlowSummarizationMiddleware)
assert middleware.model is fake_model
def test_deerflow_summarization_middleware_uses_chinese_summary_title():
middleware = lead_agent_module.DeerFlowSummarizationMiddleware(
model=MagicMock(),
trigger=("messages", 2),
)
messages = middleware._build_new_messages("旧上下文")
assert messages[0].content == "以下是目前对话的摘要:\n\n旧上下文"

View File

@ -0,0 +1,192 @@
"""Unit tests for the third-party proxy module."""
from __future__ import annotations
from app.gateway.third_party_proxy.ledger import CallLedger
from app.gateway.third_party_proxy.proxy import (
_path_matches,
jsonpath_get,
match_query_route,
match_submit_route,
)
from deerflow.config.third_party_proxy_config import (
QueryRouteConfig,
SubmitRouteConfig,
ThirdPartyProviderConfig,
)
# ---------------------------------------------------------------------------
# _path_matches
# ---------------------------------------------------------------------------


class TestPathMatches:
    """Covers exact matching, /** glob matching, and trailing-slash handling."""

    def test_exact_match(self):
        assert _path_matches("/openapi/v2/query", "/openapi/v2/query")

    def test_exact_no_match(self):
        assert not _path_matches("/openapi/v2/query", "/openapi/v2/submit")

    def test_glob_matches_prefix(self):
        assert _path_matches("/openapi/v2/vidu/submit", "/openapi/v2/**")

    def test_glob_matches_prefix_itself(self):
        # The /** glob also matches the bare prefix with no sub-path.
        assert _path_matches("/openapi/v2", "/openapi/v2/**")

    def test_glob_no_match_different_prefix(self):
        assert not _path_matches("/other/v2/submit", "/openapi/v2/**")

    def test_trailing_slashes_normalised(self):
        assert _path_matches("/openapi/v2/query/", "/openapi/v2/query")

    def test_glob_excludes_sibling_prefix(self):
        # /openapi/v2/** should not match /openapi/v2extra/foo
        assert not _path_matches("/openapi/v2extra/foo", "/openapi/v2/**")
# ---------------------------------------------------------------------------
# jsonpath_get
# ---------------------------------------------------------------------------


class TestJsonpathGet:
    """Covers dot-path extraction, missing keys, and rejected inputs."""

    def test_single_key(self):
        assert jsonpath_get({"taskId": "abc"}, "taskId") == "abc"

    def test_nested_key(self):
        data = {"usage": {"thirdPartyConsumeMoney": 1.23}}
        assert jsonpath_get(data, "usage.thirdPartyConsumeMoney") == 1.23

    def test_missing_key_returns_none(self):
        assert jsonpath_get({"foo": "bar"}, "taskId") is None

    def test_rejects_dollar_prefixed_path(self):
        # Full JSONPath syntax ($.x) is deliberately unsupported.
        assert jsonpath_get({"taskId": "abc"}, "$.taskId") is None

    def test_short_path_supported(self):
        assert jsonpath_get({"x": 1}, "x") == 1

    def test_non_dict_intermediate(self):
        data = {"usage": "not-a-dict"}
        assert jsonpath_get(data, "usage.something") is None

    def test_none_input(self):
        assert jsonpath_get(None, "x") is None
# ---------------------------------------------------------------------------
# match_submit_route / match_query_route
# ---------------------------------------------------------------------------
# Shared provider fixture: one glob submit route that carves out the query
# endpoint, plus one query route with terminal-state detection fields.
_SUBMIT_ROUTES = [
    SubmitRouteConfig(
        method="POST",
        path_pattern="/openapi/v2/**",
        exclude_path_pattern="/openapi/v2/query",
        task_id_jsonpath="taskId",
    )
]
_QUERY_ROUTES = [
    QueryRouteConfig(
        method="POST",
        path_pattern="/openapi/v2/query",
        request_task_id_jsonpath="taskId",
        status_jsonpath="status",
        success_values=["SUCCESS"],
        failure_values=["FAILED", "CANCELLED"],
        usage_jsonpath="usage.thirdPartyConsumeMoney",
    )
]
_PROVIDER_CFG = ThirdPartyProviderConfig(
    base_url="https://example.com",
    api_key_env="TEST_API_KEY",
    submit_routes=_SUBMIT_ROUTES,
    query_routes=_QUERY_ROUTES,
)
class TestMatchRoutes:
    """Routing of method/path pairs to submit vs query route configs."""

    def test_submit_matches_non_query_path(self):
        route = match_submit_route(_PROVIDER_CFG, "POST", "/openapi/v2/vidu/submit")
        assert route is not None
        assert route.task_id_jsonpath == "taskId"

    def test_submit_excluded_by_exclude_pattern(self):
        # The query endpoint is carved out of the glob submit pattern.
        route = match_submit_route(_PROVIDER_CFG, "POST", "/openapi/v2/query")
        assert route is None

    def test_submit_wrong_method(self):
        route = match_submit_route(_PROVIDER_CFG, "GET", "/openapi/v2/vidu/submit")
        assert route is None

    def test_query_matches(self):
        route = match_query_route(_PROVIDER_CFG, "POST", "/openapi/v2/query")
        assert route is not None
        assert route.status_jsonpath == "status"

    def test_query_wrong_method(self):
        route = match_query_route(_PROVIDER_CFG, "GET", "/openapi/v2/query")
        assert route is None
# ---------------------------------------------------------------------------
# CallLedger
# ---------------------------------------------------------------------------
class TestCallLedger:
    """Behaviour of the in-memory CallLedger billing/idempotency store."""

    def _make_ledger(self) -> CallLedger:
        return CallLedger()

    def test_create_and_get(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        assert record.provider == "prov"
        fetched = ledger.get(record.proxy_call_id)
        assert fetched is not None
        assert fetched.proxy_call_id == record.proxy_call_id

    def test_set_reserved(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        ledger.set_reserved(record.proxy_call_id, "frozen-123")
        fetched = ledger.get(record.proxy_call_id)
        assert fetched.frozen_id == "frozen-123"
        assert fetched.billing_state == "RESERVED"

    def test_set_running(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        ledger.set_running(record.proxy_call_id, "task-abc")
        fetched = ledger.get_by_task_id("prov", "task-abc")
        assert fetched is not None
        assert fetched.proxy_call_id == record.proxy_call_id

    def test_try_claim_finalize_once(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        # Only the first claimant may finalize; later claims are rejected.
        assert ledger.try_claim_finalize(record.proxy_call_id) is True
        assert ledger.try_claim_finalize(record.proxy_call_id) is False

    def test_is_finalized(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        assert ledger.is_finalized(record.proxy_call_id) is False
        ledger.try_claim_finalize(record.proxy_call_id)
        ledger.set_finalized(record.proxy_call_id, "SUCCESS")
        assert ledger.is_finalized(record.proxy_call_id) is True

    def test_idempotency_key_dedup(self):
        ledger = self._make_ledger()
        first = ledger.create("prov", "tid", "idem-key-1")
        duplicate = ledger.get_by_idempotency_key("prov", "idem-key-1")
        assert duplicate is not None
        assert duplicate.proxy_call_id == first.proxy_call_id

    def test_update_response(self):
        ledger = self._make_ledger()
        record = ledger.create("prov", "tid", None)
        ledger.update_response(record.proxy_call_id, {"result": "ok"})
        fetched = ledger.get(record.proxy_call_id)
        assert fetched.last_response == {"result": "ok"}

View File

@ -0,0 +1,34 @@
from deerflow.agents.thread_state import (
ARTIFACTS_REPLACE_SENTINEL,
merge_artifacts,
)
def test_merge_artifacts_default_merge_dedup():
    """Default merge appends new paths and drops duplicates, keeping order."""
    existing = ["/mnt/user-data/outputs/a.md", "/mnt/user-data/outputs/b.md"]
    incoming = ["/mnt/user-data/outputs/b.md", "/mnt/user-data/outputs/c.md"]
    merged = merge_artifacts(existing, incoming)
    expected = [
        "/mnt/user-data/outputs/a.md",
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
    ]
    assert merged == expected
def test_merge_artifacts_supports_replace_sentinel():
    """A leading replace sentinel discards existing entries; dedup still applies."""
    existing = ["/mnt/user-data/outputs/a.md", "/mnt/user-data/outputs/b.md"]
    incoming = [
        ARTIFACTS_REPLACE_SENTINEL,
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
        "/mnt/user-data/outputs/c.md",
    ]
    merged = merge_artifacts(existing, incoming)
    expected = ["/mnt/user-data/outputs/b.md", "/mnt/user-data/outputs/c.md"]
    assert merged == expected

View File

@ -49,6 +49,51 @@ billing:
# Authorization: "Bearer your-secret-token"
# X-App-Id: "deer-flow"
# ============================================================================
# Third-Party Transparent Proxy
# ============================================================================
# Exposes /api/proxy/{provider}/... and handles reserve/finalize around
# third-party async task APIs such as RunningHub.
third_party_proxy:
enabled: false
providers:
runninghub:
base_url: https://www.runninghub.cn
api_key_env: RUNNINGHUB_API_KEY
api_key_header: Authorization
api_key_prefix: "Bearer "
timeout_seconds: 30.0
frozen_type: 2
submit_routes:
- path_pattern: "/openapi/v2/**"
exclude_path_pattern: "/openapi/v2/query"
task_id_jsonpath: "taskId"
# Optional per-model billing override examples:
# frozen_amount: 10.0
# frozen_type: 2
# Example: model-specific reserve policy
# - path_pattern: "/openapi/v2/rhart-image/z-image/turbo-lora"
# task_id_jsonpath: "taskId"
# frozen_amount: 10.0
# frozen_type: 2
# - path_pattern: "/openapi/v2/vidu/text-to-video-q3-turbo"
# task_id_jsonpath: "taskId"
# frozen_amount: 50.0
# frozen_type: 2
# - path_pattern: "/openapi/v2/wan-2.7/image-edit"
# task_id_jsonpath: "taskId"
# frozen_amount: 20.0
# frozen_type: 2
query_routes:
- path_pattern: "/openapi/v2/query"
request_task_id_jsonpath: "taskId"
status_jsonpath: "status"
success_values: ["SUCCESS"]
failure_values: ["FAILED", "CANCELLED"]
usage_jsonpath: "usage.thirdPartyConsumeMoney"
# ============================================================================
# Token Usage Tracking
# ============================================================================

View File

@ -121,6 +121,10 @@ services:
UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
container_name: deer-flow-gateway
command: sh -c "cd backend && uv sync && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 --reload --reload-include='*.yaml .env' > /app/logs/gateway.log 2>&1"
ports:
# Expose to host so DooD-started sandbox containers can reach the gateway
# via host.docker.internal:8001
- "8001:8001"
volumes:
- ../backend/:/app/backend/
# Preserve the .venv built during Docker image build — mounting the full backend/
@ -149,6 +153,7 @@ services:
create_host_path: true
working_dir: /app
environment:
- TZ=Asia/Shanghai
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://langgraph:2024}
@ -206,6 +211,7 @@ services:
create_host_path: true
working_dir: /app
environment:
- TZ=Asia/Shanghai
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_ROOT}/backend/.deer-flow

View File

@ -69,7 +69,13 @@ services:
UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
container_name: deer-flow-gateway
command: sh -c "cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 --workers 2"
ports:
# Expose the gateway to the host for direct access (e.g. API clients or testing
# tools like Postman) and so DooD-started sandbox containers can reach it via
# host.docker.internal:8001.
- "8001:8001"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ${DEER_FLOW_CONFIG_PATH}:/app/backend/config.yaml:ro
- ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/backend/extensions_config.json:ro
- ../skills:/app/skills:ro
@ -91,6 +97,7 @@ services:
create_host_path: true
working_dir: /app
environment:
- TZ=Asia/Shanghai
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://langgraph:2024}
@ -119,8 +126,10 @@ services:
UV_IMAGE: ${UV_IMAGE:-ghcr.io/astral-sh/uv:0.7.20}
UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
container_name: deer-flow-langgraph
command: sh -c 'cd /app/backend && allow_blocking_flag="" && if [ "${LANGGRAPH_ALLOW_BLOCKING:-0}" = "1" ]; then allow_blocking_flag="--allow-blocking"; fi && uv run langgraph dev --no-browser ${allow_blocking_flag} --no-reload --host 0.0.0.0 --port 2024 --n-jobs-per-worker ${LANGGRAPH_JOBS_PER_WORKER:-10}'
command: sh -c 'cd /app/backend && allow_blocking_flag="" && if [ "${LANGGRAPH_ALLOW_BLOCKING:-0}" = "1" ]; then allow_blocking_flag="--allow-blocking"; fi && uv run langgraph dev --no-browser --allow-blocking --no-reload --host 0.0.0.0 --port 2024 --n-jobs-per-worker ${LANGGRAPH_JOBS_PER_WORKER:-10}'
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ${DEER_FLOW_CONFIG_PATH}:/app/backend/config.yaml:ro
- ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/backend/extensions_config.json:ro
- ${DEER_FLOW_HOME}:/app/backend/.deer-flow
@ -142,6 +151,7 @@ services:
bind:
create_host_path: true
environment:
- TZ=Asia/Shanghai
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_CONFIG_PATH=/app/backend/config.yaml

View File

@ -0,0 +1,203 @@
# Skill Proxy Migration Guide (via Gateway)
This document explains how to migrate a skill script from directly calling a third-party API to using DeerFlow Gateway's transparent proxy, with unified billing orchestration (reserve/finalize).
Applicable scenarios:
- Async third-party task skills (image/video/audio generation, etc.)
- Existing scripts that directly call providers (for example, RunningHub)
## 1. Migration Goals
1. The skill no longer calls third-party domains directly.
2. The skill no longer manages third-party API keys itself.
3. All requests go through `/api/proxy/{provider}/...`.
4. Gateway handles:
- API key injection
- Idempotent submit deduplication
- Billing reserve/finalize orchestration
- Query terminal-state detection and settlement
## 2. Core Principles
1. Keep provider names stable (for example, `runninghub`); do not encode model paths in provider names.
2. Only submit requests should carry `X-Idempotency-Key`; query requests should not.
3. Use `X-Thread-Id` as a common context header whenever available.
4. Use shorthand dot-paths in config extraction fields:
- Correct: `taskId`, `status`, `usage.thirdPartyConsumeMoney`
- Incorrect: `$.taskId`, `'$'.taskId`
## 3. Skill Script Migration Steps
The examples below assume Python + requests.
### Step 1: Add gateway config loaders
Add:
- `load_skill_env()`: loads skill-local `.env`
- `get_gateway_config()`: reads
- `DEER_FLOW_GATEWAY_URL` (default `http://host.docker.internal:8001`)
- `RUNNINGHUB_PROXY_PROVIDER` (default `runninghub`)
### Step 2: Centralize proxy headers
Implement:
- `build_proxy_headers(include_idempotency: bool = False)`
- always sets `Content-Type: application/json`
- optionally sets `X-Thread-Id`
- sets `X-Idempotency-Key` only when `include_idempotency=True`
### Step 3: Route submit calls through gateway
Replace:
- `https://www.runninghub.cn/openapi/v2/<model-path>`
With:
- `{gateway}/api/proxy/{provider}/openapi/v2/<model-path>`
And use:
- `headers=build_proxy_headers(include_idempotency=True)`
### Step 4: Route query calls through gateway
Replace:
- `https://www.runninghub.cn/openapi/v2/query`
With:
- `{gateway}/api/proxy/{provider}/openapi/v2/query`
And use:
- `headers=build_proxy_headers()`
### Step 5: Remove third-party API key logic from the skill
Remove:
- Loading `RUNNINGHUB_API_KEY` in the script
- Building `Authorization: Bearer ...` in the script
Reason: third-party credentials are injected by gateway.
### Step 6: Keep essential error handling
Recommended checks:
- `response.raise_for_status()`
- submit fallback when `taskId` is missing
- query loop timeout/failure handling
## 4. Proxy Config Migration (config.yaml)
Configure submit/query routes under `third_party_proxy.providers.<provider>`.
Example (RunningHub):
```yaml
third_party_proxy:
enabled: true
providers:
runninghub:
base_url: https://www.runninghub.cn
api_key_env: RUNNINGHUB_API_KEY
api_key_header: Authorization
api_key_prefix: "Bearer "
timeout_seconds: 30.0
frozen_amount: 10.0
frozen_type: 2
submit_routes:
- path_pattern: "/openapi/v2/rhart-image/z-image/turbo-lora"
task_id_jsonpath: "taskId"
frozen_amount: 0.03
frozen_type: 2
- path_pattern: "/openapi/v2/vidu/text-to-video-q3-turbo"
task_id_jsonpath: "taskId"
frozen_amount: 11.2
frozen_type: 2
query_routes:
- path_pattern: "/openapi/v2/query"
request_task_id_jsonpath: "taskId"
status_jsonpath: "status"
success_values: ["SUCCESS"]
failure_values: ["FAILED", "CANCELLED"]
usage_jsonpath: "usage.thirdPartyConsumeMoney"
```
Notes:
- Provider-level `frozen_amount`/`frozen_type` are defaults.
- Submit-route values can override defaults per model endpoint.
## 5. Reusable Function Template
```python
import os
from pathlib import Path
from dotenv import dotenv_values
def load_skill_env() -> dict[str, str]:
    """Load skill-local .env values, keeping only well-formed string pairs."""
    env_path = Path(__file__).parent.parent / ".env"
    # dotenv_values may yield None values for keys without '='; filter those.
    cleaned: dict[str, str] = {}
    for key, value in dotenv_values(env_path).items():
        if isinstance(key, str) and isinstance(value, str):
            cleaned[key] = value
    return cleaned
def get_gateway_config() -> tuple[str, str]:
    """Get DeerFlow gateway base URL and proxy provider name.

    Process environment wins over the skill-local .env; built-in defaults
    apply when neither source defines the variable.
    """
    env_vars = load_skill_env()

    def _lookup(name: str, default: str) -> str:
        return os.getenv(name) or env_vars.get(name, default)

    gateway_url = _lookup(
        "DEER_FLOW_GATEWAY_URL", "http://host.docker.internal:8001"
    )
    provider = _lookup("RUNNINGHUB_PROXY_PROVIDER", "runninghub")
    return gateway_url.rstrip("/"), provider
def build_proxy_headers(*, include_idempotency: bool = False) -> dict[str, str]:
    """Build common headers for gateway proxy calls.

    Always sets Content-Type; adds X-Thread-Id when THREAD_ID is in the
    environment, and a fresh X-Idempotency-Key only when requested
    (submit calls should deduplicate; query polling should not).
    """
    headers: dict[str, str] = {"Content-Type": "application/json"}
    if thread_id := os.getenv("THREAD_ID"):
        headers["X-Thread-Id"] = thread_id
    if include_idempotency:
        from uuid import uuid4

        headers["X-Idempotency-Key"] = str(uuid4())
    return headers
```
## 6. Common Pitfalls
### 6.1 Response contains taskId but extraction fails
Usually caused by wrong config path syntax:
- Wrong: `$.taskId` or `'$'.taskId`
- Right: `taskId`
### 6.2 Why query should not include X-Idempotency-Key
Idempotency keys are for submit deduplication (to avoid duplicate task creation). Query requests are polling and should not generate new idempotency keys.
### 6.3 Sandbox cannot reach gateway
For Docker-based sandbox execution, use:
- `DEER_FLOW_GATEWAY_URL=http://host.docker.internal:8001`
## 7. Validation Checklist
1. No direct third-party domain calls remain in the skill script.
2. The skill script no longer reads third-party API keys.
3. Submit uses proxy URL + `include_idempotency=True`.
4. Query uses proxy URL + `include_idempotency=False`.
5. Config extraction fields use shorthand dot-paths only.
6. Submit returns a `taskId`; subsequent queries report progress (e.g. `RUNNING`) and eventually a terminal state such as `SUCCESS`.
7. Gateway logs show submit/query route hits and finalize flow.
## 8. Reference Implementations
- `skills/public/image-generation/scripts/generate.py`
- `skills/public/video-generation/scripts/generate.py`
- `backend/app/gateway/routers/third_party.py`
- `backend/app/gateway/third_party_proxy/proxy.py`
- `third_party_proxy` section in `config.yaml`

View File

@ -194,7 +194,7 @@ async function validateTokenRegistry() {
const darkSeen = new Map();
for (const [name, value] of entries) {
if (!/^ws-[0-9a-f]{6,8}$/.test(name)) {
if (!/^ws-[a-z0-9]+(?:-[a-z0-9]+)*$/.test(name)) {
errors.push(`invalid token name "${name}"`);
}
const light = String(value.light ?? "").toLowerCase();
@ -234,7 +234,7 @@ function collectWsVarsFromBlocks(css, selectorPattern) {
const selector = block[1]?.trim() ?? "";
const body = block[2] ?? "";
if (!selectorPattern.test(selector)) continue;
for (const match of body.matchAll(/--ws-color-([0-9a-z]+)\s*:/g)) {
for (const match of body.matchAll(/--ws-color-([0-9a-z-]+)\s*:/g)) {
vars.add(`ws-${match[1]}`);
}
}
@ -246,7 +246,7 @@ function validateGlobalsCoverage(tokenEntries) {
const rootVars = collectWsVarsFromBlocks(css, /(^|,)\s*:root(\s|,|$)/);
const darkVars = collectWsVarsFromBlocks(css, /(^|,)\s*\.dark(\s|,|$)/);
const inlineVars = new Set(
[...css.matchAll(/--color-ws-([0-9a-z]+)\s*:/g)].map((match) => `ws-${match[1]}`),
[...css.matchAll(/--color-ws-([0-9a-z-]+)\s*:/g)].map((match) => `ws-${match[1]}`),
);
const tokenNames = new Set(tokenEntries.map(([name]) => name));

View File

@ -96,7 +96,7 @@ export default function ChatPage() {
sloganIndex % motivationSlogans.length
] ?? {
text: t.chatPage.defaultSlogan,
color: "var(--color-ws-333333)",
color: "var(--color-ws-fg-primary)",
};
const tickerCharacterList = useMemo(() => {
const seen = new Set<string>();
@ -357,7 +357,7 @@ export default function ChatPage() {
<Button
size="sm"
variant="ghost"
className="px-[10px] py-[5px] text-sm font-medium text-ws-150033 hover:text-ws-150033/80"
className="px-[10px] py-[5px] text-sm font-medium text-ws-base-1 hover:text-ws-base-1/80"
disabled={isStreaming}
onClick={() => setShowExitDialog(true)}
>
@ -370,7 +370,7 @@ export default function ChatPage() {
>
<path
d="M3.5 10H13.25H15.6875H16.5M3.5 10L7.5625 6M3.5 10L7.5625 14"
className="text-ws-667085"
className="text-ws-text-muted"
stroke="currentColor"
strokeWidth="1.5"
strokeLinecap="round"
@ -380,7 +380,7 @@ export default function ChatPage() {
</Button>
</div>
<div
className="flex items-center justify-center overflow-hidden text-sm font-bold font-medium whitespace-nowrap text-ws-333333"
className="flex items-center justify-center overflow-hidden text-sm font-bold font-medium whitespace-nowrap text-ws-fg-primary"
style={{
color: currentSlogan.color,
}}
@ -400,7 +400,7 @@ export default function ChatPage() {
<div className="flex items-center justify-end gap-2 overflow-hidden">
{/* 取消TodoList */}
{/* <DevTodoList
className="bg-ws-ffffff"
className="bg-ws-surface-base"
todos={thread.values.todos ?? []}
hidden={
!thread.values.todos || thread.values.todos.length === 0
@ -409,7 +409,7 @@ export default function ChatPage() {
<Button
size="sm"
variant="ghost"
className="h-full px-[10px] py-[5px] text-sm font-medium text-ws-150033 hover:text-ws-150033"
className="h-full px-[10px] py-[5px] text-sm font-medium text-ws-base-1 hover:text-ws-base-1"
>
<ListTodoIcon className="size-4" /> To-dos
</Button>
@ -420,7 +420,7 @@ export default function ChatPage() {
<Tooltip content={t.chatPage.viewArtifactsTooltip}>
<Button
data-testid="artifacts-open-button"
className="text-ws-150033 hover:text-ws-150033/80"
className="text-ws-base-1 hover:text-ws-base-1/80"
variant="ghost"
onClick={() => {
setArtifactsOpen(true);
@ -438,7 +438,7 @@ export default function ChatPage() {
className={cn(
"flex min-h-0 max-w-full grow flex-col",
showWelcomeStyle && !hasSubmitted
? "bg-ws-ffffff"
? "bg-ws-surface-base"
: "bg-background",
)}
>
@ -501,7 +501,7 @@ export default function ChatPage() {
) : (
<div className="flex size-full max-w-(--container-width-sm) flex-col justify-center">
<header className="flex shrink-0 items-center justify-between border-b">
<h2 className="h-[58px] text-sm leading-[58px] font-bold text-ws-333333">
<h2 className="h-[58px] text-sm leading-[58px] font-bold text-ws-fg-primary">
<span>{t.common.artifacts}</span>
</h2>
<Button
@ -549,7 +549,7 @@ export default function ChatPage() {
{!(showWelcomeStyle && thread.isThreadLoading) ? (
<>
<InputBox
className={cn("w-full rounded-[20px] bg-ws-fbfafc")}
className={cn("w-full rounded-[20px] bg-ws-surface-elevated")}
threadId={threadId}
showWelcomeStyle={showWelcomeStyle}
hasSubmitted={hasSubmitted}
@ -609,14 +609,14 @@ export default function ChatPage() {
</p>
<DevDialogFooter>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={() => setShowExitDialog(false)}
>
{t.common.cancel}
</Button>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={async () => {
// 如果正在生成,先终止再退出
@ -665,7 +665,7 @@ export default function ChatPage() {
</p>
<DevDialogFooter singleColumn>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={clearSelectedSkillError}
>

View File

@ -130,7 +130,7 @@ export default function WorkspaceLayout({
/* 灰色圆角矩形容器 */
"rounded-[20px] border-none",
/* 浅灰色背景 + 轻微透明 */
"bg-ws-999999! backdrop-blur-sm",
"bg-ws-overlay-neutral! backdrop-blur-sm",
/* 阴影极轻 */
"shadow-[0_2px_12px_0_rgba(0,0,0,0.18)]",
/* 内边距:宽松居中 */

View File

@ -36,7 +36,7 @@ export const Message = ({
"group flex w-full flex-col gap-2",
from === "user"
? cn("is-user ml-auto justify-end", !isFirstInSession && "mt-6")
: "is-assistant rounded-[10px] bg-ws-ffffff p-4",
: "is-assistant rounded-[10px] bg-ws-surface-base p-4",
className,
)}
{...props}

View File

@ -352,7 +352,7 @@ export function PromptInputAttachment({
{/* 删除按钮 - 右上角 */}
<button
aria-label={t.common.removeAttachment}
className="absolute top-1.5 right-1.5 z-10 flex size-4 cursor-pointer items-center justify-center rounded-sm transition-colors hover:bg-ws-ffffff/20"
className="absolute top-1.5 right-1.5 z-10 flex size-4 cursor-pointer items-center justify-center rounded-sm transition-colors hover:bg-ws-surface-base/20"
onClick={(e) => {
e.stopPropagation();
if (onRemove) {
@ -397,7 +397,7 @@ export function PromptInputAttachment({
{/* 关闭按钮 - 右上角 */}
<button
aria-label={t.common.removeAttachment}
className="absolute top-1 right-1 z-10 flex size-5 cursor-pointer items-center justify-center rounded bg-ws-ffffff/90 opacity-0 transition-opacity group-hover:opacity-100 hover:bg-ws-ffffff dark:bg-gray-800/90 dark:hover:bg-gray-800"
className="absolute top-1 right-1 z-10 flex size-5 cursor-pointer items-center justify-center rounded bg-ws-surface-base/90 opacity-0 transition-opacity group-hover:opacity-100 hover:bg-ws-surface-base dark:bg-gray-800/90 dark:hover:bg-gray-800"
onClick={(e) => {
e.stopPropagation();
if (onRemove) {

View File

@ -62,8 +62,8 @@ export const Suggestion = ({
<Button
className={cn(
"cursor-pointer rounded-full px-[20px] py-[15px] text-sm font-normal",
"border-none bg-ws-f9f8fa text-ws-667085",
"hover:bg-ws-fbfafc hover:text-ws-150033",
"border-none bg-ws-surface-subtle text-ws-text-muted",
"hover:bg-ws-surface-elevated hover:text-ws-base-1",
className,
)}
onClick={handleClick}

View File

@ -16,7 +16,7 @@ function ScrollArea({
return (
<ScrollAreaPrimitive.Root
data-slot="scroll-area"
className={cn("relative", className)}
className={cn("relative overflow-hidden", className)}
{...props}
>
<ScrollAreaPrimitive.Viewport

View File

@ -309,7 +309,7 @@ function SidebarInset({ className, ...props }: React.ComponentProps<"main">) {
<main
data-slot="sidebar-inset"
className={cn(
"relative flex w-full flex-1 flex-col",
"relative flex w-full flex-1 flex-col bg-ws-surface-base",
"md:peer-data-[variant=inset]:m-2 md:peer-data-[variant=inset]:ml-0 md:peer-data-[variant=inset]:rounded-xl md:peer-data-[variant=inset]:shadow-sm md:peer-data-[variant=inset]:peer-data-[state=collapsed]:ml-2",
className,
)}

View File

@ -430,7 +430,7 @@ export function ArtifactFileDetail({
type="single"
variant={null}
size="default"
className="h-[28px] bg-ws-ffffff"
className="bg-ws-surface-base h-[28px]"
value={viewMode}
onValueChange={(value) => {
if (value) {
@ -721,7 +721,7 @@ export function ArtifactFileDetail({
</ArtifactHeader>
<ArtifactContent>
{/* 遮挡多余的滚动顶部 */}
{/* <div className="absolute w-[calc(100%-40px)] bg-ws-ffffff z-20 h-5 rounded-t-[10px] top-[57px]"></div> */}
{/* <div className="absolute w-[calc(100%-40px)] bg-ws-surface-base z-20 h-5 rounded-t-[10px] top-[57px]"></div> */}
{previewable &&
viewMode === "preview" &&
(language === "markdown" || language === "html") && (
@ -734,7 +734,7 @@ export function ArtifactFileDetail({
/>
)}
{isCodeFile && viewMode === "code" && (
<div className="mb-0 mb-[207px] min-h-full rounded-b-[10px] bg-ws-ffffff p-0">
<div className="bg-ws-surface-base mb-0 mb-[207px] min-h-full rounded-b-[10px] p-0">
<CodeEditor
className="size-full resize-none rounded-none border-none py-[20px]"
value={displayContent ?? ""}
@ -917,7 +917,7 @@ export function ArtifactFilePreview({
if (language === "markdown") {
return (
<div
className={cn("mb-[207px] w-full bg-ws-ffffff p-[20px]")}
className={cn("bg-ws-surface-base mb-[207px] w-full p-[20px]")}
style={{ "--zoom-scale": zoomScale } as CSSProperties}
>
<Streamdown
@ -974,7 +974,7 @@ function PreviewIframe({
{...props}
/>
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/85">
<div className="bg-ws-surface-base/85 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}
@ -1046,7 +1046,7 @@ function ArtifactPdfPreview({
const pageWrapper = document.createElement("div");
pageWrapper.className =
"mx-auto mb-4 w-fit rounded-md border border-ws-e4e7ec bg-ws-ffffff p-2 shadow-sm";
"mx-auto mb-4 w-fit rounded-md border border-ws-line-default bg-ws-surface-base p-2 shadow-sm";
const canvas = document.createElement("canvas");
canvas.style.width = `${viewport.width}px`;
@ -1089,8 +1089,13 @@ function ArtifactPdfPreview({
if (error) {
return (
<div className={cn("relative overflow-auto bg-ws-f9f8fa p-4", className)}>
<div className="mx-auto grid max-w-xl gap-3 rounded-md border border-ws-e4e7ec bg-ws-ffffff p-5 text-center">
<div
className={cn(
"bg-ws-surface-subtle relative overflow-auto p-4",
className,
)}
>
<div className="border-ws-line-default bg-ws-surface-base mx-auto grid max-w-xl gap-3 rounded-md border p-5 text-center">
<p className="text-sm font-medium break-all">{fileName}</p>
<p className="text-muted-foreground text-sm">{error}</p>
<a
@ -1107,15 +1112,20 @@ function ArtifactPdfPreview({
}
return (
<div className={cn("relative overflow-auto bg-ws-f9f8fa p-4", className)}>
<div className="mb-3 text-center text-xs text-ws-667085">
<div
className={cn(
"bg-ws-surface-subtle relative overflow-auto p-4",
className,
)}
>
<div className="text-ws-text-muted mb-3 text-center text-xs">
{pageCount > 0
? t.artifactPreview.pageCountLabel(fileName, pageCount)
: fileName}
</div>
<div ref={containerRef} />
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/70">
<div className="bg-ws-surface-base/70 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}
@ -1313,7 +1323,12 @@ function ArtifactOfficePreview({
}, [canRenderPptx, t.artifactPreview.pptxDownloadHint]);
return (
<div className={cn("relative h-full overflow-hidden bg-ws-ffffff", className)}>
<div
className={cn(
"bg-ws-surface-base relative h-full overflow-hidden",
className,
)}
>
{canRenderXlsx && sheetNames.length > 0 && (
<div className="border-border flex items-center gap-1 overflow-x-auto border-b p-2">
{sheetNames.map((sheetName) => (
@ -1323,7 +1338,7 @@ function ArtifactOfficePreview({
className={cn(
"rounded px-4 py-3 text-xs whitespace-nowrap",
activeSheet === sheetName
? "bg-ws-1500331a text-foreground"
? "bg-ws-accent-tint-soft text-foreground"
: "text-muted-foreground hover:text-foreground",
)}
onClick={() => setActiveSheet(sheetName)}
@ -1357,7 +1372,7 @@ function ArtifactOfficePreview({
/>
)}
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/85">
<div className="bg-ws-surface-base/85 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}
@ -1376,7 +1391,7 @@ function ArtifactPreviewFallback({
}) {
const { t } = useI18n();
return (
<div className="absolute inset-0 z-20 grid place-content-center bg-ws-ffffff p-6 text-center">
<div className="bg-ws-surface-base absolute inset-0 z-20 grid place-content-center p-6 text-center">
<p className="text-foreground mb-2 text-sm font-medium">{fileName}</p>
<p className="text-muted-foreground mb-3 text-xs">{message}</p>
<a
@ -1400,9 +1415,23 @@ function rewriteArtifactImagePaths(
return content;
}
const encodeVirtualPath = (path: string) =>
path
.split("/")
.map((segment) => {
try {
return encodeURIComponent(decodeURIComponent(segment));
} catch {
return encodeURIComponent(segment);
}
})
.join("/");
const toArtifactUrl = (rawPath: string) => {
const normalizedPath = rawPath.startsWith("/") ? rawPath : `/${rawPath}`;
return resolveArtifactURL(normalizedPath, threadId);
const trimmedPath = rawPath.trim();
const normalizedPath = trimmedPath.startsWith("/")
? trimmedPath
: `/${trimmedPath}`;
return resolveArtifactURL(encodeVirtualPath(normalizedPath), threadId);
};
const toArtifactUrlFromRelative = (rawPath: string) => {
const trimmed = rawPath.trim();
@ -1416,17 +1445,17 @@ function rewriteArtifactImagePaths(
const absolutePath = new URL(trimmed, `file://${baseDir}`).pathname;
if (!absolutePath.startsWith("/mnt/user-data/")) return null;
return resolveArtifactURL(absolutePath, threadId);
return resolveArtifactURL(encodeVirtualPath(absolutePath), threadId);
};
const markdownRewritten = content.replace(
/!\[([^\]]*)\]\(\s*(\/?mnt\/user-data\/outputs\/[^)\s]+)\s*\)/g,
/!\[([^\]]*)\]\(\s*(\/?mnt\/user-data\/(?:outputs|uploads)\/[^)]+?)\s*\)/g,
(_full, alt, rawPath) => {
return `![${alt}](${toArtifactUrl(rawPath)})`;
},
);
const markdownRelativeRewritten = markdownRewritten.replace(
/!\[([^\]]*)\]\(\s*([^) \t]+)\s*\)/g,
/!\[([^\]]*)\]\(\s*([^)]+?)\s*\)/g,
(_full, alt, rawPath) => {
const absoluteUrl = toArtifactUrlFromRelative(rawPath);
if (!absoluteUrl) {
@ -1437,7 +1466,7 @@ function rewriteArtifactImagePaths(
);
const shorthandMarkdownRewritten = markdownRelativeRewritten.replace(
/!(?!\[)([^\n()]+?)\s*[(]\s*(\/?mnt\/user-data\/outputs\/[^)\s]+)\s*[)]/g,
/!(?!\[)([^\n()]+?)\s*[(]\s*(\/?mnt\/user-data\/(?:outputs|uploads)\/[^)]+?)\s*[)]/g,
(_full, alt, rawPath) => {
return `![${String(alt).trim()}](${toArtifactUrl(rawPath)})`;
},
@ -1446,7 +1475,7 @@ function rewriteArtifactImagePaths(
return shorthandMarkdownRewritten.replace(
/(<img\b[^>]*\bsrc\s*=\s*)(["'])([^"']+)\2/gi,
(_full, prefix, quote, rawPath) => {
if (/^\/?mnt\/user-data\/outputs\//.test(rawPath)) {
if (/^\/?mnt\/user-data\/(?:outputs|uploads)\//.test(rawPath)) {
return `${prefix}${quote}${toArtifactUrl(rawPath)}${quote}`;
}
const absoluteUrl = toArtifactUrlFromRelative(rawPath);
@ -1559,34 +1588,34 @@ function buildArtifactViewerSrcDoc({
<meta name="viewport" content="width=device-width,initial-scale=1" />
<style>
:root {
--ws-color-f8f9fb: rgb(248 249 251);
--ws-color-ffffff: rgb(255 255 255);
--ws-color-0f172a: rgb(15 23 42);
--ws-color-667085: rgb(102 112 133);
--ws-color-e4e7ec: rgb(228 231 236);
--ws-color-f4f4f5: rgb(244 244 245);
--ws-color-000000: rgb(0 0 0);
--ws-color-2563eb: rgb(37 99 235);
--bg: var(--ws-color-f8f9fb);
--panel: var(--ws-color-ffffff);
--text: var(--ws-color-0f172a);
--muted: var(--ws-color-667085);
--line: var(--ws-color-e4e7ec);
--checker: var(--ws-color-f4f4f5);
--media-bg: var(--ws-color-000000);
--link: var(--ws-color-2563eb);
--ws-color-surface-app: rgb(248 249 251);
--ws-color-surface-base: rgb(255 255 255);
--ws-color-text-primary-strong: rgb(15 23 42);
--ws-color-text-muted: rgb(102 112 133);
--ws-color-line-default: rgb(228 231 236);
--ws-color-surface-checker: rgb(244 244 245);
--ws-color-black-solid: rgb(0 0 0);
--ws-color-info-primary: rgb(37 99 235);
--bg: var(--ws-color-surface-app);
--panel: var(--ws-color-surface-base);
--text: var(--ws-color-text-primary-strong);
--muted: var(--ws-color-text-muted);
--line: var(--ws-color-line-default);
--checker: var(--ws-color-surface-checker);
--media-bg: var(--ws-color-black-solid);
--link: var(--ws-color-info-primary);
--radius: 12px;
}
@media (prefers-color-scheme: dark) {
:root {
--ws-color-f8f9fb: rgb(32 36 44);
--ws-color-ffffff: rgb(42 39 49);
--ws-color-0f172a: rgb(230 234 242);
--ws-color-667085: rgb(152 162 179);
--ws-color-e4e7ec: rgb(58 61 69);
--ws-color-f4f4f5: rgb(44 47 56);
--ws-color-000000: rgb(0 0 0);
--ws-color-2563eb: rgb(127 178 255);
--ws-color-surface-app: rgb(32 36 44);
--ws-color-surface-base: rgb(42 39 49);
--ws-color-text-primary-strong: rgb(230 234 242);
--ws-color-text-muted: rgb(152 162 179);
--ws-color-line-default: rgb(58 61 69);
--ws-color-surface-checker: rgb(44 47 56);
--ws-color-black-solid: rgb(0 0 0);
--ws-color-info-primary: rgb(127 178 255);
}
}
* { box-sizing: border-box; }
@ -1736,7 +1765,12 @@ export const ArtifactZoomSelector = ({
viewBox="0 0 16 16"
fill="none"
>
<circle cx="7.55558" cy="7.55534" r="6.16667" stroke="currentColor" />
<circle
cx="7.55558"
cy="7.55534"
r="6.16667"
stroke="currentColor"
/>
<path
d="M13.8688 15.4646C14.064 15.6598 14.3806 15.6598 14.5759 15.4646C14.7711 15.2693 14.7711 14.9527 14.5759 14.7574L14.2223 15.111L13.8688 15.4646ZM14.2223 15.111L14.5759 14.7574L11.9092 12.0908L11.5557 12.4443L11.2021 12.7979L13.8688 15.4646L14.2223 15.111Z"
fill="currentColor"

View File

@ -104,7 +104,7 @@ export function ArtifactFileList({
<div className="absolute top-5 left-4">
{getFileIcon(
file,
"size-9 stroke-1 text-ws-333333 stroke-current",
"size-9 stroke-1 text-ws-fg-primary stroke-current",
)}
</div>
<CardDescription className="pl-10 text-xs">
@ -137,7 +137,7 @@ export function ArtifactFileList({
>
<Button
variant="ghost"
className="text-muted-foreground h-full! hover:bg-transparent! hover:text-ws-333333!"
className="text-muted-foreground h-full! hover:bg-transparent! hover:text-ws-fg-primary!"
>
<DownloadIcon className="size-4" />
{t.common.download}

View File

@ -34,7 +34,7 @@ export function DevTodoList({
<DropdownMenuTrigger asChild>{trigger}</DropdownMenuTrigger>
<DropdownMenuContent
className={cn(
"z-[100] rounded-[20px] bg-ws-ffffff p-5 shadow-[0_0_20px_0_rgba(0,0,0,0.20)]",
"z-[100] rounded-[20px] bg-ws-surface-base p-5 shadow-[0_0_20px_0_rgba(0,0,0,0.20)]",
className,
)}
align="start"

View File

@ -157,7 +157,7 @@ export function IframeTestPanel() {
<div
ref={panelRef}
className={cn(
"fixed z-[9999] w-72 rounded-xl border border-violet-200 bg-ws-ffffff/95 shadow-2xl backdrop-blur-sm",
"fixed z-[9999] w-72 rounded-xl border border-violet-200 bg-ws-surface-base/95 shadow-2xl backdrop-blur-sm",
position ? "top-0 left-0" : "bottom-24 left-3",
)}
style={position ? { left: position.x, top: position.y } : undefined}

View File

@ -149,7 +149,7 @@ function WorkspaceToolButton({
return (
<PromptInputButton
className={cn(
"group h-full rounded-[10px] p-[10px]! hover:bg-ws-f9f8fa hover:text-ws-8e47f0",
"group h-full rounded-[10px] p-[10px]! hover:bg-ws-surface-subtle hover:text-ws-interactive-primary",
className,
)}
{...props}
@ -889,12 +889,12 @@ export function InputBox({
textareaRef.current?.focus();
}}
>
<DropdownMenuLabel className="p-0 text-sm text-ws-333333">
<DropdownMenuLabel className="p-0 text-sm text-ws-fg-primary">
{t.inputBox.addReference}
</DropdownMenuLabel>
<DropdownMenuSeparator className="mx-0 mt-[20px] mb-0" />
<DropdownMenuGroup className="flex max-h-[480px] flex-col gap-[10px] px-0 pt-[20px]">
<ScrollArea className="h-[480px]" data-state="hidden">
<DropdownMenuGroup className="flex min-h-0 flex-col gap-[10px] px-0">
<ScrollArea className="h-[320px] pt-[20px]" hideScrollbar={false}>
{filteredMentionCandidates.map((candidate, index) => {
const detail = [candidate.typeLabel, candidate.pathTail]
.filter(Boolean)
@ -1234,7 +1234,7 @@ function AddAttachmentsButton({ className }: { className?: string }) {
return (
<Tooltip content={t.inputBox.addAttachments}>
<WorkspaceToolButton
className={cn("text-ws-150033 hover:text-ws-8e47f0", className)}
className={cn("text-ws-base-1 hover:text-ws-interactive-primary", className)}
onClick={() => attachments.openFileDialog()}
>
<svg
@ -1272,7 +1272,7 @@ function HistoryButton({
return (
<Tooltip content={t.inputBox.history}>
<WorkspaceToolButton
className={cn("text-ws-150033 hover:text-ws-8e47f0", className)}
className={cn("text-ws-base-1 hover:text-ws-interactive-primary", className)}
onClick={() =>
router.replace(`/workspace/chats/${threadId}?is_chatting=true`)
}
@ -1330,7 +1330,7 @@ function IframeSkillDialogButton({
>
<svg
xmlns="http://www.w3.org/2000/svg"
className="size-4 text-ws-150033 transition-[color] duration-200 group-hover:text-ws-8e47f0"
className="size-4 text-ws-base-1 transition-[color] duration-200 group-hover:text-ws-interactive-primary"
viewBox="0 0 12 16"
fill="none"
>

View File

@ -1,6 +1,6 @@
"use client";
import { CheckIcon, CopyIcon } from "lucide-react";
import { CheckIcon, CopyIcon, DownloadIcon } from "lucide-react";
import { useCallback, useMemo, useState, type MouseEvent } from "react";
import type {
AnchorHTMLAttributes,
@ -56,27 +56,57 @@ function toMarkdownTable(data: TableData): string {
return [headerLine, dividerLine, ...rowLines].join("\n");
}
/**
 * Quote a single CSV cell when it contains characters that would break
 * row structure (comma, double quote, CR, or LF), doubling any embedded
 * double quotes per RFC 4180. Plain cells pass through unchanged.
 */
function escapeCsvCell(value: string): string {
  const needsQuoting = /[",\n\r]/.test(value);
  if (!needsQuoting) {
    return value;
  }
  const doubledQuotes = value.replaceAll('"', '""');
  return `"${doubledQuotes}"`;
}
/**
 * Serialize parsed table data to CSV text: header row first, then body
 * rows, one line per row. Returns an empty string when the table has no
 * header cells (nothing meaningful to export).
 */
function toCsvTable(data: TableData): string {
  if (data.headers.length === 0) {
    return "";
  }
  const allRows = [data.headers, ...data.rows];
  const csvLines = allRows.map((cells) => cells.map(escapeCsvCell).join(","));
  return csvLines.join("\n");
}
/**
 * Trigger a browser download of `content` as a CSV file named `filename`.
 *
 * A UTF-8 BOM is prepended so spreadsheet apps (notably Excel) detect the
 * encoding correctly. The temporary object URL is revoked in a `finally`
 * block so it cannot leak even if the synthetic click throws.
 */
function downloadCsvFile(content: string, filename: string) {
  const blob = new Blob(["\uFEFF", content], {
    type: "text/csv;charset=utf-8",
  });
  const url = URL.createObjectURL(blob);
  try {
    const anchor = document.createElement("a");
    anchor.href = url;
    anchor.download = filename;
    anchor.click();
  } finally {
    // Revoke unconditionally — the original leaked the URL on a throwing click.
    URL.revokeObjectURL(url);
  }
}
function MarkdownTable({
className,
children,
isLoading,
copyLabel,
downloadLabel,
...props
}: ComponentPropsWithoutRef<"table"> & {
isLoading: boolean;
copyLabel: string;
downloadLabel: string;
}) {
const [copied, setCopied] = useState(false);
const getTableData = useCallback((event: MouseEvent<HTMLButtonElement>) => {
const wrapper = event.currentTarget.closest(
'[data-streamdown="table-wrapper"]',
);
const table = wrapper?.querySelector("table");
if (!(table instanceof HTMLTableElement)) return null;
return parseTableData(table);
}, []);
const handleCopy = useCallback(
async (event: MouseEvent<HTMLButtonElement>) => {
const wrapper = event.currentTarget.closest(
'[data-streamdown="table-wrapper"]',
);
const table = wrapper?.querySelector("table");
if (!(table instanceof HTMLTableElement)) return;
const data = getTableData(event);
if (!data) return;
const markdown = toMarkdownTable(parseTableData(table));
const markdown = toMarkdownTable(data);
if (!markdown) return;
try {
@ -87,7 +117,20 @@ function MarkdownTable({
// no-op
}
},
[],
[getTableData],
);
const handleDownload = useCallback(
(event: MouseEvent<HTMLButtonElement>) => {
const data = getTableData(event);
if (!data) return;
const csv = toCsvTable(data);
if (!csv) return;
downloadCsvFile(csv, "table.csv");
},
[getTableData],
);
return (
@ -97,14 +140,21 @@ function MarkdownTable({
>
<div className="flex items-center justify-end gap-1">
<button
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all disabled:cursor-not-allowed disabled:opacity-50"
disabled={isLoading}
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all"
onClick={handleCopy}
title={copyLabel}
type="button"
>
{copied ? <CheckIcon size={14} /> : <CopyIcon size={14} />}
</button>
<button
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all"
onClick={handleDownload}
title={downloadLabel}
type="button"
>
<DownloadIcon size={14} />
</button>
</div>
<div className="overflow-x-auto">
<table
@ -165,7 +215,7 @@ export function MarkdownContent({
<MarkdownTable
className={className}
copyLabel={t.clipboard.copyToClipboard}
isLoading={isLoading}
downloadLabel={t.common.download}
{...props}
>
{children}
@ -173,7 +223,12 @@ export function MarkdownContent({
),
...componentsFromProps,
};
}, [componentsFromProps, isLoading, t.clipboard.copyToClipboard]);
}, [
componentsFromProps,
isLoading,
t.clipboard.copyToClipboard,
t.common.download,
]);
if (!content) return null;

View File

@ -114,7 +114,7 @@ export function MessageGroup({
);
return (
<ChainOfThought
className={cn("w-full gap-2 rounded-lg bg-ws-ffffff", className)}
className={cn("w-full gap-2 rounded-lg bg-ws-surface-base", className)}
open={true}
>
{aboveLastToolCallSteps.length > 0 && (

View File

@ -27,8 +27,11 @@ import {
import { resolveArtifactURL } from "@/core/artifacts/utils";
import { useI18n } from "@/core/i18n/hooks";
import {
extractSummaryTemplateBody,
extractContentFromMessage,
normalizeHumanMessageDisplayText,
extractReasoningContentFromMessage,
isSummaryTemplateMessage,
parseUploadedFiles,
stripPriorityHintSuffix,
stripUploadedFilesTag,
@ -139,6 +142,7 @@ function MessageContent_({
isLoading?: boolean;
threadId: string;
}) {
const { t } = useI18n();
const rehypePlugins = useRehypeSplitWordsIntoSpans(isLoading);
const isHuman = message.type === "human";
const components = useMemo(
@ -167,12 +171,23 @@ function MessageContent_({
const contentToDisplay = useMemo(() => {
if (isHuman) {
return rawContent
? stripPriorityHintSuffix(stripUploadedFilesTag(rawContent))
: "";
if (!rawContent) {
return "";
}
const cleaned = stripPriorityHintSuffix(stripUploadedFilesTag(rawContent));
return normalizeHumanMessageDisplayText(cleaned);
}
return rawContent ?? "";
}, [rawContent, isHuman]);
const isSummaryMessage = useMemo(
() => isHuman && isSummaryTemplateMessage(message),
[isHuman, message],
);
const summaryBody = useMemo(
() => (isSummaryMessage ? extractSummaryTemplateBody(message) : ""),
[isSummaryMessage, message],
);
const [isSummaryExpanded, setIsSummaryExpanded] = useState(false);
const filesList =
files && files.length > 0 && threadId ? (
@ -208,6 +223,7 @@ function MessageContent_({
}
if (isHuman) {
const shouldRenderSummaryCollapse = isSummaryMessage && summaryBody;
const messageResponse = contentToDisplay ? (
<AIElementMessageResponse
remarkPlugins={humanMessagePlugins.remarkPlugins}
@ -220,8 +236,37 @@ function MessageContent_({
return (
<div className={cn("ml-auto flex flex-col gap-2", className)}>
{filesList}
{shouldRenderSummaryCollapse && (
<details
className="w-fit max-w-full rounded-lg border"
open={isSummaryExpanded}
onToggle={(event) => {
setIsSummaryExpanded(event.currentTarget.open);
}}
>
<summary className="text-muted-foreground cursor-pointer px-3 py-2 text-xs select-none">
{isSummaryExpanded
? t.toolCalls.collapseContent
: t.toolCalls.expandContent}
</summary>
<AIElementMessageContent className="w-fit border-t">
<AIElementMessageResponse
remarkPlugins={humanMessagePlugins.remarkPlugins}
rehypePlugins={humanMessagePlugins.rehypePlugins}
components={components}
>
{summaryBody}
</AIElementMessageResponse>
</AIElementMessageContent>
</details>
)}
{messageResponse && (
<AIElementMessageContent className="w-fit">
<AIElementMessageContent
className={cn(
"w-fit",
shouldRenderSummaryCollapse ? "hidden" : undefined,
)}
>
{messageResponse}
</AIElementMessageContent>
)}

View File

@ -225,7 +225,7 @@ export function MessageList({
{showScrollToBottomButton && (
<ConversationScrollButton
className={cn(
"z-20 rounded-full border bg-ws-ffffff/90 shadow-sm backdrop-blur-sm",
"z-20 rounded-full border bg-ws-surface-base/90 shadow-sm backdrop-blur-sm",
scrollButtonClassName,
)}
title={t.chats.scrollToBottom}

View File

@ -157,7 +157,7 @@ function ThemePreviewCard({
"relative overflow-hidden rounded-md border text-xs transition-colors",
previewMode === "dark"
? "border-neutral-800 bg-neutral-900 text-neutral-200"
: "border-slate-200 bg-ws-ffffff text-slate-900",
: "border-slate-200 bg-ws-surface-base text-slate-900",
)}
>
<div className="border-border/50 flex items-center gap-2 border-b px-3 py-2">

View File

@ -14,19 +14,19 @@ export function StreamingIndicator({
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100",
)}
/>
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100 [animation-delay:0.2s]",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100 [animation-delay:0.2s]",
)}
/>
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100 [animation-delay:0.4s]",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100 [animation-delay:0.4s]",
)}
/>
</div>

View File

@ -39,7 +39,7 @@ export function TodoList({
return (
<div
className={cn(
"flex h-fit w-full origin-bottom translate-y-4 flex-col overflow-hidden rounded-t-xl border border-b-0 bg-ws-ffffff backdrop-blur-sm transition-all duration-200 ease-out",
"flex h-fit w-full origin-bottom translate-y-4 flex-col overflow-hidden rounded-t-xl border border-b-0 bg-ws-surface-base backdrop-blur-sm transition-all duration-200 ease-out",
hidden ? "pointer-events-none translate-y-8 opacity-0" : "",
className,
)}

View File

@ -43,7 +43,7 @@ export function WorkspaceHeader({ className }: { className?: string }) {
) : (
<div className="text-primary ml-2 cursor-default font-serif">
{/* TODO: 测试标识 */}
XClaw <span className="text-sm text-ws-000000c5">v3.2.8</span>
XClaw <span className="text-sm text-ws-text-subtle-strong">v3.2.8</span>
</div>
)}
<SidebarTrigger />

View File

@ -134,13 +134,13 @@ export const zhCN: Translations = {
suggestion: "GPT-Image-2",
prompt: "编写[项目/功能]的需求文档,包含功能描述、用户故事和验收标准。",
icon: CompassIcon,
children: [{ id: "6107", name: "GPT-Image-2" }],
children: [{ id: "6130", name: "GPT-Image-2" }],
},
{
suggestion: "音乐生成",
prompt: "编写[产品/功能]的使用指南,包含操作步骤、注意事项和常见问题。",
icon: GraduationCapIcon,
children: [{ id: "6126", name: "旋律制造机" }],
children: [{ id: "6133", name: "音乐生成器" }],
},
{
suggestion: "excel数据处理",

View File

@ -26,6 +26,47 @@ type MessageGroup =
| AssistantClarificationGroup
| AssistantSubagentGroup;
const SUMMARY_MESSAGE_TITLES = [
"Here is a summary of the conversation to date",
"以下是目前对话的摘要",
];
// Escape every ECMAScript regex metacharacter so `value` matches literally
// when interpolated into a RegExp source string.
function escapeRegExp(value: string) {
  const metaChars = /[.*+?^${}()|[\]\\]/g;
  return value.replace(metaChars, "\\$&");
}
/**
 * Return the known summary-template title that `content` starts with, or
 * null when the message is not a summary template.
 *
 * A title may be followed by an optional colon and must be terminated by a
 * newline or end-of-input. Both ASCII ":" and full-width ":" are accepted,
 * since SUMMARY_MESSAGE_TITLES contains a Chinese variant that is typically
 * punctuated with the full-width form.
 */
function getSummaryTemplateTitle(content: string) {
  for (const title of SUMMARY_MESSAGE_TITLES) {
    const titlePattern = new RegExp(
      `^\\s*${escapeRegExp(title)}\\s*[::]?(?:\\n|$)`,
      "i",
    );
    if (titlePattern.test(content)) {
      return title;
    }
  }
  return null;
}
/**
 * True when `message` is a human message whose text begins with one of the
 * known summary-template titles.
 */
export function isSummaryTemplateMessage(message: Message) {
  const isHuman = message.type === "human";
  return (
    isHuman && getSummaryTemplateTitle(extractTextFromMessage(message)) !== null
  );
}
/**
 * Strip the summary-template title (plus any trailing colon and blank
 * lines) from a summary message, returning just the summary body text.
 * When no known title is found the full text is returned unchanged.
 *
 * Both ASCII ":" and full-width ":" after the title are stripped, matching
 * the Chinese title variant in SUMMARY_MESSAGE_TITLES.
 */
export function extractSummaryTemplateBody(message: Message) {
  const content = extractTextFromMessage(message);
  const title = getSummaryTemplateTitle(content);
  if (!title) {
    return content;
  }
  const titlePrefixPattern = new RegExp(
    `^\\s*${escapeRegExp(title)}\\s*[::]?\\s*\\n*`,
    "i",
  );
  return content.replace(titlePrefixPattern, "").trim();
}
export function groupMessages<T>(
messages: Message[],
mapper: (group: MessageGroup) => T,
@ -57,6 +98,9 @@ export function groupMessages<T>(
}
if (message.type === "human") {
// if (isSummaryTemplateMessage(message)) {
// continue;
// }
groups.push({ id: message.id, type: "human", messages: [message] });
continue;
}
@ -364,6 +408,20 @@ export function stripPriorityHintSuffix(content: string): string {
.trim();
}
/**
 * Normalize human-authored message text for markdown rendering.
 *
 * - Decode literal "\n" escape sequences into real line breaks.
 * - Normalize CR and CRLF line endings to LF.
 * - Insert a blank line before numbered items like "1)" so markdown
 *   renders each as its own paragraph.
 * - Collapse runs of three or more newlines down to a single blank line,
 *   then trim surrounding whitespace.
 */
export function normalizeHumanMessageDisplayText(content: string): string {
  const decoded = content.replace(/\\n/g, "\n").replace(/\r\n?/g, "\n");
  const paragraphized = decoded.replace(/\n(?=\d+[)]\s*)/g, "\n\n");
  return paragraphized.replace(/\n{3,}/g, "\n\n").trim();
}
export function parseUploadedFiles(content: string): FileInMessage[] {
// Match <uploaded_files>...</uploaded_files> tag
const uploadedFilesRegex = /<uploaded_files>([\s\S]*?)<\/uploaded_files>/;

View File

@ -201,24 +201,24 @@
--color-sidebar-border: var(--sidebar-border);
--color-sidebar-ring: var(--sidebar-ring);
--color-tooltip-background: var(--tooltip-background);
--color-ws-150033: var(--ws-color-150033);
--color-ws-333333: var(--ws-color-333333);
--color-ws-f9f8fa: var(--ws-color-f9f8fa);
--color-ws-fbfafc: var(--ws-color-fbfafc);
--color-ws-8e47f0: var(--ws-color-8e47f0);
--color-ws-e4e7ec: var(--ws-color-e4e7ec);
--color-ws-667085: var(--ws-color-667085);
--color-ws-a3a1a1: var(--ws-color-a3a1a1);
--color-ws-999999: var(--ws-color-999999);
--color-ws-000000c5: var(--ws-color-000000c5);
--color-ws-00000015: var(--ws-color-00000015);
--color-ws-1500331a: var(--ws-color-1500331a);
--color-ws-f8f9fb: var(--ws-color-f8f9fb);
--color-ws-ffffff: var(--ws-color-ffffff);
--color-ws-0f172a: var(--ws-color-0f172a);
--color-ws-f4f4f5: var(--ws-color-f4f4f5);
--color-ws-000000: var(--ws-color-000000);
--color-ws-2563eb: var(--ws-color-2563eb);
--color-ws-base-1: var(--ws-color-base-1);
--color-ws-fg-primary: var(--ws-color-fg-primary);
--color-ws-surface-subtle: var(--ws-color-surface-subtle);
--color-ws-surface-elevated: var(--ws-color-surface-elevated);
--color-ws-interactive-primary: var(--ws-color-interactive-primary);
--color-ws-line-default: var(--ws-color-line-default);
--color-ws-text-muted: var(--ws-color-text-muted);
--color-ws-icon-muted: var(--ws-color-icon-muted);
--color-ws-overlay-neutral: var(--ws-color-overlay-neutral);
--color-ws-text-subtle-strong: var(--ws-color-text-subtle-strong);
--color-ws-border-hairline: var(--ws-color-border-hairline);
--color-ws-accent-tint-soft: var(--ws-color-accent-tint-soft);
--color-ws-surface-app: var(--ws-color-surface-app);
--color-ws-surface-base: var(--ws-color-surface-base);
--color-ws-text-primary-strong: var(--ws-color-text-primary-strong);
--color-ws-surface-checker: var(--ws-color-surface-checker);
--color-ws-black-solid: var(--ws-color-black-solid);
--color-ws-info-primary: var(--ws-color-info-primary);
--animate-aurora: aurora 8s ease-in-out infinite alternate;
@keyframes aurora {
@ -307,24 +307,24 @@
--sidebar-border: oklch(0.922 0.0098 87.47);
--sidebar-ring: oklch(0.708 0 0);
--tooltip-background: #00000066;
--ws-color-150033: #150033;
--ws-color-333333: #333333;
--ws-color-f9f8fa: #f9f8fa;
--ws-color-fbfafc: #fbfafc;
--ws-color-8e47f0: #8e47f0;
--ws-color-e4e7ec: #e4e7ec;
--ws-color-667085: #667085;
--ws-color-a3a1a1: #a3a1a1;
--ws-color-999999: #999999;
--ws-color-000000c5: #000000c5;
--ws-color-00000015: #00000015;
--ws-color-1500331a: #1500331a;
--ws-color-f8f9fb: #f8f9fb;
--ws-color-ffffff: #ffffff;
--ws-color-0f172a: #0f172a;
--ws-color-f4f4f5: #f4f4f5;
--ws-color-000000: #000000;
--ws-color-2563eb: #2563eb;
--ws-color-base-1: #150033;
--ws-color-fg-primary: #333333;
--ws-color-surface-subtle: #f9f8fa;
--ws-color-surface-elevated: #fbfafc;
--ws-color-interactive-primary: #8e47f0;
--ws-color-line-default: #e4e7ec;
--ws-color-text-muted: #667085;
--ws-color-icon-muted: #a3a1a1;
--ws-color-overlay-neutral: #999999;
--ws-color-text-subtle-strong: #000000c5;
--ws-color-border-hairline: #00000015;
--ws-color-accent-tint-soft: #1500331a;
--ws-color-surface-app: #f8f9fb;
--ws-color-surface-base: #ffffff;
--ws-color-text-primary-strong: #0f172a;
--ws-color-surface-checker: #f4f4f5;
--ws-color-black-solid: #000000;
--ws-color-info-primary: #2563eb;
}
.dark {
@ -360,24 +360,24 @@
--sidebar-border: oklch(1 0 0 / 10%);
--sidebar-ring: oklch(0.556 0 0);
--tooltip-background: oklch(0.85 0 0);
--ws-color-150033: #f4ebff;
--ws-color-333333: #f5f5f5;
--ws-color-f9f8fa: #1f1f1f;
--ws-color-fbfafc: #24222a;
--ws-color-8e47f0: #b987ff;
--ws-color-e4e7ec: #3b3f48;
--ws-color-667085: #98a2b3;
--ws-color-a3a1a1: #d0d0d0;
--ws-color-999999: #c2c2c2;
--ws-color-000000c5: #ffffffcc;
--ws-color-00000015: #ffffff1f;
--ws-color-1500331a: #f4ebff24;
--ws-color-f8f9fb: #20242c;
--ws-color-ffffff: #2a2731;
--ws-color-0f172a: #e6eaf2;
--ws-color-f4f4f5: #2c2f38;
--ws-color-000000: #000000;
--ws-color-2563eb: #7fb2ff;
--ws-color-base-1: #f4ebff;
--ws-color-fg-primary: #f5f5f5;
--ws-color-surface-subtle: #1f1f1f;
--ws-color-surface-elevated: #24222a;
--ws-color-interactive-primary: #b987ff;
--ws-color-line-default: #3b3f48;
--ws-color-text-muted: #98a2b3;
--ws-color-icon-muted: #d0d0d0;
--ws-color-overlay-neutral: #c2c2c2;
--ws-color-text-subtle-strong: #ffffffcc;
--ws-color-border-hairline: #ffffff1f;
--ws-color-accent-tint-soft: #f4ebff24;
--ws-color-surface-app: #20242c;
--ws-color-surface-base: #2a2731;
--ws-color-text-primary-strong: #e6eaf2;
--ws-color-surface-checker: #2c2f38;
--ws-color-black-solid: #000000;
--ws-color-info-primary: #7fb2ff;
font-weight: 300;
}

View File

@ -1,25 +1,36 @@
/**
* Workspace Token
*
*
* 1) Token UI 使 `bg-ws-surface-base`
* 2) `src/styles/globals.css` CSS
* - `:root` `.dark` `--ws-color-<token-suffix>`
* - `@theme inline` `--color-ws-<token-suffix>`
* 3) `scripts/color-guard.mjs`
*/
export type WorkspaceColorToken = {
light: `#${string}`;
dark: `#${string}`;
};
// Token 键保持语义化且稳定:`ws-<role>-<level>`(不要再使用原始 hex 命名)。
export const WORKSPACE_COLOR_TOKENS = {
"ws-150033": { light: "#150033", dark: "#f4ebff" },
"ws-333333": { light: "#333333", dark: "#f5f5f5" },
"ws-f9f8fa": { light: "#f9f8fa", dark: "#1f1f1f" },
"ws-fbfafc": { light: "#fbfafc", dark: "#24222a" },
"ws-8e47f0": { light: "#8e47f0", dark: "#b987ff" },
"ws-e4e7ec": { light: "#e4e7ec", dark: "#3b3f48" },
"ws-667085": { light: "#667085", dark: "#98a2b3" },
"ws-a3a1a1": { light: "#a3a1a1", dark: "#d0d0d0" },
"ws-999999": { light: "#999999", dark: "#c2c2c2" },
"ws-000000c5": { light: "#000000c5", dark: "#ffffffcc" },
"ws-00000015": { light: "#00000015", dark: "#ffffff1f" },
"ws-1500331a": { light: "#1500331a", dark: "#f4ebff24" },
"ws-f8f9fb": { light: "#f8f9fb", dark: "#20242c" },
"ws-ffffff": { light: "#ffffff", dark: "#2a2731" },
"ws-0f172a": { light: "#0f172a", dark: "#e6eaf2" },
"ws-f4f4f5": { light: "#f4f4f5", dark: "#2c2f38" },
"ws-000000": { light: "#000000", dark: "#000000" },
"ws-2563eb": { light: "#2563eb", dark: "#7fb2ff" },
"ws-base-1": { light: "#150033", dark: "#f4ebff" },
"ws-fg-primary": { light: "#333333", dark: "#f5f5f5" },
"ws-surface-subtle": { light: "#f9f8fa", dark: "#1f1f1f" },
"ws-surface-elevated": { light: "#fbfafc", dark: "#24222a" },
"ws-interactive-primary": { light: "#8e47f0", dark: "#b987ff" },
"ws-line-default": { light: "#e4e7ec", dark: "#3b3f48" },
"ws-text-muted": { light: "#667085", dark: "#98a2b3" },
"ws-icon-muted": { light: "#a3a1a1", dark: "#d0d0d0" },
"ws-overlay-neutral": { light: "#999999", dark: "#c2c2c2" },
"ws-text-subtle-strong": { light: "#000000c5", dark: "#ffffffcc" },
"ws-border-hairline": { light: "#00000015", dark: "#ffffff1f" },
"ws-accent-tint-soft": { light: "#1500331a", dark: "#f4ebff24" },
"ws-surface-app": { light: "#f8f9fb", dark: "#20242c" },
"ws-surface-base": { light: "#ffffff", dark: "#2a2731" },
"ws-text-primary-strong": { light: "#0f172a", dark: "#e6eaf2" },
"ws-surface-checker": { light: "#f4f4f5", dark: "#2c2f38" },
"ws-black-solid": { light: "#000000", dark: "#000000" },
"ws-info-primary": { light: "#2563eb", dark: "#7fb2ff" },
} as const satisfies Record<string, WorkspaceColorToken>;

View File

@ -135,6 +135,28 @@ detect_sandbox_mode() {
fi
}
# ── restart ───────────────────────────────────────────────────────────────────
# `restart <service>`: restart a single compose service. The environment
# variables exported below mirror the `up` command path so the restarted
# container sees identical configuration.
if [ "$CMD" = "restart" ]; then
SERVICE="$2"
if [ -z "$SERVICE" ]; then
echo -e "${RED}✗ Usage: $0 restart {service}${NC}"
exit 1
fi
# Keep these exports in sync with `up`; defaults apply only when unset.
export DEER_FLOW_HOME="${DEER_FLOW_HOME:-$REPO_ROOT/backend/.deer-flow}"
export DEER_FLOW_CONFIG_PATH="${DEER_FLOW_CONFIG_PATH:-$DEER_FLOW_HOME/config.yaml}"
export DEER_FLOW_EXTENSIONS_CONFIG_PATH="${DEER_FLOW_EXTENSIONS_CONFIG_PATH:-$DEER_FLOW_HOME/extensions_config.json}"
export DEER_FLOW_DOCKER_SOCKET="${DEER_FLOW_DOCKER_SOCKET:-/var/run/docker.sock}"
export DEER_FLOW_REPO_ROOT="${DEER_FLOW_REPO_ROOT:-$REPO_ROOT}"
export BETTER_AUTH_SECRET="${BETTER_AUTH_SECRET:-placeholder}"
# TODO(review): optionally validate $SERVICE against the compose service list
# before calling restart; an unknown name currently surfaces as a compose error.
# shellcheck disable=SC2086
echo -e "${BLUE}Restarting service: $SERVICE${NC}"
"${COMPOSE_CMD[@]}" restart "$SERVICE"
exit 0
fi
# ── down ──────────────────────────────────────────────────────────────────────
if [ "$CMD" = "down" ]; then