feat: 新增生产环境脚本

This commit is contained in:
肖应宇 2026-04-01 18:49:56 +08:00
parent 8b0c69327b
commit ae4916ce6f
13 changed files with 1612 additions and 192 deletions

256
Makefile
View File

@ -1,18 +1,34 @@
# DeerFlow - Unified Development Environment
.PHONY: help config check install dev stop clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway
.PHONY: help config config-upgrade check install dev dev-daemon start stop up down clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway
PYTHON ?= python
BASH ?= bash
# Detect OS for Windows compatibility
ifeq ($(OS),Windows_NT)
SHELL := cmd.exe
endif
help:
@echo "DeerFlow Development Commands:"
@echo " make config - Generate local config files (aborts if config already exists)"
@echo " make config-upgrade - Merge new fields from config.example.yaml into config.yaml"
@echo " make check - Check if all required tools are installed"
@echo " make install - Install all dependencies (frontend + backend)"
@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
@echo " make dev - Start all services (frontend + backend + nginx on localhost:2026)"
@echo " make dev - Start all services in development mode (with hot-reloading)"
@echo " make dev-daemon - Start all services in background (daemon mode)"
@echo " make start - Start all services in production mode (optimized, no hot-reloading)"
@echo " make stop - Stop all running services"
@echo " make clean - Clean up processes and temporary files"
@echo ""
@echo "Docker Production Commands:"
@echo " make up - Build and start production Docker services (localhost:2026)"
@echo " make down - Stop and remove production Docker containers"
@echo ""
@echo "Docker Development Commands:"
@echo " make docker-init - Build the custom k3s image (with pre-cached sandbox image)"
@echo " make docker-init - Pull the sandbox image"
@echo " make docker-start - Start Docker services (mode-aware from config.yaml, localhost:2026)"
@echo " make docker-stop - Stop Docker development services"
@echo " make docker-logs - View Docker development logs"
@ -20,84 +36,14 @@ help:
@echo " make docker-logs-gateway - View Docker gateway logs"
config:
@test -f config.yaml || cp config.example.yaml config.yaml
@test -f .env || cp .env.example .env
@test -f frontend/.env || cp frontend/.env.example frontend/.env
@$(PYTHON) ./scripts/configure.py
config-upgrade:
@./scripts/config-upgrade.sh
# Check required tools
check:
@echo "=========================================="
@echo " Checking Required Dependencies"
@echo "=========================================="
@echo ""
@FAILED=0; \
echo "Checking Node.js..."; \
if command -v node >/dev/null 2>&1; then \
NODE_VERSION=$$(node -v | sed 's/v//'); \
NODE_MAJOR=$$(echo $$NODE_VERSION | cut -d. -f1); \
if [ $$NODE_MAJOR -ge 22 ]; then \
echo " ✓ Node.js $$NODE_VERSION (>= 22 required)"; \
else \
echo " ✗ Node.js $$NODE_VERSION found, but version 22+ is required"; \
echo " Install from: https://nodejs.org/"; \
FAILED=1; \
fi; \
else \
echo " ✗ Node.js not found (version 22+ required)"; \
echo " Install from: https://nodejs.org/"; \
FAILED=1; \
fi; \
echo ""; \
echo "Checking pnpm..."; \
if command -v pnpm >/dev/null 2>&1; then \
PNPM_VERSION=$$(pnpm -v); \
echo " ✓ pnpm $$PNPM_VERSION"; \
else \
echo " ✗ pnpm not found"; \
echo " Install: npm install -g pnpm"; \
echo " Or visit: https://pnpm.io/installation"; \
FAILED=1; \
fi; \
echo ""; \
echo "Checking uv..."; \
if command -v uv >/dev/null 2>&1; then \
UV_VERSION=$$(uv --version | awk '{print $$2}'); \
echo " ✓ uv $$UV_VERSION"; \
else \
echo " ✗ uv not found"; \
echo " Install: curl -LsSf https://astral.sh/uv/install.sh | sh"; \
echo " Or visit: https://docs.astral.sh/uv/getting-started/installation/"; \
FAILED=1; \
fi; \
echo ""; \
echo "Checking nginx..."; \
if command -v nginx >/dev/null 2>&1; then \
NGINX_VERSION=$$(nginx -v 2>&1 | awk -F'/' '{print $$2}'); \
echo " ✓ nginx $$NGINX_VERSION"; \
else \
echo " ✗ nginx not found"; \
echo " macOS: brew install nginx"; \
echo " Ubuntu: sudo apt install nginx"; \
echo " Or visit: https://nginx.org/en/download.html"; \
FAILED=1; \
fi; \
echo ""; \
if [ $$FAILED -eq 0 ]; then \
echo "=========================================="; \
echo " ✓ All dependencies are installed!"; \
echo "=========================================="; \
echo ""; \
echo "You can now run:"; \
echo " make install - Install project dependencies"; \
echo " make dev - Start development server"; \
else \
echo "=========================================="; \
echo " ✗ Some dependencies are missing"; \
echo "=========================================="; \
echo ""; \
echo "Please install the missing tools and run 'make check' again."; \
exit 1; \
fi
@$(PYTHON) ./scripts/check.py
# Install all dependencies
install:
@ -135,86 +81,38 @@ setup-sandbox:
fi; \
if command -v docker >/dev/null 2>&1; then \
echo "Pulling image using Docker..."; \
docker pull "$$IMAGE"; \
echo ""; \
echo "✓ Sandbox image pulled successfully"; \
if docker pull "$$IMAGE"; then \
echo ""; \
echo "✓ Sandbox image pulled successfully"; \
else \
echo ""; \
echo "⚠ Failed to pull sandbox image (this is OK for local sandbox mode)"; \
fi; \
else \
echo "✗ Neither Docker nor Apple Container is available"; \
echo " Please install Docker: https://docs.docker.com/get-docker/"; \
exit 1; \
fi
# Start all services
# Start all services in development mode (with hot-reloading)
dev:
@echo "Stopping existing services if any..."
@-pkill -f "langgraph dev" 2>/dev/null || true
@-pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
@-pkill -f "next dev" 2>/dev/null || true
@-nginx -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) -s quit 2>/dev/null || true
@sleep 1
@-pkill -9 nginx 2>/dev/null || true
@-./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
@sleep 1
@echo ""
@echo "=========================================="
@echo " Starting DeerFlow Development Server"
@echo "=========================================="
@echo ""
@echo "Services starting up..."
@echo " → Backend: LangGraph + Gateway"
@echo " → Frontend: Next.js"
@echo " → Nginx: Reverse Proxy"
@echo ""
@cleanup() { \
echo ""; \
echo "Shutting down services..."; \
pkill -f "langgraph dev" 2>/dev/null || true; \
pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true; \
pkill -f "next dev" 2>/dev/null || true; \
nginx -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) -s quit 2>/dev/null || true; \
sleep 1; \
pkill -9 nginx 2>/dev/null || true; \
echo "Cleaning up sandbox containers..."; \
./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true; \
echo "✓ All services stopped"; \
exit 0; \
}; \
trap cleanup INT TERM; \
mkdir -p logs; \
echo "Starting LangGraph server..."; \
cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --no-reload > ../logs/langgraph.log 2>&1 & \
sleep 3; \
echo "✓ LangGraph server started on localhost:2024"; \
echo "Starting Gateway API..."; \
cd backend && uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8001 > ../logs/gateway.log 2>&1 & \
sleep 2; \
echo "✓ Gateway API started on localhost:8001"; \
echo "Starting Frontend..."; \
cd frontend && pnpm run dev > ../logs/frontend.log 2>&1 & \
sleep 3; \
echo "✓ Frontend started on localhost:3000"; \
echo "Starting Nginx reverse proxy..."; \
mkdir -p logs && nginx -g 'daemon off;' -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) > logs/nginx.log 2>&1 & \
sleep 2; \
echo "✓ Nginx started on localhost:2026"; \
echo ""; \
echo "=========================================="; \
echo " DeerFlow is ready!"; \
echo "=========================================="; \
echo ""; \
echo " 🌐 Application: http://localhost:2026"; \
echo " 📡 API Gateway: http://localhost:2026/api/*"; \
echo " 🤖 LangGraph: http://localhost:2026/api/langgraph/*"; \
echo ""; \
echo " 📋 Logs:"; \
echo " - LangGraph: logs/langgraph.log"; \
echo " - Gateway: logs/gateway.log"; \
echo " - Frontend: logs/frontend.log"; \
echo " - Nginx: logs/nginx.log"; \
echo ""; \
echo "Press Ctrl+C to stop all services"; \
echo ""; \
wait
ifeq ($(OS),Windows_NT)
@call scripts\run-with-git-bash.cmd ./scripts/serve.sh --dev
else
@./scripts/serve.sh --dev
endif
# Start all services in production mode (with optimizations)
start:
ifeq ($(OS),Windows_NT)
@call scripts\run-with-git-bash.cmd ./scripts/serve.sh --prod
else
@./scripts/serve.sh --prod
endif
# Start all services in daemon mode (background)
dev-daemon:
@./scripts/start-daemon.sh
# Stop all services
stop:
@ -222,6 +120,9 @@ stop:
@-pkill -f "langgraph dev" 2>/dev/null || true
@-pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
@-pkill -f "next dev" 2>/dev/null || true
@-pkill -f "next start" 2>/dev/null || true
@-pkill -f "next-server" 2>/dev/null || true
@-pkill -f "next-server" 2>/dev/null || true
@-nginx -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) -s quit 2>/dev/null || true
@sleep 1
@-pkill -9 nginx 2>/dev/null || true
@ -232,6 +133,8 @@ stop:
# Clean up
clean: stop
@echo "Cleaning up..."
@-rm -rf backend/.deer-flow 2>/dev/null || true
@-rm -rf backend/.langgraph_api 2>/dev/null || true
@-rm -rf logs/*.log 2>/dev/null || true
@echo "✓ Cleanup complete"
@ -255,51 +158,20 @@ docker-stop:
docker-logs:
@./scripts/docker.sh logs
# View Docker development logs
docker-logs-frontend:
@./scripts/docker.sh logs --frontend
docker-logs-gateway:
@./scripts/docker.sh logs --gateway
# ==========================================
# Docker Publish Command
# Production Docker Commands
# ==========================================
# Usage: make docker-publish VER=[version] SVC=[service name] [PUSH=1]
# Example: make docker-publish VER=v2.0.20251202 SVC=frontend PUSH=0
docker-publish:
@if [ -z "$(VER)" ]; then \
echo "✗ VER is required (e.g. v2.0.20251202)"; \
exit 1; \
fi
@if [ -z "$(SVC)" ]; then \
echo "✗ SVC is required (frontend, gateway, langgraph)"; \
exit 1; \
fi
@echo "=========================================="
@echo " Building Docker image for $(SVC)"
@echo "=========================================="
@IMAGE=registry.xueai.art/deerflow/deerflow-$(SVC):$(VER); \
DOCKERFILE=$$(case "$(SVC)" in \
frontend) echo "frontend/Dockerfile";; \
gateway) echo "backend/Dockerfile";; \
langgraph) echo "backend/Dockerfile";; \
*) echo "";; \
esac); \
if [ -z "$$DOCKERFILE" ]; then \
echo "✗ Unknown SVC: $(SVC)"; \
exit 1; \
fi; \
docker build -f $$DOCKERFILE -t $$IMAGE .; \
if [ $$? -ne 0 ]; then \
echo "✗ Docker build failed"; \
exit 1; \
fi; \
if [ "$(PUSH)" = "0" ]; then \
echo "✓ Docker image $$IMAGE built successfully (not pushed)"; \
else \
docker push $$IMAGE; \
if [ $$? -ne 0 ]; then \
echo "✗ Docker push failed"; \
exit 1; \
fi; \
echo "✓ Docker image $$IMAGE built and pushed successfully"; \
fi
# Build and start production services
up:
@./scripts/deploy.sh
# Stop and remove production containers
down:
@./scripts/deploy.sh down

145
scripts/check.py Normal file
View File

@ -0,0 +1,145 @@
#!/usr/bin/env python3
"""Cross-platform dependency checker for DeerFlow."""
from __future__ import annotations
import shutil
import subprocess
import sys
from typing import Optional
def configure_stdio() -> None:
    """Switch stdout/stderr to UTF-8 so the Unicode status markers (✓/✗)
    render correctly on Windows consoles."""
    for name in ("stdout", "stderr"):
        candidate = getattr(sys, name, None)
        reconfigure = getattr(candidate, "reconfigure", None)
        if reconfigure is None:
            # Stream was replaced by something without reconfigure(); skip it.
            continue
        try:
            reconfigure(encoding="utf-8", errors="replace")
        except (OSError, ValueError):
            # Best-effort: a closed or exotic stream must not abort the check.
            continue
def run_command(command: list[str]) -> Optional[str]:
    """Execute *command* (argv list, no shell) and return its trimmed output.

    Returns stripped stdout, falling back to stripped stderr when stdout is
    empty (some tools, e.g. ``nginx -v``, print to stderr). Returns ``None``
    when the executable is missing or exits non-zero.
    """
    try:
        completed = subprocess.run(
            command,
            capture_output=True,
            text=True,
            check=True,
            shell=False,
        )
    except (OSError, subprocess.CalledProcessError):
        return None
    output = completed.stdout.strip()
    return output if output else completed.stderr.strip()
def parse_node_major(version_text: str) -> Optional[int]:
    """Extract the major version from a ``node -v`` string such as ``v22.1.0``.

    A single leading ``v`` is tolerated. Returns ``None`` when the leading
    component is not purely numeric.
    """
    cleaned = version_text.strip()
    if cleaned.startswith("v"):
        cleaned = cleaned[1:]
    head, _, _ = cleaned.partition(".")
    return int(head) if head.isdigit() else None
def main() -> int:
    """Probe for Node.js (22+), pnpm, uv and nginx; print a report.

    Returns 0 when every tool is found with an acceptable version, 1 otherwise
    (the Makefile `check` target relies on this exit code).
    """
    configure_stdio()
    print("==========================================")
    print(" Checking Required Dependencies")
    print("==========================================")
    print()
    failed = False
    print("Checking Node.js...")
    node_path = shutil.which("node")
    if node_path:
        node_version = run_command(["node", "-v"])
        if node_version:
            major = parse_node_major(node_version)
            if major is not None and major >= 22:
                print(f" ✓ Node.js {node_version.lstrip('v')} (>= 22 required)")
            else:
                print(
                    f" ✗ Node.js {node_version.lstrip('v')} found, but version 22+ is required"
                )
                print(" Install from: https://nodejs.org/")
                failed = True
        else:
            # `node` exists on PATH but printing its version failed.
            print(" ✗ Unable to determine Node.js version")
            print(" Install from: https://nodejs.org/")
            failed = True
    else:
        print(" ✗ Node.js not found (version 22+ required)")
        print(" Install from: https://nodejs.org/")
        failed = True
    print()
    print("Checking pnpm...")
    # On Windows pnpm is installed as pnpm.cmd; prefer it so subprocess.run
    # (shell=False) can launch it directly.
    pnpm_executable = shutil.which("pnpm.cmd") or shutil.which("pnpm")
    if pnpm_executable:
        pnpm_version = run_command([pnpm_executable, "-v"])
        if pnpm_version:
            print(f" ✓ pnpm {pnpm_version}")
        else:
            print(" ✗ Unable to determine pnpm version")
            failed = True
    else:
        print(" ✗ pnpm not found")
        print(" Install: npm install -g pnpm")
        print(" Or visit: https://pnpm.io/installation")
        failed = True
    print()
    print("Checking uv...")
    if shutil.which("uv"):
        uv_version_text = run_command(["uv", "--version"])
        if uv_version_text:
            # Last whitespace-separated token; assumes `uv --version` ends with
            # the version number — TODO confirm for builds that append metadata.
            uv_version = uv_version_text.split()[-1]
            print(f" ✓ uv {uv_version}")
        else:
            print(" ✗ Unable to determine uv version")
            failed = True
    else:
        print(" ✗ uv not found")
        print(" Visit the official installation guide for your platform:")
        print(" https://docs.astral.sh/uv/getting-started/installation/")
        failed = True
    print()
    print("Checking nginx...")
    if shutil.which("nginx"):
        # `nginx -v` writes "nginx version: nginx/X.Y.Z" to stderr;
        # run_command falls back to stderr, and we take the part after "/".
        nginx_version_text = run_command(["nginx", "-v"])
        if nginx_version_text and "/" in nginx_version_text:
            nginx_version = nginx_version_text.split("/", 1)[1]
            print(f" ✓ nginx {nginx_version}")
        else:
            # Found on PATH — treat an unparsable version as success.
            print(" ✓ nginx (version unknown)")
    else:
        print(" ✗ nginx not found")
        print(" macOS: brew install nginx")
        print(" Ubuntu: sudo apt install nginx")
        print(" Windows: use WSL for local mode or use Docker mode")
        print(" Or visit: https://nginx.org/en/download.html")
        failed = True
    print()
    if not failed:
        print("==========================================")
        print(" ✓ All dependencies are installed!")
        print("==========================================")
        print()
        print("You can now run:")
        print(" make install - Install project dependencies")
        print(" make config - Generate local config files")
        print(" make dev - Start development server")
        print(" make start - Start production server")
        return 0
    print("==========================================")
    print(" ✗ Some dependencies are missing")
    print("==========================================")
    print()
    print("Please install the missing tools and run 'make check' again.")
    return 1


if __name__ == "__main__":
    sys.exit(main())

83
scripts/check.sh Executable file
View File

@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Dependency preflight for DeerFlow: Node.js 22+, pnpm, uv and nginx.
# Prints a report and exits non-zero when anything is missing.
# NOTE(review): the Makefile `check` target now invokes scripts/check.py;
# this shell variant appears redundant — confirm which one is canonical
# and keep them in sync (check.py also prints a "make start" hint).
set -euo pipefail

echo "=========================================="
echo " Checking Required Dependencies"
echo "=========================================="
echo ""

FAILED=0

echo "Checking Node.js..."
if command -v node >/dev/null 2>&1; then
  # Strip the leading "v" from e.g. "v22.1.0", then take the major component.
  NODE_VERSION=$(node -v | sed 's/v//')
  NODE_MAJOR=$(echo "$NODE_VERSION" | cut -d. -f1)
  if [ "$NODE_MAJOR" -ge 22 ]; then
    echo " ✓ Node.js $NODE_VERSION (>= 22 required)"
  else
    echo " ✗ Node.js $NODE_VERSION found, but version 22+ is required"
    echo " Install from: https://nodejs.org/"
    FAILED=1
  fi
else
  echo " ✗ Node.js not found (version 22+ required)"
  echo " Install from: https://nodejs.org/"
  FAILED=1
fi

echo ""
echo "Checking pnpm..."
if command -v pnpm >/dev/null 2>&1; then
  PNPM_VERSION=$(pnpm -v)
  echo " ✓ pnpm $PNPM_VERSION"
else
  echo " ✗ pnpm not found"
  echo " Install: npm install -g pnpm"
  echo " Or visit: https://pnpm.io/installation"
  FAILED=1
fi

echo ""
echo "Checking uv..."
if command -v uv >/dev/null 2>&1; then
  # "uv X.Y.Z" — second field is the version.
  UV_VERSION=$(uv --version | awk '{print $2}')
  echo " ✓ uv $UV_VERSION"
else
  echo " ✗ uv not found"
  echo " Install: curl -LsSf https://astral.sh/uv/install.sh | sh"
  echo " Or visit: https://docs.astral.sh/uv/getting-started/installation/"
  FAILED=1
fi

echo ""
echo "Checking nginx..."
if command -v nginx >/dev/null 2>&1; then
  # `nginx -v` writes "nginx version: nginx/X.Y.Z" to stderr; 2>&1 captures it
  # and awk takes everything after the "/".
  NGINX_VERSION=$(nginx -v 2>&1 | awk -F'/' '{print $2}')
  echo " ✓ nginx $NGINX_VERSION"
else
  echo " ✗ nginx not found"
  echo " macOS: brew install nginx"
  echo " Ubuntu: sudo apt install nginx"
  echo " Or visit: https://nginx.org/en/download.html"
  FAILED=1
fi

echo ""
if [ "$FAILED" -eq 0 ]; then
  echo "=========================================="
  echo " ✓ All dependencies are installed!"
  echo "=========================================="
  echo ""
  echo "You can now run:"
  echo " make install - Install project dependencies"
  echo " make config - Generate local config files"
  echo " make dev - Start development server"
  echo " make start - Start production server"
else
  echo "=========================================="
  echo " ✗ Some dependencies are missing"
  echo "=========================================="
  echo ""
  echo "Please install the missing tools and run 'make check' again."
  exit 1
fi

146
scripts/config-upgrade.sh Executable file
View File

@ -0,0 +1,146 @@
#!/usr/bin/env bash
#
# config-upgrade.sh - Upgrade config.yaml to match config.example.yaml
#
# 1. Runs version-specific migrations (value replacements, renames, etc.)
# 2. Merges missing fields from the example into the user config
# 3. Backs up config.yaml to config.yaml.bak before modifying.
set -e

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
EXAMPLE="$REPO_ROOT/config.example.yaml"

# Resolve config.yaml location: env var > backend/ > repo root
if [ -n "$DEER_FLOW_CONFIG_PATH" ] && [ -f "$DEER_FLOW_CONFIG_PATH" ]; then
  CONFIG="$DEER_FLOW_CONFIG_PATH"
elif [ -f "$REPO_ROOT/backend/config.yaml" ]; then
  CONFIG="$REPO_ROOT/backend/config.yaml"
elif [ -f "$REPO_ROOT/config.yaml" ]; then
  CONFIG="$REPO_ROOT/config.yaml"
else
  CONFIG=""
fi

if [ ! -f "$EXAMPLE" ]; then
  echo "✗ config.example.yaml not found at $EXAMPLE"
  exit 1
fi

# First run: no user config anywhere — just seed from the example and stop.
if [ -z "$CONFIG" ]; then
  echo "No config.yaml found — creating from example..."
  cp "$EXAMPLE" "$REPO_ROOT/config.yaml"
  echo "✓ config.yaml created. Please review and set your API keys."
  exit 0
fi

# Use inline Python to do migrations + recursive merge with PyYAML
# (run from backend/ via uv so PyYAML comes from the project venv).
# NOTE(review): $CONFIG/$EXAMPLE are interpolated directly into the Python
# source below — paths containing a single quote would break it; confirm
# that is acceptable for this repo's layouts.
cd "$REPO_ROOT/backend" && uv run python3 -c "
import sys, shutil, copy, re
from pathlib import Path

import yaml

config_path = Path('$CONFIG')
example_path = Path('$EXAMPLE')

with open(config_path, encoding='utf-8') as f:
    raw_text = f.read()
user = yaml.safe_load(raw_text) or {}

with open(example_path, encoding='utf-8') as f:
    example = yaml.safe_load(f) or {}

user_version = user.get('config_version', 0)
example_version = example.get('config_version', 0)

if user_version >= example_version:
    print(f'✓ config.yaml is already up to date (version {user_version}).')
    sys.exit(0)

print(f'Upgrading config.yaml: version {user_version} → {example_version}')
print()

# ── Migrations ───────────────────────────────────────────────────────────
# Each migration targets a specific version upgrade.
# 'replacements': list of (old_string, new_string) applied to the raw YAML text.
# This handles value changes that a dict merge cannot catch.
MIGRATIONS = {
    1: {
        'description': 'Rename src.* module paths to deerflow.*',
        'replacements': [
            ('src.community.', 'deerflow.community.'),
            ('src.sandbox.', 'deerflow.sandbox.'),
            ('src.models.', 'deerflow.models.'),
            ('src.tools.', 'deerflow.tools.'),
        ],
    },
    # Future migrations go here:
    # 2: {
    #     'description': '...',
    #     'replacements': [('old', 'new')],
    # },
}

# Apply migrations in order for versions (user_version, example_version]
migrated = []
for version in range(user_version + 1, example_version + 1):
    migration = MIGRATIONS.get(version)
    if not migration:
        continue
    desc = migration.get('description', f'Migration to v{version}')
    for old, new in migration.get('replacements', []):
        if old in raw_text:
            raw_text = raw_text.replace(old, new)
            migrated.append(f'{old} → {new}')

# Re-parse after text migrations
user = yaml.safe_load(raw_text) or {}

if migrated:
    print(f'Applied {len(migrated)} migration(s):')
    for m in migrated:
        print(f' ~ {m}')
    print()

# ── Merge missing fields ─────────────────────────────────────────────────
added = []

def merge(target, source, path=''):
    \"\"\"Recursively merge source into target, adding missing keys only.\"\"\"
    for key, value in source.items():
        key_path = f'{path}.{key}' if path else key
        if key not in target:
            target[key] = copy.deepcopy(value)
            added.append(key_path)
        elif isinstance(value, dict) and isinstance(target[key], dict):
            merge(target[key], value, key_path)

merge(user, example)

# Always update config_version
user['config_version'] = example_version

# ── Write ─────────────────────────────────────────────────────────────────
backup = config_path.with_suffix('.yaml.bak')
shutil.copy2(config_path, backup)
print(f'Backed up to {backup.name}')

with open(config_path, 'w', encoding='utf-8') as f:
    yaml.dump(user, f, default_flow_style=False, allow_unicode=True, sort_keys=False)

if added:
    print(f'Added {len(added)} new field(s):')
    for a in added:
        print(f' + {a}')
if not migrated and not added:
    print('No changes needed (version bumped only).')

print()
print(f'✓ config.yaml upgraded to version {example_version}.')
print(' Please review the changes and set any new required values.')
"

58
scripts/configure.py Normal file
View File

@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""Cross-platform config bootstrap script for DeerFlow."""
from __future__ import annotations
import shutil
import sys
from pathlib import Path
def copy_if_missing(src: Path, dst: Path) -> None:
    """Seed *dst* from template *src*; never overwrite an existing *dst*.

    Raises FileNotFoundError when the template itself is absent. Parent
    directories of *dst* are created as needed.
    """
    if not dst.exists():
        if not src.exists():
            raise FileNotFoundError(f"Missing template file: {src}")
        dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(src, dst)
def main() -> int:
    """Bootstrap local config files from their templates.

    Copies config.example.yaml → config.yaml, .env.example → .env and
    frontend/.env.example → frontend/.env at the repo root. Aborts (exit 1)
    if any config variant already exists so user settings are never clobbered.
    """
    # scripts/ lives one level below the repo root.
    project_root = Path(__file__).resolve().parent.parent
    # Any of these present means the user already configured the project.
    existing_config = [
        project_root / "config.yaml",
        project_root / "config.yml",
        project_root / "configure.yml",
    ]
    if any(path.exists() for path in existing_config):
        print(
            "Error: configuration file already exists "
            "(config.yaml/config.yml/configure.yml). Aborting."
        )
        return 1
    try:
        copy_if_missing(project_root / "config.example.yaml", project_root / "config.yaml")
        copy_if_missing(project_root / ".env.example", project_root / ".env")
        copy_if_missing(
            project_root / "frontend" / ".env.example",
            project_root / "frontend" / ".env",
        )
    except (FileNotFoundError, OSError) as exc:
        print("Error while generating configuration files:")
        print(f" {exc}")
        if isinstance(exc, PermissionError):
            print(
                "Hint: Check file permissions and ensure the files are not "
                "read-only or locked by another process."
            )
        return 1
    print("✓ Configuration files generated")
    return 0


if __name__ == "__main__":
    sys.exit(main())

212
scripts/deploy.sh Executable file
View File

@ -0,0 +1,212 @@
#!/usr/bin/env bash
#
# deploy.sh - Build and start (or stop) DeerFlow production services
#
# Usage:
# deploy.sh [up] — build images and start containers (default)
# deploy.sh down — stop and remove containers
#
# Must be run from the repo root directory.
set -e

CMD="${1:-up}"
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
DOCKER_DIR="$REPO_ROOT/docker"
# Fixed compose project name (-p) so repeated runs manage the same stack.
COMPOSE_CMD=(docker compose -p deer-flow -f "$DOCKER_DIR/docker-compose.yaml")

# ── Colors ────────────────────────────────────────────────────────────────────
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# ── DEER_FLOW_HOME ────────────────────────────────────────────────────────────
# Persistent state directory (also holds the generated auth secret below).
if [ -z "$DEER_FLOW_HOME" ]; then
  export DEER_FLOW_HOME="$REPO_ROOT/backend/.deer-flow"
fi
echo -e "${BLUE}DEER_FLOW_HOME=$DEER_FLOW_HOME${NC}"
mkdir -p "$DEER_FLOW_HOME"

# ── DEER_FLOW_REPO_ROOT (for skills host path in DooD) ───────────────────────
export DEER_FLOW_REPO_ROOT="$REPO_ROOT"

# ── config.yaml ───────────────────────────────────────────────────────────────
if [ -z "$DEER_FLOW_CONFIG_PATH" ]; then
  export DEER_FLOW_CONFIG_PATH="$REPO_ROOT/config.yaml"
fi
if [ ! -f "$DEER_FLOW_CONFIG_PATH" ]; then
  # Try to seed from repo (config.example.yaml is the canonical template)
  if [ -f "$REPO_ROOT/config.example.yaml" ]; then
    cp "$REPO_ROOT/config.example.yaml" "$DEER_FLOW_CONFIG_PATH"
    echo -e "${GREEN}✓ Seeded config.example.yaml → $DEER_FLOW_CONFIG_PATH${NC}"
    echo -e "${YELLOW}⚠ config.yaml was seeded from the example template.${NC}"
    echo " Edit $DEER_FLOW_CONFIG_PATH and set your model API keys before use."
  else
    echo -e "${RED}✗ No config.yaml found.${NC}"
    echo " Run 'make config' from the repo root to generate one,"
    echo " then set the required model API keys."
    exit 1
  fi
else
  echo -e "${GREEN}✓ config.yaml: $DEER_FLOW_CONFIG_PATH${NC}"
fi

# ── extensions_config.json ───────────────────────────────────────────────────
if [ -z "$DEER_FLOW_EXTENSIONS_CONFIG_PATH" ]; then
  export DEER_FLOW_EXTENSIONS_CONFIG_PATH="$REPO_ROOT/extensions_config.json"
fi
if [ ! -f "$DEER_FLOW_EXTENSIONS_CONFIG_PATH" ]; then
  if [ -f "$REPO_ROOT/extensions_config.json" ]; then
    cp "$REPO_ROOT/extensions_config.json" "$DEER_FLOW_EXTENSIONS_CONFIG_PATH"
    echo -e "${GREEN}✓ Seeded extensions_config.json → $DEER_FLOW_EXTENSIONS_CONFIG_PATH${NC}"
  else
    # Create a minimal empty config so the gateway doesn't fail on startup
    echo '{"mcpServers":{},"skills":{}}' > "$DEER_FLOW_EXTENSIONS_CONFIG_PATH"
    echo -e "${YELLOW}⚠ extensions_config.json not found, created empty config at $DEER_FLOW_EXTENSIONS_CONFIG_PATH${NC}"
  fi
else
  echo -e "${GREEN}✓ extensions_config.json: $DEER_FLOW_EXTENSIONS_CONFIG_PATH${NC}"
fi

# ── BETTER_AUTH_SECRET ───────────────────────────────────────────────────────
# Required by Next.js in production. Generated once and persisted so auth
# sessions survive container restarts.
_secret_file="$DEER_FLOW_HOME/.better-auth-secret"
if [ -z "$BETTER_AUTH_SECRET" ]; then
  if [ -f "$_secret_file" ]; then
    # export and assignment split to avoid masking the command's exit status.
    export BETTER_AUTH_SECRET
    BETTER_AUTH_SECRET="$(cat "$_secret_file")"
    echo -e "${GREEN}✓ BETTER_AUTH_SECRET loaded from $_secret_file${NC}"
  else
    export BETTER_AUTH_SECRET
    BETTER_AUTH_SECRET="$(python3 -c 'import secrets; print(secrets.token_hex(32))')"
    echo "$BETTER_AUTH_SECRET" > "$_secret_file"
    chmod 600 "$_secret_file"
    echo -e "${GREEN}✓ BETTER_AUTH_SECRET generated → $_secret_file${NC}"
  fi
fi

# ── detect_sandbox_mode ───────────────────────────────────────────────────────
# Echoes "provisioner", "aio" or "local" based on the sandbox section of
# config.yaml. The awk programs scan only inside the top-level `sandbox:`
# block (in_sandbox flag resets at the next non-indented, non-comment line).
detect_sandbox_mode() {
  local sandbox_use=""
  local provisioner_url=""
  [ -f "$DEER_FLOW_CONFIG_PATH" ] || { echo "local"; return; }
  sandbox_use=$(awk '
    /^[[:space:]]*sandbox:[[:space:]]*$/ { in_sandbox=1; next }
    in_sandbox && /^[^[:space:]#]/ { in_sandbox=0 }
    in_sandbox && /^[[:space:]]*use:[[:space:]]*/ {
      line=$0; sub(/^[[:space:]]*use:[[:space:]]*/, "", line); print line; exit
    }
  ' "$DEER_FLOW_CONFIG_PATH")
  provisioner_url=$(awk '
    /^[[:space:]]*sandbox:[[:space:]]*$/ { in_sandbox=1; next }
    in_sandbox && /^[^[:space:]#]/ { in_sandbox=0 }
    in_sandbox && /^[[:space:]]*provisioner_url:[[:space:]]*/ {
      line=$0; sub(/^[[:space:]]*provisioner_url:[[:space:]]*/, "", line); print line; exit
    }
  ' "$DEER_FLOW_CONFIG_PATH")
  if [[ "$sandbox_use" == *"deerflow.community.aio_sandbox:AioSandboxProvider"* ]]; then
    if [ -n "$provisioner_url" ]; then
      echo "provisioner"
    else
      echo "aio"
    fi
  else
    echo "local"
  fi
}

# ── down ──────────────────────────────────────────────────────────────────────
if [ "$CMD" = "down" ]; then
  # Set minimal env var defaults so docker compose can parse the file without
  # warning about unset variables that appear in volume specs.
  export DEER_FLOW_HOME="${DEER_FLOW_HOME:-$REPO_ROOT/backend/.deer-flow}"
  export DEER_FLOW_CONFIG_PATH="${DEER_FLOW_CONFIG_PATH:-$DEER_FLOW_HOME/config.yaml}"
  export DEER_FLOW_EXTENSIONS_CONFIG_PATH="${DEER_FLOW_EXTENSIONS_CONFIG_PATH:-$DEER_FLOW_HOME/extensions_config.json}"
  export DEER_FLOW_DOCKER_SOCKET="${DEER_FLOW_DOCKER_SOCKET:-/var/run/docker.sock}"
  export DEER_FLOW_REPO_ROOT="${DEER_FLOW_REPO_ROOT:-$REPO_ROOT}"
  export BETTER_AUTH_SECRET="${BETTER_AUTH_SECRET:-placeholder}"
  "${COMPOSE_CMD[@]}" down
  exit 0
fi

# ── Banner ────────────────────────────────────────────────────────────────────
echo "=========================================="
echo " DeerFlow Production Deployment"
echo "=========================================="
echo ""

# ── Step 1: Detect sandbox mode ──────────────────────────────────────────────
sandbox_mode="$(detect_sandbox_mode)"
echo -e "${BLUE}Sandbox mode: $sandbox_mode${NC}"
if [ "$sandbox_mode" = "provisioner" ]; then
  # Empty services list = let the provisioner profile select what to run.
  services=""
  extra_args="--profile provisioner"
else
  services="frontend gateway langgraph nginx"
  extra_args=""
fi

# ── DEER_FLOW_DOCKER_SOCKET ───────────────────────────────────────────────────
if [ -z "$DEER_FLOW_DOCKER_SOCKET" ]; then
  export DEER_FLOW_DOCKER_SOCKET="/var/run/docker.sock"
fi
if [ "$sandbox_mode" != "local" ]; then
  if [ ! -S "$DEER_FLOW_DOCKER_SOCKET" ]; then
    echo -e "${RED}⚠ Docker socket not found at $DEER_FLOW_DOCKER_SOCKET${NC}"
    echo " AioSandboxProvider (DooD) will not work."
    exit 1
  else
    echo -e "${GREEN}✓ Docker socket: $DEER_FLOW_DOCKER_SOCKET${NC}"
  fi
fi
echo ""

# ── Step 2: Build and start ───────────────────────────────────────────────────
echo "Building images and starting containers..."
echo ""
# $extra_args and $services stay unquoted on purpose: word splitting expands
# them into separate compose arguments (empty values vanish).
# shellcheck disable=SC2086
"${COMPOSE_CMD[@]}" $extra_args up --build -d --remove-orphans $services
echo ""
echo "=========================================="
echo " DeerFlow is running!"
echo "=========================================="
echo ""
echo " 🌐 Application: http://localhost:${PORT:-2026}"
echo " 📡 API Gateway: http://localhost:${PORT:-2026}/api/*"
echo " 🤖 LangGraph: http://localhost:${PORT:-2026}/api/langgraph/*"
echo ""
echo " Manage:"
echo " make down — stop and remove containers"
echo " make docker-logs — view logs"
echo ""

View File

@ -0,0 +1,166 @@
#!/usr/bin/env python3
"""Export Claude Code OAuth credentials from macOS Keychain on purpose.
This helper is intentionally manual. DeerFlow runtime does not probe Keychain.
Use this script when you want to bridge an existing Claude Code login into an
environment variable or an exported credentials file for DeerFlow.
"""
from __future__ import annotations
import argparse
import json
import os
import platform
import shlex
import subprocess
import sys
import tempfile
from hashlib import sha256
from pathlib import Path
from typing import Any
def claude_code_oauth_file_suffix() -> str:
    """Return the service-name suffix Claude Code derives from its OAuth env vars.

    Precedence: custom OAuth URL, then local bridge, then staging; default is
    an empty suffix (production).
    """
    if os.getenv("CLAUDE_CODE_CUSTOM_OAUTH_URL"):
        return "-custom-oauth"
    if os.getenv("USE_LOCAL_OAUTH") or os.getenv("LOCAL_BRIDGE"):
        return "-local-oauth"
    return "-staging-oauth" if os.getenv("USE_STAGING_OAUTH") else ""
def default_service_name() -> str:
    """Compute the macOS Keychain service name Claude Code stores credentials under.

    Base name is "Claude Code<suffix>-credentials", where <suffix> reflects the
    OAuth environment (custom/local/staging). When CLAUDE_CONFIG_DIR is set,
    the first 8 hex chars of the SHA-256 of the expanded directory path are
    appended — presumably mirroring Claude Code's per-config-dir namespacing;
    TODO confirm against the Claude Code CLI source.
    """
    service = f"Claude Code{claude_code_oauth_file_suffix()}-credentials"
    config_dir = os.getenv("CLAUDE_CONFIG_DIR")
    if config_dir:
        config_hash = sha256(str(Path(config_dir).expanduser()).encode()).hexdigest()[:8]
        service = f"{service}-{config_hash}"
    return service
def default_account_name() -> str:
    """Keychain account name: the login user from $USER, with a stable fallback."""
    user = os.getenv("USER")
    return user if user else "claude-code-user"
def load_keychain_container(service: str, account: str) -> dict[str, Any]:
    """Fetch and validate the Claude Code credentials JSON from macOS Keychain.

    Runs ``security find-generic-password -a <account> -w -s <service>`` and
    parses its stdout as JSON. Raises RuntimeError for every failure mode:
    non-macOS platform, failure to invoke `security`, non-zero exit, empty
    item, invalid JSON, or a container missing claudeAiOauth.accessToken.
    """
    if platform.system() != "Darwin":
        raise RuntimeError("Claude Code Keychain export is only supported on macOS.")
    try:
        # -w prints only the password payload (the JSON blob) to stdout.
        result = subprocess.run(
            ["security", "find-generic-password", "-a", account, "-w", "-s", service],
            capture_output=True,
            text=True,
            check=False,
        )
    except OSError as exc:
        raise RuntimeError(f"Failed to invoke macOS security tool: {exc}") from exc
    if result.returncode != 0:
        # Surface the Keychain's own error text when available.
        stderr = (result.stderr or "").strip() or "unknown Keychain error"
        raise RuntimeError(f"Keychain lookup failed for service={service!r} account={account!r}: {stderr}")
    secret = (result.stdout or "").strip()
    if not secret:
        raise RuntimeError("Keychain item was empty.")
    try:
        data = json.loads(secret)
    except json.JSONDecodeError as exc:
        raise RuntimeError("Claude Code Keychain item did not contain valid JSON.") from exc
    # Validate the expected container shape before handing it back.
    access_token = data.get("claudeAiOauth", {}).get("accessToken", "")
    if not access_token:
        raise RuntimeError("Claude Code Keychain item did not contain claudeAiOauth.accessToken.")
    return data
def write_credentials_file(output_path: Path, data: dict[str, Any]) -> None:
    """Atomically write *data* as pretty-printed JSON to *output_path*.

    The payload is staged in a sibling temp file (mkstemp creates it with
    0600 permissions) and moved into place with an atomic replace, so readers
    never observe a partially written file. The temp file is removed if
    anything fails before the rename.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(data, indent=2) + "\n"
    fd, staging_name = tempfile.mkstemp(prefix=f"{output_path.name}.", suffix=".tmp", dir=output_path.parent)
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as stream:
            stream.write(payload)
        os.replace(staging_name, output_path)
    except Exception:
        Path(staging_name).unlink(missing_ok=True)
        raise
def parse_args() -> argparse.Namespace:
    """Define and parse the exporter's command-line interface."""
    arg_parser = argparse.ArgumentParser(
        description="Manually export Claude Code OAuth credentials from macOS Keychain for DeerFlow.",
    )
    # Keychain coordinates; defaults are computed from the environment.
    arg_parser.add_argument(
        "--service",
        default=default_service_name(),
        help="Override the Keychain service name. Defaults to Claude Code's computed service name.",
    )
    arg_parser.add_argument(
        "--account",
        default=default_account_name(),
        help="Override the Keychain account name. Defaults to the current user.",
    )
    # Boolean action flags — main() requires at least one action (or
    # --show-target) for the run to do anything.
    for flag, help_text in (
        ("--show-target", "Print the resolved Keychain service/account without reading Keychain."),
        ("--print-token", "Print only the OAuth access token to stdout."),
        ("--print-export", "Print a shell export command for CLAUDE_CODE_OAUTH_TOKEN."),
    ):
        arg_parser.add_argument(flag, action="store_true", help=help_text)
    arg_parser.add_argument(
        "--write-credentials",
        type=Path,
        help="Write the full Claude credentials container to this file with 0600 permissions.",
    )
    return arg_parser.parse_args()
def main() -> int:
    """CLI entry point.

    Returns 0 on success, 1 on Keychain/parse failure, and 2 when no export
    action was requested.
    """
    args = parse_args()
    if args.show_target:
        print(f"service={args.service}")
        print(f"account={args.account}")
    # Without an export action there is nothing to read from Keychain:
    # --show-target alone is a valid no-op, anything else is a usage error.
    wants_export = args.print_token or args.print_export or bool(args.write_credentials)
    if not wants_export:
        if args.show_target:
            return 0
        print("No export action selected. Use --show-target, --print-export, --print-token, or --write-credentials.", file=sys.stderr)
        return 2
    try:
        container = load_keychain_container(service=args.service, account=args.account)
    except RuntimeError as exc:
        print(str(exc), file=sys.stderr)
        return 1
    token = container["claudeAiOauth"]["accessToken"]
    if args.print_token:
        print(token)
    if args.print_export:
        # shlex.quote keeps the export line safe to paste into a shell.
        print(f"export CLAUDE_CODE_OAUTH_TOKEN={shlex.quote(token)}")
    if args.write_credentials:
        destination = args.write_credentials.expanduser()
        write_credentials_file(destination, container)
        # Status goes to stderr so stdout stays clean for --print-* output.
        print(f"Wrote Claude Code credentials to {destination}", file=sys.stderr)
    return 0
# Run as a script: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,81 @@
#!/usr/bin/env python3
"""Load the Memory Settings review sample into a local DeerFlow runtime."""
from __future__ import annotations
import argparse
import json
import shutil
from datetime import datetime
from pathlib import Path
def default_source(repo_root: Path) -> Path:
    """Location of the bundled Memory Settings sample JSON."""
    return repo_root.joinpath("backend", "docs", "memory-settings-sample.json")
def default_target(repo_root: Path) -> Path:
    """Location of the local runtime memory file the sample is copied into."""
    return repo_root.joinpath("backend", ".deer-flow", "memory.json")
def parse_args(repo_root: Path) -> argparse.Namespace:
    """Define and parse the loader's command-line interface.

    Defaults for --source/--target are derived from *repo_root*.
    """
    cli = argparse.ArgumentParser(
        description="Copy the Memory Settings sample data into the local runtime memory file.",
    )
    cli.add_argument(
        "--source",
        type=Path,
        default=default_source(repo_root),
        help="Path to the sample JSON file.",
    )
    cli.add_argument(
        "--target",
        type=Path,
        default=default_target(repo_root),
        help="Path to the runtime memory.json file.",
    )
    cli.add_argument(
        "--no-backup",
        action="store_true",
        help="Overwrite the target without writing a backup copy first.",
    )
    return cli.parse_args()
def validate_json_file(path: Path) -> None:
    """Parse *path* as JSON, raising json.JSONDecodeError if it is malformed."""
    # The parsed value is discarded — this is purely a validity gate.
    json.loads(path.read_text(encoding="utf-8"))
def main() -> int:
    """Copy the sample memory file into place, backing up any existing target.

    Returns 0 on success; raises SystemExit when the sample file is missing.
    """
    repo_root = Path(__file__).resolve().parents[1]
    args = parse_args(repo_root)
    sample_path = args.source.resolve()
    memory_path = args.target.resolve()
    if not sample_path.exists():
        raise SystemExit(f"Sample file not found: {sample_path}")
    # Fail fast on malformed sample data before touching the runtime file.
    validate_json_file(sample_path)
    memory_path.parent.mkdir(parents=True, exist_ok=True)
    backup_path: Path | None = None
    if memory_path.exists() and not args.no_backup:
        # Timestamped sibling copy, e.g. memory.json.bak-20260401-184956.
        stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        backup_path = memory_path.with_name(f"{memory_path.name}.bak-{stamp}")
        shutil.copy2(memory_path, backup_path)
    shutil.copy2(sample_path, memory_path)
    print(f"Loaded sample memory into: {memory_path}")
    if backup_path is None:
        print("No backup created.")
    else:
        print(f"Backup created at: {backup_path}")
    return 0
# Run as a script: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,20 @@
@echo off
rem Locate Git for Windows' bundled bash.exe and forward all arguments to it.
setlocal
set "bash_exe="
rem `where git` may list several entries; pick the first whose sibling
rem ..\bin\bash.exe exists (git.exe lives in <install>\cmd, bash in <install>\bin).
for /f "delims=" %%I in ('where git 2^>NUL') do (
if exist "%%~dpI..\bin\bash.exe" (
set "bash_exe=%%~dpI..\bin\bash.exe"
goto :found_bash
)
)
echo Could not locate Git for Windows Bash ("..\bin\bash.exe" relative to git on PATH). Ensure Git for Windows is installed and that git and bash.exe are available on PATH.
exit /b 1
:found_bash
echo Detected Windows - using Git Bash...
rem Run bash with the caller's original arguments and propagate its exit code.
"%bash_exe%" %*
set "cmd_rc=%ERRORLEVEL%"
exit /b %cmd_rc%

219
scripts/serve.sh Executable file
View File

@ -0,0 +1,219 @@
#!/usr/bin/env bash
#
# serve.sh - Start all DeerFlow development services
#
# Must be run from the repo root directory.
# Abort immediately if any command fails.
set -e
# Resolve the repo root from this script's own location so invocation from
# any working directory behaves the same, then run from there.
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
# ── Load environment variables from .env ──────────────────────────────────────
# `set -a` exports everything sourced from .env to the child services below.
if [ -f "$REPO_ROOT/.env" ]; then
    set -a
    source "$REPO_ROOT/.env"
    set +a
fi
# ── Argument parsing ─────────────────────────────────────────────────────────
# --dev (default): hot-reloading dev servers; --prod: optimized preview build.
DEV_MODE=true
for arg in "$@"; do
    case "$arg" in
        --dev) DEV_MODE=true ;;
        --prod) DEV_MODE=false ;;
        *) echo "Unknown argument: $arg"; echo "Usage: $0 [--dev|--prod]"; exit 1 ;;
    esac
done
# The frontend command depends on the mode. In prod a throwaway
# BETTER_AUTH_SECRET is minted at startup.
# NOTE(review): a fresh secret on every restart invalidates existing
# sessions — confirm this is intended for `make start`.
if $DEV_MODE; then
    FRONTEND_CMD="pnpm run dev"
else
    FRONTEND_CMD="env BETTER_AUTH_SECRET=$(python3 -c 'import secrets; print(secrets.token_hex(16))') pnpm run preview"
fi
# ── Stop existing services ────────────────────────────────────────────────────
echo "Stopping existing services if any..."
# Best-effort kills: each one is allowed to fail when nothing is running.
pkill -f "langgraph dev" 2>/dev/null || true
pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
pkill -f "next dev" 2>/dev/null || true
pkill -f "next-server" 2>/dev/null || true
# Ask nginx to quit gracefully first, then force-kill any stragglers.
nginx -c "$REPO_ROOT/docker/nginx/nginx.local.conf" -p "$REPO_ROOT" -s quit 2>/dev/null || true
sleep 1
pkill -9 nginx 2>/dev/null || true
killall -9 nginx 2>/dev/null || true
# Remove any leftover sandbox containers from a previous run.
./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
sleep 1
# ── Banner ────────────────────────────────────────────────────────────────────
echo ""
echo "=========================================="
echo " Starting DeerFlow Development Server"
echo "=========================================="
echo ""
if $DEV_MODE; then
    echo " Mode: DEV (hot-reload enabled)"
    # Fixed wording to mirror the PROD branch's tip ("to start in ... mode").
    echo " Tip: run \`make start\` to start in production mode"
else
    echo " Mode: PROD (hot-reload disabled)"
    echo " Tip: run \`make dev\` to start in development mode"
fi
echo ""
echo "Services starting up..."
echo " → Backend: LangGraph + Gateway"
echo " → Frontend: Next.js"
echo " → Nginx: Reverse Proxy"
echo ""
# ── Config check ─────────────────────────────────────────────────────────────
# Accept any of: an explicit DEER_FLOW_CONFIG_PATH, backend/config.yaml, or
# ./config.yaml in the repo root.
if ! { \
    [ -n "$DEER_FLOW_CONFIG_PATH" ] && [ -f "$DEER_FLOW_CONFIG_PATH" ] || \
    [ -f backend/config.yaml ] || \
    [ -f config.yaml ]; \
}; then
    echo "✗ No DeerFlow config file found."
    echo " Checked these locations:"
    echo " - $DEER_FLOW_CONFIG_PATH (when DEER_FLOW_CONFIG_PATH is set)"
    echo " - backend/config.yaml"
    echo " - ./config.yaml"
    echo ""
    echo " Run 'make config' from the repo root to generate ./config.yaml, then set required model API keys in .env or your config file."
    exit 1
fi
# ── Auto-upgrade config ──────────────────────────────────────────────────
# Merge any new fields from config.example.yaml before starting services.
"$REPO_ROOT/scripts/config-upgrade.sh"
# ── Cleanup trap ─────────────────────────────────────────────────────────────
# Tears everything down on Ctrl+C / TERM; also invoked manually below when a
# service fails to come up.
cleanup() {
    # Reset the trap so a second signal during cleanup cannot re-enter.
    trap - INT TERM
    echo ""
    echo "Shutting down services..."
    if [ "${SKIP_LANGGRAPH_SERVER:-0}" != "1" ]; then
        pkill -f "langgraph dev" 2>/dev/null || true
    fi
    pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
    pkill -f "next dev" 2>/dev/null || true
    pkill -f "next start" 2>/dev/null || true
    pkill -f "next-server" 2>/dev/null || true
    # Kill nginx using the captured PID first (most reliable),
    # then fall back to pkill/killall for any stray nginx workers.
    if [ -n "${NGINX_PID:-}" ] && kill -0 "$NGINX_PID" 2>/dev/null; then
        kill -TERM "$NGINX_PID" 2>/dev/null || true
        sleep 1
        kill -9 "$NGINX_PID" 2>/dev/null || true
    fi
    pkill -9 nginx 2>/dev/null || true
    killall -9 nginx 2>/dev/null || true
    echo "Cleaning up sandbox containers..."
    ./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
    echo "✓ All services stopped"
    exit 0
}
trap cleanup INT TERM
# ── Start services ────────────────────────────────────────────────────────────
mkdir -p logs
# Extra flags are bash arrays so glob patterns reach the servers as single,
# unquoted arguments. The previous string form was expanded unquoted, which
# performs word splitting but NOT quote removal — uvicorn received literal
# --reload-include='*.yaml' (quotes included), so the reload filters never
# matched any file.
if $DEV_MODE; then
    LANGGRAPH_EXTRA_FLAGS=(--no-reload)
    GATEWAY_EXTRA_FLAGS=(--reload
        --reload-include='*.yaml' --reload-include='.env'
        --reload-exclude='*.pyc' --reload-exclude='__pycache__'
        --reload-exclude='sandbox/' --reload-exclude='.deer-flow/')
else
    LANGGRAPH_EXTRA_FLAGS=(--no-reload)
    GATEWAY_EXTRA_FLAGS=()
fi
if [ "${SKIP_LANGGRAPH_SERVER:-0}" != "1" ]; then
    echo "Starting LangGraph server..."
    # Read log_level from config.yaml, fallback to env var, then to "info"
    CONFIG_LOG_LEVEL=$(grep -m1 '^log_level:' config.yaml 2>/dev/null | awk '{print $2}' | tr -d ' ')
    LANGGRAPH_LOG_LEVEL="${LANGGRAPH_LOG_LEVEL:-${CONFIG_LOG_LEVEL:-info}}"
    # Subshell keeps the `cd backend` from leaking into this script.
    (cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --server-log-level "$LANGGRAPH_LOG_LEVEL" "${LANGGRAPH_EXTRA_FLAGS[@]}" > ../logs/langgraph.log 2>&1) &
    ./scripts/wait-for-port.sh 2024 60 "LangGraph" || {
        echo " See logs/langgraph.log for details"
        tail -20 logs/langgraph.log
        if grep -qE "config_version|outdated|Environment variable .* not found|KeyError|ValidationError|config\.yaml" logs/langgraph.log 2>/dev/null; then
            echo ""
            echo " Hint: This may be a configuration issue. Try running 'make config-upgrade' to update your config.yaml."
        fi
        cleanup
    }
    echo "✓ LangGraph server started on localhost:2024"
else
    echo "⏩ Skipping LangGraph server (SKIP_LANGGRAPH_SERVER=1)"
    echo " Use /api/langgraph-compat/* via Gateway instead"
fi
echo "Starting Gateway API..."
# "${GATEWAY_EXTRA_FLAGS[@]}" expands to zero words when the array is empty.
(cd backend && PYTHONPATH=. uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8001 "${GATEWAY_EXTRA_FLAGS[@]}" > ../logs/gateway.log 2>&1) &
./scripts/wait-for-port.sh 8001 30 "Gateway API" || {
    echo "✗ Gateway API failed to start. Last log output:"
    tail -60 logs/gateway.log
    echo ""
    echo "Likely configuration errors:"
    grep -E "Failed to load configuration|Environment variable .* not found|config\.yaml.*not found" logs/gateway.log | tail -5 || true
    echo ""
    echo " Hint: Try running 'make config-upgrade' to update your config.yaml with the latest fields."
    cleanup
}
echo "✓ Gateway API started on localhost:8001"
echo "Starting Frontend..."
# $FRONTEND_CMD is intentionally unquoted so it splits into words; its only
# variable part (the hex auth secret in prod) contains no whitespace.
(cd frontend && $FRONTEND_CMD > ../logs/frontend.log 2>&1) &
./scripts/wait-for-port.sh 3000 120 "Frontend" || {
    echo " See logs/frontend.log for details"
    tail -20 logs/frontend.log
    cleanup
}
echo "✓ Frontend started on localhost:3000"
echo "Starting Nginx reverse proxy..."
# Run nginx in the foreground ('daemon off;') so $! captures its real PID,
# which cleanup() uses for a reliable shutdown.
nginx -g 'daemon off;' -c "$REPO_ROOT/docker/nginx/nginx.local.conf" -p "$REPO_ROOT" > logs/nginx.log 2>&1 &
NGINX_PID=$!
./scripts/wait-for-port.sh 2026 10 "Nginx" || {
    echo " See logs/nginx.log for details"
    tail -10 logs/nginx.log
    cleanup
}
echo "✓ Nginx started on localhost:2026"
# ── Ready ─────────────────────────────────────────────────────────────────────
echo ""
echo "=========================================="
if $DEV_MODE; then
    echo " ✓ DeerFlow development server is running!"
else
    echo " ✓ DeerFlow production server is running!"
fi
echo "=========================================="
echo ""
echo " 🌐 Application: http://localhost:2026"
echo " 📡 API Gateway: http://localhost:2026/api/*"
if [ "${SKIP_LANGGRAPH_SERVER:-0}" = "1" ]; then
    echo " 🤖 LangGraph: skipped (SKIP_LANGGRAPH_SERVER=1)"
else
    echo " 🤖 LangGraph: http://localhost:2026/api/langgraph/* (served by langgraph dev)"
fi
echo " 🧪 LangGraph Compat (experimental): http://localhost:2026/api/langgraph-compat/* (served by Gateway)"
if [ "${SKIP_LANGGRAPH_SERVER:-0}" = "1" ]; then
    echo ""
    echo " 💡 Set NEXT_PUBLIC_LANGGRAPH_BASE_URL=/api/langgraph-compat in frontend/.env.local"
fi
echo ""
echo " 📋 Logs:"
echo " - LangGraph: logs/langgraph.log"
echo " - Gateway: logs/gateway.log"
echo " - Frontend: logs/frontend.log"
echo " - Nginx: logs/nginx.log"
echo ""
echo "Press Ctrl+C to stop all services"
# Block on the background services until they exit or the trap fires (Ctrl+C).
wait

139
scripts/start-daemon.sh Executable file
View File

@ -0,0 +1,139 @@
#!/usr/bin/env bash
#
# start-daemon.sh - Start all DeerFlow development services in daemon mode
#
# This script starts DeerFlow services in the background without keeping
# the terminal connection. Logs are written to separate files.
#
# Must be run from the repo root directory.
# Abort immediately if any command fails.
set -e
# Resolve the repo root from this script's own location and run from there.
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
# ── Stop existing services ────────────────────────────────────────────────────
echo "Stopping existing services if any..."
# Best-effort kills: each one is allowed to fail when nothing is running.
pkill -f "langgraph dev" 2>/dev/null || true
pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
pkill -f "next dev" 2>/dev/null || true
# Ask nginx to quit gracefully first, then force-kill any stragglers.
nginx -c "$REPO_ROOT/docker/nginx/nginx.local.conf" -p "$REPO_ROOT" -s quit 2>/dev/null || true
sleep 1
pkill -9 nginx 2>/dev/null || true
# Remove any leftover sandbox containers from a previous run.
./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
sleep 1
# ── Banner ────────────────────────────────────────────────────────────────────
echo ""
echo "=========================================="
echo " Starting DeerFlow in Daemon Mode"
echo "=========================================="
echo ""
# ── Config check ─────────────────────────────────────────────────────────────
# Accept any of: an explicit DEER_FLOW_CONFIG_PATH, backend/config.yaml, or
# ./config.yaml in the repo root.
if ! { \
    [ -n "$DEER_FLOW_CONFIG_PATH" ] && [ -f "$DEER_FLOW_CONFIG_PATH" ] || \
    [ -f backend/config.yaml ] || \
    [ -f config.yaml ]; \
}; then
    echo "✗ No DeerFlow config file found."
    echo " Checked these locations:"
    echo " - $DEER_FLOW_CONFIG_PATH (when DEER_FLOW_CONFIG_PATH is set)"
    echo " - backend/config.yaml"
    echo " - ./config.yaml"
    echo ""
    echo " Run 'make config' from the repo root to generate ./config.yaml, then set required model API keys in .env or your config file."
    exit 1
fi
# ── Auto-upgrade config ──────────────────────────────────────────────────
# Merge any new fields from config.example.yaml before starting services.
"$REPO_ROOT/scripts/config-upgrade.sh"
# ── Cleanup on failure ───────────────────────────────────────────────────────
# Best-effort teardown mirroring the "stop existing" section and serve.sh's
# cleanup(): every kill is individually allowed to fail.
cleanup_on_failure() {
    echo "Failed to start services, cleaning up..."
    pkill -f "langgraph dev" 2>/dev/null || true
    pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true
    pkill -f "next dev" 2>/dev/null || true
    # Also kill Next.js worker processes (named "next-server"), which the
    # "next dev" pattern misses — consistent with serve.sh's cleanup.
    pkill -f "next-server" 2>/dev/null || true
    # Graceful nginx quit first, then force-kill any stragglers.
    nginx -c "$REPO_ROOT/docker/nginx/nginx.local.conf" -p "$REPO_ROOT" -s quit 2>/dev/null || true
    sleep 1
    pkill -9 nginx 2>/dev/null || true
    echo "✓ Cleanup complete"
}
trap cleanup_on_failure INT TERM
# ── Start services ────────────────────────────────────────────────────────────
mkdir -p logs
echo "Starting LangGraph server..."
# nohup + background detaches each service from this terminal so it survives
# after the script exits; output goes to a per-service log file.
nohup sh -c 'cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --no-reload > ../logs/langgraph.log 2>&1' &
./scripts/wait-for-port.sh 2024 60 "LangGraph" || {
    echo "✗ LangGraph failed to start. Last log output:"
    tail -60 logs/langgraph.log
    if grep -qE "config_version|outdated|Environment variable .* not found|KeyError|ValidationError|config\.yaml" logs/langgraph.log 2>/dev/null; then
        echo ""
        echo " Hint: This may be a configuration issue. Try running 'make config-upgrade' to update your config.yaml."
    fi
    cleanup_on_failure
    exit 1
}
echo "✓ LangGraph server started on localhost:2024"
echo "Starting Gateway API..."
nohup sh -c 'cd backend && PYTHONPATH=. uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8001 > ../logs/gateway.log 2>&1' &
./scripts/wait-for-port.sh 8001 30 "Gateway API" || {
    echo "✗ Gateway API failed to start. Last log output:"
    tail -60 logs/gateway.log
    echo ""
    echo " Hint: Try running 'make config-upgrade' to update your config.yaml with the latest fields."
    cleanup_on_failure
    exit 1
}
echo "✓ Gateway API started on localhost:8001"
echo "Starting Frontend..."
nohup sh -c 'cd frontend && pnpm run dev > ../logs/frontend.log 2>&1' &
./scripts/wait-for-port.sh 3000 120 "Frontend" || {
    echo "✗ Frontend failed to start. Last log output:"
    tail -60 logs/frontend.log
    cleanup_on_failure
    exit 1
}
echo "✓ Frontend started on localhost:3000"
echo "Starting Nginx reverse proxy..."
# REPO_ROOT is passed as $1 into the single-quoted sh -c script ("_" fills
# $0), so paths containing spaces survive the inner double-quoted expansion.
nohup sh -c 'nginx -g "daemon off;" -c "$1/docker/nginx/nginx.local.conf" -p "$1" > logs/nginx.log 2>&1' _ "$REPO_ROOT" &
./scripts/wait-for-port.sh 2026 10 "Nginx" || {
    echo "✗ Nginx failed to start. Last log output:"
    tail -60 logs/nginx.log
    cleanup_on_failure
    exit 1
}
echo "✓ Nginx started on localhost:2026"
# ── Ready ─────────────────────────────────────────────────────────────────────
# No `wait` here: the services stay detached and this script exits,
# leaving them running in the background (stop with `make stop`).
echo ""
echo "=========================================="
echo " DeerFlow is running in daemon mode!"
echo "=========================================="
echo ""
echo " 🌐 Application: http://localhost:2026"
echo " 📡 API Gateway: http://localhost:2026/api/*"
echo " 🤖 LangGraph: http://localhost:2026/api/langgraph/*"
echo ""
echo " 📋 Logs:"
echo " - LangGraph: logs/langgraph.log"
echo " - Gateway: logs/gateway.log"
echo " - Frontend: logs/frontend.log"
echo " - Nginx: logs/nginx.log"
echo ""
echo " 🛑 Stop daemon: make stop"
echo ""

View File

@ -0,0 +1,218 @@
#!/usr/bin/env bash
set -euo pipefail
# Detect whether the current branch has working tool-failure downgrade:
# - Lead agent middleware chain includes error-handling
# - Subagent middleware chain includes error-handling
# - Failing tool call does not abort the whole call sequence
# - Subsequent successful tool call result is still preserved
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BACKEND_DIR="${ROOT_DIR}/backend"
if ! command -v uv >/dev/null 2>&1; then
    echo "[FAIL] uv is required but not found in PATH."
    exit 1
fi
# Keep uv's cache in a predictable place unless the caller overrides it.
export UV_CACHE_DIR="${UV_CACHE_DIR:-/tmp/uv-cache}"
echo "[INFO] Root: ${ROOT_DIR}"
echo "[INFO] Backend: ${BACKEND_DIR}"
echo "[INFO] UV cache: ${UV_CACHE_DIR}"
echo "[INFO] Running tool-failure downgrade detector..."
cd "${BACKEND_DIR}"
# The detector itself is inline Python fed via a quoted heredoc ('PY'), so
# the shell performs no expansion inside the Python source.
uv run python -u - <<'PY'
import asyncio
import logging
import ssl
from types import SimpleNamespace
from requests.exceptions import SSLError
from langchain.agents.middleware import AgentMiddleware
from langchain_core.messages import ToolMessage
from deerflow.agents.lead_agent.agent import _build_middlewares
from deerflow.config import get_app_config
from deerflow.sandbox.middleware import SandboxMiddleware
from deerflow.agents.middlewares.thread_data_middleware import ThreadDataMiddleware
# Representative payload of a TLS handshake failure as raised by ssl/requests.
HANDSHAKE_ERROR = "[SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1000)"
# Silence the downgrade middleware's own error logging; this detector emits
# its own structured [INFO]/[FAIL]/[PASS] lines instead.
logging.getLogger("deerflow.agents.middlewares.tool_error_handling_middleware").setLevel(logging.CRITICAL)
def _make_ssl_error():
    # Wrap an SSLEOFError the way requests surfaces it, so the middleware
    # chain sees the same exception type as a real TLS handshake failure.
    inner = ssl.SSLEOFError(8, HANDSHAKE_ERROR)
    return SSLError(inner)
print("[STEP 1] Prepare simulated Tavily SSL handshake failure.")
print(f"[INFO] Handshake error payload: {HANDSHAKE_ERROR}")
# Two sequential calls: the first (web_search) is rigged to fail, and the
# second (web_fetch) must still produce its success result.
TOOL_CALLS = [
    {"name": "web_search", "id": "tc-fail", "args": {"query": "latest agent news"}},
    {"name": "web_fetch", "id": "tc-ok", "args": {"url": "https://example.com"}},
]
def _sync_handler(req):
    # Terminal sync tool executor: web_search always fails with the simulated
    # SSL error, every other tool returns a canned success ToolMessage.
    name = req.tool_call.get("name", "unknown_tool")
    if name == "web_search":
        raise _make_ssl_error()
    return ToolMessage(
        content=f"{name} success",
        tool_call_id=req.tool_call.get("id", "missing-id"),
        name=name,
        status="success",
    )
async def _async_handler(req):
    # Async twin of _sync_handler: web_search raises, everything else succeeds.
    name = req.tool_call.get("name", "unknown_tool")
    if name == "web_search":
        raise _make_ssl_error()
    return ToolMessage(
        content=f"{name} success",
        tool_call_id=req.tool_call.get("id", "missing-id"),
        name=name,
        status="success",
    )
def _collect_sync_wrappers(middlewares):
    # Keep the sync wrapper of every middleware that overrides either the
    # sync or the async tool-call hook (a middleware overriding only the
    # async hook still contributes its class's sync wrapper).
    selected = []
    for mw in middlewares:
        cls = mw.__class__
        overrides_sync = cls.wrap_tool_call is not AgentMiddleware.wrap_tool_call
        overrides_async = cls.awrap_tool_call is not AgentMiddleware.awrap_tool_call
        if overrides_sync or overrides_async:
            selected.append(mw.wrap_tool_call)
    return selected
def _collect_async_wrappers(middlewares):
    # Async analogue of _collect_sync_wrappers: any override of either hook
    # contributes the middleware's async wrapper to the chain.
    selected = []
    for mw in middlewares:
        cls = mw.__class__
        overrides_async = cls.awrap_tool_call is not AgentMiddleware.awrap_tool_call
        overrides_sync = cls.wrap_tool_call is not AgentMiddleware.wrap_tool_call
        if overrides_async or overrides_sync:
            selected.append(mw.awrap_tool_call)
    return selected
def _compose_sync(wrappers):
    # Fold the wrapper list into one callable, first wrapper outermost,
    # terminating at the raw sync handler.
    chain = _sync_handler
    for wrapper in reversed(wrappers):
        inner = chain
        # Default args bind the current wrapper/inner pair per iteration.
        def chain(req, wrapper=wrapper, inner=inner):
            return wrapper(req, inner)
    return chain
def _compose_async(wrappers):
    # Async analogue of _compose_sync: first wrapper outermost, terminating
    # at the raw async handler.
    async def chain(req):
        return await _async_handler(req)
    for wrapper in reversed(wrappers):
        inner = chain
        # Default args bind the current wrapper/inner pair per iteration.
        async def chain(req, wrapper=wrapper, inner=inner):
            return await wrapper(req, inner)
    return chain
def _validate_outputs(label, outputs):
    # Hard-exit with a distinct code per failure mode so the shell wrapper
    # and CI logs can pinpoint exactly which invariant broke.
    if len(outputs) != 2:
        print(f"[FAIL] {label}: expected 2 tool outputs, got {len(outputs)}")
        raise SystemExit(2)
    failed_msg = outputs[0]
    ok_msg = outputs[1]
    if not (isinstance(failed_msg, ToolMessage) and isinstance(ok_msg, ToolMessage)):
        print(f"[FAIL] {label}: outputs are not ToolMessage instances")
        raise SystemExit(3)
    if failed_msg.status != "error":
        print(f"[FAIL] {label}: first tool should be status=error, got {failed_msg.status}")
        raise SystemExit(4)
    if ok_msg.status != "success":
        print(f"[FAIL] {label}: second tool should be status=success, got {ok_msg.status}")
        raise SystemExit(5)
    if "Error: Tool 'web_search' failed" not in failed_msg.text:
        print(f"[FAIL] {label}: first tool error text missing")
        raise SystemExit(6)
    if "web_fetch success" not in ok_msg.text:
        print(f"[FAIL] {label}: second tool success text missing")
        raise SystemExit(7)
    print(f"[INFO] {label}: no crash, outputs preserved (error + success).")
def _build_sub_middlewares():
    # Prefer the branch's real subagent middleware factory; on branches that
    # predate it, approximate with the baseline runtime middlewares.
    try:
        from deerflow.agents.middlewares.tool_error_handling_middleware import build_subagent_runtime_middlewares
    except Exception:
        return [
            ThreadDataMiddleware(lazy_init=True),
            SandboxMiddleware(lazy_init=True),
        ]
    return build_subagent_runtime_middlewares()
def _run_sync_sequence(executor):
    # Drive the composed executor over both tool calls, returning the
    # outputs collected so far plus the exception (if any) instead of
    # letting a failure propagate.
    collected = []
    try:
        for tool_call in TOOL_CALLS:
            collected.append(executor(SimpleNamespace(tool_call=tool_call)))
    except Exception as exc:
        return collected, exc
    return collected, None
async def _run_async_sequence(executor):
    # Async analogue of _run_sync_sequence: collect outputs, capture the
    # first exception rather than propagating it.
    collected = []
    try:
        for tool_call in TOOL_CALLS:
            collected.append(await executor(SimpleNamespace(tool_call=tool_call)))
    except Exception as exc:
        return collected, exc
    return collected, None
print("[STEP 2] Load current branch middleware chains.")
app_cfg = get_app_config()
# The lead chain needs a concrete model name; take the first configured model.
model_name = app_cfg.models[0].name if app_cfg.models else None
if not model_name:
    print("[FAIL] No model configured; cannot evaluate lead middleware chain.")
    raise SystemExit(8)
lead_middlewares = _build_middlewares({"configurable": {}}, model_name=model_name)
sub_middlewares = _build_sub_middlewares()
print("[STEP 3] Simulate two sequential tool calls and check whether conversation flow aborts.")
any_crash = False
# Exercise both chains over both execution paths (sync and async). A raised
# exception means the failing first call aborted the sequence — no downgrade.
for label, middlewares in [("lead", lead_middlewares), ("subagent", sub_middlewares)]:
    sync_exec = _compose_sync(_collect_sync_wrappers(middlewares))
    sync_outputs, sync_exc = _run_sync_sequence(sync_exec)
    if sync_exc is not None:
        any_crash = True
        print(f"[INFO] {label}/sync: conversation aborted after tool error ({sync_exc.__class__.__name__}: {sync_exc}).")
    else:
        _validate_outputs(f"{label}/sync", sync_outputs)
    async_exec = _compose_async(_collect_async_wrappers(middlewares))
    async_outputs, async_exc = asyncio.run(_run_async_sequence(async_exec))
    if async_exc is not None:
        any_crash = True
        print(f"[INFO] {label}/async: conversation aborted after tool error ({async_exc.__class__.__name__}: {async_exc}).")
    else:
        _validate_outputs(f"{label}/async", async_outputs)
if any_crash:
    print("[FAIL] Tool exception caused conversation flow to abort (no effective downgrade).")
    raise SystemExit(9)
print("[PASS] Tool exceptions were downgraded; conversation flow continued with remaining tool results.")
PY

61
scripts/wait-for-port.sh Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env bash
#
# wait-for-port.sh - Wait for a TCP port to become available
#
# Usage: ./scripts/wait-for-port.sh <port> [timeout_seconds] [service_name]
#
# Arguments:
#   port            - TCP port to wait for (required)
#   timeout_seconds - Max seconds to wait (default: 60)
#   service_name    - Display name for messages (default: "Service")
#
# Exit codes:
#   0 - Port is listening
#   1 - Timed out waiting
# ${1:?...} aborts with the usage message when the port argument is missing.
PORT="${1:?Usage: wait-for-port.sh <port> [timeout] [service_name]}"
TIMEOUT="${2:-60}"
SERVICE="${3:-Service}"
# Poll once per `interval` seconds, accumulating `elapsed` toward TIMEOUT.
elapsed=0
interval=1
is_port_listening() {
    # Probe with whichever tool exists, in order of preference:
    # lsof (macOS/Linux), ss (modern Linux), netstat (legacy), and finally a
    # direct bash /dev/tcp connect guarded by `timeout`.
    if command -v lsof >/dev/null 2>&1; then
        if lsof -nP -iTCP:"$PORT" -sTCP:LISTEN -t >/dev/null 2>&1; then
            return 0
        fi
    fi
    if command -v ss >/dev/null 2>&1; then
        # tail -n +2 drops the header; any remaining line means a listener.
        if ss -ltn "( sport = :$PORT )" 2>/dev/null | tail -n +2 | grep -q .; then
            return 0
        fi
    fi
    if command -v netstat >/dev/null 2>&1; then
        # Match the port at the end of the local-address column
        # ("addr:PORT" on Linux, "addr.PORT" on macOS/BSD).
        if netstat -ltn 2>/dev/null | awk '{print $4}' | grep -Eq "(^|[.:])${PORT}$"; then
            return 0
        fi
    fi
    if command -v timeout >/dev/null 2>&1; then
        # Last resort: attempt a TCP connect — only detects listeners
        # reachable on 127.0.0.1.
        timeout 1 bash -c "exec 3<>/dev/tcp/127.0.0.1/$PORT" >/dev/null 2>&1
        return $?
    fi
    # No probing tool available: report "not listening" so the caller times
    # out instead of hanging forever.
    return 1
}
# Poll until the port answers or TIMEOUT elapses.
while ! is_port_listening; do
    if [ "$elapsed" -ge "$TIMEOUT" ]; then
        echo ""
        echo "$SERVICE failed to start on port $PORT after ${TIMEOUT}s"
        exit 1
    fi
    # \r rewrites the same terminal line so the countdown stays on one line.
    printf "\r Waiting for %s on port %s... %ds" "$SERVICE" "$PORT" "$elapsed"
    sleep "$interval"
    elapsed=$((elapsed + interval))
done
printf "\r %-60s\r" "" # clear the waiting line