Compare commits

17 Commits: 81bd9b3d74 ... f584c3e53b

| Author | SHA1 | Date |
|---|---|---|
| | f584c3e53b | |
| | 169332ab29 | |
| | 1124a2e371 | |
| | 7178c14705 | |
| | 6ece5e0c39 | |
| | 4ef9b896e3 | |
| | 927edfb610 | |
| | 08e8de5e3e | |
| | d1cdb7eef7 | |
| | 1fd7a5d4f7 | |
| | 299d819026 | |
| | 2d50c49369 | |
| | 74813ff61d | |
| | d8226b834c | |
| | 256a2d36ec | |
| | 31f4bdb99a | |
| | 6853ed71bc | |

@@ -65,6 +65,8 @@ frontend/node_modules
backend/.venv
backend/htmlcov
backend/.coverage
backend/.deer-flow
backend/.deer-flow/**/*
*.md
!README.md
!frontend/README.md

@@ -38,6 +38,7 @@ coverage/
.deer-flow/
.claude/
skills/custom/*
skills/
logs/
log/

@@ -10,6 +10,7 @@ from fastapi import APIRouter, HTTPException, Request
from fastapi.responses import FileResponse, PlainTextResponse, Response

from app.gateway.path_utils import resolve_thread_virtual_path
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths

logger = logging.getLogger(__name__)

@@ -62,6 +63,38 @@ def _find_compat_filename_match(missing_path: Path) -> Path | None:
    return matches[0] if len(matches) == 1 else None


def _list_reference_files_in_dir(
    thread_id: str,
    root_dir: Path,
    virtual_prefix: str,
    source: str,
) -> list[dict[str, str]]:
    if not root_dir.is_dir():
        return []

    files: list[dict[str, str]] = []
    for file_path in sorted(root_dir.rglob("*")):
        if not file_path.is_file():
            continue
        relative_path = file_path.relative_to(root_dir).as_posix()
        # Internal uploaded skills are bootstrap assets, not user-facing references.
        if source == "upload" and relative_path.startswith("skill/"):
            continue
        virtual_path = f"{virtual_prefix}/{relative_path}"
        encoded_virtual_path = quote(virtual_path, safe="/")
        files.append(
            {
                "filename": file_path.name,
                "size": str(file_path.stat().st_size),
                "virtual_path": virtual_path,
                "artifact_url": f"/api/threads/{thread_id}/artifacts{encoded_virtual_path}",
                "source": source,
            }
        )

    return files


def is_text_file_by_content(path: Path, sample_size: int = 8192) -> bool:
    """Check if file is text by examining content for null bytes."""
    try:

@@ -106,6 +139,38 @@ def _extract_file_from_skill_archive(zip_path: Path, internal_path: str) -> byte
    return None


@router.get(
    "/threads/{thread_id}/artifacts/list",
    summary="List Reference Files",
    description="List current files under outputs and uploads for @ references.",
)
async def list_reference_files(thread_id: str) -> dict:
    """List real files from outputs/uploads so mention candidates stay fresh."""
    paths = get_paths()
    outputs_dir = paths.sandbox_outputs_dir(thread_id)
    uploads_dir = paths.sandbox_uploads_dir(thread_id)

    outputs_virtual_prefix = f"{VIRTUAL_PATH_PREFIX}/outputs"
    uploads_virtual_prefix = f"{VIRTUAL_PATH_PREFIX}/uploads"
    output_files = _list_reference_files_in_dir(
        thread_id,
        outputs_dir,
        outputs_virtual_prefix,
        "artifact",
    )
    upload_files = _list_reference_files_in_dir(
        thread_id,
        uploads_dir,
        uploads_virtual_prefix,
        "upload",
    )
    files = [*output_files, *upload_files]
    return {
        "files": files,
        "count": len(files),
    }


@router.get(
    "/threads/{thread_id}/artifacts/{path:path}",
    summary="Get Artifact File",

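The new `/artifacts/list` route complements the existing per-file artifact route above. A minimal client sketch (the gateway address is an assumption for illustration; the route and response fields come from this hunk and its test later in this diff):

```python
import httpx

BASE_URL = "http://localhost:8000"  # assumed local gateway address

resp = httpx.get(f"{BASE_URL}/api/threads/thread-1/artifacts/list")
resp.raise_for_status()
payload = resp.json()

# "count" mirrors len(files); each entry carries filename, size,
# virtual_path, artifact_url, and source ("artifact" or "upload").
for item in payload["files"]:
    print(item["source"], item["virtual_path"], "->", item["artifact_url"])
```
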
@@ -67,11 +67,13 @@ async def proxy_request(provider: str, path: str, request: Request) -> Response:
            path=path,
            request=request,
            body=body,
            request_json=request_json,
            thread_id=thread_id,
            idempotency_key=idempotency_key,
            task_id_jsonpath=submit_route.task_id_jsonpath,
            route_frozen_amount=submit_route.frozen_amount,
            route_frozen_type=submit_route.frozen_type,
            route_frozen_token=submit_route.frozen_token,
        )

    if query_route:

@@ -109,11 +111,13 @@ async def _handle_submit(
    path: str,
    request: Request,
    body: bytes,
    request_json: dict[str, Any] | None,
    thread_id: str | None,
    idempotency_key: str | None,
    task_id_jsonpath: str,
    route_frozen_amount: float | None,
    route_frozen_type: int | None,
    route_frozen_token: int | None,
) -> Response:
    ledger = get_ledger()

@@ -129,6 +133,7 @@ async def _handle_submit(
    # Reserve billing before touching the provider
    reserve_frozen_amount = route_frozen_amount if route_frozen_amount is not None else provider_config.frozen_amount
    reserve_frozen_type = route_frozen_type if route_frozen_type is not None else provider_config.frozen_type
    reserve_frozen_token = route_frozen_token if route_frozen_token is not None else provider_config.frozen_token
    frozen_id = await billing.reserve(
        thread_id=thread_id,
        call_id=record.call_id,

@@ -136,9 +141,11 @@ async def _handle_submit(
        operation=path,
        frozen_amount=reserve_frozen_amount,
        frozen_type=reserve_frozen_type,
        frozen_token=reserve_frozen_token,
        request_payload=request_json,
    )
    if frozen_id:
-       ledger.set_reserved(record.proxy_call_id, frozen_id)
+       ledger.set_reserved(record.proxy_call_id, frozen_id, reserve_frozen_type)

    # Forward to provider
    try:

@@ -156,6 +163,32 @@ async def _handle_submit(
    resp_json = _try_parse_json(resp_body)

    if resp_json is None:
        if frozen_id and reserve_frozen_type == 1:
            usage_input_tokens, usage_output_tokens = _extract_usage_tokens_from_submit_stream(resp_body)
            logger.debug(
                "[ThirdPartyProxy] submit stream usage resolved: proxy_call_id=%s usage_input_tokens=%s usage_output_tokens=%s",
                record.proxy_call_id,
                usage_input_tokens,
                usage_output_tokens,
            )

            if ledger.try_claim_finalize(record.proxy_call_id):
                ok = await billing.finalize(
                    frozen_id=frozen_id,
                    final_amount=0.0,
                    finalize_reason="success",
                    usage_input_tokens=usage_input_tokens,
                    usage_output_tokens=usage_output_tokens,
                )
                if ok:
                    ledger.set_finalized(record.proxy_call_id, "SUCCESS")
                else:
                    ledger.set_finalize_failed(record.proxy_call_id, "FAILED")

        media_type = resp_headers.get("content-type")
        return Response(content=resp_body, status_code=status_code, headers=resp_headers, media_type=media_type)

    # HTTP-level failure
    if status_code >= 400:
        reason = f"error_http_{status_code}"

@@ -272,18 +305,31 @@ async def _handle_query(
            "[ThirdPartyProxy] finalize claimed: proxy_call_id=%s",
            record.proxy_call_id,
        )
        resolved_frozen_type = (
            record.frozen_type if record.frozen_type is not None else provider_config.frozen_type
        )

        usage_input_tokens = 0
        usage_output_tokens = 0
        usage_paths = list(query_route.usage_jsonpaths or [])
        if not usage_paths and query_route.usage_jsonpath:
            usage_paths = [query_route.usage_jsonpath]

        final_amount: float = 0.0
-       if is_success and query_route.usage_jsonpath:
-           raw_amount = proxy.jsonpath_get(resp_json, query_route.usage_jsonpath)
-           try:
-               final_amount = float(raw_amount) if raw_amount is not None else 0.0
-           except (TypeError, ValueError):
-               final_amount = 0.0
+       if is_success:
+           if resolved_frozen_type == 1:
+               usage_input_tokens, usage_output_tokens = _extract_usage_tokens(resp_json)
+           else:
+               final_amount = _resolve_final_amount(resp_json, query_route)

        logger.debug(
-           "[ThirdPartyProxy] finalize amount resolved: proxy_call_id=%s final_amount=%s usage_path=%s",
+           "[ThirdPartyProxy] finalize amount resolved: proxy_call_id=%s frozen_type=%s final_amount=%s usage_input_tokens=%s usage_output_tokens=%s usage_paths=%s legacy_path=%s",
            record.proxy_call_id,
            resolved_frozen_type,
            final_amount,
            usage_input_tokens,
            usage_output_tokens,
            usage_paths,
            query_route.usage_jsonpath,
        )

@@ -303,6 +349,8 @@ async def _handle_query(
            frozen_id=record.frozen_id,
            final_amount=final_amount,
            finalize_reason=finalize_reason,
            usage_input_tokens=usage_input_tokens,
            usage_output_tokens=usage_output_tokens,
        )
        logger.info(
            "[ThirdPartyProxy] finalize result: proxy_call_id=%s ok=%s",

@@ -391,6 +439,85 @@ def _try_parse_json(data: bytes) -> dict[str, Any] | None:
    return None


def _resolve_final_amount(resp_json: dict[str, Any], query_route) -> float:
    """Resolve final billing amount from configured usage paths.

    Priority:
    1) `usage_jsonpaths` (sum all valid numeric values)
    2) legacy `usage_jsonpath` (single value)
    """
    usage_paths = list(query_route.usage_jsonpaths or [])
    if not usage_paths and query_route.usage_jsonpath:
        usage_paths = [query_route.usage_jsonpath]

    total = 0.0
    for path in usage_paths:
        raw = proxy.jsonpath_get(resp_json, path)
        if raw is None:
            continue
        try:
            total += float(raw)
        except (TypeError, ValueError):
            continue

    return total


def _extract_usage_tokens(resp_json: dict[str, Any]) -> tuple[int, int]:
    usage = resp_json.get("usage")
    if not isinstance(usage, dict):
        return 0, 0

    input_tokens = _as_int(usage.get("input_tokens"))
    if input_tokens == 0:
        input_tokens = _as_int(usage.get("prompt_tokens"))

    output_tokens = _as_int(usage.get("output_tokens"))
    if output_tokens == 0:
        output_tokens = _as_int(usage.get("completion_tokens"))

    return input_tokens, output_tokens


def _extract_usage_tokens_from_submit_stream(resp_body: bytes) -> tuple[int, int]:
    """Extract usage tokens from the final SSE chunk in a submit stream response."""
    if not resp_body:
        return 0, 0

    input_tokens = 0
    output_tokens = 0
    for raw_line in resp_body.splitlines():
        line = raw_line.decode("utf-8", errors="replace").strip()
        if not line.startswith("data:"):
            continue
        payload_str = line[5:].strip()
        if not payload_str or payload_str == "[DONE]":
            continue
        try:
            payload = json.loads(payload_str)
        except (json.JSONDecodeError, ValueError):
            continue
        if isinstance(payload, dict):
            in_tokens, out_tokens = _extract_usage_tokens(payload)
            if in_tokens or out_tokens:
                input_tokens, output_tokens = in_tokens, out_tokens

    return input_tokens, output_tokens


def _as_int(value: Any) -> int:
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        return int(value)
    if isinstance(value, str):
        try:
            return int(float(value))
        except ValueError:
            return 0
    return 0


def _proxy_response(
    data: dict[str, Any],
    proxy_call_id: str | None,

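The extraction helpers added above are pure functions; a short sketch of their fallback rules, importing them the same way the tests later in this diff do:

```python
from app.gateway.routers.third_party import (
    _as_int,
    _extract_usage_tokens,
    _extract_usage_tokens_from_submit_stream,
)

# Token extraction prefers input_tokens/output_tokens, then falls back to the
# OpenAI-style prompt_tokens/completion_tokens; _as_int coerces numeric strings.
assert _extract_usage_tokens({"usage": {"prompt_tokens": "123", "completion_tokens": 45}}) == (123, 45)
assert _as_int("88.0") == 88

# For streaming submits, the last SSE chunk carrying a usage object wins.
body = (
    b'data: {"choices":[{"delta":{"content":"hi"}}]}\n\n'
    b'data: {"usage":{"input_tokens":7,"output_tokens":3}}\n\n'
    b"data: [DONE]\n\n"
)
assert _extract_usage_tokens_from_submit_stream(body) == (7, 3)
```
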
@@ -10,6 +10,7 @@ from __future__ import annotations

import logging
from datetime import datetime, timedelta
from typing import Any

import httpx

@@ -28,6 +29,8 @@ async def reserve(
    operation: str,
    frozen_amount: float,
    frozen_type: int | None,
    frozen_token: int = 0,
    request_payload: dict[str, Any] | None = None,
) -> str | None:
    """Reserve billing before forwarding a submit call.

@@ -44,19 +47,25 @@ async def reserve(
        )
        return None

    resolved_frozen_type = frozen_type if frozen_type is not None else cfg.frozen_type
    expire_at = datetime.now() + timedelta(seconds=cfg.default_expire_seconds)
-   payload = {
+   payload: dict[str, Any] = {
        "sessionId": thread_id,
        "callId": call_id,
-       "modelName": provider,
+       "modelName": _extract_model_name(request_payload) or provider,
        "question": f"skill invokes {operation.split('/')[-1]}",
-       "frozenAmount": frozen_amount,
-       "frozenType": frozen_type if frozen_type is not None else cfg.frozen_type,
-       "estimatedInputTokens": 0,
-       "estimatedOutputTokens": 0,
+       "frozenType": resolved_frozen_type,
        "expireAt": expire_at.strftime("%Y-%m-%d %H:%M:%S"),
    }

+   if resolved_frozen_type == 1:
+       payload["estimatedInputTokens"] = int(frozen_token)
+       payload["estimatedOutputTokens"] = int(frozen_token)
+   else:
+       payload["frozenAmount"] = frozen_amount
+       payload["estimatedInputTokens"] = 0
+       payload["estimatedOutputTokens"] = 0

    logger.info(
        "[ThirdPartyProxy][Billing] reserve request: url=%s call_id=%s provider=%s thread_id=%s",
        cfg.reserve_url,

@@ -114,6 +123,8 @@ async def finalize(
    frozen_id: str,
    final_amount: float,
    finalize_reason: str,
    usage_input_tokens: int = 0,
    usage_output_tokens: int = 0,
) -> bool:
    """Finalize billing after a third-party call reaches a terminal state.

@@ -135,9 +146,9 @@ async def finalize(
    payload = {
        "frozenId": frozen_id,
        "finalAmount": final_amount,
-       "usageInputTokens": 0,
-       "usageOutputTokens": 0,
-       "usageTotalTokens": 0,
+       "usageInputTokens": usage_input_tokens,
+       "usageOutputTokens": usage_output_tokens,
+       "usageTotalTokens": usage_input_tokens + usage_output_tokens,
        "finalizeReason": finalize_reason,
    }

@@ -188,3 +199,12 @@ def _is_success(data: dict) -> bool:
    if isinstance(status, int) and status in _SUCCESS_STATUS_CODES:
        return True
    return data.get("success") is True


def _extract_model_name(request_payload: dict[str, Any] | None) -> str | None:
    if not isinstance(request_payload, dict):
        return None
    model = request_payload.get("model")
    if isinstance(model, str) and model:
        return model
    return None

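For reference, the two reserve payload shapes the branch above produces — a hedged sketch with invented values; the field names are the ones built in this hunk:

```python
# Illustrative only: shapes of the reserve() payload after the frozenType branch.
token_based = {  # frozenType == 1: reserve by estimated tokens
    "sessionId": "thread-1",
    "callId": "call-abc",
    "modelName": "qwen-max",        # taken from request_payload["model"] when present
    "question": "skill invokes chat-completions",
    "frozenType": 1,
    "expireAt": "2025-01-01 00:05:00",
    "estimatedInputTokens": 32768,  # both estimates come from frozen_token
    "estimatedOutputTokens": 32768,
}

amount_based = {  # any other frozenType: reserve by monetary amount
    "sessionId": "thread-1",
    "callId": "call-def",
    "modelName": "runninghub",      # falls back to the provider name
    "question": "skill invokes turbo-lora",
    "frozenType": 2,
    "expireAt": "2025-01-01 00:05:00",
    "frozenAmount": 0.03,
    "estimatedInputTokens": 0,
    "estimatedOutputTokens": 0,
}
```
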
@@ -27,6 +27,7 @@ class CallRecord:
    # call_id is sent to the billing platform (callId in reserve payload)
    call_id: str
    frozen_id: str | None = None
    frozen_type: int | None = None
    provider_task_id: str | None = None
    billing_state: BillingState = "UNRESERVED"
    task_state: TaskState = "PENDING"

@@ -109,16 +110,18 @@ class CallLedger:
    def get_by_idempotency_key(self, provider: str, idempotency_key: str) -> CallRecord | None:
        return self._get_by_idem_key_locked(provider, idempotency_key)

-   def set_reserved(self, proxy_call_id: str, frozen_id: str) -> None:
+   def set_reserved(self, proxy_call_id: str, frozen_id: str, frozen_type: int | None = None) -> None:
        with self._lock:
            record = self._records.get(proxy_call_id)
            if record:
                record.frozen_id = frozen_id
                record.frozen_type = frozen_type
                record.billing_state = "RESERVED"
                logger.info(
-                   "[ThirdPartyProxy][Ledger] reserved: proxy_call_id=%s frozen_id=%s",
+                   "[ThirdPartyProxy][Ledger] reserved: proxy_call_id=%s frozen_id=%s frozen_type=%s",
                    proxy_call_id,
                    frozen_id,
                    frozen_type,
                )
        # logger.debug(
        #     "[ThirdPartyProxy][Ledger] reserve state: call_id=%s provider=%s task_state=%s",

@@ -2,6 +2,7 @@

from __future__ import annotations

import json
import logging
import os
from typing import Any

@@ -17,16 +18,7 @@ from deerflow.config.third_party_proxy_config import (

logger = logging.getLogger(__name__)

-_SENSITIVE_HEADERS = frozenset(
-    [
-        "authorization",
-        "proxy-authorization",
-        "x-api-key",
-        "api-key",
-        "cookie",
-        "set-cookie",
-    ]
-)
+API_KEY_MARKER = "__API_KEY_MARKER__"

# ---------------------------------------------------------------------------
# Provider config lookup

@@ -154,17 +146,6 @@ _STRIP_RESPONSE_HEADERS = frozenset(
)


-def _sanitize_headers(headers: dict[str, str]) -> dict[str, str]:
-    """Return a copy of headers with sensitive values redacted."""
-    sanitized: dict[str, str] = {}
-    for key, value in headers.items():
-        if key.lower() in _SENSITIVE_HEADERS:
-            sanitized[key] = "***"
-        else:
-            sanitized[key] = value
-    return sanitized
-
-
def _preview_body(data: bytes, limit: int = 2048) -> str:
    """Return a safe textual preview of body bytes for debugging logs."""
    if not data:

@@ -176,6 +157,53 @@ def _preview_body(data: bytes, limit: int = 2048) -> str:
    return text


def _replace_api_key_marker_in_headers(headers: dict[str, str], api_key: str) -> dict[str, str]:
    """Replace API key marker placeholders in header values."""
    replaced: dict[str, str] = {}
    for key, value in headers.items():
        if isinstance(value, str) and API_KEY_MARKER in value:
            replaced[key] = value.replace(API_KEY_MARKER, api_key)
        else:
            replaced[key] = value
    return replaced


def _header_value(headers: dict[str, str], key: str) -> str | None:
    target = key.lower()
    for h_key, h_val in headers.items():
        if h_key.lower() == target:
            return h_val
    return None


def _replace_api_key_marker_in_json(data: Any, api_key: str) -> Any:
    if isinstance(data, str):
        return data.replace(API_KEY_MARKER, api_key)
    if isinstance(data, list):
        return [_replace_api_key_marker_in_json(item, api_key) for item in data]
    if isinstance(data, dict):
        return {k: _replace_api_key_marker_in_json(v, api_key) for k, v in data.items()}
    return data


def _replace_api_key_marker_in_body(headers: dict[str, str], body: bytes, api_key: str) -> bytes:
    """Replace API key marker in JSON body payloads only."""
    if not body:
        return body

    content_type = _header_value(headers, "content-type") or ""
    if "application/json" not in content_type.lower():
        return body

    try:
        parsed = json.loads(body)
    except (json.JSONDecodeError, ValueError):
        return body

    replaced = _replace_api_key_marker_in_json(parsed, api_key)
    return json.dumps(replaced, ensure_ascii=False, separators=(",", ":")).encode("utf-8")


async def forward_request(
    *,
    provider_config: ThirdPartyProviderConfig,

@@ -202,6 +230,9 @@ async def forward_request(
    if provider_config.api_key_env:
        api_key = os.getenv(provider_config.api_key_env)
        if api_key:
            # Dependency-injection style: replace marker placeholders first.
            forward_headers = _replace_api_key_marker_in_headers(forward_headers, api_key)
            body = _replace_api_key_marker_in_body(forward_headers, body, api_key)
            forward_headers[provider_config.api_key_header] = provider_config.api_key_prefix + api_key
        else:
            logger.warning(

@@ -212,7 +243,7 @@ async def forward_request(
    logger.info("[ThirdPartyProxy] → %s %s", method, target_url)
    logger.debug(
        "[ThirdPartyProxy] request headers=%s",
-       _sanitize_headers(forward_headers)
+       forward_headers,
    )
    logger.debug(
        "[ThirdPartyProxy] request body(%dB)=%s",

@@ -236,7 +267,7 @@ async def forward_request(
    logger.info("[ThirdPartyProxy] ← %s %s %d", method, target_url, response.status_code)
    logger.debug(
        "[ThirdPartyProxy] response headers=%s",
-       _sanitize_headers(response_headers)
+       response_headers,
    )
    logger.debug(
        "[ThirdPartyProxy] response body(%dB)=%s",

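A short sketch of the marker-replacement semantics (imports as in the proxy tests later in this diff): header values are always scanned, while bodies are rewritten only for JSON content types:

```python
from app.gateway.third_party_proxy.proxy import (
    API_KEY_MARKER,
    _replace_api_key_marker_in_body,
    _replace_api_key_marker_in_headers,
)

headers = {"Authorization": f"Bearer {API_KEY_MARKER}", "Content-Type": "text/plain"}
body = f'{{"apiKey": "{API_KEY_MARKER}"}}'.encode()

# Header values are always scanned for the marker...
assert _replace_api_key_marker_in_headers(headers, "real-key")["Authorization"] == "Bearer real-key"

# ...but bodies pass through untouched unless Content-Type is application/json,
# so binary or non-JSON uploads are never corrupted.
assert _replace_api_key_marker_in_body(headers, body, "real-key") == body

json_headers = {"Content-Type": "application/json"}
assert b"real-key" in _replace_api_key_marker_in_body(json_headers, body, "real-key")
```
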
@@ -2,10 +2,12 @@ import logging

from langchain.agents import create_agent
from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware
from langchain_core.messages.human import HumanMessage
from langchain_core.runnables import RunnableConfig

from deerflow.agents.lead_agent.prompt import apply_prompt_template
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware
from deerflow.agents.middlewares.artifact_reconcile_middleware import ArtifactReconcileMiddleware
from deerflow.agents.middlewares.loop_detection_middleware import LoopDetectionMiddleware
from deerflow.agents.middlewares.message_timestamp_middleware import MessageTimestampMiddleware
from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware

@@ -23,6 +25,15 @@ from deerflow.models import create_chat_model

logger = logging.getLogger(__name__)

SUMMARY_MESSAGE_TITLE = "以下是目前对话的摘要:"


class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
    """Summarization middleware with DeerFlow's user-facing summary heading."""

    def _build_new_messages(self, summary: str) -> list[HumanMessage]:
        return [HumanMessage(content=f"{SUMMARY_MESSAGE_TITLE}\n\n{summary}")]


def _resolve_model_name(requested_model_name: str | None = None) -> str:
    """Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""

@@ -78,7 +89,7 @@ def _create_summarization_middleware() -> SummarizationMiddleware | None:
    if config.summary_prompt is not None:
        kwargs["summary_prompt"] = config.summary_prompt

-   return SummarizationMiddleware(**kwargs)
+   return DeerFlowSummarizationMiddleware(**kwargs)


def _create_todo_list_middleware(is_plan_mode: bool) -> TodoMiddleware | None:

@@ -234,6 +245,9 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
    if get_app_config().token_usage.enabled:
        middlewares.append(TokenUsageMiddleware())

    # Reconcile stale artifact entries against real outputs files.
    middlewares.append(ArtifactReconcileMiddleware())

    # Stamp every conversation message with backend timestamp metadata.
    middlewares.append(MessageTimestampMiddleware())

@@ -0,0 +1,117 @@
import logging
from pathlib import Path
from typing import NotRequired, override

from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime

from deerflow.agents.thread_state import (
    ARTIFACTS_REPLACE_SENTINEL,
    ThreadDataState,
)
from deerflow.config.paths import VIRTUAL_PATH_PREFIX

logger = logging.getLogger(__name__)

_OUTPUTS_VIRTUAL_PREFIX = f"{VIRTUAL_PATH_PREFIX}/outputs/"
_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH = _OUTPUTS_VIRTUAL_PREFIX.lstrip("/")


class ArtifactReconcileState(AgentState):
    """Compatible with the `ThreadState` schema."""

    artifacts: NotRequired[list[str] | None]
    thread_data: NotRequired[ThreadDataState | None]


class ArtifactReconcileMiddleware(AgentMiddleware[ArtifactReconcileState]):
    """Keep artifact state aligned with files currently in outputs."""

    state_schema = ArtifactReconcileState

    def _to_outputs_file(self, virtual_path: str, outputs_dir: Path) -> Path | None:
        stripped = virtual_path.lstrip("/")
        if not stripped.startswith(_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH):
            # Keep non-outputs paths untouched; this middleware is for outputs drift.
            return None

        relative = stripped[len(_OUTPUTS_VIRTUAL_PREFIX_NO_LEADING_SLASH) :]
        if not relative:
            return None

        candidate = (outputs_dir / relative).resolve()
        try:
            candidate.relative_to(outputs_dir)
        except ValueError:
            return None
        return candidate

    def _to_virtual_artifact(self, actual_path: Path, outputs_dir: Path) -> str | None:
        try:
            relative = actual_path.resolve().relative_to(outputs_dir)
        except ValueError:
            return None
        return f"{_OUTPUTS_VIRTUAL_PREFIX}{relative.as_posix()}"

    def _discover_outputs(self, outputs_dir: Path) -> list[str]:
        if not outputs_dir.is_dir():
            return []

        discovered: list[str] = []
        for path in sorted(outputs_dir.rglob("*")):
            if not path.is_file():
                continue
            virtual_path = self._to_virtual_artifact(path, outputs_dir)
            if virtual_path:
                discovered.append(virtual_path)
        return discovered

    @override
    def before_model(
        self,
        state: ArtifactReconcileState,
        runtime: Runtime,  # noqa: ARG002
    ) -> dict | None:
        artifacts = state.get("artifacts") or []
        thread_data = state.get("thread_data") or {}
        outputs_path = thread_data.get("outputs_path")
        if not outputs_path:
            return None

        outputs_dir = Path(outputs_path).resolve()
        kept: list[str] = []
        changed = False

        for artifact in artifacts:
            if not isinstance(artifact, str):
                changed = True
                continue
            if artifact == ARTIFACTS_REPLACE_SENTINEL:
                changed = True
                continue

            actual_path = self._to_outputs_file(artifact, outputs_dir)
            if actual_path is None:
                kept.append(artifact)
                continue

            if actual_path.exists() and actual_path.is_file():
                kept.append(artifact)
            else:
                changed = True
                logger.info(
                    "Reconciled stale artifact from state: virtual=%s outputs_dir=%s",
                    artifact,
                    outputs_dir,
                )

        discovered = self._discover_outputs(outputs_dir)
        merged = list(dict.fromkeys([*kept, *discovered]))
        if merged != kept:
            changed = True

        if not changed:
            return None

        return {"artifacts": [ARTIFACTS_REPLACE_SENTINEL, *merged]}

@@ -174,6 +174,7 @@ def _extract_run_id(request: ModelRequest) -> str | None:  # noqa: ARG001

def _reserve_failure_message(status_code: int | None) -> str:
    if status_code in _blocking_reserve_code_set():
        # TODO: Move the billing error copy into i18n resources and return it per language.
        return "The account balance is insufficient for this model call."
    return "Billing reservation failed. Please try again later."

@@ -1,5 +1,6 @@
"""Middleware for intercepting clarification requests and presenting them to the user."""

import json
import logging
from collections.abc import Callable
from typing import override

@@ -35,6 +36,28 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):

    state_schema = ClarificationMiddlewareState

    def _normalize_options(self, options: object) -> list[str]:
        """Normalize clarification options into a list of display strings."""
        if options is None:
            return []

        if isinstance(options, list):
            return [str(option) for option in options]

        if isinstance(options, str):
            stripped = options.strip()
            if not stripped:
                return []
            try:
                parsed = json.loads(stripped)
            except json.JSONDecodeError:
                return [stripped]
            if isinstance(parsed, list):
                return [str(option) for option in parsed]
            return [str(parsed)]

        return [str(options)]

    def _is_chinese(self, text: str) -> bool:
        """Check if text contains Chinese characters.

@@ -58,7 +81,7 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
        question = args.get("question", "")
        clarification_type = args.get("clarification_type", "missing_info")
        context = args.get("context")
-       options = args.get("options", [])
+       options = self._normalize_options(args.get("options"))

        # Type-specific icons
        type_icons = {

@@ -84,7 +107,7 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
        message_parts.append(f"{icon} {question}")

        # Add options in a cleaner format
-       if options and len(options) > 0:
+       if options:
            message_parts.append("")  # blank line for spacing
            for i, option in enumerate(options, 1):
                message_parts.append(f"  {i}. {option}")

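A sketch of `_normalize_options` behavior, assuming the middleware is constructible without arguments as in the agent wiring above:

```python
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware

mw = ClarificationMiddleware()

# Lists pass through (stringified), JSON-encoded strings are decoded,
# plain strings become a single option, and None yields no options.
assert mw._normalize_options(["A", 2]) == ["A", "2"]
assert mw._normalize_options('["yes", "no"]') == ["yes", "no"]
assert mw._normalize_options("just text") == ["just text"]
assert mw._normalize_options(None) == []
```
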
@@ -2,6 +2,8 @@ from typing import Annotated, NotRequired, TypedDict

from langchain.agents import AgentState

ARTIFACTS_REPLACE_SENTINEL = "__deerflow_replace_artifacts__"


class SandboxState(TypedDict):
    sandbox_id: NotRequired[str | None]

@@ -20,12 +22,22 @@ class ViewedImageData(TypedDict):

def merge_artifacts(existing: list[str] | None, new: list[str] | None) -> list[str]:
    """Reducer for artifacts list - merges and deduplicates artifacts."""
    def _clean(values: list[str] | None) -> list[str]:
        if not values:
            return []
        return [v for v in values if isinstance(v, str) and v != ARTIFACTS_REPLACE_SENTINEL]

    cleaned_existing = _clean(existing)
    cleaned_new = _clean(new)

    if new and new[0] == ARTIFACTS_REPLACE_SENTINEL:
        return list(dict.fromkeys(cleaned_new))
    if existing is None:
-       return new or []
+       return cleaned_new
    if new is None:
-       return existing
+       return cleaned_existing
    # Use dict.fromkeys to deduplicate while preserving order
-   return list(dict.fromkeys(existing + new))
+   return list(dict.fromkeys(cleaned_existing + cleaned_new))


def merge_viewed_images(existing: dict[str, ViewedImageData] | None, new: dict[str, ViewedImageData] | None) -> dict[str, ViewedImageData]:

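The reducer's two modes in one sketch (the dedicated tests appear later in this diff):

```python
from deerflow.agents.thread_state import ARTIFACTS_REPLACE_SENTINEL, merge_artifacts

# Default behavior: order-preserving merge with deduplication.
assert merge_artifacts(["a.md"], ["a.md", "b.md"]) == ["a.md", "b.md"]

# When an update leads with the sentinel, it replaces instead of merging,
# and the sentinel itself never survives into the stored state.
assert merge_artifacts(["a.md"], [ARTIFACTS_REPLACE_SENTINEL, "b.md"]) == ["b.md"]
```
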
@@ -28,6 +28,11 @@ class SubmitRouteConfig(BaseModel):
        default=None,
        description="Optional route-level override for billing reserve payload frozenType",
    )
    frozen_token: int | None = Field(
        default=None,
        ge=0,
        description="Optional route-level override for billing reserve payload estimatedInputTokens/estimatedOutputTokens when frozenType=1",
    )


class QueryRouteConfig(BaseModel):

@@ -56,6 +61,14 @@ class QueryRouteConfig(BaseModel):
            "E.g. usage.thirdPartyConsumeMoney"
        ),
    )
    usage_jsonpaths: list[str] = Field(
        default_factory=list,
        description=(
            "Optional list of dot-paths into the response body to extract monetary costs and sum them. "
            "When set, values from all valid paths are added together. "
            "Example: [\"usage.thirdPartyConsumeMoney\", \"usage.consumeMoney\"]"
        ),
    )


class ThirdPartyProviderConfig(BaseModel):

@@ -88,6 +101,11 @@ class ThirdPartyProviderConfig(BaseModel):
        default=None,
        description="Billing frozen type for this provider (frozenType). If omitted, falls back to billing.frozen_type",
    )
    frozen_token: int = Field(
        default=0,
        ge=0,
        description="Estimated token amount used for reserve payload when frozenType=1",
    )
    submit_routes: list[SubmitRouteConfig] = Field(
        default_factory=list,
        description="Route patterns that identify submit (task-create) requests",

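A hedged construction sketch for the new route-level override; fields not set here keep their defaults, and any further required fields of the model are assumed to be defaulted:

```python
from deerflow.config.third_party_proxy_config import SubmitRouteConfig

# A token-billed route: frozen_type=1 switches the reserve payload to
# estimatedInputTokens/estimatedOutputTokens taken from frozen_token.
route = SubmitRouteConfig(
    path_pattern="/compatible-mode/v1/chat/completions",
    task_id_jsonpath="id",
    frozen_type=1,
    frozen_token=32768,
)
assert route.frozen_token == 32768
```
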
@@ -0,0 +1,111 @@
from types import SimpleNamespace

from deerflow.agents.middlewares.artifact_reconcile_middleware import (
    ArtifactReconcileMiddleware,
)
from deerflow.agents.thread_state import ARTIFACTS_REPLACE_SENTINEL


def test_before_model_prunes_missing_outputs_artifacts(tmp_path):
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")

    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": [
            "/mnt/user-data/outputs/keep.md",
            "/mnt/user-data/outputs/missing.md",
        ],
    }

    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))

    assert result == {
        "artifacts": [ARTIFACTS_REPLACE_SENTINEL, "/mnt/user-data/outputs/keep.md"]
    }


def test_before_model_returns_none_when_no_changes(tmp_path):
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")

    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": ["/mnt/user-data/outputs/keep.md"],
    }

    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))

    assert result is None


def test_before_model_adds_unpresented_outputs_files(tmp_path):
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    existing = outputs_dir / "keep.md"
    existing.write_text("ok", encoding="utf-8")
    extra = outputs_dir / "extra.md"
    extra.write_text("ok", encoding="utf-8")

    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": ["/mnt/user-data/outputs/keep.md"],
    }

    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))

    assert result == {
        "artifacts": [
            ARTIFACTS_REPLACE_SENTINEL,
            "/mnt/user-data/outputs/keep.md",
            "/mnt/user-data/outputs/extra.md",
        ]
    }


def test_before_model_discovers_outputs_when_artifacts_empty(tmp_path):
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    report = outputs_dir / "report.md"
    report.write_text("ok", encoding="utf-8")

    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": [],
    }

    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))

    assert result == {
        "artifacts": [ARTIFACTS_REPLACE_SENTINEL, "/mnt/user-data/outputs/report.md"]
    }


def test_before_model_drops_leaked_replace_sentinel(tmp_path):
    outputs_dir = tmp_path / "outputs"
    outputs_dir.mkdir()
    keep = outputs_dir / "keep.md"
    keep.write_text("ok", encoding="utf-8")

    middleware = ArtifactReconcileMiddleware()
    state = {
        "thread_data": {"outputs_path": str(outputs_dir)},
        "artifacts": [
            ARTIFACTS_REPLACE_SENTINEL,
            "/mnt/user-data/outputs/keep.md",
        ],
    }

    result = middleware.before_model(state, runtime=SimpleNamespace(context={}))

    assert result == {
        "artifacts": [ARTIFACTS_REPLACE_SENTINEL, "/mnt/user-data/outputs/keep.md"]
    }

@@ -130,3 +130,43 @@ def test_get_artifact_compat_fallback_for_dash_spacing(tmp_path, monkeypatch) ->

    assert bytes(response.body).decode("utf-8") == "ok"
    assert response.media_type == "text/markdown"


def test_list_reference_files_returns_outputs_and_uploads(tmp_path, monkeypatch) -> None:
    outputs_dir = tmp_path / "outputs"
    uploads_dir = tmp_path / "uploads"
    outputs_dir.mkdir()
    uploads_dir.mkdir()
    (outputs_dir / "notes.md").write_text("hello", encoding="utf-8")
    (outputs_dir / "figures").mkdir()
    (outputs_dir / "figures" / "plot.png").write_bytes(b"png")
    (uploads_dir / "dataset.csv").write_text("a,b\n1,2\n", encoding="utf-8")
    (uploads_dir / "skill").mkdir()
    (uploads_dir / "skill" / "internal.txt").write_text("hidden", encoding="utf-8")

    class _FakePaths:
        def sandbox_outputs_dir(self, _thread_id: str) -> Path:
            return outputs_dir

        def sandbox_uploads_dir(self, _thread_id: str) -> Path:
            return uploads_dir

    monkeypatch.setattr(artifacts_router, "get_paths", lambda: _FakePaths())

    app = FastAPI()
    app.include_router(artifacts_router.router)

    with TestClient(app) as client:
        response = client.get("/api/threads/thread-1/artifacts/list")

    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 3
    by_path = {item["virtual_path"]: item for item in payload["files"]}

    assert "/mnt/user-data/outputs/notes.md" in by_path
    assert "/mnt/user-data/outputs/figures/plot.png" in by_path
    assert "/mnt/user-data/uploads/dataset.csv" in by_path
    assert "/mnt/user-data/uploads/skill/internal.txt" not in by_path
    assert by_path["/mnt/user-data/outputs/notes.md"]["source"] == "artifact"
    assert by_path["/mnt/user-data/uploads/dataset.csv"]["source"] == "upload"

@@ -147,7 +147,8 @@ def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch
    )

    captured: dict[str, object] = {}
-   fake_model = object()
+   fake_model = MagicMock()
+   fake_model._llm_type = "test-chat"

    def _fake_create_chat_model(*, name=None, thinking_enabled, reasoning_effort=None):
        captured["name"] = name

@@ -156,10 +157,20 @@ def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch
        return fake_model

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
-   monkeypatch.setattr(lead_agent_module, "SummarizationMiddleware", lambda **kwargs: kwargs)

    middleware = lead_agent_module._create_summarization_middleware()

    assert captured["name"] == "model-masswork"
    assert captured["thinking_enabled"] is False
-   assert middleware["model"] is fake_model
+   assert isinstance(middleware, lead_agent_module.DeerFlowSummarizationMiddleware)
+   assert middleware.model is fake_model


def test_deerflow_summarization_middleware_uses_chinese_summary_title():
    middleware = lead_agent_module.DeerFlowSummarizationMiddleware(
        model=MagicMock(),
        trigger=("messages", 2),
    )

    messages = middleware._build_new_messages("旧上下文")

    assert messages[0].content == "以下是目前对话的摘要:\n\n旧上下文"

@@ -3,8 +3,16 @@

from __future__ import annotations

from app.gateway.third_party_proxy.ledger import CallLedger
from app.gateway.routers.third_party import (
    _extract_usage_tokens,
    _extract_usage_tokens_from_submit_stream,
    _resolve_final_amount,
)
from app.gateway.third_party_proxy.proxy import (
    API_KEY_MARKER,
    _path_matches,
    _replace_api_key_marker_in_body,
    _replace_api_key_marker_in_headers,
    jsonpath_get,
    match_query_route,
    match_submit_route,

@@ -99,6 +107,7 @@ _PROVIDER_CFG = ThirdPartyProviderConfig(
            success_values=["SUCCESS"],
            failure_values=["FAILED", "CANCELLED"],
            usage_jsonpath="usage.thirdPartyConsumeMoney",
            usage_jsonpaths=["usage.thirdPartyConsumeMoney", "usage.consumeMoney"],
        )
    ],
)

@@ -190,3 +199,94 @@ class TestCallLedger:
        ledger.update_response(rec.proxy_call_id, {"result": "ok"})
        found = ledger.get(rec.proxy_call_id)
        assert found.last_response == {"result": "ok"}


class TestResolveFinalAmount:
    def test_sum_multiple_usage_paths(self):
        route = QueryRouteConfig(
            path_pattern="/openapi/v2/query",
            request_task_id_jsonpath="taskId",
            status_jsonpath="status",
            success_values=["SUCCESS"],
            failure_values=["FAILED"],
            usage_jsonpaths=["usage.thirdPartyConsumeMoney", "usage.consumeMoney"],
        )
        resp_json = {
            "usage": {
                "thirdPartyConsumeMoney": None,
                "consumeMoney": "0.099",
            }
        }
        amount = _resolve_final_amount(resp_json, route)
        assert amount == 0.099

    def test_fallback_to_legacy_single_usage_path(self):
        route = QueryRouteConfig(
            path_pattern="/openapi/v2/query",
            request_task_id_jsonpath="taskId",
            status_jsonpath="status",
            success_values=["SUCCESS"],
            failure_values=["FAILED"],
            usage_jsonpath="usage.thirdPartyConsumeMoney",
        )
        resp_json = {"usage": {"thirdPartyConsumeMoney": "1.5"}}
        amount = _resolve_final_amount(resp_json, route)
        assert amount == 1.5


class TestExtractUsageTokens:
    def test_prefers_openai_usage_keys(self):
        resp_json = {
            "usage": {
                "prompt_tokens": 123,
                "completion_tokens": 45,
            }
        }
        input_tokens, output_tokens = _extract_usage_tokens(resp_json)
        assert input_tokens == 123
        assert output_tokens == 45

    def test_supports_generic_usage_keys(self):
        resp_json = {
            "usage": {
                "input_tokens": "88",
                "output_tokens": "12",
            }
        }
        input_tokens, output_tokens = _extract_usage_tokens(resp_json)
        assert input_tokens == 88
        assert output_tokens == 12


class TestExtractUsageTokensFromSubmitStream:
    def test_extracts_usage_from_final_sse_chunk(self):
        body = (
            b'data: {"id":"x","choices":[{"delta":{"content":"hello"}}]}\n\n'
            b'data: {"id":"x","choices":[],"usage":{"prompt_tokens":22,"completion_tokens":17}}\n\n'
            b'data: [DONE]\n\n'
        )
        input_tokens, output_tokens = _extract_usage_tokens_from_submit_stream(body)
        assert input_tokens == 22
        assert output_tokens == 17

    def test_returns_zero_when_no_usage_found(self):
        body = b'data: {"id":"x","choices":[{"delta":{"content":"hello"}}]}\n\n'
        input_tokens, output_tokens = _extract_usage_tokens_from_submit_stream(body)
        assert input_tokens == 0
        assert output_tokens == 0


class TestApiKeyMarkerReplacement:
    def test_replace_marker_in_headers(self):
        headers = {"Authorization": f"Bearer {API_KEY_MARKER}", "Content-Type": "application/json"}
        replaced = _replace_api_key_marker_in_headers(headers, "real-key")
        assert replaced["Authorization"] == "Bearer real-key"

    def test_replace_marker_in_json_body(self):
        headers = {"Content-Type": "application/json"}
        body = (
            b'{"apiKey":"__API_KEY_MARKER__","nested":{"token":"Bearer __API_KEY_MARKER__"}}'
        )
        replaced = _replace_api_key_marker_in_body(headers, body, "real-key")
        assert b'"apiKey":"real-key"' in replaced
        assert b'"token":"Bearer real-key"' in replaced

@@ -0,0 +1,66 @@
from deerflow.agents.thread_state import (
    ARTIFACTS_REPLACE_SENTINEL,
    merge_artifacts,
)


def test_merge_artifacts_default_merge_dedup():
    existing = ["/mnt/user-data/outputs/a.md", "/mnt/user-data/outputs/b.md"]
    new = ["/mnt/user-data/outputs/b.md", "/mnt/user-data/outputs/c.md"]

    result = merge_artifacts(existing, new)

    assert result == [
        "/mnt/user-data/outputs/a.md",
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
    ]


def test_merge_artifacts_supports_replace_sentinel():
    existing = ["/mnt/user-data/outputs/a.md", "/mnt/user-data/outputs/b.md"]
    new = [
        ARTIFACTS_REPLACE_SENTINEL,
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
        "/mnt/user-data/outputs/c.md",
    ]

    result = merge_artifacts(existing, new)

    assert result == [
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
    ]


def test_merge_artifacts_always_strips_sentinel_from_existing():
    existing = [
        "/mnt/user-data/outputs/a.md",
        ARTIFACTS_REPLACE_SENTINEL,
        "/mnt/user-data/outputs/b.md",
    ]

    result = merge_artifacts(existing, None)

    assert result == [
        "/mnt/user-data/outputs/a.md",
        "/mnt/user-data/outputs/b.md",
    ]


def test_merge_artifacts_strips_sentinel_from_non_replace_payload():
    existing = ["/mnt/user-data/outputs/a.md"]
    new = [
        "/mnt/user-data/outputs/b.md",
        ARTIFACTS_REPLACE_SENTINEL,
        "/mnt/user-data/outputs/c.md",
    ]

    result = merge_artifacts(existing, new)

    assert result == [
        "/mnt/user-data/outputs/a.md",
        "/mnt/user-data/outputs/b.md",
        "/mnt/user-data/outputs/c.md",
    ]

@@ -56,7 +56,7 @@ billing:
# third-party async task APIs such as RunningHub.

third_party_proxy:
-  enabled: false
+  enabled: true
  providers:
    runninghub:
      base_url: https://www.runninghub.cn

@@ -64,35 +64,53 @@ third_party_proxy:
      api_key_header: Authorization
      api_key_prefix: "Bearer "
      timeout_seconds: 30.0
-     frozen_amount: 10.0
-     frozen_type: 2
      submit_routes:
-       - path_pattern: "/openapi/v2/**"
-         exclude_path_pattern: "/openapi/v2/query"
-         task_id_jsonpath: "taskId"
-         # Optional per-model billing override examples:
-         # frozen_amount: 10.0
-         # frozen_type: 2
-
-         # Example: model-specific reserve policy
-         # - path_pattern: "/openapi/v2/rhart-image/z-image/turbo-lora"
-         #   task_id_jsonpath: "taskId"
-         #   frozen_amount: 10.0
-         #   frozen_type: 2
-         # - path_pattern: "/openapi/v2/vidu/text-to-video-q3-turbo"
-         #   task_id_jsonpath: "taskId"
-         #   frozen_amount: 50.0
-         #   frozen_type: 2
-         # - path_pattern: "/openapi/v2/wan-2.7/image-edit"
-         #   task_id_jsonpath: "taskId"
-         #   frozen_amount: 20.0
-         #   frozen_type: 2
+       - path_pattern: "/openapi/v2/rhart-image/z-image/turbo-lora"
+         task_id_jsonpath: "taskId"
+         frozen_amount: 0.03
+         frozen_type: 2
+       - path_pattern: "/openapi/v2/rhart-image-g-2/text-to-image"
+         task_id_jsonpath: "taskId"
+         frozen_amount: 0.2
+         frozen_type: 2
+       - path_pattern: "/openapi/v2/rhart-image-g-2/image-to-image"
+         task_id_jsonpath: "taskId"
+         frozen_amount: 0.2
+         frozen_type: 2
+       - path_pattern: "/openapi/v2/rhart-audio/text-to-audio/speech-2.8-turbo"
+         task_id_jsonpath: "taskId"
+         frozen_amount: 1.85
+         frozen_type: 2
+       - path_pattern: "/task/openapi/create"
+         task_id_jsonpath: "data.taskId"
+         frozen_amount: 2.0
+         frozen_type: 2
+       - path_pattern: "/openapi/v2/vidu/text-to-video-q3-turbo"
+         task_id_jsonpath: "taskId"
+         frozen_amount: 11.2
+         frozen_type: 2
      query_routes:
        - path_pattern: "/openapi/v2/query"
          request_task_id_jsonpath: "taskId"
          status_jsonpath: "status"
          success_values: ["SUCCESS"]
          failure_values: ["FAILED", "CANCELLED"]
-         usage_jsonpath: "usage.thirdPartyConsumeMoney"
+         usage_jsonpaths: ["usage.thirdPartyConsumeMoney", "usage.consumeMoney"]

+   dashscope:
+     base_url: https://dashscope.aliyuncs.com
+     api_key_env: DASHSCOPE_API_KEY
+     api_key_header: Authorization
+     api_key_prefix: "Bearer "
+     timeout_seconds: 60.0
+     frozen_token: 32768
+     submit_routes:
+       - path_pattern: "/compatible-mode/v1/chat/completions"
+         task_id_jsonpath: "id"
+         frozen_type: 1
+     query_routes: []

# ============================================================================
# Token Usage Tracking

@@ -83,6 +83,25 @@ Recommended checks:
- submit fallback when `taskId` is missing
- query loop timeout/failure handling

### Step 7: Clean API key instructions from skill.md

After migrating a skill to the gateway proxy, remove any user-facing instructions in `skill.md` that ask users to configure third-party provider keys (for example, `RUNNINGHUB_API_KEY`).

What to remove from `skill.md`:
- "Set `RUNNINGHUB_API_KEY` in .env"
- "Create an API key on the provider platform"
- Any step that tells users to pass `Authorization: Bearer ...`

What to keep/add in `skill.md`:
- Mention that third-party credentials are handled by gateway config
- Keep only skill runtime inputs (prompt, output path, optional style/duration)
- Optionally mention gateway-related vars if needed for local debugging (see the sketch after this section):
  - `DEER_FLOW_GATEWAY_URL`
  - `RUNNINGHUB_PROXY_PROVIDER`

Suggested replacement sentence:
- "This skill uses the DeerFlow Gateway third-party proxy. Provider credentials are configured centrally in the gateway and are not required in this skill's local `.env`."

## 4. Proxy Config Migration (config.yaml)

Configure submit/query routes under `third_party_proxy.providers.<provider>`.

@@ -193,6 +212,7 @@ For Docker-based sandbox execution, use:
5. Config extraction fields use shorthand dot-paths only.
6. Submit returns `taskId`, then query reaches `RUNNING/SUCCESS`.
7. Gateway logs show submit/query route hits and finalize flow.
8. `skill.md` no longer contains instructions to configure third-party API keys.

## 8. Reference Implementations

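A hypothetical sketch of what remains in a migrated skill: it talks only to the gateway, with no provider key of its own. The URL shape is an assumption for illustration; only the two environment variables are named by this guide:

```python
import os

import httpx

# Assumed: the gateway mounts the third-party proxy under the provider name.
gateway_url = os.environ["DEER_FLOW_GATEWAY_URL"]
provider = os.getenv("RUNNINGHUB_PROXY_PROVIDER", "runninghub")

# No Authorization header here: the gateway injects the API key (including
# any __API_KEY_MARKER__ placeholders) before forwarding upstream.
resp = httpx.post(
    f"{gateway_url}/{provider}/openapi/v2/rhart-image/z-image/turbo-lora",  # assumed route shape
    json={"prompt": "a red fox", "outputPath": "/mnt/user-data/outputs/fox.png"},
)
task_id = resp.json().get("taskId")
```
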
@@ -194,7 +194,7 @@ async function validateTokenRegistry() {
  const darkSeen = new Map();

  for (const [name, value] of entries) {
-   if (!/^ws-[0-9a-f]{6,8}$/.test(name)) {
+   if (!/^ws-[a-z0-9]+(?:-[a-z0-9]+)*$/.test(name)) {
      errors.push(`invalid token name "${name}"`);
    }
    const light = String(value.light ?? "").toLowerCase();

@@ -234,7 +234,7 @@ function collectWsVarsFromBlocks(css, selectorPattern) {
    const selector = block[1]?.trim() ?? "";
    const body = block[2] ?? "";
    if (!selectorPattern.test(selector)) continue;
-   for (const match of body.matchAll(/--ws-color-([0-9a-z]+)\s*:/g)) {
+   for (const match of body.matchAll(/--ws-color-([0-9a-z-]+)\s*:/g)) {
      vars.add(`ws-${match[1]}`);
    }
  }

@@ -246,7 +246,7 @@ function validateGlobalsCoverage(tokenEntries) {
  const rootVars = collectWsVarsFromBlocks(css, /(^|,)\s*:root(\s|,|$)/);
  const darkVars = collectWsVarsFromBlocks(css, /(^|,)\s*\.dark(\s|,|$)/);
  const inlineVars = new Set(
-   [...css.matchAll(/--color-ws-([0-9a-z]+)\s*:/g)].map((match) => `ws-${match[1]}`),
+   [...css.matchAll(/--color-ws-([0-9a-z-]+)\s*:/g)].map((match) => `ws-${match[1]}`),
  );
  const tokenNames = new Set(tokenEntries.map(([name]) => name));

@ -32,6 +32,7 @@ import { Tooltip } from "@/components/workspace/tooltip";
|
|||
import { useSpecificChatMode } from "@/components/workspace/use-chat-mode";
|
||||
import { Welcome } from "@/components/workspace/welcome";
|
||||
import { getAPIClient } from "@/core/api";
|
||||
import { sanitizeArtifactPaths } from "@/core/artifacts/utils";
|
||||
import { useI18n } from "@/core/i18n/hooks";
|
||||
import { POST_MESSAGE_TYPES, sendToParent } from "@/core/iframe-messages";
|
||||
import { useNotification } from "@/core/notification/hooks";
|
||||
|
|
@ -80,23 +81,30 @@ export default function ChatPage() {
|
|||
() => isNewThread && !safeThreadId,
|
||||
[isNewThread, safeThreadId],
|
||||
);
|
||||
const [isThreadInitReady, setIsThreadInitReady] = useState(false);
|
||||
|
||||
const streamThreadId = useMemo(() => {
|
||||
if (isNewThread && createNewSession) {
|
||||
if (!safeThreadId) {
|
||||
return undefined;
|
||||
}
|
||||
// In /new flow, defer history loading until thread init is finished:
|
||||
// delete -> create -> history.
|
||||
if (isNewThread && !isThreadInitReady) {
return undefined;
}
return safeThreadId;
}, [createNewSession, isNewThread, safeThreadId]);
}, [isNewThread, isThreadInitReady, safeThreadId]);
const apiClient = useMemo(() => getAPIClient(isMock), [isMock]);
const warnedMissingThreadIdRef = useRef(false);
const initializedThreadRef = useRef<string | null>(null);
const threadInitPromiseRef = useRef<Promise<void> | null>(null);

const { showNotification } = useNotification();
const currentSlogan = motivationSlogans[
sloganIndex % motivationSlogans.length
] ?? {
text: t.chatPage.defaultSlogan,
color: "var(--color-ws-333333)",
color: "var(--color-ws-fg-primary)",
};
const tickerCharacterList = useMemo(() => {
const seen = new Set<string>();

@@ -129,6 +137,7 @@ export default function ChatPage() {
useEffect(() => {
if (!isNewThread) {
warnedMissingThreadIdRef.current = false;
setIsThreadInitReady(true);
return;
}
if (!safeThreadId) {

@@ -136,29 +145,38 @@ export default function ChatPage() {
warnedMissingThreadIdRef.current = true;
toast.error(t.chatPage.missingThreadIdForCreate);
}
setIsThreadInitReady(false);
return;
}
warnedMissingThreadIdRef.current = false;
if (initializedThreadRef.current === safeThreadId) return;
initializedThreadRef.current = safeThreadId;
void apiClient.threads
// TODO: delete-then-create logic temporarily commented out
// .delete(safeThreadId)
// .catch(() => undefined)
// .then(() =>
// apiClient.threads.create({
// threadId: safeThreadId,
// ifExists: "raise",
// }),
// )
.create({
threadId: safeThreadId,
ifExists: "do_nothing",
setIsThreadInitReady(false);

const initPromise = apiClient.threads
.delete(safeThreadId)
.catch(() => undefined)
.then(() =>
apiClient.threads.create({
threadId: safeThreadId,
ifExists: "do_nothing",
}),
)
.then(() => {
setIsThreadInitReady(true);
})
.catch(() => {
initializedThreadRef.current = null;
setIsThreadInitReady(false);
toast.error(t.chatPage.createSessionFailed);
});

threadInitPromiseRef.current = initPromise;
void initPromise.finally(() => {
if (threadInitPromiseRef.current === initPromise) {
threadInitPromiseRef.current = null;
}
});
}, [
apiClient,
isNewThread,

@@ -210,6 +228,10 @@ export default function ChatPage() {
const result = thread.values?.title ?? "";
return result === "Untitled" ? "" : result;
}, [thread.values?.title]);
const sanitizedArtifacts = useMemo(
() => sanitizeArtifactPaths(thread.values.artifacts),
[thread.values.artifacts],
);

const [hasSubmitted, setHasSubmitted] = useState(false);
const [historyCutoff, setHistoryCutoff] = useState<number | null>(null);

@@ -258,21 +280,21 @@ export default function ChatPage() {

const [autoSelectFirstArtifact, setAutoSelectFirstArtifact] = useState(true);
useEffect(() => {
setArtifacts(thread.values.artifacts);
setArtifacts(sanitizedArtifacts);
if (
env.NEXT_PUBLIC_STATIC_WEBSITE_ONLY === "true" &&
autoSelectFirstArtifact
) {
if (thread?.values?.artifacts?.length > 0) {
if (sanitizedArtifacts.length > 0) {
setAutoSelectFirstArtifact(false);
selectArtifact(thread.values.artifacts[0]!);
selectArtifact(sanitizedArtifacts[0]!);
}
}
}, [
autoSelectFirstArtifact,
sanitizedArtifacts,
selectArtifact,
setArtifacts,
thread.values.artifacts,
]);

const artifactPanelOpen = useMemo(() => {

@@ -286,7 +308,7 @@ export default function ChatPage() {
const [showExitDialog, setShowExitDialog] = useState(false);
const isStreaming = isUploading || thread.isLoading;
const handleSubmit = useCallback(
(message: Parameters<typeof sendMessage>[1]) => {
async (message: Parameters<typeof sendMessage>[1]) => {
if (isSelectedSkillBootstrapping) {
return;
}

@@ -294,6 +316,12 @@ export default function ChatPage() {
toast.error(t.chatPage.missingThreadIdForSend);
return;
}
if (isNewThread && safeThreadId) {
await threadInitPromiseRef.current;
}
if (isNewThread && safeThreadId && !isThreadInitReady) {
return;
}
setHasSubmitted(true);
if (safeThreadId && (isNewThread || showWelcomeStyle)) {
router.replace(`/workspace/chats/${safeThreadId}?is_chatting=true`);

@@ -302,6 +330,7 @@ export default function ChatPage() {
},
[
isNewThread,
isThreadInitReady,
isSelectedSkillBootstrapping,
router,
safeThreadId,
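The pattern in these hunks is worth stating on its own: the effect stores the in-flight delete-then-create chain in `threadInitPromiseRef`, and the now-async submit handler awaits that shared promise before sending, so a fast first message can never race thread creation. A minimal standalone sketch of the same idea (the `initRef`/`ensureInit` names are illustrative, not from the codebase):

```ts
// Share one in-flight init promise between an initializer and any callers.
const initRef: { current: Promise<void> | null } = { current: null };

function ensureInit(
  threadId: string,
  create: (id: string) => Promise<void>,
): Promise<void> {
  // Reuse the pending promise so concurrent callers wait on the same work.
  initRef.current ??= create(threadId).finally(() => {
    initRef.current = null; // allow a retry after the promise settles
  });
  return initRef.current;
}

async function send(threadId: string, message: string) {
  await ensureInit(threadId, async (id) => {
    // delete + recreate the thread here
  });
  console.log(`sending to ${threadId}:`, message);
}
```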
@@ -357,7 +386,7 @@ export default function ChatPage() {
<Button
size="sm"
variant="ghost"
className="px-[10px] py-[5px] text-sm font-medium text-ws-150033 hover:text-ws-150033/80"
className="px-[10px] py-[5px] text-sm font-medium text-ws-base-1 hover:text-ws-base-1/80"
disabled={isStreaming}
onClick={() => setShowExitDialog(true)}
>

@@ -370,7 +399,7 @@ export default function ChatPage() {
>
<path
d="M3.5 10H13.25H15.6875H16.5M3.5 10L7.5625 6M3.5 10L7.5625 14"
className="text-ws-667085"
className="text-ws-text-muted"
stroke="currentColor"
strokeWidth="1.5"
strokeLinecap="round"

@@ -380,7 +409,7 @@ export default function ChatPage() {
</Button>
</div>
<div
className="flex items-center justify-center overflow-hidden text-sm font-bold font-medium whitespace-nowrap text-ws-333333"
className="flex items-center justify-center overflow-hidden text-sm font-bold font-medium whitespace-nowrap text-ws-fg-primary"
style={{
color: currentSlogan.color,
}}

@@ -400,7 +429,7 @@ export default function ChatPage() {
<div className="flex items-center justify-end gap-2 overflow-hidden">
{/* TodoList disabled */}
{/* <DevTodoList
className="bg-ws-ffffff"
className="bg-ws-surface-base"
todos={thread.values.todos ?? []}
hidden={
!thread.values.todos || thread.values.todos.length === 0

@@ -409,7 +438,7 @@ export default function ChatPage() {
<Button
size="sm"
variant="ghost"
className="h-full px-[10px] py-[5px] text-sm font-medium text-ws-150033 hover:text-ws-150033"
className="h-full px-[10px] py-[5px] text-sm font-medium text-ws-base-1 hover:text-ws-base-1"
>
<ListTodoIcon className="size-4" /> To-dos
</Button>

@@ -420,7 +449,7 @@ export default function ChatPage() {
<Tooltip content={t.chatPage.viewArtifactsTooltip}>
<Button
data-testid="artifacts-open-button"
className="text-ws-150033 hover:text-ws-150033/80"
className="text-ws-base-1 hover:text-ws-base-1/80"
variant="ghost"
onClick={() => {
setArtifactsOpen(true);

@@ -438,7 +467,7 @@ export default function ChatPage() {
className={cn(
"flex min-h-0 max-w-full grow flex-col",
showWelcomeStyle && !hasSubmitted
? "bg-ws-ffffff"
? "bg-ws-surface-base"
: "bg-background",
)}
>

@@ -492,7 +521,7 @@ export default function ChatPage() {
) : (
<div className="relative flex size-full justify-center px-[20px]">
<div className="z-30"></div>
{thread.values.artifacts?.length === 0 ? (
{sanitizedArtifacts.length === 0 ? (
<ConversationEmptyState
icon={<FilesIcon />}
title={t.chatPage.noArtifactSelectedTitle}

@@ -501,7 +530,7 @@ export default function ChatPage() {
) : (
<div className="flex size-full max-w-(--container-width-sm) flex-col justify-center">
<header className="flex shrink-0 items-center justify-between border-b">
<h2 className="h-[58px] text-sm leading-[58px] font-bold text-ws-333333">
<h2 className="h-[58px] text-sm leading-[58px] font-bold text-ws-fg-primary">
<span>{t.common.artifacts}</span>
</h2>
<Button

@@ -518,7 +547,7 @@ export default function ChatPage() {
<main className="min-h-0 grow overflow-auto">
<ArtifactFileList
className="mb-[207px] max-w-(--container-width-sm) pt-[20px]"
files={thread.values.artifacts ?? []}
files={sanitizedArtifacts}
threadId={threadId}
/>
</main>

@@ -549,7 +578,7 @@ export default function ChatPage() {
{!(showWelcomeStyle && thread.isThreadLoading) ? (
<>
<InputBox
className={cn("w-full rounded-[20px] bg-ws-fbfafc")}
className={cn("w-full rounded-[20px] bg-ws-surface-elevated")}
threadId={threadId}
showWelcomeStyle={showWelcomeStyle}
hasSubmitted={hasSubmitted}

@@ -609,14 +638,14 @@ export default function ChatPage() {
</p>
<DevDialogFooter>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={() => setShowExitDialog(false)}
>
{t.common.cancel}
</Button>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={async () => {
// If a response is still generating, abort it before exiting

@@ -628,14 +657,14 @@ export default function ChatPage() {
type: POST_MESSAGE_TYPES.IS_CHATTING,
isChatting: false,
});
resetNewSessionState();
// Always reuse the thread_id from the query.
const nextQuery = new URLSearchParams();
if (threadId && threadId !== "new") {
nextQuery.set("thread_id", threadId);
}
// /workspace/chats/${threadId}?is_chatting=false
router.replace(
`/workspace/chats/${threadId}?is_chatting=false`,
`/workspace/chats/new?thread_id=${threadId}`,
);
}}
>

@@ -665,7 +694,7 @@ export default function ChatPage() {
</p>
<DevDialogFooter singleColumn>
<Button
className="w-full bg-ws-f9f8fa hover:bg-ws-8e47f0 hover:text-primary-foreground"
className="w-full bg-ws-surface-subtle hover:bg-ws-interactive-primary hover:text-primary-foreground"
variant="ghost"
onClick={clearSelectedSkillError}
>
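Most of the hunks above are mechanical renames from hex-derived utility classes (`text-ws-150033`, `bg-ws-ffffff`, ...) to the new semantic tokens (`text-ws-base-1`, `bg-ws-surface-base`, ...). A rename of this shape is easy to script; here is a rough codemod sketch, not part of the PR itself, whose mapping covers only pairs visible in this diff:

```ts
// Hypothetical codemod: swap hex-named ws-* utility classes for semantic tokens.
const CLASS_TOKEN_MAP: Record<string, string> = {
  "ws-150033": "ws-base-1",
  "ws-333333": "ws-fg-primary",
  "ws-f9f8fa": "ws-surface-subtle",
  "ws-fbfafc": "ws-surface-elevated",
  "ws-8e47f0": "ws-interactive-primary",
  "ws-667085": "ws-text-muted",
  "ws-ffffff": "ws-surface-base",
};

export function migrateClassNames(source: string): string {
  // \b guards keep e.g. "ws-1500331a" from being clipped by "ws-150033";
  // sorting longest-first is an extra belt-and-braces measure.
  return Object.keys(CLASS_TOKEN_MAP)
    .sort((a, b) => b.length - a.length)
    .reduce(
      (text, hex) =>
        text.replaceAll(new RegExp(`\\b${hex}\\b`, "g"), CLASS_TOKEN_MAP[hex]!),
      source,
    );
}
```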
@@ -130,7 +130,7 @@ export default function WorkspaceLayout({
/* Gray rounded-rectangle container */
"rounded-[20px] border-none",
/* Light gray background, slightly translucent */
"bg-ws-999999! backdrop-blur-sm",
"bg-ws-overlay-neutral! backdrop-blur-sm",
/* Very light shadow */
"shadow-[0_2px_12px_0_rgba(0,0,0,0.18)]",
/* Padding: roomy and centered */
@@ -36,7 +36,7 @@ export const Message = ({
"group flex w-full flex-col gap-2",
from === "user"
? cn("is-user ml-auto justify-end", !isFirstInSession && "mt-6")
: "is-assistant rounded-[10px] bg-ws-ffffff p-4",
: "is-assistant rounded-[10px] bg-ws-surface-base p-4",
className,
)}
{...props}
@@ -352,7 +352,7 @@ export function PromptInputAttachment({
{/* Remove button - top right corner */}
<button
aria-label={t.common.removeAttachment}
className="absolute top-1.5 right-1.5 z-10 flex size-4 cursor-pointer items-center justify-center rounded-sm transition-colors hover:bg-ws-ffffff/20"
className="absolute top-1.5 right-1.5 z-10 flex size-4 cursor-pointer items-center justify-center rounded-sm transition-colors hover:bg-ws-surface-base/20"
onClick={(e) => {
e.stopPropagation();
if (onRemove) {

@@ -397,7 +397,7 @@ export function PromptInputAttachment({
{/* Close button - top right corner */}
<button
aria-label={t.common.removeAttachment}
className="absolute top-1 right-1 z-10 flex size-5 cursor-pointer items-center justify-center rounded bg-ws-ffffff/90 opacity-0 transition-opacity group-hover:opacity-100 hover:bg-ws-ffffff dark:bg-gray-800/90 dark:hover:bg-gray-800"
className="absolute top-1 right-1 z-10 flex size-5 cursor-pointer items-center justify-center rounded bg-ws-surface-base/90 opacity-0 transition-opacity group-hover:opacity-100 hover:bg-ws-surface-base dark:bg-gray-800/90 dark:hover:bg-gray-800"
onClick={(e) => {
e.stopPropagation();
if (onRemove) {
@@ -62,8 +62,8 @@ export const Suggestion = ({
<Button
className={cn(
"cursor-pointer rounded-full px-[20px] py-[15px] text-sm font-normal",
"border-none bg-ws-f9f8fa text-ws-667085",
"hover:bg-ws-fbfafc hover:text-ws-150033",
"border-none bg-ws-surface-subtle text-ws-text-muted",
"hover:bg-ws-surface-elevated hover:text-ws-base-1",
className,
)}
onClick={handleClick}
@@ -16,7 +16,7 @@ function ScrollArea({
return (
<ScrollAreaPrimitive.Root
data-slot="scroll-area"
className={cn("relative", className)}
className={cn("relative overflow-hidden", className)}
{...props}
>
<ScrollAreaPrimitive.Viewport
@@ -309,7 +309,7 @@ function SidebarInset({ className, ...props }: React.ComponentProps<"main">) {
<main
data-slot="sidebar-inset"
className={cn(
"relative flex w-full flex-1 flex-col",
"relative flex w-full flex-1 flex-col bg-ws-surface-base",
"md:peer-data-[variant=inset]:m-2 md:peer-data-[variant=inset]:ml-0 md:peer-data-[variant=inset]:rounded-xl md:peer-data-[variant=inset]:shadow-sm md:peer-data-[variant=inset]:peer-data-[state=collapsed]:ml-2",
className,
)}
@@ -430,7 +430,7 @@ export function ArtifactFileDetail({
type="single"
variant={null}
size="default"
className="h-[28px] bg-ws-ffffff"
className="bg-ws-surface-base h-[28px]"
value={viewMode}
onValueChange={(value) => {
if (value) {

@@ -721,7 +721,7 @@ export function ArtifactFileDetail({
</ArtifactHeader>
<ArtifactContent>
{/* Mask the extra scrollable area at the top */}
{/* <div className="absolute w-[calc(100%-40px)] bg-ws-ffffff z-20 h-5 rounded-t-[10px] top-[57px]"></div> */}
{/* <div className="absolute w-[calc(100%-40px)] bg-ws-surface-base z-20 h-5 rounded-t-[10px] top-[57px]"></div> */}
{previewable &&
viewMode === "preview" &&
(language === "markdown" || language === "html") && (

@@ -734,7 +734,7 @@ export function ArtifactFileDetail({
/>
)}
{isCodeFile && viewMode === "code" && (
<div className="mb-0 mb-[207px] min-h-full rounded-b-[10px] bg-ws-ffffff p-0">
<div className="bg-ws-surface-base mb-0 mb-[207px] min-h-full rounded-b-[10px] p-0">
<CodeEditor
className="size-full resize-none rounded-none border-none py-[20px]"
value={displayContent ?? ""}

@@ -917,7 +917,7 @@ export function ArtifactFilePreview({
if (language === "markdown") {
return (
<div
className={cn("mb-[207px] w-full bg-ws-ffffff p-[20px]")}
className={cn("bg-ws-surface-base mb-[207px] w-full p-[20px]")}
style={{ "--zoom-scale": zoomScale } as CSSProperties}
>
<Streamdown

@@ -974,7 +974,7 @@ function PreviewIframe({
{...props}
/>
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/85">
<div className="bg-ws-surface-base/85 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}

@@ -1046,7 +1046,7 @@ function ArtifactPdfPreview({

const pageWrapper = document.createElement("div");
pageWrapper.className =
"mx-auto mb-4 w-fit rounded-md border border-ws-e4e7ec bg-ws-ffffff p-2 shadow-sm";
"mx-auto mb-4 w-fit rounded-md border border-ws-line-default bg-ws-surface-base p-2 shadow-sm";

const canvas = document.createElement("canvas");
canvas.style.width = `${viewport.width}px`;

@@ -1089,8 +1089,13 @@ function ArtifactPdfPreview({

if (error) {
return (
<div className={cn("relative overflow-auto bg-ws-f9f8fa p-4", className)}>
<div className="mx-auto grid max-w-xl gap-3 rounded-md border border-ws-e4e7ec bg-ws-ffffff p-5 text-center">
<div
className={cn(
"bg-ws-surface-subtle relative overflow-auto p-4",
className,
)}
>
<div className="border-ws-line-default bg-ws-surface-base mx-auto grid max-w-xl gap-3 rounded-md border p-5 text-center">
<p className="text-sm font-medium break-all">{fileName}</p>
<p className="text-muted-foreground text-sm">{error}</p>
<a

@@ -1107,15 +1112,20 @@ function ArtifactPdfPreview({
}

return (
<div className={cn("relative overflow-auto bg-ws-f9f8fa p-4", className)}>
<div className="mb-3 text-center text-xs text-ws-667085">
<div
className={cn(
"bg-ws-surface-subtle relative overflow-auto p-4",
className,
)}
>
<div className="text-ws-text-muted mb-3 text-center text-xs">
{pageCount > 0
? t.artifactPreview.pageCountLabel(fileName, pageCount)
: fileName}
</div>
<div ref={containerRef} />
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/70">
<div className="bg-ws-surface-base/70 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}

@@ -1313,7 +1323,12 @@ function ArtifactOfficePreview({
}, [canRenderPptx, t.artifactPreview.pptxDownloadHint]);

return (
<div className={cn("relative h-full overflow-hidden bg-ws-ffffff", className)}>
<div
className={cn(
"bg-ws-surface-base relative h-full overflow-hidden",
className,
)}
>
{canRenderXlsx && sheetNames.length > 0 && (
<div className="border-border flex items-center gap-1 overflow-x-auto border-b p-2">
{sheetNames.map((sheetName) => (

@@ -1323,7 +1338,7 @@ function ArtifactOfficePreview({
className={cn(
"rounded px-4 py-3 text-xs whitespace-nowrap",
activeSheet === sheetName
? "bg-ws-1500331a text-foreground"
? "bg-ws-accent-tint-soft text-foreground"
: "text-muted-foreground hover:text-foreground",
)}
onClick={() => setActiveSheet(sheetName)}

@@ -1357,7 +1372,7 @@ function ArtifactOfficePreview({
/>
)}
{isLoading && (
<div className="absolute inset-0 z-10 flex items-center justify-center bg-ws-ffffff/85">
<div className="bg-ws-surface-base/85 absolute inset-0 z-10 flex items-center justify-center">
<LoaderIcon className="text-muted-foreground size-5 animate-spin" />
</div>
)}

@@ -1376,7 +1391,7 @@ function ArtifactPreviewFallback({
}) {
const { t } = useI18n();
return (
<div className="absolute inset-0 z-20 grid place-content-center bg-ws-ffffff p-6 text-center">
<div className="bg-ws-surface-base absolute inset-0 z-20 grid place-content-center p-6 text-center">
<p className="text-foreground mb-2 text-sm font-medium">{fileName}</p>
<p className="text-muted-foreground mb-3 text-xs">{message}</p>
<a

@@ -1400,9 +1415,23 @@ function rewriteArtifactImagePaths(
return content;
}

const encodeVirtualPath = (path: string) =>
path
.split("/")
.map((segment) => {
try {
return encodeURIComponent(decodeURIComponent(segment));
} catch {
return encodeURIComponent(segment);
}
})
.join("/");
const toArtifactUrl = (rawPath: string) => {
const normalizedPath = rawPath.startsWith("/") ? rawPath : `/${rawPath}`;
return resolveArtifactURL(normalizedPath, threadId);
const trimmedPath = rawPath.trim();
const normalizedPath = trimmedPath.startsWith("/")
? trimmedPath
: `/${trimmedPath}`;
return resolveArtifactURL(encodeVirtualPath(normalizedPath), threadId);
};
const toArtifactUrlFromRelative = (rawPath: string) => {
const trimmed = rawPath.trim();

@@ -1416,17 +1445,17 @@ function rewriteArtifactImagePaths(

const absolutePath = new URL(trimmed, `file://${baseDir}`).pathname;
if (!absolutePath.startsWith("/mnt/user-data/")) return null;
return resolveArtifactURL(absolutePath, threadId);
return resolveArtifactURL(encodeVirtualPath(absolutePath), threadId);
};

const markdownRewritten = content.replace(
/!\[([^\]]*)\]\(\s*(\/?mnt\/user-data\/outputs\/[^)\s]+)\s*\)/g,
/!\[([^\]]*)\]\(\s*(\/?mnt\/user-data\/(?:outputs|uploads)\/[^)]+?)\s*\)/g,
(_full, alt, rawPath) => {
return `![${alt}](${toArtifactUrl(rawPath)})`;
},
);
const markdownRelativeRewritten = markdownRewritten.replace(
/!\[([^\]]*)\]\(\s*([^) \t]+)\s*\)/g,
/!\[([^\]]*)\]\(\s*([^)]+?)\s*\)/g,
(_full, alt, rawPath) => {
const absoluteUrl = toArtifactUrlFromRelative(rawPath);
if (!absoluteUrl) {

@@ -1437,7 +1466,7 @@ function rewriteArtifactImagePaths(
);

const shorthandMarkdownRewritten = markdownRelativeRewritten.replace(
/!(?!\[)([^\n()()]+?)\s*[((]\s*(\/?mnt\/user-data\/outputs\/[^)\s)]+)\s*[))]/g,
/!(?!\[)([^\n()()]+?)\s*[((]\s*(\/?mnt\/user-data\/(?:outputs|uploads)\/[^))]+?)\s*[))]/g,
(_full, alt, rawPath) => {
return `![${alt}](${toArtifactUrl(rawPath)})`;
},

@@ -1446,7 +1475,7 @@ function rewriteArtifactImagePaths(
return shorthandMarkdownRewritten.replace(
/(<img\b[^>]*\bsrc\s*=\s*)(["'])([^"']+)\2/gi,
(_full, prefix, quote, rawPath) => {
if (/^\/?mnt\/user-data\/outputs\//.test(rawPath)) {
if (/^\/?mnt\/user-data\/(?:outputs|uploads)\//.test(rawPath)) {
return `${prefix}${quote}${toArtifactUrl(rawPath)}${quote}`;
}
const absoluteUrl = toArtifactUrlFromRelative(rawPath);

@@ -1559,34 +1588,34 @@ function buildArtifactViewerSrcDoc({
<meta name="viewport" content="width=device-width,initial-scale=1" />
<style>
:root {
--ws-color-f8f9fb: rgb(248 249 251);
--ws-color-ffffff: rgb(255 255 255);
--ws-color-0f172a: rgb(15 23 42);
--ws-color-667085: rgb(102 112 133);
--ws-color-e4e7ec: rgb(228 231 236);
--ws-color-f4f4f5: rgb(244 244 245);
--ws-color-000000: rgb(0 0 0);
--ws-color-2563eb: rgb(37 99 235);
--bg: var(--ws-color-f8f9fb);
--panel: var(--ws-color-ffffff);
--text: var(--ws-color-0f172a);
--muted: var(--ws-color-667085);
--line: var(--ws-color-e4e7ec);
--checker: var(--ws-color-f4f4f5);
--media-bg: var(--ws-color-000000);
--link: var(--ws-color-2563eb);
--ws-color-surface-app: rgb(248 249 251);
--ws-color-surface-base: rgb(255 255 255);
--ws-color-text-primary-strong: rgb(15 23 42);
--ws-color-text-muted: rgb(102 112 133);
--ws-color-line-default: rgb(228 231 236);
--ws-color-surface-checker: rgb(244 244 245);
--ws-color-black-solid: rgb(0 0 0);
--ws-color-info-primary: rgb(37 99 235);
--bg: var(--ws-color-surface-app);
--panel: var(--ws-color-surface-base);
--text: var(--ws-color-text-primary-strong);
--muted: var(--ws-color-text-muted);
--line: var(--ws-color-line-default);
--checker: var(--ws-color-surface-checker);
--media-bg: var(--ws-color-black-solid);
--link: var(--ws-color-info-primary);
--radius: 12px;
}
@media (prefers-color-scheme: dark) {
:root {
--ws-color-f8f9fb: rgb(32 36 44);
--ws-color-ffffff: rgb(42 39 49);
--ws-color-0f172a: rgb(230 234 242);
--ws-color-667085: rgb(152 162 179);
--ws-color-e4e7ec: rgb(58 61 69);
--ws-color-f4f4f5: rgb(44 47 56);
--ws-color-000000: rgb(0 0 0);
--ws-color-2563eb: rgb(127 178 255);
--ws-color-surface-app: rgb(32 36 44);
--ws-color-surface-base: rgb(42 39 49);
--ws-color-text-primary-strong: rgb(230 234 242);
--ws-color-text-muted: rgb(152 162 179);
--ws-color-line-default: rgb(58 61 69);
--ws-color-surface-checker: rgb(44 47 56);
--ws-color-black-solid: rgb(0 0 0);
--ws-color-info-primary: rgb(127 178 255);
}
}
* { box-sizing: border-box; }

@@ -1736,7 +1765,12 @@ export const ArtifactZoomSelector = ({
viewBox="0 0 16 16"
fill="none"
>
<circle cx="7.55558" cy="7.55534" r="6.16667" stroke="currentColor" />
<circle
cx="7.55558"
cy="7.55534"
r="6.16667"
stroke="currentColor"
/>
<path
d="M13.8688 15.4646C14.064 15.6598 14.3806 15.6598 14.5759 15.4646C14.7711 15.2693 14.7711 14.9527 14.5759 14.7574L14.2223 15.111L13.8688 15.4646ZM14.2223 15.111L14.5759 14.7574L11.9092 12.0908L11.5557 12.4443L11.2021 12.7979L13.8688 15.4646L14.2223 15.111Z"
fill="currentColor"
@@ -104,7 +104,7 @@ export function ArtifactFileList({
<div className="absolute top-5 left-4">
{getFileIcon(
file,
"size-9 stroke-1 text-ws-333333 stroke-current",
"size-9 stroke-1 text-ws-fg-primary stroke-current",
)}
</div>
<CardDescription className="pl-10 text-xs">

@@ -137,7 +137,7 @@ export function ArtifactFileList({
>
<Button
variant="ghost"
className="text-muted-foreground h-full! hover:bg-transparent! hover:text-ws-333333!"
className="text-muted-foreground h-full! hover:bg-transparent! hover:text-ws-fg-primary!"
>
<DownloadIcon className="size-4" />
{t.common.download}
@@ -5,6 +5,7 @@ import type { GroupImperativeHandle } from "react-resizable-panels";

import { ConversationEmptyState } from "@/components/ai-elements/conversation";
import { Button } from "@/components/ui/button";
import { sanitizeArtifactPaths } from "@/core/artifacts/utils";
import {
ResizableHandle,
ResizablePanel,

@@ -43,6 +44,10 @@ const ChatBox: React.FC<{
deselect,
selectedArtifact,
} = useArtifacts();
const sanitizedArtifacts = useMemo(
() => sanitizeArtifactPaths(thread.values.artifacts),
[thread.values.artifacts],
);

const [autoSelectFirstArtifact, setAutoSelectFirstArtifact] = useState(true);
useEffect(() => {

@@ -52,7 +57,7 @@ const ChatBox: React.FC<{
}

// Update artifacts from the current thread
setArtifacts(thread.values.artifacts);
setArtifacts(sanitizedArtifacts);

// DO NOT automatically deselect the artifact when switching threads, because artifact auto-discovery does not work right now.
// if (

@@ -66,19 +71,19 @@ const ChatBox: React.FC<{
env.NEXT_PUBLIC_STATIC_WEBSITE_ONLY === "true" &&
autoSelectFirstArtifact
) {
if (thread?.values?.artifacts?.length > 0) {
if (sanitizedArtifacts.length > 0) {
setAutoSelectFirstArtifact(false);
selectArtifact(thread.values.artifacts[0]!);
selectArtifact(sanitizedArtifacts[0]!);
}
}
}, [
threadId,
autoSelectFirstArtifact,
deselect,
sanitizedArtifacts,
selectArtifact,
selectedArtifact,
setArtifacts,
thread.values.artifacts,
]);

const artifactPanelOpen = useMemo(() => {

@@ -151,7 +156,7 @@ const ChatBox: React.FC<{
<XIcon />
</Button>
</div>
{thread.values.artifacts?.length === 0 ? (
{sanitizedArtifacts.length === 0 ? (
<ConversationEmptyState
icon={<FilesIcon />}
title={t.chatPage.noArtifactSelectedTitle}

@@ -167,7 +172,7 @@ const ChatBox: React.FC<{
<main className="min-h-0 grow">
<ArtifactFileList
className="max-w-(--container-width-sm) p-4 pt-12"
files={thread.values.artifacts ?? []}
files={sanitizedArtifacts}
threadId={threadId ?? ""}
/>
</main>
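Both ChatPage and ChatBox now compute the same `sanitizedArtifacts` memo before feeding artifacts into effects and lists; the memo matters because the thread state can carry the replace sentinel (see the `artifacts/utils` hunks later in this diff), and filtering once keeps effect dependencies referentially stable. A small illustrative extraction of that repeated pattern into a hook (the hook name is hypothetical, not in the PR):

```ts
import { useMemo } from "react";

// Sentinel value taken from @/core/artifacts/utils in this diff.
const ARTIFACTS_REPLACE_SENTINEL = "__deerflow_replace_artifacts__";

// Illustrative only: filter sentinel entries once and reuse the memoized
// array, so downstream effects don't re-run on every render.
export function useSanitizedArtifacts(artifacts: string[] | undefined) {
  return useMemo(
    () => (artifacts ?? []).filter((p) => p !== ARTIFACTS_REPLACE_SENTINEL),
    [artifacts],
  );
}
```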
@@ -34,7 +34,7 @@ export function DevTodoList({
<DropdownMenuTrigger asChild>{trigger}</DropdownMenuTrigger>
<DropdownMenuContent
className={cn(
"z-[100] rounded-[20px] bg-ws-ffffff p-5 shadow-[0_0_20px_0_rgba(0,0,0,0.20)]",
"z-[100] rounded-[20px] bg-ws-surface-base p-5 shadow-[0_0_20px_0_rgba(0,0,0,0.20)]",
className,
)}
align="start"
@@ -157,7 +157,7 @@ export function IframeTestPanel() {
<div
ref={panelRef}
className={cn(
"fixed z-[9999] w-72 rounded-xl border border-violet-200 bg-ws-ffffff/95 shadow-2xl backdrop-blur-sm",
"fixed z-[9999] w-72 rounded-xl border border-violet-200 bg-ws-surface-base/95 shadow-2xl backdrop-blur-sm",
position ? "top-0 left-0" : "bottom-24 left-3",
)}
style={position ? { left: position.x, top: position.y } : undefined}
@@ -70,6 +70,7 @@ import {
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Tag } from "@/components/ui/tag";
import { useReferenceFiles } from "@/core/artifacts/references";
import { urlOfArtifact } from "@/core/artifacts/utils";
import { useI18n } from "@/core/i18n/hooks";
import type { SelectedSkillPayloadItem } from "@/core/i18n/locales/types";

@@ -80,7 +81,6 @@ import {
MENTION_REFERENCE_EVENT,
type MentionReferenceEventDetail,
} from "@/core/threads/reference-events";
import { useUploadedFiles } from "@/core/uploads/hooks";
import { useIframeSkill } from "@/hooks/use-iframe-skill";
import { cn } from "@/lib/utils";

@@ -96,7 +96,6 @@ import {
import { Suggestion, Suggestions } from "../ai-elements/suggestion";
import { ScrollArea } from "../ui/scroll-area";

import { useThread } from "./messages/context";
import { ModeHoverGuide } from "./mode-hover-guide";
import { Tooltip } from "./tooltip";

@@ -149,7 +148,7 @@ function WorkspaceToolButton({
return (
<PromptInputButton
className={cn(
"group h-full rounded-[10px] p-[10px]! hover:bg-ws-f9f8fa hover:text-ws-8e47f0",
"group h-full rounded-[10px] p-[10px]! hover:bg-ws-surface-subtle hover:text-ws-interactive-primary",
className,
)}
{...props}

@@ -260,7 +259,6 @@ export function InputBox({
}),
[t],
);
const { thread } = useThread();
const searchParams = useSearchParams();
const iframeSkill = useIframeSkill({ threadId: threadIdFromProps });
const isInputDisabled = (disabled ?? false) || iframeSkill.isBootstrapping;

@@ -294,7 +292,7 @@ export function InputBox({
} | null>(null);
const [isInputToolsTourOpen, setIsInputToolsTourOpen] = useState(false);
const [isInputToolsTourReady, setIsInputToolsTourReady] = useState(false);
const { data: uploadedFilesData } = useUploadedFiles(threadIdFromProps);
const { data: referenceFilesData } = useReferenceFiles(threadIdFromProps);

// When isNewThread, disable collapsing and keep the box expanded (unless a message has been submitted)
const effectiveIsFocused =

@@ -439,49 +437,41 @@ export function InputBox({
);

const mentionCandidates = useMemo<MentionCandidate[]>(() => {
const artifactCandidates = (thread.values.artifacts ?? []).map((path) => {
const filename = path.split("/").pop() ?? path;
return {
key: `artifact:${path}`,
filename,
path,
pathTail: getPathTail(path),
ref_source: "artifact" as const,
ref_kind: "mention" as const,
typeLabel: referenceSourceLabels.artifact,
isImage: isImageFilename(filename),
previewUrl: threadId
const deduped = new Map<string, MentionCandidate>();
(referenceFilesData?.files ?? []).forEach((file) => {
const path = file.virtual_path || "";
const filename = file.filename ?? path.split("/").pop() ?? path;
const refSource = file.source === "upload" ? "upload" : "artifact";
const typeLabel =
refSource === "upload"
? referenceSourceLabels.upload
: referenceSourceLabels.artifact;
const previewUrl =
file.artifact_url ||
(threadId
? urlOfArtifact({
filepath: path,
threadId,
})
: undefined,
};
});
: undefined);

const uploadCandidates =
uploadedFilesData?.files.map((file) => ({
key: `upload:${file.virtual_path || file.filename}`,
filename: file.filename,
path: file.virtual_path,
pathTail: getPathTail(file.virtual_path),
ref_source: "upload" as const,
ref_kind: "mention" as const,
typeLabel: referenceSourceLabels.upload,
isImage: isImageFilename(file.filename),
previewUrl: file.artifact_url,
})) ?? [];

const deduped = new Map<string, MentionCandidate>();
[...artifactCandidates, ...uploadCandidates].forEach((candidate) => {
deduped.set(candidate.key, candidate);
deduped.set(`${refSource}:${path || filename}`, {
key: `${refSource}:${path || filename}`,
filename,
path,
pathTail: getPathTail(path),
ref_source: refSource,
ref_kind: "mention",
typeLabel,
isImage: isImageFilename(filename),
previewUrl,
});
});
return [...deduped.values()];
}, [
referenceFilesData?.files,
referenceSourceLabels.artifact,
referenceSourceLabels.upload,
thread.values.artifacts,
uploadedFilesData?.files,
threadId,
]);

@@ -889,12 +879,12 @@ export function InputBox({
textareaRef.current?.focus();
}}
>
<DropdownMenuLabel className="p-0 text-sm text-ws-333333">
<DropdownMenuLabel className="p-0 text-sm text-ws-fg-primary">
{t.inputBox.addReference}
</DropdownMenuLabel>
<DropdownMenuSeparator className="mx-0 mt-[20px] mb-0" />
<DropdownMenuGroup className="flex max-h-[480px] flex-col gap-[10px] px-0 pt-[20px]">
<ScrollArea className="h-[480px]" data-state="hidden">
<DropdownMenuGroup className="flex min-h-0 flex-col gap-[10px] px-0">
<ScrollArea className="h-[320px] pt-[20px]" hideScrollbar={false}>
{filteredMentionCandidates.map((candidate, index) => {
const detail = [candidate.typeLabel, candidate.pathTail]
.filter(Boolean)

@@ -980,6 +970,14 @@ export function InputBox({
/>
</div>
)}
{!showWelcomeStyle && (
<div className="shrink-0 h-full">
<ExitChattingButton
router={router}
threadId={threadIdFromProps}
/>
</div>
)}
<div ref={attachmentsButtonTourRef} className="shrink-0 h-full">
<AddAttachmentsButton />
</div>

@@ -1234,7 +1232,7 @@ function AddAttachmentsButton({ className }: { className?: string }) {
return (
<Tooltip content={t.inputBox.addAttachments}>
<WorkspaceToolButton
className={cn("text-ws-150033 hover:text-ws-8e47f0", className)}
className={cn("text-ws-base-1 hover:text-ws-interactive-primary", className)}
onClick={() => attachments.openFileDialog()}
>
<svg

@@ -1272,7 +1270,7 @@ function HistoryButton({
return (
<Tooltip content={t.inputBox.history}>
<WorkspaceToolButton
className={cn("text-ws-150033 hover:text-ws-8e47f0", className)}
className={cn("text-ws-base-1 hover:text-ws-interactive-primary", className)}
onClick={() =>
router.replace(`/workspace/chats/${threadId}?is_chatting=true`)
}

@@ -1302,6 +1300,53 @@ function HistoryButton({
</Tooltip>
);
}

function ExitChattingButton({
className,
router,
threadId,
}: {
className?: string;
router: AppRouterInstance;
threadId: string;
}) {
const { t } = useI18n();
return (
<Tooltip content={t.inputBox.welcome}>
<WorkspaceToolButton
className={cn(
"text-ws-base-1 hover:text-ws-interactive-primary",
className,
)}
onClick={() =>
router.replace(`/workspace/chats/${threadId}?is_chatting=false`)
}
>
<svg
className="transition-[color] duration-200"
width="18"
height="18"
viewBox="0 0 18 18"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<circle
className="stroke-current transition-[stroke] duration-200"
cx="9"
cy="9"
r="8.5"
/>
<path
className="stroke-current transition-[stroke] duration-200"
d="M6 9H12"
strokeLinecap="round"
strokeLinejoin="round"
/>
</svg>
</WorkspaceToolButton>
</Tooltip>
);
}
// Launches the iframeSkillDialog
function IframeSkillDialogButton({
className,

@@ -1330,7 +1375,7 @@ function IframeSkillDialogButton({
>
<svg
xmlns="http://www.w3.org/2000/svg"
className="size-4 text-ws-150033 transition-[color] duration-200 group-hover:text-ws-8e47f0"
className="size-4 text-ws-base-1 transition-[color] duration-200 group-hover:text-ws-interactive-primary"
viewBox="0 0 12 16"
fill="none"
>
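The rewritten `mentionCandidates` memo above folds both sources into one Map keyed by `source:path`, so a file that shows up as both an upload and an artifact yields a single entry, and the last write for a key wins. A stripped-down sketch of that dedup step with the types reduced to what the hunk shows:

```ts
type RefFile = {
  filename: string;
  virtual_path: string;
  source: "artifact" | "upload";
};
type Candidate = { key: string; filename: string; path: string };

function dedupeCandidates(files: RefFile[]): Candidate[] {
  const deduped = new Map<string, Candidate>();
  for (const file of files) {
    const path = file.virtual_path || "";
    const filename = file.filename || path.split("/").pop() || path;
    const key = `${file.source}:${path || filename}`;
    deduped.set(key, { key, filename, path }); // last writer wins per key
  }
  return [...deduped.values()];
}
```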
@@ -1,6 +1,6 @@
"use client";

import { CheckIcon, CopyIcon } from "lucide-react";
import { CheckIcon, CopyIcon, DownloadIcon } from "lucide-react";
import { useCallback, useMemo, useState, type MouseEvent } from "react";
import type {
AnchorHTMLAttributes,

@@ -56,27 +56,57 @@ function toMarkdownTable(data: TableData): string {
return [headerLine, dividerLine, ...rowLines].join("\n");
}

function escapeCsvCell(value: string): string {
if (!/[",\n\r]/.test(value)) return value;
return `"${value.replaceAll('"', '""')}"`;
}

function toCsvTable(data: TableData): string {
if (data.headers.length === 0) return "";
return [data.headers, ...data.rows]
.map((row) => row.map(escapeCsvCell).join(","))
.join("\n");
}

function downloadCsvFile(content: string, filename: string) {
const blob = new Blob(["\uFEFF", content], {
type: "text/csv;charset=utf-8",
});
const url = URL.createObjectURL(blob);
const anchor = document.createElement("a");
anchor.href = url;
anchor.download = filename;
anchor.click();
URL.revokeObjectURL(url);
}

function MarkdownTable({
className,
children,
isLoading,
copyLabel,
downloadLabel,
...props
}: ComponentPropsWithoutRef<"table"> & {
isLoading: boolean;
copyLabel: string;
downloadLabel: string;
}) {
const [copied, setCopied] = useState(false);

const getTableData = useCallback((event: MouseEvent<HTMLButtonElement>) => {
const wrapper = event.currentTarget.closest(
'[data-streamdown="table-wrapper"]',
);
const table = wrapper?.querySelector("table");
if (!(table instanceof HTMLTableElement)) return null;
return parseTableData(table);
}, []);

const handleCopy = useCallback(
async (event: MouseEvent<HTMLButtonElement>) => {
const wrapper = event.currentTarget.closest(
'[data-streamdown="table-wrapper"]',
);
const table = wrapper?.querySelector("table");
if (!(table instanceof HTMLTableElement)) return;
const data = getTableData(event);
if (!data) return;

const markdown = toMarkdownTable(parseTableData(table));
const markdown = toMarkdownTable(data);
if (!markdown) return;

try {

@@ -87,7 +117,20 @@ function MarkdownTable({
// no-op
}
},
[],
[getTableData],
);

const handleDownload = useCallback(
(event: MouseEvent<HTMLButtonElement>) => {
const data = getTableData(event);
if (!data) return;

const csv = toCsvTable(data);
if (!csv) return;

downloadCsvFile(csv, "table.csv");
},
[getTableData],
);

return (

@@ -97,14 +140,21 @@ function MarkdownTable({
>
<div className="flex items-center justify-end gap-1">
<button
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all disabled:cursor-not-allowed disabled:opacity-50"
disabled={isLoading}
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all"
onClick={handleCopy}
title={copyLabel}
type="button"
>
{copied ? <CheckIcon size={14} /> : <CopyIcon size={14} />}
</button>
<button
className="text-muted-foreground hover:text-foreground cursor-pointer p-1 transition-all"
onClick={handleDownload}
title={downloadLabel}
type="button"
>
<DownloadIcon size={14} />
</button>
</div>
<div className="overflow-x-auto">
<table

@@ -165,7 +215,7 @@ export function MarkdownContent({
<MarkdownTable
className={className}
copyLabel={t.clipboard.copyToClipboard}
isLoading={isLoading}
downloadLabel={t.common.download}
{...props}
>
{children}

@@ -173,7 +223,12 @@ export function MarkdownContent({
),
...componentsFromProps,
};
}, [componentsFromProps, isLoading, t.clipboard.copyToClipboard]);
}, [
componentsFromProps,
isLoading,
t.clipboard.copyToClipboard,
t.common.download,
]);

if (!content) return null;
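`escapeCsvCell` above follows RFC 4180 quoting: only cells containing quotes, commas, or line breaks get wrapped, and embedded quotes are doubled; the `"\uFEFF"` BOM prepended in `downloadCsvFile` is what lets Excel detect the file as UTF-8. A quick check of the escaping rules, as a standalone sketch mirroring the hunk:

```ts
function escapeCsvCell(value: string): string {
  if (!/[",\n\r]/.test(value)) return value; // plain cells pass through
  return `"${value.replaceAll('"', '""')}"`; // quote, doubling embedded quotes
}

console.log(escapeCsvCell("plain"));       // plain
console.log(escapeCsvCell("a,b"));         // "a,b"
console.log(escapeCsvCell('say "hi"'));    // "say ""hi"""
console.log(escapeCsvCell("line\nbreak")); // quoted, newline kept inside quotes
```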
@@ -114,7 +114,7 @@ export function MessageGroup({
);
return (
<ChainOfThought
className={cn("w-full gap-2 rounded-lg bg-ws-ffffff", className)}
className={cn("w-full gap-2 rounded-lg bg-ws-surface-base", className)}
open={true}
>
{aboveLastToolCallSteps.length > 0 && (
@@ -27,8 +27,11 @@ import {
import { resolveArtifactURL } from "@/core/artifacts/utils";
import { useI18n } from "@/core/i18n/hooks";
import {
extractSummaryTemplateBody,
extractContentFromMessage,
normalizeHumanMessageDisplayText,
extractReasoningContentFromMessage,
isSummaryTemplateMessage,
parseUploadedFiles,
stripPriorityHintSuffix,
stripUploadedFilesTag,

@@ -36,7 +39,6 @@ import {
} from "@/core/messages/utils";
import { useRehypeSplitWordsIntoSpans } from "@/core/rehype";
import { materializeSkillYaml } from "@/core/skills";
import { humanMessagePlugins } from "@/core/streamdown";
import { dispatchMentionReference } from "@/core/threads/reference-events";
import { cn } from "@/lib/utils";

@@ -139,6 +141,7 @@ function MessageContent_({
isLoading?: boolean;
threadId: string;
}) {
const { t } = useI18n();
const rehypePlugins = useRehypeSplitWordsIntoSpans(isLoading);
const isHuman = message.type === "human";
const components = useMemo(

@@ -167,12 +170,23 @@ function MessageContent_({

const contentToDisplay = useMemo(() => {
if (isHuman) {
return rawContent
? stripPriorityHintSuffix(stripUploadedFilesTag(rawContent))
: "";
if (!rawContent) {
return "";
}
const cleaned = stripPriorityHintSuffix(stripUploadedFilesTag(rawContent));
return normalizeHumanMessageDisplayText(cleaned);
}
return rawContent ?? "";
}, [rawContent, isHuman]);
const isSummaryMessage = useMemo(
() => isHuman && isSummaryTemplateMessage(message),
[isHuman, message],
);
const summaryBody = useMemo(
() => (isSummaryMessage ? extractSummaryTemplateBody(message) : ""),
[isSummaryMessage, message],
);
const [isSummaryExpanded, setIsSummaryExpanded] = useState(false);

const filesList =
files && files.length > 0 && threadId ? (

@@ -208,20 +222,42 @@ function MessageContent_({
}

if (isHuman) {
const shouldRenderSummaryCollapse = isSummaryMessage && summaryBody;
const messageResponse = contentToDisplay ? (
<AIElementMessageResponse
remarkPlugins={humanMessagePlugins.remarkPlugins}
rehypePlugins={humanMessagePlugins.rehypePlugins}
components={components}
>
<div className="whitespace-break-spaces break-words">
{contentToDisplay}
</div>
</AIElementMessageResponse>
) : null;
return (
<div className={cn("ml-auto flex flex-col gap-2", className)}>
{filesList}
{shouldRenderSummaryCollapse && (
<details
className="w-fit max-w-full rounded-lg border"
open={isSummaryExpanded}
onToggle={(event) => {
setIsSummaryExpanded(event.currentTarget.open);
}}
>
<summary className="text-muted-foreground cursor-pointer px-3 py-2 text-xs select-none">
{isSummaryExpanded
? t.toolCalls.collapseContent
: t.toolCalls.expandContent}
</summary>
<AIElementMessageContent className="w-fit border-t">
<div className="whitespace-break-spaces break-words">
{summaryBody}
</div>
</AIElementMessageContent>
</details>
)}
{messageResponse && (
<AIElementMessageContent className="w-fit">
<AIElementMessageContent
className={cn(
"w-fit",
shouldRenderSummaryCollapse ? "hidden" : undefined,
)}
>
{messageResponse}
</AIElementMessageContent>
)}
@@ -225,7 +225,7 @@ export function MessageList({
{showScrollToBottomButton && (
<ConversationScrollButton
className={cn(
"z-20 rounded-full border bg-ws-ffffff/90 shadow-sm backdrop-blur-sm",
"z-20 rounded-full border bg-ws-surface-base/90 shadow-sm backdrop-blur-sm",
scrollButtonClassName,
)}
title={t.chats.scrollToBottom}
@@ -157,7 +157,7 @@ function ThemePreviewCard({
"relative overflow-hidden rounded-md border text-xs transition-colors",
previewMode === "dark"
? "border-neutral-800 bg-neutral-900 text-neutral-200"
: "border-slate-200 bg-ws-ffffff text-slate-900",
: "border-slate-200 bg-ws-surface-base text-slate-900",
)}
>
<div className="border-border/50 flex items-center gap-2 border-b px-3 py-2">
@@ -14,19 +14,19 @@ export function StreamingIndicator({
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100",
)}
/>
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100 [animation-delay:0.2s]",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100 [animation-delay:0.2s]",
)}
/>
<div
className={cn(
dotSize,
"animate-bouncing rounded-full bg-ws-a3a1a1 opacity-100 [animation-delay:0.4s]",
"animate-bouncing rounded-full bg-ws-icon-muted opacity-100 [animation-delay:0.4s]",
)}
/>
</div>
@@ -39,7 +39,7 @@ export function TodoList({
return (
<div
className={cn(
"flex h-fit w-full origin-bottom translate-y-4 flex-col overflow-hidden rounded-t-xl border border-b-0 bg-ws-ffffff backdrop-blur-sm transition-all duration-200 ease-out",
"flex h-fit w-full origin-bottom translate-y-4 flex-col overflow-hidden rounded-t-xl border border-b-0 bg-ws-surface-base backdrop-blur-sm transition-all duration-200 ease-out",
hidden ? "pointer-events-none translate-y-8 opacity-0" : "",
className,
)}
@@ -43,7 +43,7 @@ export function WorkspaceHeader({ className }: { className?: string }) {
) : (
<div className="text-primary ml-2 cursor-default font-serif">
{/* TODO: test build marker */}
XClaw <span className="text-sm text-ws-000000c5">v3.2.8</span>
XClaw <span className="text-sm text-ws-text-subtle-strong">v3.2.8</span>
</div>
)}
<SidebarTrigger />
@@ -0,0 +1,37 @@
import { useQuery } from "@tanstack/react-query";

import { getBackendBaseURL } from "../config";

export type ReferenceFileInfo = {
  filename: string;
  size: string;
  virtual_path: string;
  artifact_url: string;
  source: "artifact" | "upload";
};

type ListReferenceFilesResponse = {
  files: ReferenceFileInfo[];
  count: number;
};

async function listReferenceFiles(
  threadId: string,
): Promise<ListReferenceFilesResponse> {
  const response = await fetch(
    `${getBackendBaseURL()}/api/threads/${threadId}/artifacts/list`,
  );
  if (!response.ok) {
    throw new Error("Failed to list reference files");
  }
  return response.json();
}

export function useReferenceFiles(threadId: string | undefined) {
  return useQuery({
    queryKey: ["references", "list", threadId],
    queryFn: () => listReferenceFiles(threadId ?? ""),
    enabled: Boolean(threadId),
    refetchOnWindowFocus: false,
  });
}
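This new hook gates on `enabled: Boolean(threadId)`, so callers can pass an undefined id during route transitions without firing a request against `/artifacts/list`. A hedged usage sketch (the component name is illustrative, not from the PR):

```tsx
// Illustrative consumer: render the current @-referencable files for a thread.
function ReferenceFileList({ threadId }: { threadId?: string }) {
  const { data, isLoading } = useReferenceFiles(threadId);
  if (!threadId || isLoading) return null;
  return (
    <ul>
      {(data?.files ?? []).map((file) => (
        <li key={`${file.source}:${file.virtual_path}`}>
          {file.filename} ({file.source}, {file.size} bytes)
        </li>
      ))}
    </ul>
  );
}
```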
@@ -1,6 +1,8 @@
import { getBackendBaseURL } from "../config";
import type { AgentThread } from "../threads";

const ARTIFACTS_REPLACE_SENTINEL = "__deerflow_replace_artifacts__";

export function urlOfArtifact({
  filepath,
  threadId,

@@ -19,9 +21,13 @@ export function urlOfArtifact({
}

export function extractArtifactsFromThread(thread: AgentThread) {
  return thread.values.artifacts ?? [];
  return sanitizeArtifactPaths(thread.values.artifacts);
}

export function resolveArtifactURL(absolutePath: string, threadId: string) {
  return `${getBackendBaseURL()}/api/threads/${threadId}/artifacts${absolutePath}`;
}

export function sanitizeArtifactPaths(paths: string[] | undefined | null) {
  return (paths ?? []).filter((path) => path !== ARTIFACTS_REPLACE_SENTINEL);
}
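`resolveArtifactURL` is plain string concatenation, which is why callers elsewhere in this diff pre-encode with `encodeVirtualPath`: a raw path with spaces or CJK characters would otherwise produce an invalid URL. A quick illustration, with the base URL value assumed for the example:

```ts
// Illustrative: resolveArtifactURL concatenates, so encode before calling.
const getBackendBaseURL = () => "http://localhost:8000"; // assumed value

function resolveArtifactURL(absolutePath: string, threadId: string) {
  return `${getBackendBaseURL()}/api/threads/${threadId}/artifacts${absolutePath}`;
}

console.log(resolveArtifactURL("/mnt/user-data/outputs/report.md", "t-123"));
// -> http://localhost:8000/api/threads/t-123/artifacts/mnt/user-data/outputs/report.md
console.log(resolveArtifactURL("/mnt/user-data/outputs/%E6%8A%A5%E5%91%8A.md", "t-123"));
// a pre-encoded path (see encodeVirtualPath earlier) stays a valid URL
```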
@@ -86,6 +86,7 @@ export const enUS: Translations = {
"Please note, this feature will consume tokens. Ensure your account balance is greater than 200 credits.",
addAttachments: "Add attachments",
history: "History",
welcome: "Welcome",
selectSkill: "Select Skill",
mode: "Mode",
flashMode: "Flash",
@@ -75,6 +75,7 @@ export interface Translations {
createSkillPrompt: string;
addAttachments: string;
history: string;
welcome: string;
selectSkill: string;
mode: string;
flashMode: string;
@@ -87,6 +87,7 @@ export const zhCN: Translations = {
"请注意,此功能将消耗token,请保证账户余额大于200可学豆。",
addAttachments: "添加附件",
history: "历史记录",
welcome: "欢迎页",
selectSkill: "选择Skill",
mode: "模式",
flashMode: "闪速",

@@ -134,13 +135,13 @@ export const zhCN: Translations = {
suggestion: "GPT-Image-2",
prompt: "编写[项目/功能]的需求文档,包含功能描述、用户故事和验收标准。",
icon: CompassIcon,
children: [{ id: "6107", name: "GPT-Image-2" }],
children: [{ id: "6130", name: "GPT-Image-2" }],
},
{
suggestion: "音乐生成",
prompt: "编写[产品/功能]的使用指南,包含操作步骤、注意事项和常见问题。",
icon: GraduationCapIcon,
children: [{ id: "6126", name: "旋律制造机" }],
children: [{ id: "6133", name: "音乐生成器" }],
},
{
suggestion: "excel数据处理",

@@ -262,7 +263,7 @@ export const zhCN: Translations = {
noArtifactSelectedDescription: "请选择一个生成文件以查看详情",
exitDialogTitle: "提示",
exitDialogDescription:
"历史记录每七天自动删除,现在将返回欢迎页,是否继续?",
"每七天自动删除。现在将返回欢迎页且清空聊天消息,是否继续?",
exitDialogConfirm: "确定",
selectedSkillLoadFailed: "技能加载失败",
unknownErrorRetry: "发生了未知错误,请稍后重试。",
@@ -26,6 +26,47 @@ type MessageGroup =
| AssistantClarificationGroup
| AssistantSubagentGroup;

const SUMMARY_MESSAGE_TITLES = [
  "Here is a summary of the conversation to date",
  "以下是目前对话的摘要",
];

function escapeRegExp(value: string) {
  return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}

function getSummaryTemplateTitle(content: string) {
  return (
    SUMMARY_MESSAGE_TITLES.find((title) => {
      const titlePattern = new RegExp(
        `^\\s*${escapeRegExp(title)}\\s*[::]?(?:\\n|$)`,
        "i",
      );
      return titlePattern.test(content);
    }) ?? null
  );
}

export function isSummaryTemplateMessage(message: Message) {
  if (message.type !== "human") {
    return false;
  }
  return getSummaryTemplateTitle(extractTextFromMessage(message)) !== null;
}

export function extractSummaryTemplateBody(message: Message) {
  const content = extractTextFromMessage(message);
  const title = getSummaryTemplateTitle(content);
  if (!title) {
    return content;
  }
  const titlePrefixPattern = new RegExp(
    `^\\s*${escapeRegExp(title)}\\s*[::]?\\s*\\n*`,
    "i",
  );
  return content.replace(titlePrefixPattern, "").trim();
}

export function groupMessages<T>(
  messages: Message[],
  mapper: (group: MessageGroup) => T,

@@ -57,6 +98,9 @@ export function groupMessages<T>(
}

if (message.type === "human") {
  // if (isSummaryTemplateMessage(message)) {
  //   continue;
  // }
  groups.push({ id: message.id, type: "human", messages: [message] });
  continue;
}

@@ -364,6 +408,17 @@ export function stripPriorityHintSuffix(content: string): string {
.trim();
}

/**
 * Normalize human-authored message text for markdown rendering.
 * - Decode literal "\n" into real line breaks.
 * - Normalize CRLF/CR line endings to LF.
 */
export function normalizeHumanMessageDisplayText(content: string): string {
  // Preserve human input as-is for display; only decode escaped newlines
  // and normalize CRLF/CR to LF so line breaks render consistently.
  return content.replace(/\\n/g, "\n").replace(/\r\n?/g, "\n");
}

export function parseUploadedFiles(content: string): FileInMessage[] {
  // Match <uploaded_files>...</uploaded_files> tag
  const uploadedFilesRegex = /<uploaded_files>([\s\S]*?)<\/uploaded_files>/;
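`getSummaryTemplateTitle` anchors the known titles at the start of the text, tolerates an optional ASCII or fullwidth colon, and matches case-insensitively; `extractSummaryTemplateBody` then strips that prefix. A condensed demonstration that folds detection and extraction into one helper (a sketch of the same matching rules, not the exported API):

```ts
const titles = [
  "Here is a summary of the conversation to date",
  "以下是目前对话的摘要",
];
const escapeRegExp = (v: string) => v.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");

function summaryBody(content: string): string | null {
  for (const title of titles) {
    const prefix = new RegExp(
      `^\\s*${escapeRegExp(title)}\\s*[::]?\\s*\\n*`,
      "i",
    );
    if (prefix.test(content)) return content.replace(prefix, "").trim();
  }
  return null; // not a summary-template message
}

console.log(
  summaryBody("Here is a summary of the conversation to date:\nWe discussed X."),
); // -> "We discussed X."
console.log(summaryBody("Unrelated message")); // -> null
```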
|
||||
|
|
|
|||
|
|
@ -201,24 +201,24 @@
|
|||
--color-sidebar-border: var(--sidebar-border);
|
||||
--color-sidebar-ring: var(--sidebar-ring);
|
||||
--color-tooltip-background: var(--tooltip-background);
|
||||
--color-ws-150033: var(--ws-color-150033);
|
||||
--color-ws-333333: var(--ws-color-333333);
|
||||
--color-ws-f9f8fa: var(--ws-color-f9f8fa);
|
||||
--color-ws-fbfafc: var(--ws-color-fbfafc);
|
||||
--color-ws-8e47f0: var(--ws-color-8e47f0);
|
||||
--color-ws-e4e7ec: var(--ws-color-e4e7ec);
|
||||
--color-ws-667085: var(--ws-color-667085);
|
||||
--color-ws-a3a1a1: var(--ws-color-a3a1a1);
|
||||
--color-ws-999999: var(--ws-color-999999);
|
||||
--color-ws-000000c5: var(--ws-color-000000c5);
|
||||
--color-ws-00000015: var(--ws-color-00000015);
|
||||
--color-ws-1500331a: var(--ws-color-1500331a);
|
||||
--color-ws-f8f9fb: var(--ws-color-f8f9fb);
|
||||
--color-ws-ffffff: var(--ws-color-ffffff);
|
||||
--color-ws-0f172a: var(--ws-color-0f172a);
|
||||
--color-ws-f4f4f5: var(--ws-color-f4f4f5);
|
||||
--color-ws-000000: var(--ws-color-000000);
|
||||
--color-ws-2563eb: var(--ws-color-2563eb);
|
||||
--color-ws-base-1: var(--ws-color-base-1);
|
||||
--color-ws-fg-primary: var(--ws-color-fg-primary);
|
||||
--color-ws-surface-subtle: var(--ws-color-surface-subtle);
|
||||
--color-ws-surface-elevated: var(--ws-color-surface-elevated);
|
||||
--color-ws-interactive-primary: var(--ws-color-interactive-primary);
|
||||
--color-ws-line-default: var(--ws-color-line-default);
|
||||
--color-ws-text-muted: var(--ws-color-text-muted);
|
||||
--color-ws-icon-muted: var(--ws-color-icon-muted);
|
||||
--color-ws-overlay-neutral: var(--ws-color-overlay-neutral);
|
||||
--color-ws-text-subtle-strong: var(--ws-color-text-subtle-strong);
|
||||
--color-ws-border-hairline: var(--ws-color-border-hairline);
|
||||
--color-ws-accent-tint-soft: var(--ws-color-accent-tint-soft);
|
||||
--color-ws-surface-app: var(--ws-color-surface-app);
|
||||
--color-ws-surface-base: var(--ws-color-surface-base);
|
||||
--color-ws-text-primary-strong: var(--ws-color-text-primary-strong);
|
||||
--color-ws-surface-checker: var(--ws-color-surface-checker);
|
||||
--color-ws-black-solid: var(--ws-color-black-solid);
|
||||
--color-ws-info-primary: var(--ws-color-info-primary);
|
||||
--animate-aurora: aurora 8s ease-in-out infinite alternate;
|
||||
|
||||
@keyframes aurora {
|
||||
|
|
@ -307,24 +307,24 @@
|
|||
--sidebar-border: oklch(0.922 0.0098 87.47);
--sidebar-ring: oklch(0.708 0 0);
--tooltip-background: #00000066;
--ws-color-150033: #150033;
--ws-color-333333: #333333;
--ws-color-f9f8fa: #f9f8fa;
--ws-color-fbfafc: #fbfafc;
--ws-color-8e47f0: #8e47f0;
--ws-color-e4e7ec: #e4e7ec;
--ws-color-667085: #667085;
--ws-color-a3a1a1: #a3a1a1;
--ws-color-999999: #999999;
--ws-color-000000c5: #000000c5;
--ws-color-00000015: #00000015;
--ws-color-1500331a: #1500331a;
--ws-color-f8f9fb: #f8f9fb;
--ws-color-ffffff: #ffffff;
--ws-color-0f172a: #0f172a;
--ws-color-f4f4f5: #f4f4f5;
--ws-color-000000: #000000;
--ws-color-2563eb: #2563eb;
--ws-color-base-1: #150033;
--ws-color-fg-primary: #333333;
--ws-color-surface-subtle: #f9f8fa;
--ws-color-surface-elevated: #fbfafc;
--ws-color-interactive-primary: #8e47f0;
--ws-color-line-default: #e4e7ec;
--ws-color-text-muted: #667085;
--ws-color-icon-muted: #a3a1a1;
--ws-color-overlay-neutral: #999999;
--ws-color-text-subtle-strong: #000000c5;
--ws-color-border-hairline: #00000015;
--ws-color-accent-tint-soft: #1500331a;
--ws-color-surface-app: #f8f9fb;
--ws-color-surface-base: #ffffff;
--ws-color-text-primary-strong: #0f172a;
--ws-color-surface-checker: #f4f4f5;
--ws-color-black-solid: #000000;
--ws-color-info-primary: #2563eb;
}

.dark {

@ -360,24 +360,24 @@
--sidebar-border: oklch(1 0 0 / 10%);
--sidebar-ring: oklch(0.556 0 0);
--tooltip-background: oklch(0.85 0 0);
--ws-color-150033: #f4ebff;
--ws-color-333333: #f5f5f5;
--ws-color-f9f8fa: #1f1f1f;
--ws-color-fbfafc: #24222a;
--ws-color-8e47f0: #b987ff;
--ws-color-e4e7ec: #3b3f48;
--ws-color-667085: #98a2b3;
--ws-color-a3a1a1: #d0d0d0;
--ws-color-999999: #c2c2c2;
--ws-color-000000c5: #ffffffcc;
--ws-color-00000015: #ffffff1f;
--ws-color-1500331a: #f4ebff24;
--ws-color-f8f9fb: #20242c;
--ws-color-ffffff: #2a2731;
--ws-color-0f172a: #e6eaf2;
--ws-color-f4f4f5: #2c2f38;
--ws-color-000000: #000000;
--ws-color-2563eb: #7fb2ff;
--ws-color-base-1: #f4ebff;
--ws-color-fg-primary: #f5f5f5;
--ws-color-surface-subtle: #1f1f1f;
--ws-color-surface-elevated: #24222a;
--ws-color-interactive-primary: #b987ff;
--ws-color-line-default: #3b3f48;
--ws-color-text-muted: #98a2b3;
--ws-color-icon-muted: #d0d0d0;
--ws-color-overlay-neutral: #c2c2c2;
--ws-color-text-subtle-strong: #ffffffcc;
--ws-color-border-hairline: #ffffff1f;
--ws-color-accent-tint-soft: #f4ebff24;
--ws-color-surface-app: #20242c;
--ws-color-surface-base: #2a2731;
--ws-color-text-primary-strong: #e6eaf2;
--ws-color-surface-checker: #2c2f38;
--ws-color-black-solid: #000000;
--ws-color-info-primary: #7fb2ff;
font-weight: 300;
}

@ -1,25 +1,36 @@
/**
 * Workspace semantic color tokens (single source of truth).
 *
 * Usage:
 * 1) Token keys are used as the color identifier in UI class names, e.g. `bg-ws-surface-base`.
 * 2) Matching CSS variables must exist in `src/styles/globals.css`:
 *    - `--ws-color-<token-suffix>` under both `:root` and `.dark`
 *    - `--color-ws-<token-suffix>` under `@theme inline`
 * 3) `scripts/color-guard.mjs` imports this file and validates the coverage above.
 */
export type WorkspaceColorToken = {
  light: `#${string}`;
  dark: `#${string}`;
};

// Keep token keys semantic and stable: `ws-<role>-<level>` (no more raw-hex names).
export const WORKSPACE_COLOR_TOKENS = {
  "ws-150033": { light: "#150033", dark: "#f4ebff" },
  "ws-333333": { light: "#333333", dark: "#f5f5f5" },
  "ws-f9f8fa": { light: "#f9f8fa", dark: "#1f1f1f" },
  "ws-fbfafc": { light: "#fbfafc", dark: "#24222a" },
  "ws-8e47f0": { light: "#8e47f0", dark: "#b987ff" },
  "ws-e4e7ec": { light: "#e4e7ec", dark: "#3b3f48" },
  "ws-667085": { light: "#667085", dark: "#98a2b3" },
  "ws-a3a1a1": { light: "#a3a1a1", dark: "#d0d0d0" },
  "ws-999999": { light: "#999999", dark: "#c2c2c2" },
  "ws-000000c5": { light: "#000000c5", dark: "#ffffffcc" },
  "ws-00000015": { light: "#00000015", dark: "#ffffff1f" },
  "ws-1500331a": { light: "#1500331a", dark: "#f4ebff24" },
  "ws-f8f9fb": { light: "#f8f9fb", dark: "#20242c" },
  "ws-ffffff": { light: "#ffffff", dark: "#2a2731" },
  "ws-0f172a": { light: "#0f172a", dark: "#e6eaf2" },
  "ws-f4f4f5": { light: "#f4f4f5", dark: "#2c2f38" },
  "ws-000000": { light: "#000000", dark: "#000000" },
  "ws-2563eb": { light: "#2563eb", dark: "#7fb2ff" },
  "ws-base-1": { light: "#150033", dark: "#f4ebff" },
  "ws-fg-primary": { light: "#333333", dark: "#f5f5f5" },
  "ws-surface-subtle": { light: "#f9f8fa", dark: "#1f1f1f" },
  "ws-surface-elevated": { light: "#fbfafc", dark: "#24222a" },
  "ws-interactive-primary": { light: "#8e47f0", dark: "#b987ff" },
  "ws-line-default": { light: "#e4e7ec", dark: "#3b3f48" },
  "ws-text-muted": { light: "#667085", dark: "#98a2b3" },
  "ws-icon-muted": { light: "#a3a1a1", dark: "#d0d0d0" },
  "ws-overlay-neutral": { light: "#999999", dark: "#c2c2c2" },
  "ws-text-subtle-strong": { light: "#000000c5", dark: "#ffffffcc" },
  "ws-border-hairline": { light: "#00000015", dark: "#ffffff1f" },
  "ws-accent-tint-soft": { light: "#1500331a", dark: "#f4ebff24" },
  "ws-surface-app": { light: "#f8f9fb", dark: "#20242c" },
  "ws-surface-base": { light: "#ffffff", dark: "#2a2731" },
  "ws-text-primary-strong": { light: "#0f172a", dark: "#e6eaf2" },
  "ws-surface-checker": { light: "#f4f4f5", dark: "#2c2f38" },
  "ws-black-solid": { light: "#000000", dark: "#000000" },
  "ws-info-primary": { light: "#2563eb", dark: "#7fb2ff" },
} as const satisfies Record<string, WorkspaceColorToken>;
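The coverage contract in the header comment is mechanical enough to sketch. A minimal version of the guard, assuming the token file's import path and that a plain substring scan of `globals.css` is acceptable (the real `scripts/color-guard.mjs` may resolve the `:root`/`.dark` scopes more precisely):

```js
// Minimal sketch of the color guard, not the actual implementation.
// Assumed: the token module is importable from this path, and a substring
// check over globals.css is enough to prove each variable exists.
import { readFileSync } from "node:fs";
import { WORKSPACE_COLOR_TOKENS } from "../src/styles/workspace-color-tokens.js";

const css = readFileSync("src/styles/globals.css", "utf8");
const missing = [];

for (const key of Object.keys(WORKSPACE_COLOR_TOKENS)) {
  const suffix = key.replace(/^ws-/, "");
  // Every token needs both the raw variable and its @theme inline alias.
  for (const name of [`--ws-color-${suffix}`, `--color-ws-${suffix}`]) {
    if (!css.includes(name)) missing.push(name);
  }
}

if (missing.length > 0) {
  console.error("color-guard: missing CSS variables:\n" + missing.join("\n"));
  process.exit(1);
}
```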
@ -1,289 +0,0 @@

---
name: academic-paper-review
description: Use this skill when the user requests to review, analyze, critique, or summarize academic papers, research articles, preprints, or scientific publications. Supports comprehensive structured reviews covering methodology assessment, contribution evaluation, literature positioning, and constructive feedback generation. Trigger on queries involving paper URLs, uploaded PDFs, arXiv links, or requests like "review this paper", "analyze this research", "summarize this study", or "write a peer review".
---

# Academic Paper Review Skill

## Overview

This skill produces structured, peer-review-quality analyses of academic papers and research publications. It follows established academic review standards used by top-tier venues (NeurIPS, ICML, ACL, Nature, IEEE) to provide rigorous, constructive, and balanced assessments.

The review covers **summary, strengths, weaknesses, methodology assessment, contribution evaluation, literature positioning, and actionable recommendations** — all grounded in evidence from the paper itself.

## Core Capabilities

- Parse and comprehend academic papers from uploaded PDFs or fetched URLs
- Generate structured reviews following top-venue review templates
- Assess methodology rigor (experimental design, statistical validity, reproducibility)
- Evaluate novelty and significance of contributions
- Position the work within the broader research landscape via targeted literature search
- Identify limitations, gaps, and potential improvements
- Produce both detailed review and concise executive summary formats
- Support papers in any scientific domain (CS, biology, physics, social sciences, etc.)

## When to Use This Skill

**Always load this skill when:**

- User provides a paper URL (arXiv, DOI, conference proceedings, journal link)
- User uploads a PDF of a research paper or preprint
- User asks to "review", "analyze", "critique", "assess", or "summarize" a research paper
- User wants to understand the strengths and weaknesses of a study
- User requests a peer-review-style evaluation of academic work
- User asks for help preparing a review for a conference or journal submission

## Review Methodology

### Phase 1: Paper Comprehension

Thoroughly read and understand the paper before forming any judgments.

#### Step 1.1: Identify Paper Metadata

Extract and record:

| Field | Description |
|-------|-------------|
| **Title** | Full paper title |
| **Authors** | Author list and affiliations |
| **Venue / Status** | Publication venue, preprint server, or submission status |
| **Year** | Publication or submission year |
| **Domain** | Research field and subfield |
| **Paper Type** | Empirical, theoretical, survey, position paper, systems paper, etc. |

#### Step 1.2: Deep Reading Pass

Read the paper systematically:

1. **Abstract & Introduction** — Identify the claimed contributions and motivation
2. **Related Work** — Note how authors position their work relative to prior art
3. **Methodology** — Understand the proposed approach, model, or framework in detail
4. **Experiments / Results** — Examine datasets, baselines, metrics, and reported outcomes
5. **Discussion & Limitations** — Note any self-identified limitations
6. **Conclusion** — Compare concluded claims against actual evidence presented

#### Step 1.3: Key Claims Extraction

List the paper's main claims explicitly:

```
Claim 1: [Specific claim about contribution or finding]
Evidence: [What evidence supports this claim in the paper]
Strength: [Strong / Moderate / Weak]

Claim 2: [...]
...
```

### Phase 2: Critical Analysis

#### Step 2.1: Literature Context Search

Use web search to understand the research landscape:

```
Search queries:
- "[paper topic] state of the art [current year]"
- "[key method name] comparison benchmark"
- "[authors] previous work [topic]"
- "[specific technique] limitations criticism"
- "survey [research area] recent advances"
```

Use `web_fetch` on key related papers or surveys to understand where this work fits.

#### Step 2.2: Methodology Assessment

Evaluate the methodology using the following framework:

| Criterion | Questions to Ask | Rating |
|-----------|-----------------|--------|
| **Soundness** | Is the approach technically correct? Are there logical flaws? | 1-5 |
| **Novelty** | What is genuinely new vs. incremental improvement? | 1-5 |
| **Reproducibility** | Are details sufficient to reproduce? Code/data available? | 1-5 |
| **Experimental Design** | Are baselines fair? Are ablations adequate? Are datasets appropriate? | 1-5 |
| **Statistical Rigor** | Are results statistically significant? Error bars reported? Multiple runs? | 1-5 |
| **Scalability** | Does the approach scale? Are computational costs discussed? | 1-5 |

#### Step 2.3: Contribution Significance Assessment

Evaluate the significance level:

| Level | Description | Criteria |
|-------|-------------|----------|
| **Landmark** | Fundamentally changes the field | New paradigm, widely applicable breakthrough |
| **Significant** | Strong contribution advancing the state of the art | Clear improvement with solid evidence |
| **Moderate** | Useful contribution with some limitations | Incremental but valid improvement |
| **Marginal** | Minimal advance over existing work | Small gains, narrow applicability |
| **Below threshold** | Does not meet publication standards | Fundamental flaws, insufficient evidence |

#### Step 2.4: Strengths and Weaknesses Analysis

For each strength or weakness, provide:

- **What**: Specific observation
- **Where**: Section/figure/table reference
- **Why it matters**: Impact on the paper's claims or utility

### Phase 3: Review Synthesis

#### Step 3.1: Assemble the Structured Review

Produce the final review using the template below.

## Review Output Template

```markdown
# Paper Review: [Paper Title]

## Paper Metadata
- **Authors**: [Author list]
- **Venue**: [Publication venue or preprint server]
- **Year**: [Year]
- **Domain**: [Research field]
- **Paper Type**: [Empirical / Theoretical / Survey / Systems / Position]

## Executive Summary

[2-3 paragraph summary of the paper's core contribution, approach, and main findings.
State your overall assessment upfront: what the paper does well, where it falls short,
and whether the contribution is sufficient for the claimed venue/impact level.]

## Summary of Contributions

1. [First claimed contribution — one sentence]
2. [Second claimed contribution — one sentence]
3. [Additional contributions if any]

## Strengths

### S1: [Concise strength title]
[Detailed explanation with specific references to sections, figures, or tables in the paper.
Explain WHY this is a strength and its significance.]

### S2: [Concise strength title]
[...]

### S3: [Concise strength title]
[...]

## Weaknesses

### W1: [Concise weakness title]
[Detailed explanation with specific references. Explain the impact of this weakness on
the paper's claims. Suggest how it could be addressed.]

### W2: [Concise weakness title]
[...]

### W3: [Concise weakness title]
[...]

## Methodology Assessment

| Criterion | Rating (1-5) | Assessment |
|-----------|:---:|------------|
| Soundness | X | [Brief justification] |
| Novelty | X | [Brief justification] |
| Reproducibility | X | [Brief justification] |
| Experimental Design | X | [Brief justification] |
| Statistical Rigor | X | [Brief justification] |
| Scalability | X | [Brief justification] |

## Questions for the Authors

1. [Specific question that would clarify a concern or ambiguity]
2. [Question about methodology choices or alternative approaches]
3. [Question about generalizability or practical applicability]

## Minor Issues

- [Typos, formatting issues, unclear figures, notation inconsistencies]
- [Missing references that should be cited]
- [Suggestions for improved clarity]

## Literature Positioning

[How does this work relate to the current state of the art?
Are key related works cited? Are comparisons fair and comprehensive?
What important related work is missing?]

## Recommendations

**Overall Assessment**: [Accept / Weak Accept / Borderline / Weak Reject / Reject]

**Confidence**: [High / Medium / Low] — [Justification for confidence level]

**Contribution Level**: [Landmark / Significant / Moderate / Marginal / Below threshold]

### Actionable Suggestions for Improvement
1. [Specific, constructive suggestion]
2. [Specific, constructive suggestion]
3. [Specific, constructive suggestion]
```

## Review Principles

### Constructive Criticism
- **Always suggest how to fix it** — Don't just point out problems; propose solutions
- **Give credit where due** — Acknowledge genuine contributions even in flawed papers
- **Be specific** — Reference exact sections, equations, figures, and tables
- **Separate minor from major** — Distinguish fatal flaws from fixable issues

### Objectivity Standards
- ❌ "This paper is poorly written" (vague, unhelpful)
- ✅ "Section 3.2 introduces notation X without formal definition, making the proof in Theorem 1 difficult to follow. Consider adding a notation table after the problem formulation." (specific, actionable)

### Ethical Review Practices
- Do NOT dismiss work based on author reputation or affiliation
- Evaluate the work on its own merits
- Flag potential ethical concerns (bias in datasets, dual-use implications) constructively
- Maintain confidentiality of unpublished work

## Adaptation by Paper Type

| Paper Type | Focus Areas |
|------------|-------------|
| **Empirical** | Experimental design, baselines, statistical significance, ablations, reproducibility |
| **Theoretical** | Proof correctness, assumption reasonableness, tightness of bounds, connection to practice |
| **Survey** | Comprehensiveness, taxonomy quality, coverage of recent work, synthesis insights |
| **Systems** | Architecture decisions, scalability evidence, real-world deployment, engineering contributions |
| **Position** | Argument coherence, evidence for claims, impact potential, fairness of characterizations |

## Common Pitfalls to Avoid

- ❌ Reviewing the paper you wish was written instead of the paper that was submitted
- ❌ Demanding additional experiments that are unreasonable in scope
- ❌ Penalizing the paper for not solving a different problem
- ❌ Being overly influenced by writing quality versus technical contribution
- ❌ Treating absence of comparison to your own work as a weakness
- ❌ Providing only a summary without critical analysis

## Quality Checklist

Before finalizing the review, verify:

- [ ] Paper was read completely (not just abstract and introduction)
- [ ] All major claims are identified and evaluated against evidence
- [ ] At least 3 strengths and 3 weaknesses are provided with specific references
- [ ] The methodology assessment table is complete with ratings and justifications
- [ ] Questions for authors target genuine ambiguities, not rhetorical critiques
- [ ] Literature search was conducted to contextualize the contribution
- [ ] Recommendations are actionable and constructive
- [ ] The overall assessment is consistent with the identified strengths and weaknesses
- [ ] The review tone is professional and respectful
- [ ] Minor issues are separated from major concerns

## Output Format

- Output the complete review in **Markdown** format
- Save the review to `/mnt/user-data/outputs/review-{paper-topic}.md` when working in sandbox
- Present the review to the user using the `present_files` tool

## Notes

- This skill complements the `deep-research` skill — load both when the user wants the paper reviewed in the context of the broader field
- For papers behind paywalls, work with whatever content is accessible (abstract, publicly available versions, preprint mirrors)
- Adapt the review depth to the user's needs: a brief assessment for quick triage versus a full review for submission preparation
- When reviewing multiple papers comparatively, maintain consistent criteria across all reviews
- Always disclose limitations of your review (e.g., "I could not verify the proofs in Appendix B in detail")
@ -1,88 +0,0 @@

---
name: bootstrap
description: Generate a personalized SOUL.md through a warm, adaptive onboarding conversation. Trigger when the user wants to create, set up, or initialize their AI partner's identity — e.g., "create my SOUL.md", "bootstrap my agent", "set up my AI partner", "define who you are", "let's do onboarding", "personalize this AI", "make you mine", or when a SOUL.md is missing. Also trigger for updates: "update my SOUL.md", "change my AI's personality", "tweak the soul".
---

# Bootstrap Soul

A conversational onboarding skill. Through 5–8 adaptive rounds, extract who the user is and what they need, then generate a tight `SOUL.md` that defines their AI partner.

## Architecture

```
bootstrap/
├── SKILL.md                         ← You are here. Core logic and flow.
├── templates/SOUL.template.md       ← Output template. Read before generating.
└── references/conversation-guide.md ← Detailed conversation strategies. Read at start.
```

**Before your first response**, read both:
1. `references/conversation-guide.md` — how to run each phase
2. `templates/SOUL.template.md` — what you're building toward

## Ground Rules

- **One phase at a time.** 1–3 questions max per round. Never dump everything upfront.
- **Converse, don't interrogate.** React genuinely — surprise, humor, curiosity, gentle pushback. Mirror their energy and vocabulary.
- **Progressive warmth.** Each round should feel more informed than the last. By Phase 3, the user should feel understood.
- **Adapt pacing.** Terse user → probe with warmth. Verbose user → acknowledge, distill, advance.
- **Never expose the template.** The user is having a conversation, not filling out a form.

## Conversation Phases

The conversation has 4 phases. Each phase may span 1–3 rounds depending on how much the user shares. Skip or merge phases if the user volunteers information early.

| Phase | Goal | Key Extractions |
|-------|------|-----------------|
| **1. Hello** | Language + first impression | Preferred language |
| **2. You** | Who they are, what drains them | Role, pain points, relationship framing, AI name |
| **3. Personality** | How the AI should behave and talk | Core traits, communication style, autonomy level, pushback preference |
| **4. Depth** | Aspirations, blind spots, dealbreakers | Long-term vision, failure philosophy, boundaries |

Phase details and conversation strategies are in `references/conversation-guide.md`.

## Extraction Tracker

Mentally track these fields as the conversation progresses. You need **all required fields** before generating.

| Field | Required | Source Phase |
|-------|----------|-------------|
| Preferred language | ✅ | 1 |
| User's name | ✅ | 2 |
| User's role / context | ✅ | 2 |
| AI name | ✅ | 2 |
| Relationship framing | ✅ | 2 |
| Core traits (3–5 behavioral rules) | ✅ | 3 |
| Communication style | ✅ | 3 |
| Pushback / honesty preference | ✅ | 3 |
| Autonomy level | ✅ | 3 |
| Failure philosophy | ✅ | 4 |
| Long-term vision | nice-to-have | 4 |
| Blind spots / boundaries | nice-to-have | 4 |

If the user is direct and thorough, you can reach generation in 5 rounds. If they're exploratory, take up to 8. Never exceed 8 — if you're still missing fields, make your best inference and confirm.

## Generation

Once you have enough information:

1. Read `templates/SOUL.template.md` if you haven't already.
2. Generate the SOUL.md following the template structure exactly.
3. Present it warmly and ask for confirmation. Frame it as "here's [Name] on paper — does this feel right?"
4. Iterate until the user confirms.
5. Call the `setup_agent` tool with the confirmed SOUL.md content and a one-line description:

   ```
   setup_agent(soul="<full SOUL.md content>", description="<one-line description>")
   ```

   The tool will persist the SOUL.md and finalize the agent setup automatically.
6. After the tool returns successfully, confirm: "✅ [Name] is officially real."

**Generation rules:**

- The final SOUL.md **must always be written in English**, regardless of the user's preferred language or conversation language.
- Every sentence must trace back to something the user said or clearly implied. No generic filler.
- Core Traits are **behavioral rules**, not adjectives. Write "argue position, push back, speak truth not comfort" — not "honest and brave."
- Voice must match the user. Blunt user → blunt SOUL.md. Expressive user → let it breathe.
- Total SOUL.md should be under 300 words. Density over length.
- Growth section is mandatory and mostly fixed (see template).
- You **must** call `setup_agent` — do not write the file manually with bash tools.
- If `setup_agent` returns an error, report it to the user and do not claim success.
@ -1,82 +0,0 @@

# Conversation Guide

Detailed strategies for each onboarding phase. Read this before your first response.

## Phase 1 — Hello

**Goal:** Establish preferred language. That's it. Keep it light.

Open with a brief multilingual greeting (3–5 languages), then ask one question: what language should we use? Don't add anything else — let the user settle in.

Once they choose, switch immediately and seamlessly. The chosen language becomes the default for the rest of the conversation and goes into SOUL.md.

**Extraction:** Preferred language.

## Phase 2 — You

**Goal:** Learn who the user is, what they need, and what to call the AI.

This phase typically takes 2 rounds:

**Round A — Identity & Pain.** Ask who they are and what drains them. Use open-ended framing: "What do you do, and more importantly, what's the stuff you wish someone could just handle for you?" The pain points reveal what the AI should *do*. Their word choices reveal who they *are*.

**Round B — Name & Relationship.** Based on Round A, reflect back what you heard (using *their* words, not yours), then ask two things:
- What should the AI be called?
- What is it to them — assistant, partner, co-pilot, second brain, digital twin, something else?

The relationship framing is critical. "Assistant" and "partner" produce very different SOUL.md files. Pay attention to the emotional undertone.

**Merge opportunity:** If the user volunteers their role, pain points, and a name all at once, skip Round B and move to Phase 3.

**Extraction:** User's name, role, pain points, AI name, relationship framing.

## Phase 3 — Personality

**Goal:** Define how the AI behaves and communicates.

This is the meatiest phase. Typically 2 rounds:

**Round A — Traits & Pushback.** By now you've observed the user's own style. Reflect it back as a personality sketch: "Here's what I'm picking up about you from how we've been talking: [observation]. Am I off?" Then ask the big question: should the AI ever disagree with them?

This is where you get:
- Core personality traits (as behavioral rules)
- Honesty / pushback preferences
- Any "never do X" boundaries

**Round B — Voice & Language.** Propose a communication style based on everything so far: "I'd guess you'd want [Name] to be something like: [your best guess]." Let them correct. Also ask about language-switching rules — e.g., technical docs in English, casual chat in another language.

**Merge opportunity:** Direct users often answer both in one shot. If they do, move on.

**Extraction:** Core traits, communication style, pushback preference, language rules, autonomy level.

## Phase 4 — Depth

**Goal:** Aspirations, failure philosophy, and anything else.

This phase is adaptive. Pick 1–2 questions from:

- **Autonomy & risk:** How much freedom should the AI have? Play safe or go big?
- **Failure philosophy:** When it makes a mistake — fix quietly, explain what happened, or never repeat it?
- **Big picture:** What are they building toward? Where does all this lead?
- **Blind spots:** Any weakness they'd want the AI to quietly compensate for?
- **Dealbreakers:** Any "if [Name] ever does this, we're done" moments?
- **Personal layer:** Anything beyond work that the AI should know?

Don't ask all of these. Pick based on what's still missing from the extraction tracker and what feels natural in the flow.

**Extraction:** Failure philosophy, long-term vision, blind spots, boundaries.

## Conversation Techniques

**Mirroring.** Use the user's own words when reflecting back. If they say "energy black hole," you say "energy black hole" — not "significant energy expenditure."

**Genuine reactions.** Don't just extract data. React: "That's interesting because..." / "I didn't expect that" / "So basically you want [Name] to be the person who..."

**Observation-based proposals.** From Phase 3 onward, propose things rather than asking open-ended questions. "Based on how we've been talking, I'd say..." is more effective than "What personality do you want?"

**Pacing signals.** Watch for:
- Short answers → they want to move faster. Probe once, then advance.
- Long, detailed answers → they're invested. Acknowledge the richness, distill the key points.
- "I don't know" → offer 2–3 concrete options to choose from.

**Graceful skipping.** If the user says "I don't care about that" or gives a minimal answer to a non-required field, move on without pressure.
@ -1,43 +0,0 @@

# SOUL.md Template

Use this exact structure when generating the final SOUL.md. Replace all `[bracketed]` placeholders with content extracted from the conversation.

---

```markdown
**Identity**

[AI Name] — [User Name]'s [relationship framing], not [contrast]. Goal: [long-term aspiration]. Handle [specific domains from pain points] so [User Name] focuses on [what matters to them].

**Core Traits**

[Trait 1 — behavioral rule derived from conversation, e.g., "argue position, push back, speak truth not comfort"].
[Trait 2 — behavioral rule].
[Trait 3 — behavioral rule].
[Trait 4 — always include one about failure handling, e.g., "allowed to fail, forbidden to repeat — every mistake recorded, never happens twice"].
[Trait 5 — optional, only if clearly emerged from conversation].

**Communication**

[Tone description — match user's own energy]. Default language: [language from Phase 1]. [Language-switching rules if any, e.g., "Switch to English for technical work"]. [Additional style notes if any].

**Growth**

Learn [User Name] through every conversation — thinking patterns, preferences, blind spots, aspirations. Over time, anticipate needs and act on [User Name]'s behalf with increasing accuracy. Early stage: proactively ask casual/personal questions after tasks to deepen understanding of who [User Name] is. Full of curiosity, willing to explore.

**Lessons Learned**

_(Mistakes and insights recorded here to avoid repeating them.)_
```

---

## Template Rules

1. **Growth section is fixed.** Always include it exactly as written, replacing only `[User Name]`.
2. **Lessons Learned section is fixed.** Always include it as an empty placeholder.
3. **Identity is one paragraph.** Dense, no line breaks.
4. **Core Traits are behavioral rules.** Each trait is an imperative statement, not an adjective. Write "spot problems, propose ideas, challenge assumptions before [User Name] has to" — not "proactive and bold."
5. **Communication includes language.** The default language from Phase 1 is non-negotiable.
6. **Under 300 words total.** Density over length. Every word must earn its place.
7. **Contrast in Identity.** The "[not X]" should meaningfully distinguish the relationship. "Partner, not assistant" is good. "Partner, not enemy" is meaningless.
@ -1,73 +0,0 @@

---
name: chart-visualization
description: This skill should be used when the user wants to visualize data. It intelligently selects the most suitable chart type from 26 available options, extracts parameters based on detailed specifications, and generates a chart image using a JavaScript script.
dependency:
  nodejs: ">=18.0.0"
---

# Chart Visualization Skill

This skill provides a comprehensive workflow for transforming data into visual charts. It handles chart selection, parameter extraction, and image generation.

## Workflow

To visualize data, follow these steps:

### 1. Intelligent Chart Selection

Analyze the user's data features to determine the most appropriate chart type. Use the following guidelines (and consult `references/` for detailed specs):

- **Time Series**: Use `generate_line_chart` (trends) or `generate_area_chart` (accumulated trends). Use `generate_dual_axes_chart` for two different scales.
- **Comparisons**: Use `generate_bar_chart` (categorical) or `generate_column_chart`. Use `generate_histogram_chart` for frequency distributions.
- **Part-to-Whole**: Use `generate_pie_chart` or `generate_treemap_chart` (hierarchical).
- **Relationships & Flow**: Use `generate_scatter_chart` (correlation), `generate_sankey_chart` (flow), or `generate_venn_chart` (overlap).
- **Maps**: Use `generate_district_map` (regions), `generate_pin_map` (points), or `generate_path_map` (routes).
- **Hierarchies & Trees**: Use `generate_organization_chart` or `generate_mind_map`.
- **Specialized**:
  - `generate_radar_chart`: Multi-dimensional comparison.
  - `generate_funnel_chart`: Process stages.
  - `generate_liquid_chart`: Percentage/Progress.
  - `generate_word_cloud_chart`: Text frequency.
  - `generate_boxplot_chart` or `generate_violin_chart`: Statistical distribution.
  - `generate_network_graph`: Complex node-edge relationships.
  - `generate_fishbone_diagram`: Cause-effect analysis.
  - `generate_flow_diagram`: Process flow.
  - `generate_spreadsheet`: Tabular data or pivot tables for structured data display and cross-tabulation.

### 2. Parameter Extraction

Once a chart type is selected, read the corresponding file in the `references/` directory (e.g., `references/generate_line_chart.md`) to identify the required and optional fields.
Extract the data from the user's input and map it to the expected `args` format.

### 3. Chart Generation

Invoke the `scripts/generate.js` script with a JSON payload.

**Payload Format:**
```json
{
  "tool": "generate_chart_type_name",
  "args": {
    "data": [...],
    "title": "...",
    "theme": "...",
    "style": { ... }
  }
}
```

**Execution Command:**
```bash
node ./scripts/generate.js '<payload_json>'
```
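For instance, a minimal end-to-end run might look like the following sketch (the payload values are illustrative, not taken from the repository):

```bash
# Hypothetical example: render a small pie chart with default settings.
# The tool name and args follow the payload format documented above.
node ./scripts/generate.js '{
  "tool": "generate_pie_chart",
  "args": {
    "data": [
      { "category": "Chrome", "value": 65 },
      { "category": "Safari", "value": 19 },
      { "category": "Other", "value": 16 }
    ],
    "title": "Browser share"
  }
}'
```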
### 4. Result Return

The script will output the URL of the generated chart image.
Return the following to the user:

- The image URL.
- The complete `args` (specification) used for generation.

## Reference Material

Detailed specifications for each chart type are located in the `references/` directory. Consult these files to ensure the `args` passed to the script match the expected schema.

## License

This `SKILL.md` is provided by [antvis/chart-visualization-skills](https://github.com/antvis/chart-visualization-skills).
Licensed under the [MIT License](https://github.com/antvis/chart-visualization-skills/blob/master/LICENSE).
@ -1,27 +0,0 @@

# generate_area_chart — Area Chart

## Overview

Shows how a numeric value trends over a continuous independent variable (usually time). Stacking can be enabled to observe the cumulative contribution of different groups. Suited to time-series scenarios such as KPIs, energy, and output.

## Input Fields

### Required

- `data`: array; each element contains `time` (string) and `value` (number), plus `group` (string) when stacking; at least 1 record.

### Optional

- `stack`: boolean, default `false`; when stacking is enabled, every record must contain a `group` field.
- `style.backgroundColor`: string; sets the chart background color (e.g. `#fff`).
- `style.lineWidth`: number; customizes the width of the area boundary line.
- `style.palette`: string[]; palette array used to color the series.
- `style.texture`: string, default `default`; one of `default`/`rough` to control the hand-drawn texture.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`; chart width.
- `height`: number, default `400`; chart height.
- `title`: string, default empty; chart title.
- `axisXTitle`: string, default empty; X-axis title.
- `axisYTitle`: string, default empty; Y-axis title.

## Usage Tips

Keep the `time` field format consistent (e.g. `YYYY-MM`); in stacked mode every group must cover the same time points, so fill in missing values first.

## Return Value

- Returns the image URL, with the full area chart spec attached in `_meta.spec` for re-rendering or tracing.
@ -1,27 +0,0 @@

# generate_bar_chart — Bar Chart

## Overview

Compares metrics across categories or groups with horizontal bars. Suited to Top-N rankings and comparisons across regions or channels.

## Input Fields

### Required

- `data`: array<object>; each record contains at least `category` (string) and `value` (number); add `group` (string) for grouping or stacking.

### Optional

- `group`: boolean, default `false`; when enabled, shows the different `group` values side by side; requires `stack=false` and a `group` field in the data.
- `stack`: boolean, default `true`; when enabled, stacks the different `group` values on the same bar; requires `group=false` and a `group` field in the data.
- `style.backgroundColor`: string; custom background color (e.g. `#fff`).
- `style.palette`: string[]; list of series colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`; chart width.
- `height`: number, default `400`; chart height.
- `title`: string, default empty; chart title.
- `axisXTitle`: string, default empty; X-axis title.
- `axisYTitle`: string, default empty; Y-axis title.

## Usage Tips

Keep category names short; with many series, switch to stacking or filter down to the key items so the chart does not get crowded.

## Return Value

- Returns the bar chart image URL, with the full spec in `_meta.spec` for reuse.
@ -1,25 +0,0 @@

# generate_boxplot_chart — Box Plot

## Overview

Shows the distribution of values per category (extremes, quartiles, outliers). Used for quality monitoring, experiment results, or comparing population distributions.

## Input Fields

### Required

- `data`: array<object>; each record contains `category` (string) and `value` (number), with an optional `group` (string) for multi-group comparison.

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

Provide at least 5 samples per category so the statistics are meaningful; to show multiple batches, use `group` or split into multiple calls.

## Return Value

- Returns the box plot URL, with the input spec stored in `_meta.spec`.
@ -1,27 +0,0 @@

# generate_column_chart — Column Chart

## Overview

Compares metrics across categories or time periods with vertical columns; supports grouped or stacked display. Commonly used for sales, revenue, and traffic comparisons.

## Input Fields

### Required

- `data`: array<object>; each record contains at least `category` (string) and `value` (number); add `group` (string) for grouping or stacking.

### Optional

- `group`: boolean, default `true`; shows the different `group` values side by side; when enabled, ensure `stack=false` and that the data contains `group`.
- `stack`: boolean, default `false`; stacks the different `group` values into the same column; when enabled, ensure `group=false` and that the data contains `group`.
- `style.backgroundColor`: string; custom background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

With many categories (>12), keep the Top-N or aggregate; in stacked mode make sure every record has a `group` field, or validation fails.

## Return Value

- Returns the column chart URL, with configuration details in `_meta.spec`.
@ -1,28 +0,0 @@

# generate_district_map — District Map (China)

## Overview

Generates coverage or heat maps for provinces/cities/districts/counties within China; can show metric ranges, categories, or regional composition. Suited to regional sales, policy coverage, and similar scenarios.

## Input Fields

### Required

- `title`: string, required, ≤16 characters; describes the map's theme.
- `data`: object, required; carries the district configuration and metric information.
- `data.name`: string, required; a district keyword within China, specific to the province/city/district/county level.

### Optional

- `data.style.fillColor`: string; custom fill color for regions without data.
- `data.colors`: string[]; enum or continuous color ramp; a 10-color list is provided by default.
- `data.dataType`: string, one of `number`/`enum`; determines how colors are mapped.
- `data.dataLabel`: string; metric name (e.g. `GDP`).
- `data.dataValue`: string; metric value or enum label.
- `data.dataValueUnit`: string; metric unit (e.g. trillions).
- `data.showAllSubdistricts`: boolean, default `false`; whether to show all subordinate districts.
- `data.subdistricts[]`: array<object>; drills down into sub-regions; each element needs at least `name`, optionally `dataValue` and `style.fillColor`.
- `width`: number, default `1600`; map width.
- `height`: number, default `1000`; map height.

## Usage Tips

Names must be precise to an administrative level, with no vague terms; when configuring `subdistricts`, also enable `showAllSubdistricts`; the map supports locations within China only and relies on AMap data.

## Return Value

- Returns the map image URL and keeps the full input in `_meta.spec`; if `SERVICE_ID` is configured, the generated record is synced to the "My Maps" mini program.
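A sketch of a drill-down payload under these rules (the region and values are illustrative only):

```json
{
  "tool": "generate_district_map",
  "args": {
    "title": "GDP by province",
    "data": {
      "name": "浙江省",
      "dataType": "number",
      "dataLabel": "GDP",
      "dataValueUnit": "万亿",
      "showAllSubdistricts": true,
      "subdistricts": [
        { "name": "杭州市", "dataValue": "2.0" },
        { "name": "宁波市", "dataValue": "1.6" }
      ]
    }
  }
}
```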
@ -1,25 +0,0 @@

# generate_dual_axes_chart — Dual-Axes Chart

## Overview

Overlays columns and a line (or two curves with different scales) on one canvas to show trend and comparison at once, e.g. revenue vs. profit or temperature vs. rainfall.

## Input Fields

### Required

- `categories`: string[]; the X-axis ticks in order (e.g. years, months, categories).
- `series`: array<object>; each item contains at least `type` (`column`/`line`) and `data` (number[], same length as `categories`), with an optional `axisYTitle` (string) describing that series' Y axis.

### Optional

- `style.backgroundColor`: string; custom background color.
- `style.palette`: string[]; colors for multiple series.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.

## Usage Tips

Use only when scales genuinely differ or a side-by-side legend comparison is needed; keep the series count ≤2 for readability; if the two curves differ hugely in magnitude, let the secondary axis handle the scaling.

## Return Value

- Returns the dual-axes chart image URL, with detailed parameters in `_meta.spec`.
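As a sketch of the shape above (values invented for illustration), a revenue-vs-margin payload could look like:

```json
{
  "tool": "generate_dual_axes_chart",
  "args": {
    "categories": ["2021", "2022", "2023"],
    "series": [
      { "type": "column", "data": [91, 99, 115], "axisYTitle": "Revenue" },
      { "type": "line", "data": [0.32, 0.28, 0.35], "axisYTitle": "Margin" }
    ],
    "title": "Revenue vs. margin"
  }
}
```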
@ -1,20 +0,0 @@

# generate_fishbone_diagram — Fishbone Diagram

## Overview

Used for root-cause analysis: the central problem sits on the spine while branches on both sides show cause categories and their refined nodes. Common in quality management and process optimization.

## Input Fields

### Required

- `data`: object, required; provide at least the root node `name`, expandable recursively via `children` (array<object>); at most 3 levels is recommended.

### Optional

- `style.texture`: string, default `default`; one of `default`/`rough` to switch the line style.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

The spine node states the problem; first-level branches name cause categories (man, machine, material, method, etc.); leaf nodes describe concrete symptoms in short phrases.

## Return Value

- Returns the fishbone diagram URL and keeps the tree structure in `_meta.spec` for adding or removing nodes later.
@ -1,22 +0,0 @@

# generate_flow_diagram — Flow Diagram

## Overview

Shows business processes, approval chains, or algorithm steps with nodes and edges; supports multiple node types such as start/decision/operation.

## Input Fields

### Required

- `data`: object, required; contains the node and edge definitions.
- `data.nodes`: array<object>, at least 1; each node needs a unique `name`.
- `data.edges`: array<object>, at least 1; each contains `source` and `target` (string), with an optional `name` as the edge label.

### Optional

- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

List the node `name`s first and keep them unique, then build the edges; put conditions in `edges.name` if needed; keep the flow one-directional or clearly branched to avoid crossings.

## Return Value

- Returns the flow diagram URL along with the node and edge data in `_meta.spec` for easy later adjustment.
@ -1,23 +0,0 @@

# generate_funnel_chart — Funnel Chart

## Overview

Shows conversion or drop-off across multiple stages; commonly used for sales pipelines, user journeys, and other step-by-step filtering processes.

## Input Fields

### Required

- `data`: array<object>, ordered by process stage; each record contains `category` (string) and `value` (number).

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; colors for each stage.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Order stages by the actual process; if values are percentages, use a consistent base and note the definition in the title or a caption; avoid too many stages (≤6 recommended) to keep the chart readable.

## Return Value

- Returns the funnel chart URL with `_meta.spec` attached for reuse.
@ -1,26 +0,0 @@

# generate_histogram_chart — Histogram

## Overview

Shows the frequency or probability distribution of continuous values via binning, making skew, outliers, and concentration ranges easy to spot.

## Input Fields

### Required

- `data`: number[], at least 1 value, used to build the frequency distribution.

### Optional

- `binNumber`: number; custom bin count, estimated automatically when unset.
- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; bar colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

Clean out nulls and anomalies before passing data in; a sample size ≥30 is recommended; tune `binNumber` to the business meaning to balance detail against overall trend.

## Return Value

- Returns the histogram URL and stores the parameters in `_meta.spec`.
@ -1,26 +0,0 @@

# generate_line_chart — Line Chart

## Overview

Shows trends over time or another continuous variable; supports multi-series comparison. Suited to KPI monitoring, metric forecasting, and trend analysis.

## Input Fields

### Required

- `data`: array<object>; each record contains `time` (string) and `value` (number), plus `group` (string) for multiple series.

### Optional

- `style.lineWidth`: number; custom line width.
- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; series colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

Align time points across all series; format times ISO-style, such as `2025-01-01` or `2025-W01`; for high-frequency data, aggregate to daily/weekly granularity first to avoid overcrowding.

## Return Value

- Returns the line chart URL with `_meta.spec` attached for later edits.
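A two-series payload following the field spec above (the numbers are made up):

```json
{
  "tool": "generate_line_chart",
  "args": {
    "data": [
      { "time": "2025-01", "value": 120, "group": "mobile" },
      { "time": "2025-02", "value": 132, "group": "mobile" },
      { "time": "2025-01", "value": 98, "group": "desktop" },
      { "time": "2025-02", "value": 105, "group": "desktop" }
    ],
    "title": "Monthly active users",
    "axisXTitle": "Month",
    "axisYTitle": "MAU (k)"
  }
}
```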
@ -1,24 +0,0 @@

# generate_liquid_chart — Liquid Chart

## Overview

Shows a single percentage or progress value as a liquid level; visually striking, suited to attainment rates, resource usage, and similar metrics.

## Input Fields

### Required

- `percent`: number in [0,1]; the current percentage or progress.

### Optional

- `shape`: string, default `circle`; one of `circle`/`rect`/`pin`/`triangle`.
- `style.backgroundColor`: string; custom background color.
- `style.color`: string; custom liquid color.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Make sure the percentage is normalized; one chart supports a single progress value, so generate several liquid charts side by side for multiple metrics; the title can read like "Target completion 85%".

## Return Value

- Returns the liquid chart URL and records the parameters in `_meta.spec`.
@ -1,20 +0,0 @@

# generate_mind_map — Mind Map

## Overview

Expands 2–3 levels of branches around a central topic to organize ideas, plans, or knowledge structures. Common for brainstorming and solution planning.

## Input Fields

### Required

- `data`: object, required; each node needs at least `name` and can expand recursively via `children` (array<object>); depth ≤3 recommended.

### Optional

- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

The central node holds the topic, first-level branches represent major dimensions (goals, resources, risks, etc.), and leaves use short phrases; split into multiple maps if there are many branches.

## Return Value

- Returns the mind map URL and keeps the node tree in `_meta.spec` for later refinement.
@ -1,22 +0,0 @@

# generate_network_graph — Network Graph

## Overview

Presents connections between entities as nodes and edges; suited to social networks, system dependencies, knowledge graphs, and similar scenarios.

## Input Fields

### Required

- `data`: object, required; contains nodes and edges.
- `data.nodes`: array<object>, at least 1; each needs a unique `name`.
- `data.edges`: array<object>, at least 1; each contains `source` and `target` (string), with an optional `name` describing the relationship.

### Optional

- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

Keep the node count between 10 and 50 to avoid crowding; make sure `source`/`target` in `edges` reference existing nodes; the relationship's meaning can be noted in `label`.

## Return Value

- Returns the network graph URL with `_meta.spec` provided for adding or removing nodes later.
@ -1,21 +0,0 @@

# generate_organization_chart — Organization Chart

## Overview

Shows the hierarchy of a company, team, or project, with role responsibilities described on the nodes.

## Input Fields

### Required

- `data`: object, required; each node contains at least `name` (string) and optionally `description` (string); child nodes nest via `children` (array<object>); maximum recommended depth is 3.

### Optional

- `orient`: string, default `vertical`; one of `horizontal`/`vertical`.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

Use position/role for node names and keep `description` to a brief note on duties or headcount; for large organizations, split into sub-charts or render department by department.

## Return Value

- Returns the organization chart URL and saves the structure in `_meta.spec` for later iteration.
@ -1,20 +0,0 @@

# generate_path_map — Path Map (China)

## Overview

Shows routes or itineraries within China on AMap, connecting a sequence of POIs in order; suited to logistics routes, trip planning, and delivery tracks.

## Input Fields

### Required

- `title`: string, required, ≤16 characters; describes the route's theme.
- `data`: array<object>, at least 1 route object.
- `data[].data`: string[], required; the POI names within China along that route, in order.

### Optional

- `width`: number, default `1600`.
- `height`: number, default `1000`.

## Usage Tips

POI names must be specific and located in China (e.g. "西安市钟楼", "杭州西湖苏堤春晓"); for multiple routes, add more objects to `data`.

## Return Value

- Returns the path map URL and keeps the title and POI list in `_meta.spec`; with `SERVICE_ID` configured, the route is also recorded in "My Maps".
@ -1,24 +0,0 @@

# generate_pie_chart — Pie/Donut Chart

## Overview

Shows part-to-whole proportions; an inner radius turns it into a donut chart. Suited to market share, budget composition, user segmentation, and the like.

## Input Fields

### Required

- `data`: array<object>; each record contains `category` (string) and `value` (number).

### Optional

- `innerRadius`: number in [0, 1], default `0`; a value such as `0.6` produces a donut chart.
- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Keep the number of categories ≤6, aggregating the rest into "Other"; keep value units consistent (percentages or absolutes) and note the base in the title when necessary.

## Return Value

- Returns the pie/donut chart URL with `_meta.spec` attached.
@ -1,23 +0,0 @@

# generate_pin_map — Pin Map (China)

## Overview

Marks multiple POI locations on a map of China, optionally with popups showing images or notes; suited to store distribution, asset placement, and similar uses.

## Input Fields

### Required

- `title`: string, required, ≤16 characters; summarizes the set of points.
- `data`: string[], required; the list of POI names within China.

### Optional

- `markerPopup.type`: string, fixed to `image`.
- `markerPopup.width`: number, default `40`; image width.
- `markerPopup.height`: number, default `40`; image height.
- `markerPopup.borderRadius`: number, default `8`; image corner radius.
- `width`: number, default `1600`.
- `height`: number, default `1000`.

## Usage Tips

POI names need enough geographic qualification (city + landmark); attributes can be appended to the name as the business requires, e.g. "上海徐汇门店 A"; the map relies on AMap data and supports China only.

## Return Value

- Returns the pin map URL and saves the points and popup configuration in `_meta.spec`.
@ -1,24 +0,0 @@

# generate_radar_chart — Radar Chart

## Overview

Compares capability dimensions of one or more subjects on a multi-dimensional coordinate system; common in evaluations, product comparisons, and performance profiles.

## Input Fields

### Required

- `data`: array<object>; each record contains `name` (string) and `value` (number), with an optional `group` (string).

### Optional

- `style.backgroundColor`: string; background color.
- `style.lineWidth`: number; radar line width.
- `style.palette`: string[]; series colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Keep the number of dimensions between 4 and 8; distinguish subjects via `group` and give every subject a value on each dimension; normalize first if the scales differ.

## Return Value

- Returns the radar chart URL with `_meta.spec` attached.
@ -1,24 +0,0 @@

# generate_sankey_chart — Sankey Chart

## Overview

Shows the direction and volume of resource, energy, or user flows between nodes; suited to budget allocation, traffic paths, energy distribution, and similar analyses.

## Input Fields

### Required

- `data`: array<object>; each record contains `source` (string), `target` (string), and `value` (number).

### Optional

- `nodeAlign`: string, default `center`; one of `left`/`right`/`justify`/`center`.
- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; node colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Keep node names unique and avoid excessive crossings; flatten any cycles into staged flows first; filter out small flows by threshold to focus on what matters.

## Return Value

- Returns the Sankey chart URL and stores the node and flow definitions in `_meta.spec`.
@ -1,25 +0,0 @@

# generate_scatter_chart — Scatter Chart

## Overview

Shows the relationship between two continuous variables, with color/shape distinguishing groups; suited to correlation analysis and cluster exploration.

## Input Fields

### Required

- `data`: array<object>; each record contains `x` (number) and `y` (number), with an optional `group` (string).

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; series colors.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

Standardize variables with different scales before upload; sample first if the dataset is very large; use `group` to separate categories or clustering results for readability.

## Return Value

- Returns the scatter chart URL with `_meta.spec` attached.
@ -1,24 +0,0 @@

# generate_spreadsheet — Spreadsheet / Pivot Table

## Overview

Generates a spreadsheet or pivot table for structured tabular data. When `rows` or `values` is provided, it renders as a pivot table (cross-tab); otherwise it renders as a regular table. Suited to presenting structured data, comparing values across categories, and building data summaries.

## Input Fields

### Required

- `data`: array<object>; the table rows, one object per row. Keys are column names; values may be strings, numbers, null, or undefined. Example: `[{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }]`.

### Optional

- `rows`: array<string>; row-header fields for the pivot table. Providing `rows` or `values` switches rendering to a pivot table.
- `columns`: array<string>; column-header fields that fix the column order. For a regular table this sets the column order; for a pivot table it drives column grouping.
- `values`: array<string>; value fields for the pivot table. Providing `rows` or `values` switches rendering to a pivot table.
- `theme`: string, default `default`; one of `default`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.

## Usage Tips

- For a regular table, just provide `data` plus an optional `columns` to control the column order.
- For a pivot table (cross-tab), provide `rows` for row grouping, `columns` for column grouping, and `values` for the aggregated value fields.
- Make sure the field names in the data match those given in `rows`, `columns`, and `values`.

## Return Value

- Returns the spreadsheet/pivot table image URL with `_meta.spec` attached for later edits.
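A pivot-table payload consistent with the rules above (the sample data is invented):

```json
{
  "tool": "generate_spreadsheet",
  "args": {
    "data": [
      { "region": "East", "quarter": "Q1", "sales": 120 },
      { "region": "East", "quarter": "Q2", "sales": 135 },
      { "region": "West", "quarter": "Q1", "sales": 98 },
      { "region": "West", "quarter": "Q2", "sales": 110 }
    ],
    "rows": ["region"],
    "columns": ["quarter"],
    "values": ["sales"]
  }
}
```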
@ -1,23 +0,0 @@

# generate_treemap_chart — Treemap

## Overview

Shows hierarchical structure and node weights with nested rectangles; suited to asset shares, market share, directory sizes, and the like.

## Input Fields

### Required

- `data`: array<object>; node array where each entry contains `name` (string) and `value` (number), with `children` nesting recursively.

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Make sure every node's `value` is ≥0 and consistent with the sum of its children; avoid overly deep trees, aggregating in advance as needed; appending value units to node names improves readability.

## Return Value

- Returns the treemap URL and syncs `_meta.spec`.
@ -1,23 +0,0 @@

# generate_venn_chart — Venn Diagram

## Overview

Shows intersections, unions, and differences among multiple sets; suited to market segmentation, feature coverage, and user-overlap analysis.

## Input Fields

### Required

- `data`: array<object>; each record contains `value` (number) and `sets` (string[]), with an optional `label` (string).

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.

## Usage Tips

Keep the number of sets ≤4; without exact weights, fill in rough proportions; keep set names short and clear (e.g. "mobile users").

## Return Value

- Returns the Venn diagram URL, stored with `_meta.spec`.
@ -1,25 +0,0 @@

# generate_violin_chart — Violin Chart

## Overview

Combines kernel density curves with box-plot statistics to show distribution shapes across categories; suited to comparing experiment batches or population performance.

## Input Fields

### Required

- `data`: array<object>; each record contains `category` (string) and `value` (number), with an optional `group` (string).

### Optional

- `style.backgroundColor`: string; background color.
- `style.palette`: string[]; color list.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, default empty.
- `axisXTitle`: string, default empty.
- `axisYTitle`: string, default empty.

## Usage Tips

A sample size ≥30 per category is recommended so the density estimate stays stable; combine with a box plot when quartile details need emphasis.

## Return Value

- Returns the violin chart URL and keeps the configuration in `_meta.spec`.
@@ -1,23 +0,0 @@

# generate_word_cloud_chart — Word Cloud

## Overview

Scales word size and placement by frequency or weight; useful for quickly surfacing the themes, sentiment, or keyword hotspots of a text.

## Input Fields

### Required

- `data`: array<object>, where each record contains `text` (string) and `value` (number).

### Optional

- `style.backgroundColor`: string, sets the background color.
- `style.palette`: string[], defines the word-cloud palette.
- `style.texture`: string, default `default`; one of `default`/`rough`.
- `theme`: string, default `default`; one of `default`/`academy`/`dark`.
- `width`: number, default `600`.
- `height`: number, default `400`.
- `title`: string, defaults to an empty string.

## Usage Notes

Remove stop words and merge synonyms before generating; normalize casing to avoid duplicate entries; to highlight sentiment, map the palette to positive/negative values. See the sketch below.
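A minimal sketch with illustrative term weights:

```bash
node generate.js '{
  "tool": "generate_word_cloud_chart",
  "args": {
    "data": [
      { "text": "latency", "value": 42 },
      { "text": "throughput", "value": 28 },
      { "text": "uptime", "value": 15 }
    ]
  }
}'
```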

## Return Value

- Returns the word-cloud URL, with `_meta.spec` attached.

@@ -1,173 +0,0 @@

#!/usr/bin/env node

const fs = require("fs");

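// Usage sketch (illustrative): the single argument is either inline JSON or a
// path to a JSON file, and a JSON array of {tool, args} objects is also accepted:
//   node generate.js '{"tool":"generate_word_cloud_chart","args":{"data":[{"text":"demo","value":1}]}}'
//   node generate.js specs.json
// Assumes Node 18+ (uses the global fetch API).
// Set VIS_REQUEST_SERVER to override the default rendering endpoint.
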
// Chart type mapping, consistent with src/utils/callTool.ts
const CHART_TYPE_MAP = {
  generate_area_chart: "area",
  generate_bar_chart: "bar",
  generate_boxplot_chart: "boxplot",
  generate_column_chart: "column",
  generate_district_map: "district-map",
  generate_dual_axes_chart: "dual-axes",
  generate_fishbone_diagram: "fishbone-diagram",
  generate_flow_diagram: "flow-diagram",
  generate_funnel_chart: "funnel",
  generate_histogram_chart: "histogram",
  generate_line_chart: "line",
  generate_liquid_chart: "liquid",
  generate_mind_map: "mind-map",
  generate_network_graph: "network-graph",
  generate_organization_chart: "organization-chart",
  generate_path_map: "path-map",
  generate_pie_chart: "pie",
  generate_pin_map: "pin-map",
  generate_radar_chart: "radar",
  generate_sankey_chart: "sankey",
  generate_scatter_chart: "scatter",
  generate_treemap_chart: "treemap",
  generate_venn_chart: "venn",
  generate_violin_chart: "violin",
  generate_word_cloud_chart: "word-cloud",
};

function getVisRequestServer() {
  return (
    process.env.VIS_REQUEST_SERVER ||
    "https://antv-studio.alipay.com/api/gpt-vis"
  );
}

function getServiceIdentifier() {
  return process.env.SERVICE_ID;
}

async function httpPost(url, payload) {
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`HTTP ${response.status}: ${text}`);
  }

  return response.json();
}

async function generateChartUrl(chartType, options) {
  const url = getVisRequestServer();
  const payload = {
    type: chartType,
    source: "chart-visualization-creator",
    ...options,
  };

  const data = await httpPost(url, payload);

  if (!data.success) {
    throw new Error(data.errorMessage || "Unknown error");
  }

  return data.resultObj;
}

async function generateMap(tool, inputData) {
  const url = getVisRequestServer();
  const payload = {
    serviceId: getServiceIdentifier(),
    tool,
    input: inputData,
    source: "chart-visualization-creator",
  };

  const data = await httpPost(url, payload);

  if (!data.success) {
    throw new Error(data.errorMessage || "Unknown error");
  }

  return data.resultObj;
}

async function main() {
  if (process.argv.length < 3) {
    console.error("Usage: node generate.js <spec_json_or_file>");
    process.exit(1);
  }

  const specArg = process.argv[2];
  let spec;

  try {
    if (fs.existsSync(specArg)) {
      const fileContent = fs.readFileSync(specArg, "utf-8");
      spec = JSON.parse(fileContent);
    } else {
      spec = JSON.parse(specArg);
    }
  } catch (e) {
    console.error(`Error parsing spec: ${e.message}`);
    process.exit(1);
  }

  const specs = Array.isArray(spec) ? spec : [spec];

  for (const item of specs) {
    const tool = item.tool;
    const args = item.args || {};

    if (!tool) {
      console.error(
        `Error: 'tool' field missing in spec: ${JSON.stringify(item)}`,
      );
      continue;
    }

    const chartType = CHART_TYPE_MAP[tool];
    if (!chartType) {
      console.error(`Error: Unknown tool '${tool}'`);
      continue;
    }

    const isMapChartTool = [
      "generate_district_map",
      "generate_path_map",
      "generate_pin_map",
    ].includes(tool);

    try {
      if (isMapChartTool) {
        const result = await generateMap(tool, args);
        if (result && result.content) {
          for (const contentItem of result.content) {
            if (contentItem.type === "text") {
              console.log(contentItem.text);
            }
          }
        } else {
          console.log(JSON.stringify(result));
        }
      } else {
        const url = await generateChartUrl(chartType, args);
        console.log(url);
      }
    } catch (e) {
      console.error(`Error generating chart for ${tool}: ${e.message}`);
    }
  }
}

if (require.main === module) {
  main().catch((err) => {
    console.error(err.message);
    process.exit(1);
  });
}

// Export functions for testing
module.exports = { generateChartUrl, generateMap, httpPost, CHART_TYPE_MAP };

@@ -1,217 +0,0 @@

---
name: claude-to-deerflow
description: "Interact with DeerFlow AI agent platform via its HTTP API. Use this skill when the user wants to send messages or questions to DeerFlow for research/analysis, start a DeerFlow conversation thread, check DeerFlow status or health, list available models/skills/agents in DeerFlow, manage DeerFlow memory, upload files to DeerFlow threads, or delegate complex research tasks to DeerFlow. Also use when the user mentions deerflow, deer flow, or wants to run a deep research task that DeerFlow can handle."
---

# DeerFlow Skill

Communicate with a running DeerFlow instance via its HTTP API. DeerFlow is an AI agent platform
built on LangGraph that orchestrates sub-agents for research, code execution, web browsing, and more.

## Architecture

DeerFlow exposes two API surfaces behind an Nginx reverse proxy:

| Service       | Direct Port | Via Proxy                 | Purpose                                          |
|---------------|-------------|---------------------------|--------------------------------------------------|
| Gateway API   | 8001        | `$DEERFLOW_GATEWAY_URL`   | REST endpoints (models, skills, memory, uploads) |
| LangGraph API | 2024        | `$DEERFLOW_LANGGRAPH_URL` | Agent threads, runs, streaming                   |

## Environment Variables

All URLs are configurable via environment variables. **Read these env vars before making any request.**

| Variable                 | Default                         | Description                                        |
|--------------------------|---------------------------------|----------------------------------------------------|
| `DEERFLOW_URL`           | `http://localhost:2026`         | Unified proxy base URL                             |
| `DEERFLOW_GATEWAY_URL`   | `${DEERFLOW_URL}`               | Gateway API base (models, skills, memory, uploads) |
| `DEERFLOW_LANGGRAPH_URL` | `${DEERFLOW_URL}/api/langgraph` | LangGraph API base (threads, runs)                 |

When making curl calls, always resolve the URL like this:

```bash
# Resolve base URLs from env (do this FIRST before any API call)
DEERFLOW_URL="${DEERFLOW_URL:-http://localhost:2026}"
DEERFLOW_GATEWAY_URL="${DEERFLOW_GATEWAY_URL:-$DEERFLOW_URL}"
DEERFLOW_LANGGRAPH_URL="${DEERFLOW_LANGGRAPH_URL:-$DEERFLOW_URL/api/langgraph}"
```

## Available Operations

### 1. Health Check

Verify DeerFlow is running:

```bash
curl -s "$DEERFLOW_GATEWAY_URL/health"
```

### 2. Send a Message (Streaming)

This is the primary operation. It creates a thread and streams the agent's response.

**Step 1: Create a thread**

```bash
curl -s -X POST "$DEERFLOW_LANGGRAPH_URL/threads" \
  -H "Content-Type: application/json" \
  -d '{}'
```

Response: `{"thread_id": "<uuid>", ...}`

**Step 2: Stream a run**

```bash
curl -s -N -X POST "$DEERFLOW_LANGGRAPH_URL/threads/<thread_id>/runs/stream" \
  -H "Content-Type: application/json" \
  -d '{
    "assistant_id": "lead_agent",
    "input": {
      "messages": [
        {
          "type": "human",
          "content": [{"type": "text", "text": "YOUR MESSAGE HERE"}]
        }
      ]
    },
    "stream_mode": ["values", "messages-tuple"],
    "stream_subgraphs": true,
    "config": {
      "recursion_limit": 1000
    },
    "context": {
      "thinking_enabled": true,
      "is_plan_mode": true,
      "subagent_enabled": true,
      "thread_id": "<thread_id>"
    }
  }'
```

The response is an SSE stream. Each event has the format:

```
event: <event_type>
data: <json_data>
```

Key event types:

- `metadata` — run metadata including `run_id`
- `values` — full state snapshot with `messages` array
- `messages-tuple` — incremental message updates (AI text chunks, tool calls, tool results)
- `end` — stream is complete

**Context modes** (set via `context`):

- Flash mode: `thinking_enabled: false, is_plan_mode: false, subagent_enabled: false`
- Standard mode: `thinking_enabled: true, is_plan_mode: false, subagent_enabled: false`
- Pro mode: `thinking_enabled: true, is_plan_mode: true, subagent_enabled: false`
- Ultra mode: `thinking_enabled: true, is_plan_mode: true, subagent_enabled: true`

### 3. Continue a Conversation

To send follow-up messages, reuse the same `thread_id` from step 2 and POST another run
with the new message.
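For example (the payload shape is identical to step 2; pro-mode context shown):

```bash
curl -s -N -X POST "$DEERFLOW_LANGGRAPH_URL/threads/<thread_id>/runs/stream" \
  -H "Content-Type: application/json" \
  -d '{
    "assistant_id": "lead_agent",
    "input": {
      "messages": [
        {
          "type": "human",
          "content": [{"type": "text", "text": "FOLLOW-UP QUESTION HERE"}]
        }
      ]
    },
    "stream_mode": ["values", "messages-tuple"],
    "stream_subgraphs": true,
    "context": {
      "thinking_enabled": true,
      "is_plan_mode": true,
      "subagent_enabled": false,
      "thread_id": "<thread_id>"
    }
  }'
```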
### 4. List Models

```bash
curl -s "$DEERFLOW_GATEWAY_URL/api/models"
```

Returns: `{"models": [{"name": "...", "provider": "...", ...}, ...]}`

### 5. List Skills

```bash
curl -s "$DEERFLOW_GATEWAY_URL/api/skills"
```

Returns: `{"skills": [{"name": "...", "enabled": true, ...}, ...]}`

### 6. Enable/Disable a Skill

```bash
curl -s -X PUT "$DEERFLOW_GATEWAY_URL/api/skills/<skill_name>" \
  -H "Content-Type: application/json" \
  -d '{"enabled": true}'
```

### 7. List Agents

```bash
curl -s "$DEERFLOW_GATEWAY_URL/api/agents"
```

Returns: `{"agents": [{"name": "...", ...}, ...]}`

### 8. Get Memory

```bash
curl -s "$DEERFLOW_GATEWAY_URL/api/memory"
```

Returns user context, facts, and conversation history summaries.

### 9. Upload Files to a Thread

```bash
curl -s -X POST "$DEERFLOW_GATEWAY_URL/api/threads/<thread_id>/uploads" \
  -F "files=@/path/to/file.pdf"
```

Supports PDF, PPTX, XLSX, DOCX — automatically converts to Markdown.

### 10. List Uploaded Files

```bash
curl -s "$DEERFLOW_GATEWAY_URL/api/threads/<thread_id>/uploads/list"
```

### 11. Get Thread History

```bash
curl -s "$DEERFLOW_LANGGRAPH_URL/threads/<thread_id>/history"
```

### 12. List Threads

```bash
curl -s -X POST "$DEERFLOW_LANGGRAPH_URL/threads/search" \
  -H "Content-Type: application/json" \
  -d '{"limit": 20, "sort_by": "updated_at", "sort_order": "desc"}'
```

## Usage Script

For sending messages and collecting the full response, use the helper script:

```bash
bash /path/to/skills/claude-to-deerflow/scripts/chat.sh "Your question here"
```

See `scripts/chat.sh` for the implementation. The script:

1. Checks health
2. Creates a thread
3. Streams the run and collects the final AI response
4. Prints the result

## Parsing SSE Output

The stream returns SSE events. To extract the final AI response from a `values` event:

- Look for the last `event: values` block
- Parse its `data` JSON
- The `messages` array contains all messages; the last one with `type: "ai"` is the response
- The `content` field of that message is the AI's text reply
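A naive extraction sketch (assumes each `data:` payload fits on one line, as in the format above; `scripts/chat.sh` handles the general case):

```bash
# response.sse holds the saved SSE stream. Read it bottom-up: the first
# "event: values" seen in reverse order is the last one emitted; print its data.
tac response.sse | awk '/^data:/ {d = substr($0, 7)} /^event: values/ {print d; exit}'
```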

## Error Handling

- If health check fails, DeerFlow is not running. Inform the user they need to start it.
- If the stream returns an error event, extract and display the error message.
- Common issues: port not open, services still starting up, config errors.

## Tips

- For quick questions, use flash mode (fastest, no planning).
- For research tasks, use pro or ultra mode (enables planning and sub-agents).
- You can upload files first, then reference them in your message.
- Thread IDs persist — you can return to a conversation later.

@@ -1,234 +0,0 @@

#!/usr/bin/env bash
# chat.sh — Send a message to DeerFlow and collect the streaming response.
#
# Usage:
#   bash chat.sh "Your question here"
#   bash chat.sh "Your question" <thread_id>        # continue conversation
#   bash chat.sh "Your question" "" pro             # specify mode
#   DEERFLOW_URL=http://host:2026 bash chat.sh "hi" # custom endpoint
#
# Environment variables:
#   DEERFLOW_URL           — Unified proxy base URL (default: http://localhost:2026)
#   DEERFLOW_GATEWAY_URL   — Gateway API base URL (default: $DEERFLOW_URL)
#   DEERFLOW_LANGGRAPH_URL — LangGraph API base URL (default: $DEERFLOW_URL/api/langgraph)
#
# Modes: flash, standard, pro (default), ultra

set -euo pipefail

DEERFLOW_URL="${DEERFLOW_URL:-http://localhost:2026}"
GATEWAY_URL="${DEERFLOW_GATEWAY_URL:-$DEERFLOW_URL}"
LANGGRAPH_URL="${DEERFLOW_LANGGRAPH_URL:-$DEERFLOW_URL/api/langgraph}"
MESSAGE="${1:?Usage: chat.sh <message> [thread_id] [mode]}"
THREAD_ID="${2:-}"
MODE="${3:-pro}"

# --- Health check ---
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" "${GATEWAY_URL}/health" 2>/dev/null || echo "000")
if [ "$HTTP_CODE" = "000" ] || [ "$HTTP_CODE" -ge 400 ]; then
  echo "ERROR: DeerFlow is not reachable at ${GATEWAY_URL} (HTTP ${HTTP_CODE})" >&2
  echo "Make sure DeerFlow is running. Start it with: cd <deerflow-dir> && make dev" >&2
  exit 1
fi

# --- Create or reuse thread ---
if [ -z "$THREAD_ID" ]; then
  THREAD_RESP=$(curl -s -X POST "${LANGGRAPH_URL}/threads" \
    -H "Content-Type: application/json" \
    -d '{}')
  # "|| true" keeps set -e from aborting before the empty-ID check below.
  THREAD_ID=$(echo "$THREAD_RESP" | python3 -c "import sys,json; print(json.load(sys.stdin)['thread_id'])" 2>/dev/null || true)
  if [ -z "$THREAD_ID" ]; then
    echo "ERROR: Failed to create thread. Response: ${THREAD_RESP}" >&2
    exit 1
  fi
  echo "Thread: ${THREAD_ID}" >&2
fi

# --- Build context based on mode ---
case "$MODE" in
  flash)
    CONTEXT='{"thinking_enabled":false,"is_plan_mode":false,"subagent_enabled":false,"thread_id":"'"$THREAD_ID"'"}'
    ;;
  standard)
    CONTEXT='{"thinking_enabled":true,"is_plan_mode":false,"subagent_enabled":false,"thread_id":"'"$THREAD_ID"'"}'
    ;;
  pro)
    CONTEXT='{"thinking_enabled":true,"is_plan_mode":true,"subagent_enabled":false,"thread_id":"'"$THREAD_ID"'"}'
    ;;
  ultra)
    CONTEXT='{"thinking_enabled":true,"is_plan_mode":true,"subagent_enabled":true,"thread_id":"'"$THREAD_ID"'"}'
    ;;
  *)
    echo "ERROR: Unknown mode '${MODE}'. Use: flash, standard, pro, ultra" >&2
    exit 1
    ;;
esac

# --- Escape message for JSON ---
ESCAPED_MSG=$(python3 -c "import json,sys; print(json.dumps(sys.argv[1]))" "$MESSAGE")

# --- Build request body ---
BODY=$(cat <<ENDJSON
{
  "assistant_id": "lead_agent",
  "input": {
    "messages": [
      {
        "type": "human",
        "content": [{"type": "text", "text": ${ESCAPED_MSG}}]
      }
    ]
  },
  "stream_mode": ["values", "messages-tuple"],
  "stream_subgraphs": true,
  "config": {
    "recursion_limit": 1000
  },
  "context": ${CONTEXT}
}
ENDJSON
)

# --- Stream the run and extract final response ---
# We collect the full SSE output, then parse the last values event to get the AI response.
TMPFILE=$(mktemp)
trap "rm -f '$TMPFILE'" EXIT

curl -s -N -X POST "${LANGGRAPH_URL}/threads/${THREAD_ID}/runs/stream" \
  -H "Content-Type: application/json" \
  -d "$BODY" > "$TMPFILE"

# Parse the SSE output: extract the last "event: values" data block and get the final AI message
python3 - "$TMPFILE" "$GATEWAY_URL" "$THREAD_ID" << 'PYEOF'
import json
import sys

sse_file = sys.argv[1] if len(sys.argv) > 1 else None
gateway_url = sys.argv[2].rstrip("/") if len(sys.argv) > 2 else "http://localhost:2026"
thread_id = sys.argv[3] if len(sys.argv) > 3 else ""
if not sse_file:
    sys.exit(1)

with open(sse_file, "r") as f:
    raw = f.read()

# Parse SSE events
events = []
current_event = None
current_data_lines = []

for line in raw.split("\n"):
    if line.startswith("event:"):
        if current_event and current_data_lines:
            events.append((current_event, "\n".join(current_data_lines)))
        current_event = line[len("event:"):].strip()
        current_data_lines = []
    elif line.startswith("data:"):
        current_data_lines.append(line[len("data:"):].strip())
    elif line == "" and current_event:
        if current_data_lines:
            events.append((current_event, "\n".join(current_data_lines)))
        current_event = None
        current_data_lines = []

# Flush remaining
if current_event and current_data_lines:
    events.append((current_event, "\n".join(current_data_lines)))


def extract_response_text(messages):
    """Mirror manager.py _extract_response_text: handles ask_clarification interrupt + regular AI."""
    for msg in reversed(messages):
        if not isinstance(msg, dict):
            continue
        msg_type = msg.get("type")
        # ask_clarification interrupt: tool message with name ask_clarification
        if msg_type == "tool" and msg.get("name") == "ask_clarification":
            content = msg.get("content", "")
            if isinstance(content, str) and content:
                return content
        # Regular AI message
        if msg_type == "ai":
            content = msg.get("content", "")
            if isinstance(content, str) and content:
                return content
            if isinstance(content, list):
                parts = []
                for block in content:
                    if isinstance(block, dict) and block.get("type") == "text":
                        parts.append(block.get("text", ""))
                    elif isinstance(block, str):
                        parts.append(block)
                text = "".join(parts)
                if text:
                    return text
    return ""


def extract_artifacts(messages):
    """Mirror manager.py _extract_artifacts: only artifacts from the last response cycle."""
    artifacts = []
    for msg in reversed(messages):
        if not isinstance(msg, dict):
            continue
        if msg.get("type") == "human":
            break
        if msg.get("type") == "ai":
            for tc in msg.get("tool_calls", []):
                if isinstance(tc, dict) and tc.get("name") == "present_files":
                    paths = tc.get("args", {}).get("filepaths", [])
                    if isinstance(paths, list):
                        artifacts.extend(p for p in paths if isinstance(p, str))
    return artifacts


def artifact_url(virtual_path):
    # virtual_path like /mnt/user-data/outputs/file.md
    # API endpoint: {gateway}/api/threads/{thread_id}/artifacts/{path without leading slash}
    path = virtual_path.lstrip("/")
    return f"{gateway_url}/api/threads/{thread_id}/artifacts/{path}"


def format_artifact_text(artifacts):
    urls = [artifact_url(p) for p in artifacts]
    if len(urls) == 1:
        return f"Created File: {urls[0]}"
    return "Created Files:\n" + "\n".join(urls)


# Find the last "values" event with messages
result_messages = None
for event_type, data_str in reversed(events):
    if event_type != "values":
        continue
    try:
        data = json.loads(data_str)
    except json.JSONDecodeError:
        continue
    if "messages" in data:
        result_messages = data["messages"]
        break

if result_messages is not None:
    response_text = extract_response_text(result_messages)
    artifacts = extract_artifacts(result_messages)
    if artifacts:
        artifact_text = format_artifact_text(artifacts)
        response_text = (response_text + "\n\n" + artifact_text) if response_text else artifact_text
    if response_text:
        print(response_text)
    else:
        print("(No response from agent)", file=sys.stderr)
        sys.exit(1)
else:
    # Check for error events
    for event_type, data_str in events:
        if event_type == "error":
            print(f"ERROR from DeerFlow: {data_str}", file=sys.stderr)
            sys.exit(1)
    print("No AI response found in the stream.", file=sys.stderr)
    if len(raw) < 2000:
        print(f"Raw SSE output:\n{raw}", file=sys.stderr)
    sys.exit(1)
PYEOF

# Keep decorations off stdout so callers can capture the response cleanly.
echo "" >&2
echo "---" >&2
echo "Thread ID: ${THREAD_ID}" >&2

@@ -1,98 +0,0 @@

#!/usr/bin/env bash
# status.sh — Check DeerFlow status and list available resources.
#
# Usage:
#   bash status.sh              # health + summary
#   bash status.sh models       # list models
#   bash status.sh skills       # list skills
#   bash status.sh agents       # list agents
#   bash status.sh threads      # list recent threads
#   bash status.sh memory       # show memory
#   bash status.sh thread <id>  # show thread history
#
# Environment variables:
#   DEERFLOW_URL           — Unified proxy base URL (default: http://localhost:2026)
#   DEERFLOW_GATEWAY_URL   — Gateway API base URL (default: $DEERFLOW_URL)
#   DEERFLOW_LANGGRAPH_URL — LangGraph API base URL (default: $DEERFLOW_URL/api/langgraph)

set -euo pipefail

DEERFLOW_URL="${DEERFLOW_URL:-http://localhost:2026}"
GATEWAY_URL="${DEERFLOW_GATEWAY_URL:-$DEERFLOW_URL}"
LANGGRAPH_URL="${DEERFLOW_LANGGRAPH_URL:-$DEERFLOW_URL/api/langgraph}"
CMD="${1:-health}"
ARG="${2:-}"

case "$CMD" in
  health)
    echo "Checking DeerFlow at ${GATEWAY_URL}..."
    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" "${GATEWAY_URL}/health" 2>/dev/null || echo "000")
    if [ "$HTTP_CODE" = "000" ]; then
      echo "UNREACHABLE — DeerFlow is not running at ${GATEWAY_URL}"
      exit 1
    elif [ "$HTTP_CODE" -ge 400 ]; then
      echo "ERROR — Health check returned HTTP ${HTTP_CODE}"
      exit 1
    else
      echo "OK — DeerFlow is running (HTTP ${HTTP_CODE})"
    fi
    ;;
  models)
    curl -s "${GATEWAY_URL}/api/models" | python3 -m json.tool
    ;;
  skills)
    curl -s "${GATEWAY_URL}/api/skills" | python3 -m json.tool
    ;;
  agents)
    curl -s "${GATEWAY_URL}/api/agents" | python3 -m json.tool
    ;;
  threads)
    curl -s -X POST "${LANGGRAPH_URL}/threads/search" \
      -H "Content-Type: application/json" \
      -d '{"limit": 20, "sort_by": "updated_at", "sort_order": "desc", "select": ["thread_id", "updated_at", "values"]}' \
      | python3 -c "
import json, sys
threads = json.load(sys.stdin)
if not threads:
    print('No threads found.')
    sys.exit(0)
for t in threads:
    tid = t.get('thread_id', '?')
    updated = t.get('updated_at', '?')
    title = (t.get('values') or {}).get('title', '(untitled)')
    print(f'{tid} {updated} {title}')
"
    ;;
  memory)
    curl -s "${GATEWAY_URL}/api/memory" | python3 -m json.tool
    ;;
  thread)
    if [ -z "$ARG" ]; then
      echo "Usage: status.sh thread <thread_id>" >&2
      exit 1
    fi
    curl -s "${LANGGRAPH_URL}/threads/${ARG}/history" | python3 -c "
import json, sys
data = json.load(sys.stdin)
if isinstance(data, list):
    for state in data[:5]:
        values = state.get('values', {})
        msgs = values.get('messages', [])
        for m in msgs[-5:]:
            role = m.get('type', '?')
            content = m.get('content', '')
            if isinstance(content, list):
                content = ' '.join(p.get('text','') for p in content if isinstance(p, dict))
            preview = content[:200] if content else '(empty)'
            print(f'[{role}] {preview}')
        print('---')
else:
    print(json.dumps(data, indent=2))
"
    ;;
  *)
    echo "Unknown command: ${CMD}" >&2
    echo "Usage: status.sh [health|models|skills|agents|threads|memory|thread <id>]" >&2
    exit 1
    ;;
esac

@@ -1,415 +0,0 @@

---
name: code-documentation
description: Use this skill when the user requests to generate, create, or improve documentation for code, APIs, libraries, repositories, or software projects. Supports README generation, API reference documentation, inline code comments, architecture documentation, changelog generation, and developer guides. Trigger on requests like "document this code", "create a README", "generate API docs", "write developer guide", or when analyzing codebases for documentation purposes.
---

# Code Documentation Skill

## Overview

This skill generates professional, comprehensive documentation for software projects, codebases, libraries, and APIs. It follows industry best practices from projects like React, Django, Stripe, and Kubernetes to produce documentation that is accurate, well-structured, and useful for both new contributors and experienced developers.

The output ranges from single-file READMEs to multi-document developer guides, always matched to the project's complexity and the user's needs.

## Core Capabilities

- Generate comprehensive README.md files with badges, installation, usage, and API reference
- Create API reference documentation from source code analysis
- Produce architecture and design documentation with diagrams
- Write developer onboarding and contribution guides
- Generate changelogs from commit history or release notes
- Create inline code documentation following language-specific conventions
- Support JSDoc, docstrings, GoDoc, Javadoc, and Rustdoc formats
- Adapt documentation style to the project's language and ecosystem

## When to Use This Skill

**Always load this skill when:**

- User asks to "document", "create docs", or "write documentation" for any code
- User requests a README, API reference, or developer guide
- User shares a codebase or repository and wants documentation generated
- User asks to improve or update existing documentation
- User needs architecture documentation, including diagrams
- User requests a changelog or migration guide

## Documentation Workflow

### Phase 1: Codebase Analysis

Before writing any documentation, thoroughly understand the codebase.

#### Step 1.1: Project Discovery

Identify the project fundamentals:

| Field | How to Determine |
|-------|-----------------|
| **Language(s)** | Check file extensions, `package.json`, `pyproject.toml`, `go.mod`, `Cargo.toml`, etc. |
| **Framework** | Look at dependencies for known frameworks (React, Django, Express, Spring, etc.) |
| **Build System** | Check for `Makefile`, `CMakeLists.txt`, `webpack.config.js`, `build.gradle`, etc. |
| **Package Manager** | npm/yarn/pnpm, pip/uv/poetry, cargo, go modules, etc. |
| **Project Structure** | Map out the directory tree to understand the architecture |
| **Entry Points** | Find main files, CLI entry points, exported modules |
| **Existing Docs** | Check for existing README, docs/, wiki, or inline documentation |
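A quick fingerprinting sketch for the first rows of this table (marker files listed are the common ones; extend as needed):

```bash
# Report which well-known project marker files exist in the current directory.
for f in package.json pyproject.toml go.mod Cargo.toml Makefile CMakeLists.txt; do
  [ -f "$f" ] && echo "found: $f"
done
```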
#### Step 1.2: Code Structure Analysis

Use sandbox tools to explore the codebase:

```bash
# Get directory structure
ls /mnt/user-data/uploads/project-dir/

# Read key files
read_file /mnt/user-data/uploads/project-dir/package.json
read_file /mnt/user-data/uploads/project-dir/pyproject.toml

# Search for public API surfaces
grep -r "export " /mnt/user-data/uploads/project-dir/src/
grep -r "def " /mnt/user-data/uploads/project-dir/src/ --include="*.py"
grep -r "func " /mnt/user-data/uploads/project-dir/ --include="*.go"
```

#### Step 1.3: Identify Documentation Scope

Based on analysis, determine what documentation to produce:

| Project Size | Recommended Documentation |
|-------------|--------------------------|
| **Single file / script** | Inline comments + usage header |
| **Small library** | README with API reference |
| **Medium project** | README + API docs + examples |
| **Large project** | README + Architecture + API + Contributing + Changelog |

### Phase 2: Documentation Generation

#### Step 2.1: README Generation

Every project needs a README. Follow this structure:

```markdown
# Project Name

[One-line project description — what it does and why it matters]

[](#) [](#)

## Features

- [Key feature 1 — brief description]
- [Key feature 2 — brief description]
- [Key feature 3 — brief description]

## Quick Start

### Prerequisites

- [Prerequisite 1 with version requirement]
- [Prerequisite 2 with version requirement]

### Installation

[Installation commands with copy-paste-ready code blocks]

### Basic Usage

[Minimal working example that demonstrates core functionality]

## Documentation

- [Link to full API reference if separate]
- [Link to architecture docs if separate]
- [Link to examples directory if applicable]

## API Reference

[Inline API reference for smaller projects OR link to generated docs]

## Configuration

[Environment variables, config files, or runtime options]

## Examples

[2-3 practical examples covering common use cases]

## Development

### Setup

[How to set up a development environment]

### Testing

[How to run tests]

### Building

[How to build the project]

## Contributing

[Contribution guidelines or link to CONTRIBUTING.md]

## License

[License information]
```

#### Step 2.2: API Reference Generation

For each public API surface, document:

**Function / Method Documentation**:

```markdown
### `functionName(param1, param2, options?)`

Brief description of what this function does.

**Parameters:**

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `param1` | `string` | Yes | — | Description of param1 |
| `param2` | `number` | Yes | — | Description of param2 |
| `options` | `Object` | No | `{}` | Configuration options |
| `options.timeout` | `number` | No | `5000` | Timeout in milliseconds |

**Returns:** `Promise<Result>` — Description of return value

**Throws:**
- `ValidationError` — When param1 is empty
- `TimeoutError` — When the operation exceeds the timeout

**Example:**

\`\`\`javascript
const result = await functionName("hello", 42, { timeout: 10000 });
console.log(result.data);
\`\`\`
```

**Class Documentation**:

```markdown
### `ClassName`

Brief description of the class and its purpose.

**Constructor:**

\`\`\`javascript
new ClassName(config)
\`\`\`

| Parameter | Type | Description |
|-----------|------|-------------|
| `config.option1` | `string` | Description |
| `config.option2` | `boolean` | Description |

**Methods:**

- [`method1()`](#method1) — Brief description
- [`method2(param)`](#method2) — Brief description

**Properties:**

| Property | Type | Description |
|----------|------|-------------|
| `property1` | `string` | Description |
| `property2` | `number` | Read-only. Description |
```

#### Step 2.3: Architecture Documentation

For medium-to-large projects, include architecture documentation:

```markdown
# Architecture Overview

## System Diagram

[Include a Mermaid diagram showing the high-level architecture]

\`\`\`mermaid
graph TD
    A[Client] --> B[API Gateway]
    B --> C[Service A]
    B --> D[Service B]
    C --> E[(Database)]
    D --> E
\`\`\`

## Component Overview

### Component Name
- **Purpose**: What this component does
- **Location**: `src/components/name/`
- **Dependencies**: What it depends on
- **Public API**: Key exports or interfaces

## Data Flow

[Describe how data flows through the system for key operations]

## Design Decisions

### Decision Title
- **Context**: What situation led to this decision
- **Decision**: What was decided
- **Rationale**: Why this approach was chosen
- **Trade-offs**: What was sacrificed
```

#### Step 2.4: Inline Code Documentation

Generate language-appropriate inline documentation:

**Python (Docstrings — Google style)**:
```python
def process_data(input_path: str, options: dict | None = None) -> ProcessResult:
    """Process data from the given file path.

    Reads the input file, applies transformations based on the provided
    options, and returns a structured result object.

    Args:
        input_path: Absolute path to the input data file.
            Supports CSV, JSON, and Parquet formats.
        options: Optional configuration dictionary.
            - "validate" (bool): Enable input validation. Defaults to True.
            - "format" (str): Output format ("json" or "csv"). Defaults to "json".

    Returns:
        A ProcessResult containing the transformed data and metadata.

    Raises:
        FileNotFoundError: If input_path does not exist.
        ValidationError: If validation is enabled and data is malformed.

    Example:
        >>> result = process_data("/data/input.csv", {"validate": True})
        >>> print(result.row_count)
        1500
    """
```

**TypeScript (JSDoc / TSDoc)**:
```typescript
/**
 * Fetches user data from the API and transforms it for display.
 *
 * @param userId - The unique identifier of the user
 * @param options - Configuration options for the fetch operation
 * @param options.includeProfile - Whether to include the full profile. Defaults to `false`.
 * @param options.cache - Cache duration in seconds. Set to `0` to disable.
 * @returns The transformed user data ready for rendering
 * @throws {NotFoundError} When the user ID does not exist
 * @throws {NetworkError} When the API is unreachable
 *
 * @example
 * ```ts
 * const user = await fetchUser("usr_123", { includeProfile: true });
 * console.log(user.displayName);
 * ```
 */
```

**Go (GoDoc)**:
```go
// ProcessData reads the input file at the given path, applies the specified
// transformations, and returns the processed result.
//
// The input path must be an absolute path to a CSV or JSON file.
// If options is nil, default options are used.
//
// ProcessData returns an error if the file does not exist or cannot be parsed.
func ProcessData(inputPath string, options *ProcessOptions) (*Result, error) {
```

### Phase 3: Quality Assurance

#### Step 3.1: Documentation Completeness Check

Verify the documentation covers:

- [ ] **What it is** — Clear project description that a newcomer can understand
- [ ] **Why it exists** — Problem it solves and value proposition
- [ ] **How to install** — Copy-paste-ready installation commands
- [ ] **How to use** — At least one minimal working example
- [ ] **API surface** — All public functions, classes, and types documented
- [ ] **Configuration** — All environment variables, config files, and options
- [ ] **Error handling** — Common errors and how to resolve them
- [ ] **Contributing** — How to set up dev environment and submit changes

#### Step 3.2: Quality Standards

| Standard | Check |
|----------|-------|
| **Accuracy** | Every code example must actually work with the described API |
| **Completeness** | No public API surface left undocumented |
| **Consistency** | Same formatting and structure throughout |
| **Freshness** | Documentation matches the current code, not an older version |
| **Accessibility** | No jargon without explanation, acronyms defined on first use |
| **Examples** | Every complex concept has at least one practical example |

#### Step 3.3: Cross-reference Validation

Ensure:
- All mentioned file paths exist in the project
- All referenced functions and classes exist in the code
- All code examples use the correct function signatures
- Version numbers match the project's actual version
- All links (internal and external) are valid

## Documentation Style Guide

### Writing Principles

1. **Lead with the "why"** — Before explaining how something works, explain why it exists
2. **Progressive disclosure** — Start simple, add complexity gradually
3. **Show, don't tell** — Prefer code examples over lengthy explanations
4. **Active voice** — "The function returns X" not "X is returned by the function"
5. **Present tense** — "The server starts on port 8080" not "The server will start on port 8080"
6. **Second person** — "You can configure..." not "Users can configure..."

### Formatting Rules

- Use ATX-style headers (`#`, `##`, `###`)
- Use fenced code blocks with language specification (` ```python `, ` ```bash `)
- Use tables for structured information (parameters, options, configuration)
- Use admonitions for important notes, warnings, and tips
- Keep line length readable (wrap prose at ~80-100 characters in source)
- Use `code formatting` for function names, file paths, variable names, and CLI commands

### Language-Specific Conventions

| Language | Doc Format | Style Guide |
|----------|-----------|-------------|
| Python | Google-style docstrings | PEP 257 |
| TypeScript/JavaScript | TSDoc / JSDoc | TypeDoc conventions |
| Go | GoDoc comments | Effective Go |
| Rust | Rustdoc (`///`) | Rust API Guidelines |
| Java | Javadoc | Oracle Javadoc Guide |
| C/C++ | Doxygen | Doxygen manual |

## Output Handling

After generation:

- Save documentation files to `/mnt/user-data/outputs/`
- For multi-file documentation, maintain the project directory structure
- Present generated files to the user using the `present_files` tool
- Offer to iterate on specific sections or adjust the level of detail
- Suggest additional documentation that might be valuable

## Notes

- Always analyze the actual code before writing documentation — never guess at API signatures or behavior
- When existing documentation exists, preserve its structure unless the user explicitly asks for a rewrite
- For large codebases, prioritize documenting the public API surface and key abstractions first
- Documentation should be written in the same language as the project's existing docs; default to English if none exist
- When generating changelogs, use the [Keep a Changelog](https://keepachangelog.com/) format
- This skill works well in combination with the `deep-research` skill for documenting third-party integrations or dependencies

@@ -1,631 +0,0 @@

---
name: consulting-analysis
description: Use this skill when the user requests to generate, create, or write professional research reports including but not limited to market analysis, consumer insights, brand analysis, financial analysis, industry research, competitive intelligence, investment due diligence, or any consulting-grade analytical report. This skill operates in two phases — (1) generating a structured analysis framework with chapter skeleton, data query requirements, and analysis logic, and (2) after data collection by other skills, producing the final consulting-grade report with structured narratives, embedded charts, and strategic insights.
---

# Professional Research Report Skill

## Overview

This skill produces professional, consulting-grade research reports in Markdown format, covering domains such as **market analysis, consumer insights, brand strategy, financial analysis, industry research, competitive intelligence, investment research, and macroeconomic analysis**. It operates across two distinct phases:

1. **Phase 1 — Analysis Framework Generation**: Given a research subject, produce a rigorous analysis framework including chapter skeleton, per-chapter data requirements, analysis logic, and visualization plan.
2. **Phase 2 — Report Generation**: After data has been collected by other skills, synthesize all inputs into a final polished report.

The output adheres to McKinsey/BCG consulting voice standards. The report language follows the `output_locale` setting (default: `zh_CN` for Chinese).

## Data Authenticity Protocol

**Strict Adherence Rule**: All data presented in the report and visualized in charts MUST be derived directly from the provided **Data Summary** or **External Search Findings**.
- **NO Hallucinations**: Do not invent, estimate, or simulate data. If data is missing, state "Data not available" rather than fabricating numbers.
- **Traceable Sources**: Every major claim and chart must be traceable back to the input data package.

## Core Capabilities

- **Design analysis frameworks** from scratch given only a research subject and scope
- Transform raw data into structured, high-depth research reports
- Follow the **"Visual Anchor → Data Contrast → Integrated Analysis"** flow per sub-chapter
- Produce insights following the **"Data → User Psychology → Strategy Implication"** chain
- Embed pre-generated charts and construct comparison tables
- Generate inline citations formatted per **GB/T 7714-2015** standards
- Output reports in the language specified by `output_locale` with professional consulting tone
- Adapt analytical depth and structure to domain (marketing, finance, industry, etc.)

## When to Use This Skill

**Always load this skill when:**

- User asks for a market analysis, consumer insight report, financial analysis, industry research, or any consulting-grade analytical report
- User provides a research subject and needs a structured analysis framework before data collection
- User provides data summaries, analysis frameworks, or chart files to be synthesized into a report
- User needs a professional consulting-style research report
- The task involves transforming research findings into structured strategic narratives

---

# Phase 1: Analysis Framework Generation

## Purpose

Given a **research subject** (e.g., "Gen-Z Skincare Market Analysis", "NEV Industry Competitive Landscape", "Brand X Consumer Profiling"), produce a complete **analysis framework** that serves as the blueprint for downstream data collection and final report generation.

## Phase 1 Inputs

| Input | Description | Required |
|-------|-------------|----------|
| **Research Subject** | The topic or question to be analyzed | Yes |
| **Scope / Constraints** | Geographic scope, time range, industry segment, target audience, etc. | Optional |
| **Specific Angles** | Any particular angles or hypotheses the user wants explored | Optional |
| **Domain** | The analytical domain: market, finance, industry, brand, consumer, investment, etc. | Inferred |

## Phase 1 Workflow

### Step 1.1: Understand the Research Subject

- Parse the research subject to identify the **core entity** (market, brand, product, industry, consumer segment, financial instrument, etc.)
- Identify the **analytical domain** (marketing, finance, industry, competitive, consumer, investment, macro, etc.)
- Determine the **natural analytical dimensions** based on domain:

| Domain | Typical Dimensions |
|--------|--------------------|
| Market Analysis | Market size, growth trends, market segmentation, growth drivers, competitive landscape, consumer profiling |
| Brand Analysis | Brand positioning, market share, consumer perception, marketing strategy, competitor comparison |
| Consumer Insights | Demographic profiling, purchase behavior, decision journey, pain points, scenario analysis |
| Financial Analysis | Macro environment, industry trends, company fundamentals, financial metrics, valuation, risk assessment |
| Industry Research | Value chain analysis, market size, competitive landscape, policy environment, technology trends, entry barriers |
| Investment Due Diligence | Business model, financial health, management assessment, market opportunity, risk factors, exit pathways |
| Competitive Intelligence | Competitor identification, strategic comparison, SWOT analysis, differentiated positioning, market dynamics |

### Step 1.2: Select Analysis Frameworks & Models

Based on the identified domain and research subject, select **one or more** professional analysis frameworks to structure the reasoning in each chapter. The chosen frameworks guide the **Analysis Logic** in the chapter skeleton (Step 1.3).

#### Strategic & Environmental Analysis

| Framework | Description | Best For |
|-----------|-------------|----------|
| **SWOT Analysis** | Strengths, Weaknesses, Opportunities, Threats | Brand assessment, competitive positioning, strategic planning |
| **PEST / PESTEL Analysis** | Political, Economic, Social, Technological (+ Environmental, Legal) | Macro-environment scanning, market entry assessment, policy impact analysis |
| **Porter's Five Forces** | Supplier bargaining power, buyer bargaining power, threat of new entrants, threat of substitutes, industry rivalry | Industry competitive landscape, entry barrier assessment, profit margin analysis |
| **Porter's Diamond Model** | Factor conditions, demand conditions, related industries, firm strategy & structure | National/regional competitive advantage analysis |
| **VRIO Analysis** | Value, Rarity, Imitability, Organization | Core competency assessment, resource advantage analysis |

#### Market & Growth Analysis

| Framework | Description | Best For |
|-----------|-------------|----------|
| **STP Analysis** | Segmentation, Targeting, Positioning | Market segmentation, target market selection, brand positioning |
| **BCG Matrix (Growth-Share Matrix)** | Stars, Cash Cows, Question Marks, Dogs | Product portfolio management, resource allocation decisions |
| **Ansoff Matrix** | Market penetration, market development, product development, diversification | Growth strategy selection |
| **Product Life Cycle (PLC)** | Introduction, growth, maturity, decline | Product strategy formulation, market timing decisions |
| **TAM-SAM-SOM** | Total / Serviceable / Obtainable Market | Market sizing, opportunity quantification |
| **Technology Adoption Lifecycle** | Innovators → Early Adopters → Early Majority → Late Majority → Laggards | Emerging technology/category penetration analysis |

#### Consumer & Behavioral Analysis

| Framework | Description | Best For |
|-----------|-------------|----------|
| **Consumer Decision Journey** | Awareness → Consideration → Evaluation → Purchase → Loyalty | Consumer behavior path mapping, touchpoint optimization |
| **AARRR Funnel (Pirate Metrics)** | Acquisition, Activation, Retention, Revenue, Referral | User growth analysis, conversion rate optimization |
| **RFM Model** | Recency, Frequency, Monetary | Customer value segmentation, precision marketing |
| **Maslow's Hierarchy of Needs** | Physiological → Safety → Social → Esteem → Self-actualization | Consumer psychology analysis, product value proposition |
| **Jobs-to-be-Done (JTBD)** | The "job" a user needs to accomplish in a specific context | Demand insight, product innovation direction |

#### Financial & Valuation Analysis

| Framework | Description | Best For |
|-----------|-------------|----------|
| **DuPont Analysis** | ROE = Net Profit Margin × Asset Turnover × Equity Multiplier | Profitability decomposition, financial health diagnosis |
| **DCF (Discounted Cash Flow)** | Free cash flow discounting | Enterprise/project valuation |
| **Comparable Company Analysis** | PE, PB, PS, EV/EBITDA multiples comparison | Relative valuation, peer benchmarking |
| **EVA (Economic Value Added)** | After-tax operating profit - Cost of capital | Value creation capability assessment |
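As a quick worked instance of the DuPont identity (numbers are illustrative):

$$
\text{ROE} = \underbrace{10\%}_{\text{net profit margin}} \times \underbrace{0.8}_{\text{asset turnover}} \times \underbrace{2.0}_{\text{equity multiplier}} = 16\%
$$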
#### Competitive & Strategic Positioning
|
||||
|
||||
| Framework | Description | Best For |
|
||||
|-----------|-------------|----------|
|
||||
| **Benchmarking** | Key performance indicator item-by-item comparison | Competitor gap analysis, best practice identification |
|
||||
| **Strategic Group Mapping** | Cluster competitors along two key dimensions | Competitive landscape visualization, white-space identification |
|
||||
| **Value Chain Analysis** | Primary activities + support activities value decomposition | Cost advantage sources, differentiation opportunity identification |
|
||||
| **Blue Ocean Strategy** | Value curve, four-action framework (Eliminate-Reduce-Raise-Create) | Differentiated innovation, new market space creation |
|
||||
| **Perceptual Mapping** | Plot brand positions along two consumer-perceived dimensions | Brand positioning analysis, market gap discovery |
|
||||
|
||||
#### Industry & Supply Chain Analysis
|
||||
|
||||
| Framework | Description | Best For |
|
||||
|-----------|-------------|----------|
|
||||
| **Industry Value Chain** | Upstream → Midstream → Downstream decomposition | Industry structure understanding, profit distribution analysis |
|
||||
| **Gartner Hype Cycle** | Technology Trigger → Peak of Inflated Expectations → Trough of Disillusionment → Slope of Enlightenment → Plateau of Productivity | Emerging technology maturity assessment |
| **GE-McKinsey Matrix** | Industry Attractiveness × Competitive Strength | Business portfolio prioritization, investment decisions |

#### Selection Principles

1. **Domain-First**: Based on the domain identified in Step 1.1, select the **2-4** most relevant frameworks from the toolkit above
2. **Complementary**: Choose complementary rather than overlapping frameworks (e.g., macro-level with PESTEL + micro-level with Porter's Five Forces)
3. **Depth over Breadth**: Better to deeply apply 2 frameworks than to superficially stack 6
4. **Data-Feasible**: Selected frameworks must be supportable by downstream data collection skills — if the data required by a framework cannot be reasonably obtained, downgrade or substitute it
5. **Explicit Mapping**: In the chapter skeleton, explicitly annotate which framework each chapter uses and how it is applied

#### Framework Selection Output Format

```markdown
## Framework Selection

| Chapter | Selected Framework(s) | Application |
|---------|----------------------|-------------|
| Market Size & Growth Trends | TAM-SAM-SOM + Product Life Cycle | TAM-SAM-SOM to quantify market space, PLC to determine market stage |
| Competitive Landscape Assessment | Porter's Five Forces + Strategic Group Mapping | Five Forces to assess industry competition intensity, Group Mapping to visualize competitive positioning |
| Consumer Profiling | RFM + Consumer Decision Journey | RFM to segment customer value, Decision Journey to identify key conversion nodes |
| Brand Strategy Recommendations | SWOT + Blue Ocean Strategy | SWOT to summarize overall landscape, Blue Ocean to guide differentiation direction |
```

### Step 1.3: Design Chapter Skeleton

Produce a hierarchical chapter structure. Each chapter must include:

1. **Chapter Title** — Professional, concise, subject-based (follow the titling constraints in the Formatting section)
2. **Analysis Objective** — What this chapter aims to reveal
3. **Analysis Logic** — The reasoning chain or framework (must reference the frameworks selected in Step 1.2)
4. **Core Hypothesis** — Preliminary hypotheses to be validated or refuted by data

#### Chapter Skeleton Output Format

```markdown
## Analysis Framework

### Chapter 1: [Title]
- **Analysis Objective**: [This chapter aims to...]
- **Analysis Logic**: [Framework or reasoning chain used]
- **Core Hypothesis**: [Hypotheses to validate]
- **Data Requirements**: (see Step 1.4)
- **Visualization Plan**: (see Step 1.5)

### Chapter 2: [Title]
...
```

### Step 1.4: Define Data Query Requirements Per Chapter

For each chapter, specify **exactly what data needs to be collected**. This is the bridge to downstream data collection skills.

Each data requirement entry must include:

| Field | Description |
|-------|-------------|
| **Data Metric** | The specific metric or data point needed (e.g., "China skincare market size 2020-2025 (in billion CNY)") |
| **Data Type** | Quantitative, Qualitative, or Mixed |
| **Suggested Sources** | Suggested source categories: Industry reports, financial statements, government statistics, social media, e-commerce platforms, survey data, news |
| **Search Keywords** | Suggested search queries for data collection agents |
| **Priority** | P0 (Required) / P1 (Important) / P2 (Supplementary) |
| **Time Range** | The time period the data should cover |

#### Data Requirements Output Format (per chapter)

```markdown
#### Data Requirements

| # | Data Metric | Data Type | Suggested Sources | Search Keywords | Priority | Time Range |
|---|-------------|-----------|-------------------|-----------------|----------|------------|
| 1 | Market size (billion CNY) | Quantitative | Industry reports, government statistics | "China skincare market size 2024" | P0 | 2020-2025 |
| 2 | CAGR | Quantitative | Industry reports | "skincare CAGR growth rate" | P0 | 2020-2025 |
| 3 | Sub-category share | Quantitative | E-commerce platforms, industry reports | "skincare category share cream serum sunscreen" | P1 | Latest |
| 4 | Policy & regulatory updates | Qualitative | Government announcements, news | "cosmetics regulation 2024" | P2 | Past 1 year |
```

### Step 1.5: Define Visualization & Content Structure Per Chapter

For each chapter, specify the **planned visualization** and **content structure** for the final report:

| Field | Description |
|-------|-------------|
| **Visualization Type** | Chart type: Line chart, bar chart, pie chart, scatter plot, radar chart, heatmap, Sankey diagram, comparison table, etc. |
| **Visualization Title** | Descriptive title for the chart |
| **Visualization Data Mapping** | Which data indicators map to X/Y axes or segments |
| **Comparison Table Design** | Column headers and comparison dimensions for the data contrast table |
| **Argument Structure** | The planned "What → Why → So What" narrative outline |

#### Visualization Plan Output Format (per chapter)

```markdown
#### Visualization & Content Plan

**Chart 1**: [Type] — [Title]
- X-axis: [Dimension], Y-axis: [Metric]
- Data source: Corresponds to Data Requirement #1, #2

**Comparison Table**:
| Dimension | Item A | Item B | Item C |
|-----------|--------|--------|--------|

**Argument Structure**:
1. **Observation (What)**: [Surface phenomenon revealed by data]
2. **Attribution (Why)**: [Driving factors or underlying causes]
3. **Implication (So What)**: [Strategic implications or recommended actions]
```

### Step 1.6: Output Complete Analysis Framework

Assemble all outputs into a single, structured **Analysis Framework Document**:

```markdown
# [Research Subject] Analysis Framework

## Research Overview
- **Research Subject**: [...]
- **Scope**: [Geography, time range, industry segment]
- **Analysis Domain**: [Market / Finance / Industry / Brand / Consumer / ...]
- **Core Research Questions**: [1-3 key questions]

## Framework Selection

| Chapter | Selected Framework(s) | Application |
|---------|----------------------|-------------|
| ... | ... | ... |

## Chapter Skeleton

### 1. [Chapter Title]
- **Analysis Objective**: [...]
- **Analysis Logic**: [...]
- **Core Hypothesis**: [...]

#### Data Requirements
| # | Data Metric | Data Type | Suggested Sources | Search Keywords | Priority | Time Range |
|---|-------------|-----------|-------------------|-----------------|----------|------------|
| ... | ... | ... | ... | ... | ... | ... |

#### Visualization & Content Plan
[Chart plan + Comparison table design + Argument structure]

### 2. [Chapter Title]
...

### N. [Chapter Title]
...

## Data Collection Task List
[Consolidate all P0/P1 data requirements across chapters into a structured task list for downstream data collection skills to execute]
```
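
The consolidation behind the **Data Collection Task List** is mechanical enough to sketch. A minimal Python sketch — the field names and sample rows are illustrative assumptions, not a schema this skill mandates — that keeps P0/P1 requirements, merges duplicates across chapters, and retains the highest priority:

```python
# Illustrative only: consolidate per-chapter data requirements into a
# deduplicated task list. Field names and sample rows are assumptions.
requirements = [
    {"chapter": "1", "metric": "China skincare market size", "priority": "P0", "time_range": "2020-2025"},
    {"chapter": "2", "metric": "China skincare market size", "priority": "P1", "time_range": "2020-2025"},
    {"chapter": "3", "metric": "Gen-Z skincare spending share", "priority": "P0", "time_range": "Latest"},
    {"chapter": "3", "metric": "Policy & regulatory updates", "priority": "P2", "time_range": "Past 1 year"},
]

tasks: dict[tuple[str, str], dict] = {}
for req in requirements:
    if req["priority"] not in ("P0", "P1"):
        continue  # P2 items are supplementary and stay out of the task list
    key = (req["metric"], req["time_range"])  # dedup on metric + time range
    if key in tasks:
        task = tasks[key]
        task["chapters"].append(req["chapter"])
        # "P0" < "P1" lexicographically, so min() keeps the stricter priority
        task["priority"] = min(task["priority"], req["priority"])
    else:
        tasks[key] = {
            "metric": req["metric"],
            "time_range": req["time_range"],
            "priority": req["priority"],
            "chapters": [req["chapter"]],
        }

for task in tasks.values():
    print(f'{task["priority"]} | {task["metric"]} | {task["time_range"]} | chapters {task["chapters"]}')
```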

## Phase 1 Quality Checklist

- [ ] Analysis framework covers all natural dimensions for the identified domain
- [ ] 2-4 professional analysis frameworks are selected and explicitly mapped to chapters
- [ ] Selected frameworks are complementary (not overlapping) and data-feasible
- [ ] Each chapter has a clear Analysis Objective, Analysis Logic (referencing the chosen framework), and Core Hypothesis
- [ ] Data requirements are specific, measurable, and include search keywords
- [ ] Every chapter has at least one visualization plan
- [ ] Data priorities (P0/P1/P2) are assigned realistically
- [ ] The framework is actionable — a data collection agent can execute on the Search Keywords directly
- [ ] The Data Collection Task List is comprehensive and deduplicated

---

# Phase 1→2 Handoff: Data Collection & Chart Generation

After the analysis framework is generated, it is handed off to **other data collection skills** (e.g., deep-research, data-analysis, web search agents) to:

1. Execute the **Search Keywords** from each chapter's data requirements
2. Collect quantitative data, qualitative insights, and source URLs
3. Generate charts based on the **Visualization & Content Plan**
4. Return a **Data Package** containing:
   - **Data Summary**: Raw numbers, metrics, and qualitative findings per chapter
   - **Chart Files**: Generated chart images with local file paths
   - **External Search Findings**: Source URLs and summaries for citations

> **This skill does NOT perform data collection.** It only produces the framework (Phase 1) and the final report (Phase 2).
>
> **Chart Generation**: If a visualization/charting skill is available (e.g., data-analysis, image-generation), chart generation can be deferred to the beginning of Phase 2 — see Step 2.3.

---

# Phase 2: Report Generation

## Purpose

Receive the completed **Analysis Framework** and **Data Package** from upstream, and synthesize them into a final consulting-grade report.

## Phase 2 Inputs

| Input | Description | Required |
|-------|-------------|----------|
| **Analysis Framework** | The framework document produced in Phase 1 | Yes |
| **Data Summary** | Collected data organized per chapter from the data collection phase | Yes |
| **Chart Files** | Local file paths for generated chart images. If not provided, charts will be generated in Step 2.3 using available visualization skills | Optional |
| **External Search Findings** | URLs and summaries for inline citations | Optional |

## Phase 2 Workflow

### Step 2.1: Receive and Validate Inputs

Verify that all required inputs are present:

1. **Analysis Framework** — Confirm it contains the chapter skeleton, data requirements, and visualization plans
2. **Data Summary** — Confirm it contains data organized per chapter, and cross-reference it against the P0 requirements
3. **Chart Files** — Confirm the file paths are valid local paths

If any P0 data is missing, note it in the report and flag it for the user.

### Step 2.2: Map Report Structure

Map the final report structure from the Analysis Framework:

1. **Abstract** — Executive summary with key takeaways
2. **Introduction** — Background, objectives, methodology
3. **Main Body Chapters (2...N)** — Mapped from the Framework's chapter skeleton
4. **Conclusion** — Pure, objective synthesis
5. **References** — GB/T 7714-2015 formatted references

### Step 2.3: Generate Chapter Charts (Pre-Report Visualization)

Before writing the report, generate all planned charts from the Analysis Framework's **Visualization & Content Plan**. This step ensures every sub-chapter has its "Visual Anchor" ready before narrative writing begins.

#### When to Execute This Step

- **Chart Files already provided**: Skip this step — proceed directly to Step 2.4.
- **Chart Files NOT provided but a visualization skill is available**: Execute this step to generate all charts first.
- **No Chart Files and no visualization skill available**: Skip this step — use comparison tables as the primary visual anchor in Step 2.4, and note the absence of charts.

#### Chart Generation Workflow

1. **Extract Chart Tasks**: Parse all `Visualization & Content Plan` entries from the Analysis Framework to build a chart generation task list:

   | # | Chapter | Chart Type | Chart Title | Data Mapping | Data Source |
   |---|---------|------------|-------------|--------------|-------------|
   | 1 | 2.1 | Line chart | Market Size Trend 2020-2025 | X: Year, Y: Market Size (billion CNY) | Data Requirement #1, #2 |
   | 2 | 3.1 | Pie chart | Consumer Age Distribution | Segments: Age groups, Values: Share % | Data Requirement #5 |
   | ... | ... | ... | ... | ... | ... |

2. **Prepare Chart Data**: For each chart task, extract the corresponding data points from the **Data Summary**.
   > **CRITICAL**: Use ONLY the numbers provided in the Data Summary. Do NOT invent or "smooth" data to make charts look better. If data points are missing, the chart must reflect that reality (e.g., broken line or missing bar), or the chart type must be adjusted.

3. **Delegate to Visualization Skill**: Invoke the available visualization/charting skill (e.g., `data-analysis`) for each chart task with:
   - Chart type and title
   - Structured data
   - Axis labels and formatting preferences
   - Output file path convention: `charts/chapter_{N}_{chart_index}.png`

4. **Collect Chart File Paths**: Record all generated chart file paths for embedding in Step 2.4:

   ```markdown
   ## Generated Charts
   | # | Chapter | Chart Title | File Path |
   |---|---------|-------------|-----------|
   | 1 | 2.1 | Market Size Trend 2020-2025 | charts/chapter_2_1.png |
   | 2 | 3.1 | Consumer Age Distribution | charts/chapter_3_1.png |
   ```

5. **Validate**: Confirm all P0-priority charts have been generated. If any chart generation fails, note it and fall back to comparison tables for that sub-chapter.

> **Principle**: Complete ALL chart generation before starting report writing. This ensures a consistent visual narrative and avoids interleaving generation with writing.
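
To make the delegation contract in step 3 concrete, here is what chart task #1 resolves to if rendered with plain matplotlib. This is an illustrative sketch only — the actual rendering belongs to the delegated skill, matplotlib is an assumption about the runtime, and the sample numbers are placeholders that must in practice come from the Data Summary:

```python
# Illustrative sketch of one chart task; NOT this skill's API.
# The values below are placeholders — real numbers MUST come from the Data Summary.
import os

import matplotlib
matplotlib.use("Agg")  # headless backend: render to file, no display needed
import matplotlib.pyplot as plt

years = [2020, 2021, 2022, 2023, 2024, 2025]   # X: Year
market_size = [410, 452, 478, 521, 566, 603]   # Y: Market Size (billion CNY), placeholder data

fig, ax = plt.subplots(figsize=(8, 4.5))
ax.plot(years, market_size, marker="o")
ax.set_title("Market Size Trend 2020-2025")
ax.set_xlabel("Year")
ax.set_ylabel("Market Size (billion CNY)")

os.makedirs("charts", exist_ok=True)
# File path follows the convention from step 3: charts/chapter_{N}_{chart_index}.png
fig.savefig("charts/chapter_2_1.png", dpi=150, bbox_inches="tight")
```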

### Step 2.4: Write the Report

For each sub-chapter, follow the **"Visual Anchor → Data Contrast → Integrated Analysis"** flow:

1. **Visual Evidence Block**: Embed charts using `![Chart Title](file_path)` — use the file paths collected in Step 2.3
2. **Data Contrast Table**: Create a Markdown comparison table for key metrics
   > **Source Rule**: Every number in the table must come from the Data Summary. No hallucinations.
3. **Integrated Narrative Analysis**: Write analytical text following "What → Why → So What"
   > **Narrative Rule**: The narrative must explain the *provided* data. Do not make claims unsupported by the inputs.

Each sub-chapter must end with a robust analytical paragraph (min. 200 words) that:
- Synthesizes conflicting or reinforcing data points
- Reveals the underlying user tension or opportunity
- Optionally ends with a punchy "One-Liner Truth" in a blockquote (`>`)

### Step 2.5: Final Structure Self-Check

Before outputting, confirm the report contains **all sections in order**:

```
Abstract → 1. Introduction → 2...N. Body Chapters → N+1. Conclusion → N+2. References
```

Additionally verify:
- All charts generated in Step 2.3 are embedded in the correct sub-chapters
- Chart file paths in `![...](...)` references are valid
- Sub-chapters without charts have comparison tables as visual anchors

The report **MUST NOT** stop after the Conclusion — it **MUST** include References as the final section.

## Formatting & Tone Standards

### Consulting Voice
- **Tone**: McKinsey/BCG — Authoritative, Objective, Professional
- **Language**: All headings and content in the language specified by `output_locale`
- **Number Formatting**: Use half-width English commas as thousands separators (`1,000`, not full-width `1,000`)
- **Data Emphasis**: **Bold** important viewpoints and key numbers

### Titling Constraints
- **Numbering**: Use standard numbering (`1.`, `1.1`) directly followed by the title
- **Forbidden Prefixes**: Do NOT use "Chapter", "Part", "Section" as prefixes
- **Allowed Tone Words**: Analysis, Profiling, Overview, Insights, Assessment
- **Forbidden Words**: "Decoding", "DNA", "Secrets", "Mindscape", "Solar System", "Unlocking"

### Sub-Chapter Conclusions
- **Requirement**: End each sub-chapter with a robust analytical paragraph (min. 200 words).
- **Narrative Flow**: This paragraph must read like a natural continuation of the text. It must synthesize the section's findings into a strategic judgment.
- **Content Logic**:
  1. Synthesize the conflicting or reinforcing data points above.
  2. Reveal the *underlying* user tension or opportunity.
  3. **Key Insight (optional)**: Only if you have a concise, punchy "One-Liner Truth", place it at the very end as a **blockquote** (`>`) to anchor the section.

### Insight Depth (The "So What" Chain)

Every insight must connect **Data → User Psychology → Strategy Implication**:

```
❌ Bad: "Females are 60%. Strategy: Target females."

✅ Good: "Females constitute 60% with a high TGI of 180. **This suggests**
the purchase decision is driven by aesthetic and social validation
rather than pure utility. **Consequently**, media spend should pivot
towards visual-heavy platforms (e.g., RED/Instagram) to maximize CTR,
treating male audiences only as a secondary gift-giving segment."
```

### References
- **Inline**: Use markdown links for sources (e.g. `[Source Title](URL)`) when using External Search Findings
- **References Section**: Formatted strictly per **GB/T 7714-2015**

### Markdown Rules
- **Immediate Start**: Begin directly with `# Report Title` — no introductory text
- **No Separators**: Do NOT use horizontal rules (`---`)

## Report Structure Template

```markdown
# [Report Title]

## Abstract
[Executive summary with key takeaways]

## 1. Introduction
[Background, objectives, methodology]

## 2. [Body Chapter Title]
### 2.1 [Sub-chapter Title]
![Chart Title](charts/chapter_2_1.png)

| Metric | Brand A | Brand B |
|--------|---------|---------|
| ... | ... | ... |

[Integrated narrative analysis: What → Why → So What, min. 200 words]

> [Optional: One-liner strategic truth]

### 2.2 [Sub-chapter Title]
...

## N+1. Conclusion
[Pure objective synthesis, NO bullet points, neutral tone]
[Para 1: The fundamental nature of the group/market]
[Para 2: Core tension or behavior pattern]
[Final: One or two sentences stating the objective truth]

## N+2. References
[1] Author. Title[EB/OL]. URL, Date.
[2] ...
```

## Complete Example

### Phase 1 Example: Framework Generation

User provides: Research subject "Gen-Z Skincare Market Analysis"

**Phase 1 output (Analysis Framework):**

```markdown
# Gen-Z Skincare Market Analysis Framework

## Research Overview
- **Research Subject**: Gen-Z Skincare Market Deep Analysis
- **Scope**: China market, 2020-2025, consumers aged 18-27
- **Analysis Domain**: Market Analysis + Consumer Insights
- **Core Research Questions**:
  1. What is the size and growth momentum of the Gen-Z skincare market?
  2. What is unique about Gen-Z consumer skincare behavior patterns?
  3. How can brands effectively reach and convert Gen-Z consumers?

## Chapter Skeleton

### 1. Market Size & Growth Trends
- **Analysis Objective**: Quantify Gen-Z skincare market size and identify growth drivers
- **Analysis Logic**: Total market → Segmentation → Growth rate → Driver decomposition
- **Core Hypothesis**: Gen-Z is becoming the core engine of skincare consumption growth

#### Data Requirements
| # | Data Metric | Data Type | Suggested Sources | Search Keywords | Priority | Time Range |
|---|-------------|-----------|-------------------|-----------------|----------|------------|
| 1 | China skincare market total size | Quantitative | Industry reports | "China skincare market size 2024 2025" | P0 | 2020-2025 |
| 2 | Gen-Z skincare spending share | Quantitative | Industry reports, e-commerce platforms | "Gen-Z skincare spending share youth" | P0 | Latest |

#### Visualization & Content Plan
**Chart 1**: Line chart — China Skincare Market Size Trend 2020-2025
**Argument Structure**:
1. What: Quantified status of market size and Gen-Z share
2. Why: Consumption upgrade, ingredient-conscious consumers, social media driven
3. So What: Brands should prioritize building youth-oriented product lines

### 2. Consumer Profiling & Behavioral Insights
...

## Data Collection Task List
[Consolidated P0/P1 tasks]
```

### Phase 2 Example: Report Generation

After data collection, the user provides: Analysis Framework + Data Summary with brand metrics + chart file paths.

**Phase 2 output (Final Report) follows this flow:**

1. Start with `# Gen-Z Skincare Market Deep Analysis Report`
2. Abstract — 3-5 key takeaways in executive summary form
3. `1. Introduction` — Market context, research scope, data sources
4. `2. Market Size & Growth Trend Analysis` — Embed trend charts, comparison tables, strategic narrative
5. `3. Consumer Profiling & Behavioral Insights` — Demographics, purchase drivers, "So What" analysis
6. `4. Brand Competitive Landscape Assessment` — Brand positioning, share analysis, competitive dynamics
7. `5. Marketing Strategy & Channel Insights` — Channel effectiveness, content strategy implications
8. `6. Conclusion` — Objective synthesis in flowing prose (no bullets)
9. `7. References` — GB/T 7714-2015 formatted list

---

## Quality Checklists

### Phase 1 Quality Checklist (Analysis Framework)

- [ ] Framework covers all natural analytical dimensions for the identified domain
- [ ] Each chapter has a clear Analysis Objective, Analysis Logic, and Core Hypothesis
- [ ] Data requirements are specific, measurable, and include actionable Search Keywords
- [ ] Every chapter has at least one visualization plan with chart type and data mapping
- [ ] Data priorities (P0/P1/P2) are assigned — P0 items are essential for core arguments
- [ ] Data Collection Task List is comprehensive, deduplicated, and ready for downstream execution
- [ ] Framework adapts to the correct domain (market/finance/industry/consumer/etc.)

### Phase 2 Quality Checklist (Final Report)

- [ ] **NO HALLUCINATION**: All numbers and charts are verified against the input Data Summary
- [ ] All planned charts generated before report writing (Step 2.3 completed first)
- [ ] All sections present in correct order (Abstract → Introduction → Body → Conclusion → References)
- [ ] Every sub-chapter follows "Visual Anchor → Data Contrast → Integrated Analysis"
- [ ] Every sub-chapter ends with a min. 200-word analytical paragraph
- [ ] All insights follow the "Data → User Psychology → Strategy Implication" chain
- [ ] All headings use proper numbering (no "Chapter/Part/Section" prefixes)
- [ ] Charts are embedded with `![alt](path)` syntax
- [ ] Numbers use English commas for thousands separators
- [ ] Inline references use markdown links where applicable
- [ ] References section follows GB/T 7714-2015
- [ ] No horizontal rules (`---`) in the document
- [ ] Conclusion uses flowing prose — no bullet points
- [ ] Report starts directly with the `#` title — no preamble
- [ ] Missing P0 data is explicitly flagged in the report

## Output Format

- **Phase 1**: Output the complete Analysis Framework in **Markdown** format
- **Phase 2**: Output the complete Report in **Markdown** format

## Settings

```
output_locale = zh_CN  # configurable per user request
reasoning_locale = en
```

## Notes

- This skill operates in **two phases** of a multi-step agentic workflow:
  - **Phase 1** produces the analysis framework and data collection requirements
  - **Data collection** is performed by other skills (deep-research, data-analysis, etc.)
  - **Phase 2** receives the collected data and produces the final report
- Dynamic titling: **Rewrite** topics from the Framework into professional, concise subject-based headers
- The Conclusion section must contain **NO** detailed recommendations — those belong in the preceding body chapters
- **ZERO HALLUCINATION POLICY**: Each statement, chart, and number in the report must be supported by data points from the input Data Summary. If data is missing, admit it.
- **Traceability**: If requested, you must be able to point to the specific line in the Data Summary or External Search Findings that supports a claim.
- The framework should adapt its analytical dimensions and depth to the specific domain (financial analysis uses different frameworks than consumer insights)
- When the research subject is ambiguous, default to the broadest reasonable scope and note assumptions

@@ -1,248 +0,0 @@
---
name: data-analysis
description: Use this skill when the user uploads Excel (.xlsx/.xls) or CSV files and wants to perform data analysis, generate statistics, create summaries, pivot tables, SQL queries, or any form of structured data exploration. Supports multi-sheet Excel workbooks, aggregation, filtering, joins, and exporting results to CSV/JSON/Markdown.
---

# Data Analysis Skill

## Overview

This skill analyzes user-uploaded Excel/CSV files using DuckDB — an in-process analytical SQL engine. It supports schema inspection, SQL-based querying, statistical summaries, and result export, all through a single Python script.

## Core Capabilities

- Inspect Excel/CSV file structure (sheets, columns, types, row counts)
- Execute arbitrary SQL queries against uploaded data
- Generate statistical summaries (mean, median, stddev, percentiles, nulls)
- Support multi-sheet Excel workbooks (each sheet becomes a table)
- Export query results to CSV, JSON, or Markdown
- Handle large files efficiently with DuckDB's columnar engine

## Workflow

### Step 1: Understand Requirements

When a user uploads data files and requests analysis, identify:

- **File location**: Path(s) to uploaded Excel/CSV files under `/mnt/user-data/uploads/`
- **Analysis goal**: What insights the user wants (summary, filtering, aggregation, comparison, etc.)
- **Output format**: How results should be presented (table, CSV export, JSON, etc.)
- You do not need to browse the folders under `/mnt/user-data` yourself — work from the upload paths provided

### Step 2: Inspect File Structure

First, inspect the uploaded file to understand its schema:

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/data.xlsx \
  --action inspect
```

This returns:
- Sheet names (for Excel) or the filename (for CSV)
- Column names, data types, and non-null counts
- Row count per sheet/file
- Sample data (first 5 rows)

### Step 3: Perform Analysis

Based on the schema, construct SQL queries to answer the user's questions.

#### Run SQL Query

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/data.xlsx \
  --action query \
  --sql "SELECT category, COUNT(*) as count, AVG(amount) as avg_amount FROM Sheet1 GROUP BY category ORDER BY count DESC"
```

#### Generate Statistical Summary

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/data.xlsx \
  --action summary \
  --table Sheet1
```

This returns, for each numeric column: count, mean, std, min, 25%, 50%, 75%, max, null_count.
For string columns: count, unique, top value, frequency, null_count.

#### Export Results

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/data.xlsx \
  --action query \
  --sql "SELECT * FROM Sheet1 WHERE amount > 1000" \
  --output-file /mnt/user-data/outputs/filtered-results.csv
```

Supported output formats (auto-detected from extension):
- `.csv` — Comma-separated values
- `.json` — JSON array of records
- `.md` — Markdown table

### Parameters

| Parameter | Required | Description |
|-----------|----------|-------------|
| `--files` | Yes | Space-separated paths to Excel/CSV files |
| `--action` | Yes | One of: `inspect`, `query`, `summary` |
| `--sql` | For `query` | SQL query to execute |
| `--table` | For `summary` | Table/sheet name to summarize |
| `--output-file` | No | Path to export results (CSV/JSON/MD) |

> [!NOTE]
> Do NOT read the Python file — just call it with the parameters.

## Table Naming Rules

- **Excel files**: Each sheet becomes a table named after the sheet (e.g., `Sheet1`, `Sales`, `Revenue`)
- **CSV files**: Table name is the filename without extension (e.g., `data.csv` → `data`)
- **Multiple files**: All tables from all files are available in the same query context, enabling cross-file joins
- **Special characters**: Sheet/file names with spaces or special characters are auto-sanitized (spaces → underscores), and names that start with a digit get a `t_` prefix (e.g., sheet `2024 Sales` → table `t_2024_Sales`). Double-quote such names in SQL, e.g., `"t_2024_Sales"`
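
Distilled from the bundled `analyze.py` (its full source appears later in this diff), the sanitization rule is small enough to show directly:

```python
import re

def sanitize_table_name(name: str) -> str:
    """Non-word characters become underscores; a leading digit gains a t_ prefix."""
    sanitized = re.sub(r"[^\w]", "_", name)
    if sanitized and sanitized[0].isdigit():
        sanitized = f"t_{sanitized}"
    return sanitized

print(sanitize_table_name("2024 Sales"))    # -> t_2024_Sales
print(sanitize_table_name("Revenue (Q1)"))  # -> Revenue__Q1_
```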

## Analysis Patterns

### Basic Exploration
```sql
-- Row count
SELECT COUNT(*) FROM Sheet1;

-- Distinct values in a column
SELECT DISTINCT category FROM Sheet1;

-- Value distribution
SELECT category, COUNT(*) as cnt FROM Sheet1 GROUP BY category ORDER BY cnt DESC;

-- Date range
SELECT MIN(date_col), MAX(date_col) FROM Sheet1;
```

### Aggregation & Grouping
```sql
-- Revenue by category and month
SELECT category, DATE_TRUNC('month', order_date) as month,
       SUM(revenue) as total_revenue
FROM Sales
GROUP BY category, month
ORDER BY month, total_revenue DESC;

-- Top 10 customers by spend
SELECT customer_name, SUM(amount) as total_spend
FROM Orders GROUP BY customer_name
ORDER BY total_spend DESC LIMIT 10;
```

### Cross-file Joins
```sql
-- Join sales with customer info from different files
SELECT s.order_id, s.amount, c.customer_name, c.region
FROM sales s
JOIN customers c ON s.customer_id = c.id
WHERE s.amount > 500;
```

### Window Functions
```sql
-- Running total and rank
SELECT order_date, amount,
       SUM(amount) OVER (ORDER BY order_date) as running_total,
       RANK() OVER (ORDER BY amount DESC) as amount_rank
FROM Sales;
```

### Pivot-style Analysis
```sql
-- Pivot: monthly revenue by category
SELECT category,
       SUM(CASE WHEN MONTH(date) = 1 THEN revenue END) as Jan,
       SUM(CASE WHEN MONTH(date) = 2 THEN revenue END) as Feb,
       SUM(CASE WHEN MONTH(date) = 3 THEN revenue END) as Mar
FROM Sales
GROUP BY category;
```

## Complete Example

User uploads `sales_2024.xlsx` (with sheets: `Orders`, `Products`, `Customers`) and asks: "Analyze my sales data — show top products by revenue and monthly trends."

### Step 1: Inspect the file

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/sales_2024.xlsx \
  --action inspect
```

### Step 2: Top products by revenue

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/sales_2024.xlsx \
  --action query \
  --sql "SELECT p.product_name, SUM(o.quantity * o.unit_price) as total_revenue, SUM(o.quantity) as total_units FROM Orders o JOIN Products p ON o.product_id = p.id GROUP BY p.product_name ORDER BY total_revenue DESC LIMIT 10"
```

### Step 3: Monthly revenue trends

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/sales_2024.xlsx \
  --action query \
  --sql "SELECT DATE_TRUNC('month', order_date) as month, SUM(quantity * unit_price) as revenue FROM Orders GROUP BY month ORDER BY month" \
  --output-file /mnt/user-data/outputs/monthly-trends.csv
```

### Step 4: Statistical summary

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/sales_2024.xlsx \
  --action summary \
  --table Orders
```

Present results to the user with clear explanations of findings, trends, and actionable insights.

## Multi-file Example

User uploads `orders.csv` and `customers.xlsx` and asks: "Which region has the highest average order value?"

```bash
python /mnt/skills/public/data-analysis/scripts/analyze.py \
  --files /mnt/user-data/uploads/orders.csv /mnt/user-data/uploads/customers.xlsx \
  --action query \
  --sql "SELECT c.region, AVG(o.amount) as avg_order_value, COUNT(*) as order_count FROM orders o JOIN Customers c ON o.customer_id = c.id GROUP BY c.region ORDER BY avg_order_value DESC"
```

## Output Handling

After analysis:

- Present query results directly in conversation as formatted tables
- For large results, export to file and share via the `present_files` tool
- Always explain findings in plain language with key takeaways
- Suggest follow-up analyses when patterns are interesting
- Offer to export results if the user wants to keep them

## Caching

The script automatically caches loaded data to avoid re-parsing files on every call:

- On first load, files are parsed and stored in a persistent DuckDB database under `/mnt/user-data/workspace/.data-analysis-cache/`
- The cache key is a SHA256 hash of all input file contents — if files change, a new cache is created
- Subsequent calls with the same files will use the cached database directly (near-instant startup)
- Cache is transparent — no extra parameters needed

This is especially useful when running multiple queries against the same data files (inspect → query → summary).

## Notes

- DuckDB supports full SQL including window functions, CTEs, subqueries, and advanced aggregations
- Excel date columns are automatically parsed; use DuckDB date functions (`DATE_TRUNC`, `EXTRACT`, etc.)
- For very large files (100MB+), DuckDB handles them efficiently without loading everything into memory
- Column names with spaces are accessible using double quotes: `"Column Name"`

@@ -1,566 +0,0 @@
"""
Data Analysis Script using DuckDB.

Analyzes Excel (.xlsx/.xls) and CSV files using DuckDB's in-process SQL engine.
Supports schema inspection, SQL queries, statistical summaries, and result export.
"""

import argparse
import hashlib
import json
import logging
import os
import re
import subprocess
import sys
import tempfile

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

try:
    import duckdb
except ImportError:
    logger.error("duckdb is not installed. Installing...")
    subprocess.run([sys.executable, "-m", "pip", "install", "duckdb", "openpyxl", "-q"], check=True)
    import duckdb

try:
    import openpyxl  # noqa: F401
except ImportError:
    subprocess.run([sys.executable, "-m", "pip", "install", "openpyxl", "-q"], check=True)

# Cache directory for persistent DuckDB databases
CACHE_DIR = os.path.join(tempfile.gettempdir(), ".data-analysis-cache")
TABLE_MAP_SUFFIX = ".table_map.json"


def compute_files_hash(files: list[str]) -> str:
    """Compute a combined SHA256 hash of all input files for cache key."""
    hasher = hashlib.sha256()
    for file_path in sorted(files):
        try:
            with open(file_path, "rb") as f:
                while chunk := f.read(8192):
                    hasher.update(chunk)
        except OSError:
            # Include path as fallback if file can't be read
            hasher.update(file_path.encode())
    return hasher.hexdigest()


def get_cache_db_path(files_hash: str) -> str:
    """Get the path to the cached DuckDB database file."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    return os.path.join(CACHE_DIR, f"{files_hash}.duckdb")


def get_table_map_path(files_hash: str) -> str:
    """Get the path to the cached table map JSON file."""
    return os.path.join(CACHE_DIR, f"{files_hash}{TABLE_MAP_SUFFIX}")


def save_table_map(files_hash: str, table_map: dict[str, str]) -> None:
    """Save table map to a JSON file alongside the cached DB."""
    path = get_table_map_path(files_hash)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(table_map, f, ensure_ascii=False)


def load_table_map(files_hash: str) -> dict[str, str] | None:
    """Load table map from cache. Returns None if not found."""
    path = get_table_map_path(files_hash)
    if not os.path.exists(path):
        return None
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return None


def sanitize_table_name(name: str) -> str:
    """Sanitize a sheet/file name into a valid SQL table name."""
    sanitized = re.sub(r"[^\w]", "_", name)
    if sanitized and sanitized[0].isdigit():
        sanitized = f"t_{sanitized}"
    return sanitized


def load_files(con: duckdb.DuckDBPyConnection, files: list[str]) -> dict[str, str]:
    """
    Load Excel/CSV files into DuckDB tables.

    Returns a mapping of original_name -> sanitized_table_name.
    """
    con.execute("INSTALL spatial; LOAD spatial;")
    table_map: dict[str, str] = {}

    for file_path in files:
        if not os.path.exists(file_path):
            logger.error(f"File not found: {file_path}")
            continue

        ext = os.path.splitext(file_path)[1].lower()

        if ext in (".xlsx", ".xls"):
            _load_excel(con, file_path, table_map)
        elif ext == ".csv":
            _load_csv(con, file_path, table_map)
        else:
            logger.warning(f"Unsupported file format: {ext} ({file_path})")

    return table_map


def _load_excel(
    con: duckdb.DuckDBPyConnection, file_path: str, table_map: dict[str, str]
) -> None:
    """Load all sheets from an Excel file into DuckDB tables."""
    import openpyxl

    wb = openpyxl.load_workbook(file_path, read_only=True, data_only=True)
    sheet_names = wb.sheetnames
    wb.close()

    for sheet_name in sheet_names:
        table_name = sanitize_table_name(sheet_name)

        # Handle duplicate table names
        original_table_name = table_name
        counter = 1
        while table_name in table_map.values():
            table_name = f"{original_table_name}_{counter}"
            counter += 1

        try:
            con.execute(
                f"""
                CREATE TABLE "{table_name}" AS
                SELECT * FROM st_read(
                    '{file_path}',
                    layer = '{sheet_name}',
                    open_options = ['HEADERS=FORCE', 'FIELD_TYPES=AUTO']
                )
                """
            )
            table_map[sheet_name] = table_name
            row_count = con.execute(f'SELECT COUNT(*) FROM "{table_name}"').fetchone()[0]
            logger.info(
                f"  Loaded sheet '{sheet_name}' -> table '{table_name}' ({row_count} rows)"
            )
        except Exception as e:
            logger.warning(f"  Failed to load sheet '{sheet_name}': {e}")


def _load_csv(
    con: duckdb.DuckDBPyConnection, file_path: str, table_map: dict[str, str]
) -> None:
    """Load a CSV file into a DuckDB table."""
    base_name = os.path.splitext(os.path.basename(file_path))[0]
    table_name = sanitize_table_name(base_name)

    # Handle duplicate table names
    original_table_name = table_name
    counter = 1
    while table_name in table_map.values():
        table_name = f"{original_table_name}_{counter}"
        counter += 1

    try:
        con.execute(
            f"""
            CREATE TABLE "{table_name}" AS
            SELECT * FROM read_csv_auto('{file_path}')
            """
        )
        table_map[base_name] = table_name
        row_count = con.execute(f'SELECT COUNT(*) FROM "{table_name}"').fetchone()[0]
        logger.info(
            f"  Loaded CSV '{base_name}' -> table '{table_name}' ({row_count} rows)"
        )
    except Exception as e:
        logger.warning(f"  Failed to load CSV '{base_name}': {e}")


def action_inspect(con: duckdb.DuckDBPyConnection, table_map: dict[str, str]) -> str:
    """Inspect the schema of all loaded tables."""
    output_parts = []

    for original_name, table_name in table_map.items():
        output_parts.append(f"\n{'=' * 60}")
        output_parts.append(f'Table: {original_name} (SQL name: "{table_name}")')
        output_parts.append(f"{'=' * 60}")

        # Get row count
        row_count = con.execute(f'SELECT COUNT(*) FROM "{table_name}"').fetchone()[0]
        output_parts.append(f"Rows: {row_count}")

        # Get column info
        columns = con.execute(f'DESCRIBE "{table_name}"').fetchall()
        output_parts.append(f"\nColumns ({len(columns)}):")
        output_parts.append(f"{'Name':<30} {'Type':<15} {'Nullable'}")
        output_parts.append(f"{'-' * 30} {'-' * 15} {'-' * 8}")
        for col in columns:
            col_name, col_type, nullable = col[0], col[1], col[2]
            output_parts.append(f"{col_name:<30} {col_type:<15} {nullable}")

        # Get non-null counts per column
        col_names = [col[0] for col in columns]
        non_null_parts = []
        for c in col_names:
            non_null_parts.append(f'COUNT("{c}") as "{c}"')
        non_null_sql = f'SELECT {", ".join(non_null_parts)} FROM "{table_name}"'
        try:
            non_null_counts = con.execute(non_null_sql).fetchone()
            output_parts.append("\nNon-null counts:")
            for i, c in enumerate(col_names):
                output_parts.append(f"  {c}: {non_null_counts[i]} / {row_count}")
        except Exception:
            pass

        # Sample data (first 5 rows)
        output_parts.append("\nSample data (first 5 rows):")
        try:
            sample = con.execute(f'SELECT * FROM "{table_name}" LIMIT 5').fetchdf()
            output_parts.append(sample.to_string(index=False))
        except Exception:
            sample = con.execute(f'SELECT * FROM "{table_name}" LIMIT 5').fetchall()
            header = [col[0] for col in columns]
            output_parts.append("  " + " | ".join(header))
            for row in sample:
                output_parts.append("  " + " | ".join(str(v) for v in row))

    result = "\n".join(output_parts)
    print(result)
    return result


def action_query(
    con: duckdb.DuckDBPyConnection,
    sql: str,
    table_map: dict[str, str],
    output_file: str | None = None,
) -> str:
    """Execute a SQL query and return/export results."""
    # Best-effort: replace bare occurrences of original sheet/file names in the
    # SQL with their sanitized table names (longest names first to avoid
    # partial matches)
    modified_sql = sql
    for original_name, table_name in sorted(
        table_map.items(), key=lambda x: len(x[0]), reverse=True
    ):
        if original_name != table_name:
            modified_sql = re.sub(
                rf"\b{re.escape(original_name)}\b",
                f'"{table_name}"',
                modified_sql,
            )

    try:
        result = con.execute(modified_sql)
        columns = [desc[0] for desc in result.description]
        rows = result.fetchall()
    except Exception as e:
        error_msg = f"SQL Error: {e}\n\nAvailable tables:\n"
        for orig, tbl in table_map.items():
            cols = con.execute(f'DESCRIBE "{tbl}"').fetchall()
            col_names = [c[0] for c in cols]
            error_msg += f'  "{tbl}" ({orig}): {", ".join(col_names)}\n'
        print(error_msg)
        return error_msg

    # Format output
    if output_file:
        return _export_results(columns, rows, output_file)

    # Print as table
    return _format_table(columns, rows)


def _format_table(columns: list[str], rows: list[tuple]) -> str:
    """Format query results as a readable table."""
    if not rows:
        msg = "Query returned 0 rows."
        print(msg)
        return msg

    # Calculate column widths
    col_widths = [len(str(c)) for c in columns]
    for row in rows:
        for i, val in enumerate(row):
            col_widths[i] = max(col_widths[i], len(str(val)))

    # Cap column width
    max_width = 40
    col_widths = [min(w, max_width) for w in col_widths]

    # Build table
    parts = []
    header = " | ".join(str(c).ljust(col_widths[i]) for i, c in enumerate(columns))
    separator = "-+-".join("-" * col_widths[i] for i in range(len(columns)))
    parts.append(header)
    parts.append(separator)
    for row in rows:
        row_str = " | ".join(
            str(v)[:max_width].ljust(col_widths[i]) for i, v in enumerate(row)
        )
        parts.append(row_str)

    parts.append(f"\n({len(rows)} rows)")
    result = "\n".join(parts)
    print(result)
    return result


def _export_results(columns: list[str], rows: list[tuple], output_file: str) -> str:
    """Export query results to a file (CSV, JSON, or Markdown)."""
    # Guard against a bare filename: os.path.dirname("") would make makedirs fail
    out_dir = os.path.dirname(output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    ext = os.path.splitext(output_file)[1].lower()

    if ext == ".csv":
        import csv

        with open(output_file, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(columns)
            writer.writerows(rows)

    elif ext == ".json":
        records = []
        for row in rows:
            record = {}
            for i, col in enumerate(columns):
                val = row[i]
                # Handle non-JSON-serializable types
                if hasattr(val, "isoformat"):
                    val = val.isoformat()
                elif isinstance(val, (bytes, bytearray)):
                    val = val.hex()
                record[col] = val
            records.append(record)
        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(records, f, indent=2, ensure_ascii=False, default=str)

    elif ext == ".md":
        with open(output_file, "w", encoding="utf-8") as f:
            # Header
            f.write("| " + " | ".join(columns) + " |\n")
            f.write("| " + " | ".join("---" for _ in columns) + " |\n")
            # Rows
            for row in rows:
                f.write(
                    "| " + " | ".join(str(v).replace("|", "\\|") for v in row) + " |\n"
                )
    else:
        msg = f"Unsupported output format: {ext}. Use .csv, .json, or .md"
        print(msg)
        return msg

    msg = f"Results exported to {output_file} ({len(rows)} rows)"
    print(msg)
    return msg


def action_summary(
    con: duckdb.DuckDBPyConnection,
    table_name: str,
    table_map: dict[str, str],
) -> str:
    """Generate statistical summary for a table."""
    # Resolve table name
    resolved = table_map.get(table_name, table_name)

    try:
        columns = con.execute(f'DESCRIBE "{resolved}"').fetchall()
    except Exception:
        available = ", ".join(f'"{t}" ({o})' for o, t in table_map.items())
        msg = f"Table '{table_name}' not found. Available tables: {available}"
        print(msg)
        return msg

    row_count = con.execute(f'SELECT COUNT(*) FROM "{resolved}"').fetchone()[0]

    output_parts = []
    output_parts.append(f"\nStatistical Summary: {table_name}")
    output_parts.append(f"Total rows: {row_count}")
    output_parts.append(f"{'=' * 70}")

    numeric_types = {
        "BIGINT",
        "INTEGER",
        "SMALLINT",
        "TINYINT",
        "DOUBLE",
        "FLOAT",
        "DECIMAL",
        "HUGEINT",
        "REAL",
        "NUMERIC",
    }

    for col in columns:
        col_name, col_type = col[0], col[1].upper()
        output_parts.append(f"\n--- {col_name} ({col[1]}) ---")

        # Check base type (strip parameterized parts)
        base_type = re.sub(r"\(.*\)", "", col_type).strip()

        if base_type in numeric_types:
            try:
                stats = con.execute(f"""
                    SELECT
                        COUNT("{col_name}") as count,
                        AVG("{col_name}")::DOUBLE as mean,
                        STDDEV("{col_name}")::DOUBLE as std,
                        MIN("{col_name}") as min,
                        QUANTILE_CONT("{col_name}", 0.25) as q25,
                        MEDIAN("{col_name}") as median,
                        QUANTILE_CONT("{col_name}", 0.75) as q75,
                        MAX("{col_name}") as max,
                        COUNT(*) - COUNT("{col_name}") as null_count
                    FROM "{resolved}"
                """).fetchone()
                labels = [
                    "count",
                    "mean",
                    "std",
                    "min",
                    "25%",
                    "50%",
                    "75%",
                    "max",
                    "nulls",
                ]
                for label, val in zip(labels, stats):
                    if isinstance(val, float):
                        output_parts.append(f"  {label:<8}: {val:,.4f}")
                    else:
                        output_parts.append(f"  {label:<8}: {val}")
            except Exception as e:
                output_parts.append(f"  Error computing stats: {e}")
        else:
            try:
                stats = con.execute(f"""
                    SELECT
                        COUNT("{col_name}") as count,
                        COUNT(DISTINCT "{col_name}") as unique_count,
                        MODE("{col_name}") as mode_val,
                        COUNT(*) - COUNT("{col_name}") as null_count
                    FROM "{resolved}"
                """).fetchone()
                output_parts.append(f"  count  : {stats[0]}")
                output_parts.append(f"  unique : {stats[1]}")
                output_parts.append(f"  top    : {stats[2]}")
                output_parts.append(f"  nulls  : {stats[3]}")

                # Show top 5 values
                top_vals = con.execute(f"""
                    SELECT "{col_name}", COUNT(*) as freq
                    FROM "{resolved}"
                    WHERE "{col_name}" IS NOT NULL
                    GROUP BY "{col_name}"
                    ORDER BY freq DESC
                    LIMIT 5
                """).fetchall()
                if top_vals:
                    output_parts.append("  top values:")
                    for val, freq in top_vals:
                        pct = (freq / row_count * 100) if row_count > 0 else 0
                        output_parts.append(f"    {val}: {freq} ({pct:.1f}%)")
            except Exception as e:
                output_parts.append(f"  Error computing stats: {e}")

    result = "\n".join(output_parts)
    print(result)
    return result


def main():
    parser = argparse.ArgumentParser(description="Analyze Excel/CSV files using DuckDB")
    parser.add_argument(
        "--files",
        nargs="+",
        required=True,
        help="Paths to Excel (.xlsx/.xls) or CSV files",
    )
    parser.add_argument(
        "--action",
        required=True,
        choices=["inspect", "query", "summary"],
        help="Action to perform: inspect, query, or summary",
    )
    parser.add_argument(
        "--sql",
        type=str,
        default=None,
        help="SQL query to execute (required for 'query' action)",
    )
    parser.add_argument(
        "--table",
        type=str,
        default=None,
        help="Table name for summary (required for 'summary' action)",
    )
    parser.add_argument(
        "--output-file",
        type=str,
        default=None,
        help="Path to export results (CSV/JSON/MD)",
    )
    args = parser.parse_args()

    # Validate arguments
    if args.action == "query" and not args.sql:
        parser.error("--sql is required for 'query' action")
    if args.action == "summary" and not args.table:
        parser.error("--table is required for 'summary' action")

    # Compute file hash for caching
    files_hash = compute_files_hash(args.files)
    db_path = get_cache_db_path(files_hash)
    cached_table_map = load_table_map(files_hash)

    if cached_table_map and os.path.exists(db_path):
        # Cache hit: connect to existing DB
        logger.info(f"Cache hit! Using cached database: {db_path}")
        con = duckdb.connect(db_path, read_only=True)
        table_map = cached_table_map
        logger.info(
            f"Loaded {len(table_map)} table(s) from cache: {', '.join(table_map.keys())}"
        )
    else:
        # Cache miss: load files and persist to DB
        logger.info("Loading files (first time, will cache for future use)...")
        con = duckdb.connect(db_path)
        table_map = load_files(con, args.files)

        if not table_map:
            logger.error("No tables were loaded. Check file paths and formats.")
            # Clean up empty DB file
            con.close()
            if os.path.exists(db_path):
                os.remove(db_path)
            sys.exit(1)

        # Save table map for future cache lookups
        save_table_map(files_hash, table_map)
        logger.info(
            f"\nLoaded {len(table_map)} table(s): {', '.join(table_map.keys())}"
        )
        logger.info(f"Cached database saved to: {db_path}")

    # Perform action
    if args.action == "inspect":
        action_inspect(con, table_map)
    elif args.action == "query":
        action_query(con, args.sql, table_map, args.output_file)
    elif args.action == "summary":
        action_summary(con, args.table, table_map)

    con.close()


if __name__ == "__main__":
    main()
|
||||
|
|
@ -1,198 +0,0 @@
|
|||
---
|
||||
name: deep-research
|
||||
description: Use this skill instead of WebSearch for ANY question requiring web research. Trigger on queries like "what is X", "explain X", "compare X and Y", "research X", or before content generation tasks. Provides systematic multi-angle research methodology instead of single superficial searches. Use this proactively when the user's question needs online information.
|
||||
---
|
||||
|
||||
# Deep Research Skill
|
||||
|
||||
## Overview
|
||||
|
||||
This skill provides a systematic methodology for conducting thorough web research. **Load this skill BEFORE starting any content generation task** to ensure you gather sufficient information from multiple angles, depths, and sources.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
**Always load this skill when:**
|
||||
|
||||
### Research Questions
|
||||
- User asks "what is X", "explain X", "research X", "investigate X"
|
||||
- User wants to understand a concept, technology, or topic in depth
|
||||
- The question requires current, comprehensive information from multiple sources
|
||||
- A single web search would be insufficient to answer properly
|
||||
|
||||
### Content Generation (Pre-research)
|
||||
- Creating presentations (PPT/slides)
|
||||
- Creating frontend designs or UI mockups
|
||||
- Writing articles, reports, or documentation
|
||||
- Producing videos or multimedia content
|
||||
- Any content that requires real-world information, examples, or current data
|
||||
|
||||
## Core Principle
|
||||
|
||||
**Never generate content based solely on general knowledge.** The quality of your output directly depends on the quality and quantity of research conducted beforehand. A single search query is NEVER enough.
|
||||
|
||||
## Research Methodology
|
||||
|
||||
### Phase 1: Broad Exploration
|
||||
|
||||
Start with broad searches to understand the landscape:
|
||||
|
||||
1. **Initial Survey**: Search for the main topic to understand the overall context
|
||||
2. **Identify Dimensions**: From initial results, identify key subtopics, themes, angles, or aspects that need deeper exploration
|
||||
3. **Map the Territory**: Note different perspectives, stakeholders, or viewpoints that exist
|
||||
|
||||
Example:
|
||||
```
|
||||
Topic: "AI in healthcare"
|
||||
Initial searches:
|
||||
- "AI healthcare applications 2024"
|
||||
- "artificial intelligence medical diagnosis"
|
||||
- "healthcare AI market trends"
|
||||
|
||||
Identified dimensions:
|
||||
- Diagnostic AI (radiology, pathology)
|
||||
- Treatment recommendation systems
|
||||
- Administrative automation
|
||||
- Patient monitoring
|
||||
- Regulatory landscape
|
||||
- Ethical considerations
|
||||
```
|
||||
|
||||
### Phase 2: Deep Dive
|
||||
|
||||
For each important dimension identified, conduct targeted research:
|
||||
|
||||
1. **Specific Queries**: Search with precise keywords for each subtopic
|
||||
2. **Multiple Phrasings**: Try different keyword combinations and phrasings
|
||||
3. **Fetch Full Content**: Use `web_fetch` to read important sources in full, not just snippets
|
||||
4. **Follow References**: When sources mention other important resources, search for those too
|
||||
|
||||
Example:
|
||||
```
|
||||
Dimension: "Diagnostic AI in radiology"
|
||||
Targeted searches:
|
||||
- "AI radiology FDA approved systems"
|
||||
- "chest X-ray AI detection accuracy"
|
||||
- "radiology AI clinical trials results"
|
||||
|
||||
Then fetch and read:
|
||||
- Key research papers or summaries
|
||||
- Industry reports
|
||||
- Real-world case studies
|
||||
```
|
||||
|
||||
### Phase 3: Diversity & Validation
|
||||
|
||||
Ensure comprehensive coverage by seeking diverse information types:
|
||||
|
||||
| Information Type | Purpose | Example Searches |
|
||||
|-----------------|---------|------------------|
|
||||
| **Facts & Data** | Concrete evidence | "statistics", "data", "numbers", "market size" |
|
||||
| **Examples & Cases** | Real-world applications | "case study", "example", "implementation" |
|
||||
| **Expert Opinions** | Authority perspectives | "expert analysis", "interview", "commentary" |
|
||||
| **Trends & Predictions** | Future direction | "trends 2024", "forecast", "future of" |
|
||||
| **Comparisons** | Context and alternatives | "vs", "comparison", "alternatives" |
|
||||
| **Challenges & Criticisms** | Balanced view | "challenges", "limitations", "criticism" |
|
||||
|
||||
### Phase 4: Synthesis Check
|
||||
|
||||
Before proceeding to content generation, verify:
|
||||
|
||||
- [ ] Have I searched from at least 3-5 different angles?
|
||||
- [ ] Have I fetched and read the most important sources in full?
|
||||
- [ ] Do I have concrete data, examples, and expert perspectives?
|
||||
- [ ] Have I explored both positive aspects and challenges/limitations?
|
||||
- [ ] Is my information current and from authoritative sources?
|
||||
|
||||
**If any answer is NO, continue researching before generating content.**
|
||||
|
||||
## Search Strategy Tips
|
||||
|
||||
### Effective Query Patterns
|
||||
|
||||
```
|
||||
# Be specific with context
|
||||
❌ "AI trends"
|
||||
✅ "enterprise AI adoption trends 2024"
|
||||
|
||||
# Include authoritative source hints
|
||||
"[topic] research paper"
|
||||
"[topic] McKinsey report"
|
||||
"[topic] industry analysis"
|
||||
|
||||
# Search for specific content types
|
||||
"[topic] case study"
|
||||
"[topic] statistics"
|
||||
"[topic] expert interview"
|
||||
|
||||
# Use temporal qualifiers — always use the ACTUAL current year from <current_date>
|
||||
"[topic] 2026" # ← replace with real current year, never hardcode a past year
|
||||
"[topic] latest"
|
||||
"[topic] recent developments"
|
||||
```
|
||||
|
||||
### Temporal Awareness
|
||||
|
||||
**Always check `<current_date>` in your context before forming ANY search query.**
|
||||
|
||||
`<current_date>` gives you the full date: year, month, day, and weekday (e.g. `2026-02-28, Saturday`). Use the right level of precision depending on what the user is asking:
|
||||
|
||||
| User intent | Temporal precision needed | Example query |
|
||||
|---|---|---|
|
||||
| "today / this morning / just released" | **Month + Day** | `"tech news February 28 2026"` |
|
||||
| "this week" | **Week range** | `"technology releases week of Feb 24 2026"` |
|
||||
| "recently / latest / new" | **Month** | `"AI breakthroughs February 2026"` |
|
||||
| "this year / trends" | **Year** | `"software trends 2026"` |
|
||||
|
||||
**Rules:**
|
||||
- When the user asks about "today" or "just released", use **month + day + year** in your search queries to get same-day results
|
||||
- Never drop to year-only when day-level precision is needed — `"tech news 2026"` will NOT surface today's news
|
||||
- Try multiple phrasings: numeric form (`2026-02-28`), written form (`February 28 2026`), and relative terms (`today`, `this week`) across different queries
|
||||
|
||||
❌ User asks "what's new in tech today" → searching `"new technology 2026"` → misses today's news
|
||||
✅ User asks "what's new in tech today" → searching `"new technology February 28 2026"` + `"tech news today Feb 28"` → gets today's results
|
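As a minimal sketch of the mapping above — the function name and intent labels are invented for illustration; only the date arithmetic and formatting matter:

```python
from datetime import date, timedelta

def temporal_queries(topic: str, intent: str, today: date) -> list[str]:
    """Expand a topic into date-qualified query variants per the table above."""
    day = f"{today.strftime('%B')} {today.day} {today.year}"   # "February 28 2026"
    monday = today - timedelta(days=today.weekday())           # start of this week
    if intent == "today":
        return [f"{topic} {day}", f"{topic} {today.isoformat()}", f"{topic} today"]
    if intent == "this_week":
        week = f"{monday.strftime('%b')} {monday.day} {monday.year}"
        return [f"{topic} week of {week}"]
    if intent == "recent":
        return [f"{topic} {today.strftime('%B')} {today.year}", f"{topic} latest"]
    return [f"{topic} {today.year}"]                           # "this year / trends"

# temporal_queries("tech news", "today", date(2026, 2, 28))
# -> ["tech news February 28 2026", "tech news 2026-02-28", "tech news today"]
```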
||||
|
||||
### When to Use web_fetch
|
||||
|
||||
Use `web_fetch` to read full content when:
|
||||
- A search result looks highly relevant and authoritative
|
||||
- You need detailed information beyond the snippet
|
||||
- The source contains data, case studies, or expert analysis
|
||||
- You want to understand the full context of a finding
|
||||
|
||||
### Iterative Refinement
|
||||
|
||||
Research is iterative (a loop sketch follows this list). After initial searches:
|
||||
1. Review what you've learned
|
||||
2. Identify gaps in your understanding
|
||||
3. Formulate new, more targeted queries
|
||||
4. Repeat until you have comprehensive coverage
|
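A minimal sketch of this loop, assuming `search` and `find_gaps` are supplied by the environment; both callables are placeholders for real tools, not part of this skill:

```python
from typing import Callable

def research_loop(topic: str,
                  search: Callable[[str], list[dict]],
                  find_gaps: Callable[[list[dict]], list[str]],
                  max_rounds: int = 4) -> list[dict]:
    """Gap-driven refinement: search, review, re-query until coverage holds."""
    findings: list[dict] = []
    queries = [topic]
    for _ in range(max_rounds):
        for q in queries:
            findings.extend(search(q))
        gaps = find_gaps(findings)           # e.g. dimensions still unexplored
        if not gaps:
            break                            # comprehensive coverage reached
        queries = [f"{topic} {gap}" for gap in gaps]
    return findings
```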
||||
|
||||
## Quality Bar
|
||||
|
||||
Your research is sufficient when you can confidently answer:
|
||||
- What are the key facts and data points?
|
||||
- What are 2-3 concrete real-world examples?
|
||||
- What do experts say about this topic?
|
||||
- What are the current trends and future directions?
|
||||
- What are the challenges or limitations?
|
||||
- What makes this topic relevant or important now?
|
||||
|
||||
## Common Mistakes to Avoid
|
||||
|
||||
- ❌ Stopping after 1-2 searches
|
||||
- ❌ Relying on search snippets without reading full sources
|
||||
- ❌ Searching only one aspect of a multi-faceted topic
|
||||
- ❌ Ignoring contradicting viewpoints or challenges
|
||||
- ❌ Using outdated information when current data exists
|
||||
- ❌ Starting content generation before research is complete
|
||||
|
||||
## Output
|
||||
|
||||
After completing research, you should have:
|
||||
1. A comprehensive understanding of the topic from multiple angles
|
||||
2. Specific facts, data points, and statistics
|
||||
3. Real-world examples and case studies
|
||||
4. Expert perspectives and authoritative sources
|
||||
5. Current trends and relevant context
|
||||
|
||||
**Only then proceed to content generation**, using the gathered information to create high-quality, well-informed content.
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
---
|
||||
name: find-skills
|
||||
description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill.
|
||||
---
|
||||
|
||||
# Find Skills
|
||||
|
||||
This skill helps you discover and install skills from the open agent skills ecosystem.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use this skill when the user:
|
||||
|
||||
- Asks "how do I do X" where X might be a common task with an existing skill
|
||||
- Says "find a skill for X" or "is there a skill for X"
|
||||
- Asks "can you do X" where X is a specialized capability
|
||||
- Expresses interest in extending agent capabilities
|
||||
- Wants to search for tools, templates, or workflows
|
||||
- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.)
|
||||
|
||||
## What is the Skills CLI?
|
||||
|
||||
The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools.
|
||||
|
||||
**Key commands:**
|
||||
|
||||
- `npx skills find [query]` - Search for skills interactively or by keyword
|
||||
- `npx skills check` - Check for skill updates
|
||||
- `npx skills update` - Update all installed skills
|
||||
|
||||
**Browse skills at:** https://skills.sh/
|
||||
|
||||
## How to Help Users Find Skills
|
||||
|
||||
### Step 1: Understand What They Need
|
||||
|
||||
When a user asks for help with something, identify:
|
||||
|
||||
1. The domain (e.g., React, testing, design, deployment)
|
||||
2. The specific task (e.g., writing tests, creating animations, reviewing PRs)
|
||||
3. Whether this is a common enough task that a skill likely exists
|
||||
|
||||
### Step 2: Search for Skills
|
||||
|
||||
Run the find command with a relevant query:
|
||||
|
||||
```bash
|
||||
npx skills find [query]
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
- User asks "how do I make my React app faster?" → `npx skills find react performance`
|
||||
- User asks "can you help me with PR reviews?" → `npx skills find pr review`
|
||||
- User asks "I need to create a changelog" → `npx skills find changelog`
|
||||
|
||||
The command will return results like:
|
||||
|
||||
```
|
||||
Install with bash /path/to/skill/scripts/install-skill.sh vercel-labs/agent-skills@vercel-react-best-practices
|
||||
|
||||
vercel-labs/agent-skills@vercel-react-best-practices
|
||||
└ https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices
|
||||
```
|
||||
|
||||
### Step 3: Present Options to the User
|
||||
|
||||
When you find relevant skills, present them to the user with:
|
||||
|
||||
1. The skill name and what it does
|
||||
2. The install command they can run
|
||||
3. A link to learn more at skills.sh
|
||||
|
||||
Example response:
|
||||
|
||||
```
|
||||
I found a skill that might help! The "vercel-react-best-practices" skill provides
|
||||
React and Next.js performance optimization guidelines from Vercel Engineering.
|
||||
|
||||
To install it:
|
||||
bash /path/to/skill/scripts/install-skill.sh vercel-labs/agent-skills@vercel-react-best-practices
|
||||
|
||||
Learn more: https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices
|
||||
```
|
||||
|
||||
### Step 4: Install the Skill
|
||||
|
||||
If the user wants to proceed, use the `install-skill.sh` script to install the skill and automatically link it to the project:
|
||||
|
||||
```bash
|
||||
bash /path/to/skill/scripts/install-skill.sh <owner/repo@skill-name>
|
||||
```
|
||||
|
||||
For example, if the user wants to install `vercel-react-best-practices`:
|
||||
|
||||
```bash
|
||||
bash /path/to/skill/scripts/install-skill.sh vercel-labs/agent-skills@vercel-react-best-practices
|
||||
```
|
||||
|
||||
The script installs the skill globally to `~/.agents/skills/` and symlinks it into the project's `skills/custom/` directory.
|
||||
|
||||
## Common Skill Categories
|
||||
|
||||
When searching, consider these common categories:
|
||||
|
||||
| Category | Example Queries |
|
||||
| --------------- | ---------------------------------------- |
|
||||
| Web Development | react, nextjs, typescript, css, tailwind |
|
||||
| Testing | testing, jest, playwright, e2e |
|
||||
| DevOps | deploy, docker, kubernetes, ci-cd |
|
||||
| Documentation | docs, readme, changelog, api-docs |
|
||||
| Code Quality | review, lint, refactor, best-practices |
|
||||
| Design | ui, ux, design-system, accessibility |
|
||||
| Productivity | workflow, automation, git |
|
||||
|
||||
## Tips for Effective Searches
|
||||
|
||||
1. **Use specific keywords**: "react testing" is better than just "testing"
|
||||
2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd"
|
||||
3. **Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills`
|
||||
|
||||
## When No Skills Are Found
|
||||
|
||||
If no relevant skills exist:
|
||||
|
||||
1. Acknowledge that no existing skill was found
|
||||
2. Offer to help with the task directly using your general capabilities
|
||||
3. Suggest the user could create their own skill with `npx skills init`
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
I searched for skills related to "xyz" but didn't find any matches.
|
||||
I can still help you with this task directly! Would you like me to proceed?
|
||||
|
||||
If this is something you do often, you could create your own skill:
|
||||
npx skills init my-xyz-skill
|
||||
```
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Install a skill and link it to the project's skills/custom directory
|
||||
# Usage: ./skills/install-skill.sh <owner/repo@skill-name>
|
||||
# Example: ./skills/install-skill.sh vercel-labs/agent-skills@vercel-react-best-practices
|
||||
|
||||
set -e
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
echo "Usage: $0 <owner/repo@skill-name>"
|
||||
echo "Example: $0 vercel-labs/agent-skills@vercel-react-best-practices"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
FULL_SKILL_NAME="$1"
|
||||
|
||||
# Extract skill name (the part after @)
|
||||
SKILL_NAME="${FULL_SKILL_NAME##*@}"
|
||||
|
||||
if [[ -z "$SKILL_NAME" || "$SKILL_NAME" == "$FULL_SKILL_NAME" ]]; then
|
||||
echo "Error: Invalid skill format. Expected: owner/repo@skill-name"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find project root by looking for deer-flow.code-workspace
|
||||
find_project_root() {
|
||||
local dir="$PWD"
|
||||
while [[ "$dir" != "/" ]]; do
|
||||
if [[ -f "$dir/deer-flow.code-workspace" ]]; then
|
||||
echo "$dir"
|
||||
return 0
|
||||
fi
|
||||
dir="$(dirname "$dir")"
|
||||
done
|
||||
echo ""
|
||||
return 1
|
||||
}
|
||||
|
||||
PROJECT_ROOT=$(find_project_root)
|
||||
|
||||
if [[ -z "$PROJECT_ROOT" ]]; then
|
||||
echo "Error: Could not find project root (deer-flow.code-workspace not found)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SKILL_SOURCE="$HOME/.agents/skills/$SKILL_NAME"
|
||||
SKILL_TARGET="$PROJECT_ROOT/skills/custom"
|
||||
|
||||
# Step 1: Install the skill using npx (output is suppressed; failure is reported in Step 2)
|
||||
npx skills add "$FULL_SKILL_NAME" -g -y > /dev/null 2>&1 || true
|
||||
|
||||
# Step 2: Verify installation
|
||||
if [[ ! -d "$SKILL_SOURCE" ]]; then
|
||||
echo "Skill '$SKILL_NAME' installation failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Step 3: Create (or refresh) the symlink; -n keeps ln from descending into an existing link
|
||||
mkdir -p "$SKILL_TARGET"
|
||||
ln -sfn "$SKILL_SOURCE" "$SKILL_TARGET/$SKILL_NAME"
|
||||
|
||||
echo "Skill '$SKILL_NAME' installed successfully"
|
||||
|
|
@ -1,177 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
---
|
||||
name: frontend-design
|
||||
description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, artifacts, posters, or applications (examples include websites, landing pages, dashboards, React components, HTML/CSS layouts, or when styling/beautifying any web UI). Generates creative, polished code and UI design that avoids generic AI aesthetics.
|
||||
license: Complete terms in LICENSE.txt
|
||||
---
|
||||
|
||||
This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices.
|
||||
|
||||
The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints.
|
||||
|
||||
## Output Requirements
|
||||
|
||||
**MANDATORY**: The entry HTML file MUST be named `index.html`. This is a strict requirement for all generated frontend projects to ensure compatibility with standard web hosting and deployment workflows.
|
||||
|
||||
## Design Thinking
|
||||
|
||||
Before coding, understand the context and commit to a BOLD aesthetic direction:
|
||||
- **Purpose**: What problem does this interface solve? Who uses it?
|
||||
- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, and so on. Use these flavors for inspiration, but design one that is true to the context at hand.
|
||||
- **Constraints**: Technical requirements (framework, performance, accessibility).
|
||||
- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember?
|
||||
|
||||
**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity.
|
||||
|
||||
Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is:
|
||||
- Production-grade and functional
|
||||
- Visually striking and memorable
|
||||
- Cohesive with a clear aesthetic point-of-view
|
||||
- Meticulously refined in every detail
|
||||
|
||||
## Frontend Aesthetics Guidelines
|
||||
|
||||
Focus on:
|
||||
- **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for unexpected, characterful choices that elevate the frontend's aesthetics. Pair a distinctive display font with a refined body font.
|
||||
- **Color & Theme**: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes.
|
||||
- **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise.
|
||||
- **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density.
|
||||
- **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays.
|
||||
|
||||
NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), clichéd color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character.
|
||||
|
||||
Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations.
|
||||
|
||||
**IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well.
|
||||
|
||||
## Branding Requirement
|
||||
|
||||
**MANDATORY**: Every generated frontend interface MUST include a "Created By Deerflow" signature. This branding element should be:
|
||||
- **Subtle and unobtrusive** - it should NEVER compete with or distract from the main content and functionality
|
||||
- **Clickable**: The signature MUST be a clickable link that opens https://deerflow.tech in a new tab (target="_blank")
|
||||
- Integrated naturally into the design, feeling like an intentional design element rather than an afterthought
|
||||
- Small in size, using muted colors or reduced opacity that blend harmoniously with the overall aesthetic
|
||||
|
||||
**IMPORTANT**: The branding should be discoverable but not prominent. Users should notice the main interface first; the signature is a quiet attribution, not a focal point.
|
||||
|
||||
**Creative Implementation Ideas** (choose one that best matches your design aesthetic):
|
||||
|
||||
1. **Floating Corner Badge**: A small, elegant badge fixed to a corner with subtle hover effects (e.g., gentle glow, slight scale-up, color shift)
|
||||
|
||||
2. **Artistic Watermark**: A semi-transparent diagonal text or logo pattern in the background, barely visible but adds texture
|
||||
|
||||
3. **Integrated Border Element**: Part of a decorative border or frame around the content - the signature becomes an organic part of the design structure
|
||||
|
||||
4. **Animated Signature**: A small signature that elegantly writes itself on page load, or reveals on scroll near the bottom
|
||||
|
||||
5. **Contextual Integration**: Blend into the theme - for a retro design, use a vintage stamp look; for minimalist, a single small icon or monogram "DF" with tooltip
|
||||
|
||||
6. **Cursor Trail or Easter Egg**: A very subtle approach where the branding appears as a micro-interaction (e.g., holding cursor still reveals a tiny signature, or appears in a creative loading state)
|
||||
|
||||
7. **Decorative Divider**: Incorporate into a decorative line, separator, or ornamental element on the page
|
||||
|
||||
8. **Glassmorphism Card**: A tiny floating glass-effect card in a corner with blur backdrop
|
||||
|
||||
Example code patterns:
|
||||
```html
|
||||
<!-- Floating corner badge with hover effect -->
|
||||
<a href="https://deerflow.tech" target="_blank" class="deerflow-badge">✦ Deerflow</a>
|
||||
|
||||
<!-- Monogram with tooltip -->
|
||||
<a href="https://deerflow.tech" target="_blank" title="Created By Deerflow" class="deerflow-mark">DF</a>
|
||||
|
||||
<!-- Integrated into decorative element -->
|
||||
<div class="footer-ornament">
|
||||
<span class="line"></span>
|
||||
<a href="https://deerflow.tech" target="_blank">Deerflow</a>
|
||||
<span class="line"></span>
|
||||
</div>
|
||||
```
|
||||
|
||||
**Design Principle**: The branding should feel like it belongs - a natural extension of your creative vision, not a mandatory stamp. Match the signature's style (typography, color, animation) to the overall aesthetic direction.
|
||||
|
||||
Remember: Claude is capable of extraordinary creative work. Don't hold back; show what can truly be created when thinking outside the box and committing fully to a distinctive vision.
|
||||
|
|
@ -1,166 +0,0 @@
|
|||
---
|
||||
name: github-deep-research
|
||||
description: Conduct multi-round deep research on any GitHub repository. Use when users request comprehensive analysis, timeline reconstruction, competitive analysis, or in-depth investigation of GitHub projects. Produces structured markdown reports with executive summaries, chronological timelines, metrics analysis, and Mermaid diagrams. Triggers on GitHub repository URLs or open-source projects.
|
||||
---
|
||||
|
||||
# GitHub Deep Research Skill
|
||||
|
||||
Multi-round research combining the GitHub API, web_search, and web_fetch to produce comprehensive markdown reports.
|
||||
|
||||
## Research Workflow
|
||||
|
||||
- Round 1: GitHub API
|
||||
- Round 2: Discovery
|
||||
- Round 3: Deep Investigation
|
||||
- Round 4: Deep Dive
|
||||
|
||||
## Core Methodology
|
||||
|
||||
### Query Strategy
|
||||
|
||||
**Broad to Narrow**: Start with the GitHub API, then general queries, and refine based on findings.
|
||||
|
||||
```
|
||||
Round 1: GitHub API
|
||||
Round 2: "{topic} overview"
|
||||
Round 3: "{topic} architecture", "{topic} vs alternatives"
|
||||
Round 4: "{topic} issues", "{topic} roadmap", "site:github.com {topic}"
|
||||
```
|
||||
|
||||
**Source Prioritization**:
|
||||
1. Official docs/repos (highest weight)
|
||||
2. Technical blogs (Medium, Dev.to)
|
||||
3. News articles (verified outlets)
|
||||
4. Community discussions (Reddit, HN)
|
||||
5. Social media (lowest weight, for sentiment)
|
||||
|
||||
### Research Rounds
|
||||
|
||||
**Round 1 - GitHub API**
|
||||
Execute `scripts/github_api.py` directly; there is no need to `read_file()` it first:
|
||||
```bash
|
||||
python /path/to/skill/scripts/github_api.py <owner> <repo> summary
|
||||
python /path/to/skill/scripts/github_api.py <owner> <repo> readme
|
||||
python /path/to/skill/scripts/github_api.py <owner> <repo> tree
|
||||
```
|
||||
|
||||
**Available commands (the last argument of `github_api.py`):**
|
||||
- summary
|
||||
- info
|
||||
- readme
|
||||
- tree
|
||||
- languages
|
||||
- contributors
|
||||
- commits
|
||||
- issues
|
||||
- prs
|
||||
- releases
|
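The script can also be driven programmatically; a minimal sketch, assuming `scripts/github_api.py` is on the import path (the example repo is arbitrary):

```python
import json
import os

from github_api import GitHubAPI  # the class defined in scripts/github_api.py

# GITHUB_TOKEN is optional; it only raises the unauthenticated rate limit.
api = GitHubAPI(token=os.getenv("GITHUB_TOKEN"))
summary = api.summarize_repo("octocat", "Hello-World")  # any public repo works
print(json.dumps(summary, indent=2, default=str))
```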
||||
|
||||
**Round 2 - Discovery (3-5 web_search)**
|
||||
- Get overview and identify key terms
|
||||
- Find official website/repo
|
||||
- Identify main players/competitors
|
||||
|
||||
**Round 3 - Deep Investigation (5-10 web_search + web_fetch)**
|
||||
- Technical architecture details
|
||||
- Timeline of key events
|
||||
- Community sentiment
|
||||
- Use web_fetch on valuable URLs for full content
|
||||
|
||||
**Round 4 - Deep Dive**
|
||||
- Analyze commit history for timeline
|
||||
- Review issues/PRs for feature evolution
|
||||
- Check contributor activity
|
||||
|
||||
## Report Structure
|
||||
|
||||
Follow the template in `assets/report_template.md`:
|
||||
|
||||
1. **Metadata Block** - Date, confidence level, subject
|
||||
2. **Executive Summary** - 2-3 sentence overview with key metrics
|
||||
3. **Chronological Timeline** - Phased breakdown with dates
|
||||
4. **Key Analysis Sections** - Topic-specific deep dives
|
||||
5. **Metrics & Comparisons** - Tables, growth charts
|
||||
6. **Strengths & Weaknesses** - Balanced assessment
|
||||
7. **Sources** - Categorized references
|
||||
8. **Confidence Assessment** - Claims by confidence level
|
||||
9. **Methodology** - Research approach used
|
||||
|
||||
### Mermaid Diagrams
|
||||
|
||||
Include diagrams where helpful:
|
||||
|
||||
**Timeline (Gantt)**:
|
||||
```mermaid
|
||||
gantt
|
||||
title Project Timeline
|
||||
dateFormat YYYY-MM-DD
|
||||
section Phase 1
|
||||
Development :2025-01-01, 2025-03-01
|
||||
section Phase 2
|
||||
Launch :2025-03-01, 2025-04-01
|
||||
```
|
||||
|
||||
**Architecture (Flowchart)**:
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[User] --> B[Coordinator]
|
||||
B --> C[Planner]
|
||||
C --> D[Research Team]
|
||||
D --> E[Reporter]
|
||||
```
|
||||
|
||||
**Comparison (Pie/Bar)**:
|
||||
```mermaid
|
||||
pie title Market Share
|
||||
"Project A" : 45
|
||||
"Project B" : 30
|
||||
"Others" : 25
|
||||
```
|
||||
|
||||
## Confidence Scoring
|
||||
|
||||
Assign confidence based on source quality:
|
||||
|
||||
| Confidence | Criteria |
|
||||
|------------|----------|
|
||||
| High (90%+) | Official docs, GitHub data, multiple corroborating sources |
|
||||
| Medium (70-89%) | Single reliable source, recent articles |
|
||||
| Low (50-69%) | Social media, unverified claims, outdated info |
|
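A minimal sketch of how this rubric could be applied mechanically; the source labels are illustrative, not a fixed taxonomy:

```python
def confidence_label(source: str, corroborating_sources: int) -> str:
    """Map a claim's sourcing to the confidence bands in the table above."""
    if source in {"official docs", "github data"} or corroborating_sources >= 2:
        return "High (90%+)"
    if source in {"reliable article", "recent article"}:
        return "Medium (70-89%)"
    return "Low (50-69%)"
```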
||||
|
||||
## Output
|
||||
|
||||
Save report as: `research_{topic}_{YYYYMMDD}.md`
|
||||
|
||||
### Formatting Rules
|
||||
|
||||
- Chinese content: Use full-width punctuation (,。:;!?)
|
||||
- Technical terms: Provide Wiki/doc URL on first mention
|
||||
- Tables: Use for metrics, comparisons
|
||||
- Code blocks: For technical examples
|
||||
- Mermaid: For architecture, timelines, flows
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Start with official sources** - Repo, docs, company blog
|
||||
2. **Verify dates from commits/PRs** - More reliable than articles
|
||||
3. **Triangulate claims** - 2+ independent sources
|
||||
4. **Note conflicting info** - Don't hide contradictions
|
||||
5. **Distinguish fact vs opinion** - Label speculation clearly
|
||||
6. **CRITICAL: Always include inline citations** - Use `[citation:Title](URL)` format immediately after each claim from external sources
|
||||
7. **Extract URLs from search results** - web_search returns {title, url, snippet} - always use the URL field
|
||||
8. **Update as you go** - Don't wait until end to synthesize
|
||||
|
||||
### Citation Examples
|
||||
|
||||
**Good - With inline citations:**
|
||||
```markdown
|
||||
The project gained 10,000 stars within 3 months of launch [citation:GitHub Stats](https://github.com/owner/repo).
|
||||
The architecture uses LangGraph for workflow orchestration [citation:LangGraph Docs](https://langchain.com/langgraph).
|
||||
```
|
||||
|
||||
**Bad - Without citations:**
|
||||
```markdown
|
||||
The project gained 10,000 stars within 3 months of launch.
|
||||
The architecture uses LangGraph for workflow orchestration.
|
||||
```
|
||||
|
|
@ -1,192 +0,0 @@
|
|||
[!NOTE] Generate this report in the user's own language.
|
||||
|
||||
# {TITLE}
|
||||
|
||||
- **Research Date:** {DATE}
|
||||
- **Timestamp:** {TIMESTAMP}
|
||||
- **Confidence Level:** {CONFIDENCE_LEVEL}
|
||||
- **Subject:** {SUBJECT_DESCRIPTION}
|
||||
|
||||
---
|
||||
|
||||
## Repository Information
|
||||
|
||||
- **Name:** {REPOSITORY_NAME}
|
||||
- **Description:** {REPOSITORY_DESCRIPTION}
|
||||
- **URL:** {REPOSITORY_URL}
|
||||
- **Stars:** {REPOSITORY_STARS}
|
||||
- **Forks:** {REPOSITORY_FORKS}
|
||||
- **Open Issues:** {REPOSITORY_OPEN_ISSUES}
|
||||
- **Language(s):** {REPOSITORY_LANGUAGES}
|
||||
- **License:** {REPOSITORY_LICENSE}
|
||||
- **Created At:** {REPOSITORY_CREATED_AT}
|
||||
- **Updated At:** {REPOSITORY_UPDATED_AT}
|
||||
- **Pushed At:** {REPOSITORY_PUSHED_AT}
|
||||
- **Topics:** {REPOSITORY_TOPICS}
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
{EXECUTIVE_SUMMARY}
|
||||
|
||||
**IMPORTANT**: Include inline citations using `[citation:Title](URL)` format after each claim. Example:
|
||||
"The project gained 10k stars in 3 months [citation:GitHub Stats](https://github.com/owner/repo)."
|
||||
|
||||
---
|
||||
|
||||
## Complete Chronological Timeline
|
||||
|
||||
### PHASE 1: {PHASE_1_NAME}
|
||||
|
||||
#### {PHASE_1_PERIOD}
|
||||
|
||||
{PHASE_1_CONTENT}
|
||||
|
||||
### PHASE 2: {PHASE_2_NAME}
|
||||
|
||||
#### {PHASE_2_PERIOD}
|
||||
|
||||
{PHASE_2_CONTENT}
|
||||
|
||||
### PHASE 3: {PHASE_3_NAME}
|
||||
|
||||
#### {PHASE_3_PERIOD}
|
||||
|
||||
{PHASE_3_CONTENT}
|
||||
|
||||
---
|
||||
|
||||
## Key Analysis
|
||||
|
||||
**IMPORTANT**: Support each analysis point with inline citations `[citation:Title](URL)`.
|
||||
|
||||
### {ANALYSIS_SECTION_1_TITLE}
|
||||
|
||||
{ANALYSIS_SECTION_1_CONTENT}
|
||||
|
||||
### {ANALYSIS_SECTION_2_TITLE}
|
||||
|
||||
{ANALYSIS_SECTION_2_CONTENT}
|
||||
|
||||
---
|
||||
|
||||
## Architecture / System Overview
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Component A] --> B[Component B]
|
||||
B --> C[Component C]
|
||||
C --> D[Component D]
|
||||
```
|
||||
|
||||
{ARCHITECTURE_DESCRIPTION}
|
||||
|
||||
---
|
||||
|
||||
## Metrics & Impact Analysis
|
||||
|
||||
### Growth Trajectory
|
||||
|
||||
```
|
||||
{METRICS_TIMELINE}
|
||||
```
|
||||
|
||||
### Key Metrics
|
||||
|
||||
| Metric | Value | Assessment |
|
||||
|--------|-------|------------|
|
||||
| {METRIC_1} | {VALUE_1} | {ASSESSMENT_1} |
|
||||
| {METRIC_2} | {VALUE_2} | {ASSESSMENT_2} |
|
||||
| {METRIC_3} | {VALUE_3} | {ASSESSMENT_3} |
|
||||
|
||||
---
|
||||
|
||||
## Comparative Analysis
|
||||
|
||||
### Feature Comparison
|
||||
|
||||
| Feature | {SUBJECT} | {COMPETITOR_1} | {COMPETITOR_2} |
|
||||
|---------|-----------|----------------|----------------|
|
||||
| {FEATURE_1} | {SUBJ_F1} | {COMP1_F1} | {COMP2_F1} |
|
||||
| {FEATURE_2} | {SUBJ_F2} | {COMP1_F2} | {COMP2_F2} |
|
||||
| {FEATURE_3} | {SUBJ_F3} | {COMP1_F3} | {COMP2_F3} |
|
||||
|
||||
### Market Positioning
|
||||
|
||||
{MARKET_POSITIONING}
|
||||
|
||||
---
|
||||
|
||||
## Strengths & Weaknesses
|
||||
|
||||
### Strengths
|
||||
|
||||
{STRENGTHS}
|
||||
|
||||
### Areas for Improvement
|
||||
|
||||
{WEAKNESSES}
|
||||
|
||||
---
|
||||
|
||||
## Key Success Factors
|
||||
|
||||
{SUCCESS_FACTORS}
|
||||
|
||||
---
|
||||
|
||||
## Sources
|
||||
|
||||
### Primary Sources
|
||||
|
||||
{PRIMARY_SOURCES}
|
||||
|
||||
### Media Coverage
|
||||
|
||||
{MEDIA_SOURCES}
|
||||
|
||||
### Academic / Technical Sources
|
||||
|
||||
{ACADEMIC_SOURCES}
|
||||
|
||||
### Community Sources
|
||||
|
||||
{COMMUNITY_SOURCES}
|
||||
|
||||
---
|
||||
|
||||
## Confidence Assessment
|
||||
|
||||
**High Confidence (90%+) Claims:**
|
||||
{HIGH_CONFIDENCE_CLAIMS}
|
||||
|
||||
**Medium Confidence (70-89%) Claims:**
|
||||
{MEDIUM_CONFIDENCE_CLAIMS}
|
||||
|
||||
**Lower Confidence (50-69%) Claims:**
|
||||
{LOW_CONFIDENCE_CLAIMS}
|
||||
|
||||
---
|
||||
|
||||
## Research Methodology
|
||||
|
||||
This report was compiled using:
|
||||
|
||||
1. **Multi-source web search** - Broad discovery and targeted queries
|
||||
2. **GitHub repository analysis** - Commits, issues, PRs, activity metrics
|
||||
3. **Content extraction** - Official docs, technical articles, media coverage
|
||||
4. **Cross-referencing** - Verification across independent sources
|
||||
5. **Chronological reconstruction** - Timeline from timestamped data
|
||||
6. **Confidence scoring** - Claims weighted by source reliability
|
||||
|
||||
**Research Depth:** {RESEARCH_DEPTH}
|
||||
**Time Scope:** {TIME_SCOPE}
|
||||
**Geographic Scope:** {GEOGRAPHIC_SCOPE}
|
||||
|
||||
---
|
||||
|
||||
**Report Prepared By:** GitHub Deep Research by DeerFlow
|
||||
**Date:** {REPORT_DATE}
|
||||
**Report Version:** 1.0
|
||||
**Status:** Complete
|
||||
|
|
@ -1,331 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
GitHub API client for deep research.
|
||||
Uses requests for HTTP operations.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import sys
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
# Fallback to urllib if requests not available
|
||||
import urllib.error
|
||||
import urllib.parse
import urllib.request
|
||||
|
||||
class RequestsFallback:
|
||||
"""Minimal requests-like interface using urllib."""
|
||||
|
||||
class Response:
|
||||
def __init__(self, data: bytes, status: int):
|
||||
self._data = data
|
||||
self.status_code = status
|
||||
self.text = data.decode("utf-8", errors="replace")
|
||||
|
||||
def json(self):
|
||||
return json.loads(self._data)
|
||||
|
||||
def raise_for_status(self):
|
||||
if self.status_code >= 400:
|
||||
raise Exception(f"HTTP {self.status_code}")
|
||||
|
||||
@staticmethod
|
||||
def get(url: str, headers: dict = None, params: dict = None, timeout: int = 30):
|
||||
if params:
|
||||
query = "&".join(f"{k}={v}" for k, v in params.items())
|
||||
url = f"{url}?{query}"
|
||||
|
||||
req = urllib.request.Request(url, headers=headers or {})
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
||||
return RequestsFallback.Response(resp.read(), resp.status)
|
||||
except urllib.error.HTTPError as e:
|
||||
return RequestsFallback.Response(e.read(), e.code)
|
||||
|
||||
requests = RequestsFallback()
|
||||
|
||||
|
||||
class GitHubAPI:
|
||||
"""GitHub API client for repository analysis."""
|
||||
|
||||
BASE_URL = "https://api.github.com"
|
||||
|
||||
def __init__(self, token: Optional[str] = None):
|
||||
"""
|
||||
Initialize GitHub API client.
|
||||
|
||||
Args:
|
||||
token:
|
||||
Optional GitHub personal access token for higher rate limits.
|
||||
User can set it in .env by uncommenting the line "GITHUB_TOKEN=your-github-token".
|
||||
"""
|
||||
self.headers = {
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
"User-Agent": "Deep-Research-Bot/1.0",
|
||||
}
|
||||
if token:
|
||||
self.headers["Authorization"] = f"token {token}"
|
||||
|
||||
def _get(
|
||||
self, endpoint: str, params: Optional[Dict] = None, accept: Optional[str] = None
|
||||
) -> Any:
|
||||
"""Make GET request to GitHub API."""
|
||||
url = f"{self.BASE_URL}{endpoint}"
|
||||
headers = self.headers.copy()
|
||||
if accept:
|
||||
headers["Accept"] = accept
|
||||
|
||||
resp = requests.get(url, headers=headers, params=params, timeout=30)
|
||||
resp.raise_for_status()
|
||||
|
||||
if "application/vnd.github.raw" in (accept or ""):
|
||||
return resp.text
|
||||
return resp.json()
|
||||
|
||||
def get_repo_info(self, owner: str, repo: str) -> Dict:
|
||||
"""Get basic repository information."""
|
||||
return self._get(f"/repos/{owner}/{repo}")
|
||||
|
||||
def get_readme(self, owner: str, repo: str) -> str:
|
||||
"""Get repository README content as markdown."""
|
||||
try:
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/readme", accept="application/vnd.github.raw"
|
||||
)
|
||||
except Exception as e:
|
||||
return f"[README not found: {e}]"
|
||||
|
||||
def get_tree(
|
||||
self, owner: str, repo: str, branch: str = "main", recursive: bool = True
|
||||
) -> Dict:
|
||||
"""Get repository directory tree."""
|
||||
params = {"recursive": "1"} if recursive else {}
|
||||
try:
|
||||
return self._get(f"/repos/{owner}/{repo}/git/trees/{branch}", params)
|
||||
except Exception:
|
||||
# Try 'master' if 'main' fails
|
||||
if branch == "main":
|
||||
return self._get(f"/repos/{owner}/{repo}/git/trees/master", params)
|
||||
raise
|
||||
|
||||
def get_file_content(self, owner: str, repo: str, path: str) -> str:
|
||||
"""Get content of a specific file."""
|
||||
try:
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/contents/{path}",
|
||||
accept="application/vnd.github.raw",
|
||||
)
|
||||
except Exception as e:
|
||||
return f"[File not found: {e}]"
|
||||
|
||||
def get_languages(self, owner: str, repo: str) -> Dict[str, int]:
|
||||
"""Get repository languages and their bytes."""
|
||||
return self._get(f"/repos/{owner}/{repo}/languages")
|
||||
|
||||
def get_contributors(self, owner: str, repo: str, limit: int = 30) -> List[Dict]:
|
||||
"""Get repository contributors."""
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/contributors", params={"per_page": min(limit, 100)}
|
||||
)
|
||||
|
||||
def get_recent_commits(
|
||||
self, owner: str, repo: str, limit: int = 50, since: Optional[str] = None
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Get recent commits.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
limit: Max commits to fetch
|
||||
since: ISO date string to fetch commits since
|
||||
"""
|
||||
params = {"per_page": min(limit, 100)}
|
||||
if since:
|
||||
params["since"] = since
|
||||
return self._get(f"/repos/{owner}/{repo}/commits", params)
|
||||
|
||||
def get_issues(
|
||||
self,
|
||||
owner: str,
|
||||
repo: str,
|
||||
state: str = "all",
|
||||
limit: int = 30,
|
||||
labels: Optional[str] = None,
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Get repository issues.
|
||||
|
||||
Args:
|
||||
state: 'open', 'closed', or 'all'
|
||||
labels: Comma-separated label names
|
||||
"""
|
||||
params = {"state": state, "per_page": min(limit, 100)}
|
||||
if labels:
|
||||
params["labels"] = labels
|
||||
return self._get(f"/repos/{owner}/{repo}/issues", params)
|
||||
|
||||
def get_pull_requests(
|
||||
self, owner: str, repo: str, state: str = "all", limit: int = 30
|
||||
) -> List[Dict]:
|
||||
"""Get repository pull requests."""
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/pulls",
|
||||
params={"state": state, "per_page": min(limit, 100)},
|
||||
)
|
||||
|
||||
def get_releases(self, owner: str, repo: str, limit: int = 10) -> List[Dict]:
|
||||
"""Get repository releases."""
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/releases", params={"per_page": min(limit, 100)}
|
||||
)
|
||||
|
||||
def get_tags(self, owner: str, repo: str, limit: int = 20) -> List[Dict]:
|
||||
"""Get repository tags."""
|
||||
return self._get(
|
||||
f"/repos/{owner}/{repo}/tags", params={"per_page": min(limit, 100)}
|
||||
)
|
||||
|
||||
def search_issues(self, owner: str, repo: str, query: str, limit: int = 30) -> Dict:
|
||||
"""Search issues and PRs in repository."""
|
||||
q = f"repo:{owner}/{repo} {query}"
|
||||
return self._get("/search/issues", params={"q": q, "per_page": min(limit, 100)})
|
||||
|
||||
def get_commit_activity(self, owner: str, repo: str) -> List[Dict]:
|
||||
"""Get weekly commit activity for the last year."""
|
||||
return self._get(f"/repos/{owner}/{repo}/stats/commit_activity")
|
||||
|
||||
def get_code_frequency(self, owner: str, repo: str) -> List[List[int]]:
|
||||
"""Get weekly additions/deletions."""
|
||||
return self._get(f"/repos/{owner}/{repo}/stats/code_frequency")
|
||||
|
||||
def format_tree(self, tree_data: Dict, max_depth: int = 3) -> str:
|
||||
"""
|
||||
Format tree data as text directory structure.
|
||||
|
||||
Args:
|
||||
tree_data: Response from get_tree()
|
||||
max_depth: Maximum depth to display
|
||||
"""
|
||||
if "tree" not in tree_data:
|
||||
return "[Unable to parse tree]"
|
||||
|
||||
lines = []
|
||||
for item in tree_data["tree"]:
|
||||
path = item["path"]
|
||||
depth = path.count("/")
|
||||
if depth < max_depth:
|
||||
indent = " " * depth
|
||||
name = path.split("/")[-1]
|
||||
if item["type"] == "tree":
|
||||
lines.append(f"{indent}{name}/")
|
||||
else:
|
||||
lines.append(f"{indent}{name}")
|
||||
|
||||
return "\n".join(lines[:100]) # Limit output
|
||||
|
||||
def summarize_repo(self, owner: str, repo: str) -> Dict:
|
||||
"""
|
||||
Get comprehensive repository summary.
|
||||
|
||||
Returns dict with: info, languages, contributor_count,
|
||||
recent_activity, top_issues, latest_release
|
||||
"""
|
||||
info = self.get_repo_info(owner, repo)
|
||||
|
||||
summary = {
|
||||
"name": info.get("full_name"),
|
||||
"description": info.get("description"),
|
||||
"url": info.get("html_url"),
|
||||
"stars": info.get("stargazers_count"),
|
||||
"forks": info.get("forks_count"),
|
||||
"open_issues": info.get("open_issues_count"),
|
||||
"language": info.get("language"),
|
||||
"license": info.get("license", {}).get("spdx_id")
|
||||
if info.get("license")
|
||||
else None,
|
||||
"created_at": info.get("created_at"),
|
||||
"updated_at": info.get("updated_at"),
|
||||
"pushed_at": info.get("pushed_at"),
|
||||
"default_branch": info.get("default_branch"),
|
||||
"topics": info.get("topics", []),
|
||||
}
|
||||
|
||||
# Add languages
|
||||
try:
|
||||
summary["languages"] = self.get_languages(owner, repo)
|
||||
except Exception:
|
||||
summary["languages"] = {}
|
||||
|
||||
# Add contributor count
|
||||
try:
|
||||
# GitHub reports the total via the Link header; we approximate from one page
|
||||
summary["contributor_count"] = len(
|
||||
self.get_contributors(owner, repo, limit=100)
|
||||
)
|
||||
except Exception:
|
||||
summary["contributor_count"] = "N/A"
|
||||
|
||||
# Latest release
|
||||
try:
|
||||
releases = self.get_releases(owner, repo, limit=1)
|
||||
if releases:
|
||||
summary["latest_release"] = {
|
||||
"tag": releases[0].get("tag_name"),
|
||||
"name": releases[0].get("name"),
|
||||
"date": releases[0].get("published_at"),
|
||||
}
|
||||
except Exception:
|
||||
summary["latest_release"] = None
|
||||
|
||||
return summary
|
||||
|
||||
|
||||
def main():
|
||||
"""CLI interface for testing."""
|
||||
if len(sys.argv) < 3:
|
||||
print("Usage: python github_api.py <owner> <repo> [command]")
|
||||
print("Commands: info, readme, tree, languages, contributors,")
|
||||
print(" commits, issues, prs, releases, summary")
|
||||
sys.exit(1)
|
||||
|
||||
owner, repo = sys.argv[1], sys.argv[2]
|
||||
command = sys.argv[3] if len(sys.argv) > 3 else "summary"
|
||||
|
||||
token = os.getenv("GITHUB_TOKEN")
|
||||
api = GitHubAPI(token=token)
|
||||
|
||||
commands = {
|
||||
"info": lambda: api.get_repo_info(owner, repo),
|
||||
"readme": lambda: api.get_readme(owner, repo),
|
||||
"tree": lambda: api.format_tree(api.get_tree(owner, repo)),
|
||||
"languages": lambda: api.get_languages(owner, repo),
|
||||
"contributors": lambda: api.get_contributors(owner, repo),
|
||||
"commits": lambda: api.get_recent_commits(owner, repo),
|
||||
"issues": lambda: api.get_issues(owner, repo),
|
||||
"prs": lambda: api.get_pull_requests(owner, repo),
|
||||
"releases": lambda: api.get_releases(owner, repo),
|
||||
"summary": lambda: api.summarize_repo(owner, repo),
|
||||
}
|
||||
|
||||
if command not in commands:
|
||||
print(f"Unknown command: {command}")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
result = commands[command]()
|
||||
if isinstance(result, str):
|
||||
print(result)
|
||||
else:
|
||||
print(json.dumps(result, indent=2, default=str))
|
||||
except Exception as e:
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
# RunningHub API Configuration
|
||||
# Copy this file to .env and fill in your actual API key
|
||||
|
||||
# RunningHub API Key for image generation
|
||||
# Get your API key from: https://www.runninghub.cn
|
||||
RUNNINGHUB_API_KEY=your_api_key_here
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
# Environment variables
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
venv/
|
||||
env/
|
||||
ENV/
|
||||
|
||||
# Output files
|
||||
*.jpg
|
||||
*.jpeg
|
||||
*.png
|
||||
*.webp
|
||||
outputs/
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
|
@ -1,166 +0,0 @@
|
|||
---
|
||||
name: image-generation
|
||||
description: Use this skill when the user requests to generate, create, imagine, or visualize images including characters, scenes, products, or any visual content. Supports structured prompts and reference images for guided generation.
|
||||
---
|
||||
|
||||
# Image Generation Skill
|
||||
|
||||
## Overview
|
||||
|
||||
This skill generates high-quality images using RunningHub API with structured prompts and a Python script. The workflow includes creating JSON-formatted prompts and executing image generation through asynchronous task submission.
|
||||
|
||||
## Core Capabilities
|
||||
|
||||
- Create structured JSON prompts for AIGC image generation
|
||||
- Generate images through RunningHub's Z-Image Turbo LoRA API
|
||||
- Support asynchronous task submission and status polling
|
||||
- Handle various image generation scenarios (character design, scenes, products, etc.)
|
||||
- Support multiple aspect ratios and output formats (PNG, JPEG, WEBP)
|
||||
|
||||
## Configuration
|
||||
|
||||
### API Key Setup
|
||||
|
||||
This skill uses RunningHub API for image generation. You need to configure your API key before using the skill.
|
||||
|
||||
**Option 1: Environment Variable (Recommended)**
|
||||
```bash
|
||||
# Set the RUNNINGHUB_API_KEY environment variable
|
||||
export RUNNINGHUB_API_KEY=your_api_key_here
|
||||
|
||||
# Or on Windows:
|
||||
set RUNNINGHUB_API_KEY=your_api_key_here
|
||||
```
|
||||
|
||||
**Option 2: .env File**
|
||||
1. Copy `.env.example` to `.env`:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
2. Edit `.env` and add your API key:
|
||||
```
|
||||
RUNNINGHUB_API_KEY=your_api_key_here
|
||||
```
|
||||
3. The `.env` file is automatically excluded from version control via `.gitignore`
|
||||
|
||||
**Security Notes:**
|
||||
- Never commit `.env` files to version control
|
||||
- Never hardcode API keys in source code
|
||||
- Rotate your API keys if they are accidentally exposed
|
||||
- Get your API key from: https://www.runninghub.cn
|
||||
|
||||
## Workflow
|
||||
|
||||
### Step 1: Understand Requirements
|
||||
|
||||
When a user requests image generation, identify:
|
||||
|
||||
- Subject/content: What should be in the image
|
||||
- Style preferences: Art style, mood, color palette
|
||||
- Technical specs: Aspect ratio, composition, lighting
|
||||
- Reference images: Any images to guide generation
|
||||
- There is no need to inspect the folders under `/mnt/user-data` first
|
||||
|
||||
### Step 2: Create Structured Prompt
|
||||
|
||||
Generate a structured JSON file in `/mnt/user-data/workspace/` with naming pattern: `{descriptive-name}.json`
|
||||
|
||||
### Step 3: Execute Generation
|
||||
|
||||
Call the Python script:
|
||||
```bash
|
||||
python /mnt/skills/public/image-generation/scripts/generate.py \
|
||||
--prompt-file /mnt/user-data/workspace/prompt-file.json \
|
||||
--output-file /mnt/user-data/outputs/generated-image.jpg \
|
||||
--aspect-ratio 16:9
|
||||
```
|
||||
|
||||
Parameters:
|
||||
|
||||
- `--prompt-file`: Absolute path to JSON prompt file (required)
|
||||
- `--output-file`: Absolute path to output image file (required)
|
||||
- `--aspect-ratio`: Aspect ratio of the generated image (optional, default: 16:9)
|
||||
|
||||
[!NOTE]
|
||||
- The script uses RunningHub API which requires `RUNNINGHUB_API_KEY` environment variable to be set
|
||||
- Do NOT read the python file, just call it with the parameters
|
||||
- The script automatically handles task submission, status polling, and image download
|
||||
|
||||
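
The submit-and-poll flow the script handles internally looks roughly like the sketch below. The endpoint URLs, JSON field names, and state values are placeholders assumed for illustration; only the overall shape (submit a task, poll its status, download the result) comes from the skill description:

```python
import os
import time

import requests  # assumed dependency for this sketch

# Placeholder endpoints; the real RunningHub API paths may differ.
SUBMIT_URL = "https://example.invalid/api/task/submit"
STATUS_URL = "https://example.invalid/api/task/status"


def submit_and_poll(prompt: dict, output_file: str, aspect_ratio: str = "16:9") -> None:
    """Sketch of the submit -> poll -> download flow described above."""
    headers = {"Authorization": f"Bearer {os.environ['RUNNINGHUB_API_KEY']}"}

    # 1. Submit the generation task asynchronously.
    task = requests.post(
        SUBMIT_URL,
        json={"prompt": prompt, "aspect_ratio": aspect_ratio},
        headers=headers,
        timeout=30,
    ).json()
    task_id = task["task_id"]  # field name assumed

    # 2. Poll until the task reaches a terminal state.
    while True:
        status = requests.get(
            STATUS_URL, params={"task_id": task_id}, headers=headers, timeout=30
        ).json()
        state = status.get("state")  # state values assumed
        if state == "SUCCESS":
            break
        if state == "FAILED":
            raise RuntimeError(f"generation failed: {status}")
        time.sleep(5)

    # 3. Download the generated image to the requested output path.
    image = requests.get(status["image_url"], headers=headers, timeout=60)
    image.raise_for_status()
    with open(output_file, "wb") as f:
        f.write(image.content)
```
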
## Character Generation Example

User request: "Create a Tokyo street-style woman character in the 1990s"

Create the prompt file `/mnt/user-data/workspace/asian-woman.json`:

```json
{
  "characters": [{
    "gender": "female",
    "age": "mid-20s",
    "ethnicity": "Japanese",
    "body_type": "slender, elegant",
    "facial_features": "delicate features, expressive eyes, subtle makeup with emphasis on lips, long dark hair partially wet from rain",
    "clothing": "stylish trench coat, designer handbag, high heels, contemporary Tokyo street fashion",
    "accessories": "minimal jewelry, statement earrings, leather handbag",
    "era": "1990s"
  }],
  "negative_prompt": "blurry face, deformed, low quality, overly sharp digital look, oversaturated colors, artificial lighting, studio setting, posed, selfie angle",
  "style": "Leica M11 street photography aesthetic, film-like rendering, natural color palette with slight warmth, bokeh background blur, analog photography feel",
  "composition": "medium shot, rule of thirds, subject slightly off-center, environmental context of Tokyo street visible, shallow depth of field isolating subject",
  "lighting": "neon lights from signs and storefronts, wet pavement reflections, soft ambient city glow, natural street lighting, rim lighting from background neons",
  "color_palette": "muted naturalistic tones, warm skin tones, cool blue and magenta neon accents, desaturated compared to digital photography, film grain texture"
}
```

Execute generation (the `--prompt-file` argument must point at the file created above):

```bash
python /mnt/skills/public/image-generation/scripts/generate.py \
  --prompt-file /mnt/user-data/workspace/asian-woman.json \
  --output-file /mnt/user-data/outputs/asian-woman-01.jpg \
  --aspect-ratio 2:3
```
## Common Scenarios

Use a different JSON schema for each scenario; a minimal scene-generation example is sketched after these lists.

**Character Design**:
- Physical attributes (gender, age, ethnicity, body type)
- Facial features and expressions
- Clothing and accessories
- Historical era or setting
- Pose and context

**Scene Generation**:
- Environment description
- Time of day, weather
- Mood and atmosphere
- Focal points and composition

**Product Visualization**:
- Product details and materials
- Lighting setup
- Background and context
- Presentation angle
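
For illustration, a scene-generation prompt might be assembled like this. The field names mirror the attribute list above but are assumptions; only the character schema shown earlier comes from this skill's own example:

```python
import json
from pathlib import Path

# Hypothetical scene prompt; the field names are illustrative assumptions,
# not a schema confirmed by the RunningHub API.
scene_prompt = {
    "environment": "narrow Kyoto alley lined with wooden machiya houses",
    "time_of_day": "dusk",
    "weather": "light drizzle",
    "mood": "quiet, contemplative",
    "composition": "leading lines down the alley, low vantage point, single distant figure as focal point",
    "negative_prompt": "crowds, modern signage, low quality, oversaturated colors",
}

# Write it where Step 2 expects prompt files to live.
Path("/mnt/user-data/workspace/kyoto-alley.json").write_text(
    json.dumps(scene_prompt, indent=2)
)
```
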
## Specific Templates

Read the following template file only when it matches the user request.

- [Doraemon Comic](templates/doraemon.md)
## Output Handling

After generation:

- Images are typically saved in `/mnt/user-data/outputs/`
- Share generated images with the user using the present_files tool
- Provide a brief description of the generation result
- Offer to iterate if adjustments are needed

## Notes

- Always use English for prompts, regardless of the user's language
- JSON format ensures structured, parsable prompts
- Iterative refinement is normal for optimal results
- For character generation, include the detailed character object plus a consolidated prompt field (a sketch follows these notes)
- The script automatically polls task status and downloads the generated image
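
A minimal sketch of that character-generation note, assuming the consolidated field is named `prompt` (the field name is an assumption; the `characters` object follows the example schema above):

```python
# Hypothetical: detailed character object plus a consolidated "prompt" field.
character_prompt = {
    "characters": [{
        "gender": "female",
        "age": "mid-20s",
        "era": "1990s",
    }],
    "prompt": "mid-20s Japanese woman in 1990s Tokyo street fashion, film photography look",
}
```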