Initial commit: hardened DeerFlow factory
Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection hardening: - New deerflow.security package: content_delimiter, html_cleaner, sanitizer (8 layers — invisible chars, control chars, symbols, NFC, PUA, tag chars, horizontal whitespace collapse with newline/tab preservation, length cap) - New deerflow.community.searx package: web_search, web_fetch, image_search backed by a private SearX instance, every external string sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>> delimiters - All native community web providers (ddg_search, tavily, exa, firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail stubs that raise NativeWebToolDisabledError at import time, so a misconfigured tool.use path fails loud rather than silently falling back to unsanitized output - Native client back-doors (jina_client.py, infoquest_client.py) stubbed too - Native-tool tests quarantined under tests/_disabled_native/ (collect_ignore_glob via local conftest.py) - Sanitizer Layer 7 fix: only collapse horizontal whitespace, preserve newlines and tabs so list/table structure survives - Hardened runtime config.yaml references only the searx-backed tools - Factory overlay (backend/) kept in sync with deer-flow tree as a reference / source See HARDENING.md for the full audit trail and verification steps.
This commit is contained in:
@@ -0,0 +1,39 @@
|
||||
"""LangGraph-compatible runtime — runs, streaming, and lifecycle management.
|
||||
|
||||
Re-exports the public API of :mod:`~deerflow.runtime.runs` and
|
||||
:mod:`~deerflow.runtime.stream_bridge` so that consumers can import
|
||||
directly from ``deerflow.runtime``.
|
||||
"""
|
||||
|
||||
from .runs import ConflictError, DisconnectMode, RunManager, RunRecord, RunStatus, UnsupportedStrategyError, run_agent
|
||||
from .serialization import serialize, serialize_channel_values, serialize_lc_object, serialize_messages_tuple
|
||||
from .store import get_store, make_store, reset_store, store_context
|
||||
from .stream_bridge import END_SENTINEL, HEARTBEAT_SENTINEL, MemoryStreamBridge, StreamBridge, StreamEvent, make_stream_bridge
|
||||
|
||||
__all__ = [
|
||||
# runs
|
||||
"ConflictError",
|
||||
"DisconnectMode",
|
||||
"RunManager",
|
||||
"RunRecord",
|
||||
"RunStatus",
|
||||
"UnsupportedStrategyError",
|
||||
"run_agent",
|
||||
# serialization
|
||||
"serialize",
|
||||
"serialize_channel_values",
|
||||
"serialize_lc_object",
|
||||
"serialize_messages_tuple",
|
||||
# store
|
||||
"get_store",
|
||||
"make_store",
|
||||
"reset_store",
|
||||
"store_context",
|
||||
# stream_bridge
|
||||
"END_SENTINEL",
|
||||
"HEARTBEAT_SENTINEL",
|
||||
"MemoryStreamBridge",
|
||||
"StreamBridge",
|
||||
"StreamEvent",
|
||||
"make_stream_bridge",
|
||||
]
|
||||
@@ -0,0 +1,15 @@
|
||||
"""Run lifecycle management for LangGraph Platform API compatibility."""
|
||||
|
||||
from .manager import ConflictError, RunManager, RunRecord, UnsupportedStrategyError
|
||||
from .schemas import DisconnectMode, RunStatus
|
||||
from .worker import run_agent
|
||||
|
||||
__all__ = [
|
||||
"ConflictError",
|
||||
"DisconnectMode",
|
||||
"RunManager",
|
||||
"RunRecord",
|
||||
"RunStatus",
|
||||
"UnsupportedStrategyError",
|
||||
"run_agent",
|
||||
]
|
||||
@@ -0,0 +1,210 @@
|
||||
"""In-memory run registry."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from .schemas import DisconnectMode, RunStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _now_iso() -> str:
|
||||
return datetime.now(UTC).isoformat()
|
||||
|
||||
|
||||
@dataclass
class RunRecord:
    """Mutable record for a single run.

    Created and mutated by :class:`RunManager`; the worker reads
    ``abort_event`` / ``abort_action`` to react to cancellation.
    """

    run_id: str
    thread_id: str
    # Optional assistant identifier supplied at creation time.
    assistant_id: str | None
    status: RunStatus
    # Behaviour when the SSE consumer disconnects.
    on_disconnect: DisconnectMode
    # "reject" | "interrupt" | "rollback" — validated in RunManager.create_or_reject.
    multitask_strategy: str = "reject"
    metadata: dict = field(default_factory=dict)
    kwargs: dict = field(default_factory=dict)
    # ISO-8601 UTC timestamps (see _now_iso); filled in by the manager.
    created_at: str = ""
    updated_at: str = ""
    # Background asyncio task executing the run; excluded from repr (noisy).
    task: asyncio.Task | None = field(default=None, repr=False)
    # Set to request a cooperative stop; the worker checks it between chunks.
    abort_event: asyncio.Event = field(default_factory=asyncio.Event, repr=False)
    # "interrupt" (keep checkpoint) or "rollback" (restore pre-run state).
    abort_action: str = "interrupt"
    # Populated when status is RunStatus.error.
    error: str | None = None
|
||||
|
||||
|
||||
class RunManager:
    """In-memory run registry. All mutations are protected by an asyncio lock.

    Records live only in process memory — restarting the server loses all
    run history. :meth:`create_or_reject` is the lock-safe entry point
    that enforces the multitask strategy; plain :meth:`create` registers
    without any inflight check.
    """

    def __init__(self) -> None:
        # run_id -> record; dict insertion order doubles as creation order.
        self._runs: dict[str, RunRecord] = {}
        # Guards every mutation of self._runs and of record fields.
        self._lock = asyncio.Lock()

    async def create(
        self,
        thread_id: str,
        assistant_id: str | None = None,
        *,
        on_disconnect: DisconnectMode = DisconnectMode.cancel,
        metadata: dict | None = None,
        kwargs: dict | None = None,
        multitask_strategy: str = "reject",
    ) -> RunRecord:
        """Create a new pending run and register it.

        Args:
            thread_id: Thread the run belongs to.
            assistant_id: Optional assistant identifier stored on the record.
            on_disconnect: Behaviour when the SSE consumer disconnects.
            metadata: Arbitrary caller metadata (defaults to ``{}``).
            kwargs: Extra execution parameters (defaults to ``{}``).
            multitask_strategy: Stored on the record but NOT validated or
                enforced here — use :meth:`create_or_reject` for that.

        Returns:
            The freshly registered :class:`RunRecord` in ``pending`` state.
        """
        run_id = str(uuid.uuid4())
        now = _now_iso()
        record = RunRecord(
            run_id=run_id,
            thread_id=thread_id,
            assistant_id=assistant_id,
            status=RunStatus.pending,
            on_disconnect=on_disconnect,
            multitask_strategy=multitask_strategy,
            metadata=metadata or {},
            kwargs=kwargs or {},
            created_at=now,
            updated_at=now,
        )
        async with self._lock:
            self._runs[run_id] = record
            logger.info("Run created: run_id=%s thread_id=%s", run_id, thread_id)
            return record

    def get(self, run_id: str) -> RunRecord | None:
        """Return a run record by ID, or ``None``.

        Lock-free single dict read — presumably safe because all access
        happens on one event-loop thread; NOTE(review): confirm if this
        manager is ever shared across loops.
        """
        return self._runs.get(run_id)

    async def list_by_thread(self, thread_id: str) -> list[RunRecord]:
        """Return all runs for a given thread, newest first."""
        async with self._lock:
            # Dict insertion order matches creation order, so reversing it gives
            # us deterministic newest-first results even when timestamps tie.
            return [r for r in reversed(self._runs.values()) if r.thread_id == thread_id]

    async def set_status(self, run_id: str, status: RunStatus, *, error: str | None = None) -> None:
        """Transition a run to a new status.

        Args:
            run_id: Target run; unknown ids are logged and ignored.
            status: New lifecycle status.
            error: Optional error message; stored only when not ``None``.
        """
        async with self._lock:
            record = self._runs.get(run_id)
            if record is None:
                logger.warning("set_status called for unknown run %s", run_id)
                return
            record.status = status
            record.updated_at = _now_iso()
            if error is not None:
                record.error = error
            logger.info("Run %s -> %s", run_id, status.value)

    async def cancel(self, run_id: str, *, action: str = "interrupt") -> bool:
        """Request cancellation of a run.

        Args:
            run_id: The run ID to cancel.
            action: "interrupt" keeps checkpoint, "rollback" reverts to pre-run state.

        Sets the abort event with the action reason and cancels the asyncio task.
        Returns ``True`` if the run was in-flight and cancellation was initiated.
        """
        async with self._lock:
            record = self._runs.get(run_id)
            if record is None:
                return False
            # Only pending/running runs can be cancelled; terminal states are final.
            if record.status not in (RunStatus.pending, RunStatus.running):
                return False
            record.abort_action = action
            record.abort_event.set()
            # Cancel the worker task; the worker's CancelledError handler
            # performs the rollback when action == "rollback".
            if record.task is not None and not record.task.done():
                record.task.cancel()
            record.status = RunStatus.interrupted
            record.updated_at = _now_iso()
            logger.info("Run %s cancelled (action=%s)", run_id, action)
            return True

    async def create_or_reject(
        self,
        thread_id: str,
        assistant_id: str | None = None,
        *,
        on_disconnect: DisconnectMode = DisconnectMode.cancel,
        metadata: dict | None = None,
        kwargs: dict | None = None,
        multitask_strategy: str = "reject",
    ) -> RunRecord:
        """Atomically check for inflight runs and create a new one.

        For ``reject`` strategy, raises ``ConflictError`` if thread
        already has a pending/running run. For ``interrupt``/``rollback``,
        cancels inflight runs before creating.

        This method holds the lock across both the check and the insert,
        eliminating the TOCTOU race in separate ``has_inflight`` + ``create``.
        """
        run_id = str(uuid.uuid4())
        now = _now_iso()

        _supported_strategies = ("reject", "interrupt", "rollback")

        async with self._lock:
            # Validate the strategy first so an unsupported value never
            # cancels anything.
            if multitask_strategy not in _supported_strategies:
                raise UnsupportedStrategyError(f"Multitask strategy '{multitask_strategy}' is not yet supported. Supported strategies: {', '.join(_supported_strategies)}")

            inflight = [r for r in self._runs.values() if r.thread_id == thread_id and r.status in (RunStatus.pending, RunStatus.running)]

            if multitask_strategy == "reject" and inflight:
                raise ConflictError(f"Thread {thread_id} already has an active run")

            # interrupt/rollback: abort every inflight run (same steps as
            # ``cancel``, inlined here because the lock is already held).
            if multitask_strategy in ("interrupt", "rollback") and inflight:
                for r in inflight:
                    r.abort_action = multitask_strategy
                    r.abort_event.set()
                    if r.task is not None and not r.task.done():
                        r.task.cancel()
                    r.status = RunStatus.interrupted
                    r.updated_at = now
                logger.info(
                    "Cancelled %d inflight run(s) on thread %s (strategy=%s)",
                    len(inflight),
                    thread_id,
                    multitask_strategy,
                )

            record = RunRecord(
                run_id=run_id,
                thread_id=thread_id,
                assistant_id=assistant_id,
                status=RunStatus.pending,
                on_disconnect=on_disconnect,
                multitask_strategy=multitask_strategy,
                metadata=metadata or {},
                kwargs=kwargs or {},
                created_at=now,
                updated_at=now,
            )
            self._runs[run_id] = record

            logger.info("Run created: run_id=%s thread_id=%s", run_id, thread_id)
            return record

    async def has_inflight(self, thread_id: str) -> bool:
        """Return ``True`` if *thread_id* has a pending or running run."""
        async with self._lock:
            return any(r.thread_id == thread_id and r.status in (RunStatus.pending, RunStatus.running) for r in self._runs.values())

    async def cleanup(self, run_id: str, *, delay: float = 300) -> None:
        """Remove a run record after an optional delay.

        Args:
            run_id: Record to drop; a missing id is a no-op.
            delay: Seconds to wait before removal. The sleep happens
                outside the lock — presumably so late consumers can still
                read the final status in the meantime.
        """
        if delay > 0:
            await asyncio.sleep(delay)
        async with self._lock:
            self._runs.pop(run_id, None)
            logger.debug("Run record %s cleaned up", run_id)
|
||||
|
||||
|
||||
class ConflictError(Exception):
    """Raised by ``RunManager.create_or_reject`` when ``multitask_strategy`` is
    ``"reject"`` and the target thread already has a pending or running run."""
|
||||
|
||||
|
||||
class UnsupportedStrategyError(Exception):
    """Raised by ``RunManager.create_or_reject`` when a ``multitask_strategy``
    value is not one of the implemented strategies (reject/interrupt/rollback)."""
|
||||
@@ -0,0 +1,21 @@
|
||||
"""Run status and disconnect mode enums."""
|
||||
|
||||
from enum import StrEnum
|
||||
|
||||
|
||||
class RunStatus(StrEnum):
    """Lifecycle status of a single run.

    ``pending`` → ``running`` → one of the terminal states
    (``success`` / ``error`` / ``timeout`` / ``interrupted``).
    """

    # Registered by RunManager but not yet marked running by the worker.
    pending = "pending"
    # The worker is streaming the agent.
    running = "running"
    success = "success"
    error = "error"
    timeout = "timeout"
    # Cancelled/aborted before completing.
    interrupted = "interrupted"
|
||||
|
||||
|
||||
class DisconnectMode(StrEnum):
    """Behaviour when the SSE consumer disconnects."""

    cancel = "cancel"
    # Trailing underscore because "continue" is a Python keyword;
    # the wire value is still "continue".
    continue_ = "continue"
|
||||
@@ -0,0 +1,381 @@
|
||||
"""Background agent execution.
|
||||
|
||||
Runs an agent graph inside an ``asyncio.Task``, publishing events to
|
||||
a :class:`StreamBridge` as they are produced.
|
||||
|
||||
Uses ``graph.astream(stream_mode=[...])`` which gives correct full-state
|
||||
snapshots for ``values`` mode, proper ``{node: writes}`` for ``updates``,
|
||||
and ``(chunk, metadata)`` tuples for ``messages`` mode.
|
||||
|
||||
Note: ``events`` mode is not supported through the gateway — it requires
|
||||
``graph.astream_events()`` which cannot simultaneously produce ``values``
|
||||
snapshots. The JS open-source LangGraph API server works around this via
|
||||
internal checkpoint callbacks that are not exposed in the Python public API.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import copy
|
||||
import inspect
|
||||
import logging
|
||||
from typing import Any, Literal
|
||||
|
||||
from deerflow.runtime.serialization import serialize
|
||||
from deerflow.runtime.stream_bridge import StreamBridge
|
||||
|
||||
from .manager import RunManager, RunRecord
|
||||
from .schemas import RunStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Valid stream_mode values for LangGraph's graph.astream()
|
||||
_VALID_LG_MODES = {"values", "updates", "checkpoints", "tasks", "debug", "messages", "custom"}
|
||||
|
||||
|
||||
async def run_agent(
    bridge: StreamBridge,
    run_manager: RunManager,
    record: RunRecord,
    *,
    checkpointer: Any,
    store: Any | None = None,
    agent_factory: Any,
    graph_input: dict,
    config: dict,
    stream_modes: list[str] | None = None,
    stream_subgraphs: bool = False,
    interrupt_before: list[str] | Literal["*"] | None = None,
    interrupt_after: list[str] | Literal["*"] | None = None,
) -> None:
    """Execute an agent in the background, publishing events to *bridge*.

    Args:
        bridge: Stream bridge receiving ``publish(run_id, event, payload)``.
        run_manager: Registry used for status transitions of this run.
        record: The run's :class:`RunRecord`; its ``abort_event`` and
            ``abort_action`` drive cooperative cancellation/rollback.
        checkpointer: LangGraph checkpointer, or ``None`` to run stateless.
        store: Optional LangGraph store attached to the agent.
        agent_factory: Callable invoked as ``agent_factory(config=...)``
            that returns the graph to stream.
        graph_input: Input payload forwarded to ``agent.astream``.
        config: Run config. NOTE: mutated in place — ``configurable`` and
            (when present) ``context`` entries are added below.
        stream_modes: Requested stream modes; defaults to ``["values"]``.
        stream_subgraphs: Whether subgraph events are streamed too.
        interrupt_before: Nodes (or ``"*"``) to pause before.
        interrupt_after: Nodes (or ``"*"``) to pause after.
    """

    run_id = record.run_id
    thread_id = record.thread_id
    requested_modes: set[str] = set(stream_modes or ["values"])
    # Pre-run checkpoint state, captured below so "rollback" can restore it.
    pre_run_checkpoint_id: str | None = None
    pre_run_snapshot: dict[str, Any] | None = None
    snapshot_capture_failed = False

    # Track whether "events" was requested but skipped
    if "events" in requested_modes:
        logger.info(
            "Run %s: 'events' stream_mode not supported in gateway (requires astream_events + checkpoint callbacks). Skipping.",
            run_id,
        )

    try:
        # 1. Mark running
        await run_manager.set_status(run_id, RunStatus.running)

        # Snapshot the latest pre-run checkpoint so rollback can restore it.
        # Failure here only disables rollback (flag checked later); the run
        # itself still proceeds.
        if checkpointer is not None:
            try:
                config_for_check = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
                ckpt_tuple = await checkpointer.aget_tuple(config_for_check)
                if ckpt_tuple is not None:
                    ckpt_config = getattr(ckpt_tuple, "config", {}).get("configurable", {})
                    pre_run_checkpoint_id = ckpt_config.get("checkpoint_id")
                    # Deep copies: the live checkpoint dicts are mutated by
                    # the run, so the snapshot must be independent.
                    pre_run_snapshot = {
                        "checkpoint_ns": ckpt_config.get("checkpoint_ns", ""),
                        "checkpoint": copy.deepcopy(getattr(ckpt_tuple, "checkpoint", {})),
                        "metadata": copy.deepcopy(getattr(ckpt_tuple, "metadata", {})),
                        "pending_writes": copy.deepcopy(getattr(ckpt_tuple, "pending_writes", []) or []),
                    }
            except Exception:
                snapshot_capture_failed = True
                logger.warning("Could not capture pre-run checkpoint snapshot for run %s", run_id, exc_info=True)

        # 2. Publish metadata — useStream needs both run_id AND thread_id
        await bridge.publish(
            run_id,
            "metadata",
            {
                "run_id": run_id,
                "thread_id": thread_id,
            },
        )

        # 3. Build the agent
        from langchain_core.runnables import RunnableConfig
        from langgraph.runtime import Runtime

        # Inject runtime context so middlewares can access thread_id
        # (langgraph-cli does this automatically; we must do it manually)
        runtime = Runtime(context={"thread_id": thread_id}, store=store)
        # If the caller already set a ``context`` key (LangGraph >= 0.6.0
        # prefers it over ``configurable`` for thread-level data), make
        # sure ``thread_id`` is available there too.
        if "context" in config and isinstance(config["context"], dict):
            config["context"].setdefault("thread_id", thread_id)
        config.setdefault("configurable", {})["__pregel_runtime"] = runtime

        runnable_config = RunnableConfig(**config)
        agent = agent_factory(config=runnable_config)

        # 4. Attach checkpointer and store
        if checkpointer is not None:
            agent.checkpointer = checkpointer
        if store is not None:
            agent.store = store

        # 5. Set interrupt nodes
        if interrupt_before:
            agent.interrupt_before_nodes = interrupt_before
        if interrupt_after:
            agent.interrupt_after_nodes = interrupt_after

        # 6. Build LangGraph stream_mode list
        # "events" is NOT a valid astream mode — skip it
        # "messages-tuple" maps to LangGraph's "messages" mode
        lg_modes: list[str] = []
        for m in requested_modes:
            if m == "messages-tuple":
                lg_modes.append("messages")
            elif m == "events":
                # Skipped — see log above
                continue
            elif m in _VALID_LG_MODES:
                lg_modes.append(m)
        if not lg_modes:
            lg_modes = ["values"]

        # Deduplicate while preserving order
        # (requested_modes is a set, so "messages-tuple" + "messages" can
        # both have mapped to "messages")
        seen: set[str] = set()
        deduped: list[str] = []
        for m in lg_modes:
            if m not in seen:
                seen.add(m)
                deduped.append(m)
        lg_modes = deduped

        logger.info("Run %s: streaming with modes %s (requested: %s)", run_id, lg_modes, requested_modes)

        # 7. Stream using graph.astream
        if len(lg_modes) == 1 and not stream_subgraphs:
            # Single mode, no subgraphs: astream yields raw chunks
            single_mode = lg_modes[0]
            async for chunk in agent.astream(graph_input, config=runnable_config, stream_mode=single_mode):
                # Cooperative abort: checked between chunks only.
                if record.abort_event.is_set():
                    logger.info("Run %s abort requested — stopping", run_id)
                    break
                sse_event = _lg_mode_to_sse_event(single_mode)
                await bridge.publish(run_id, sse_event, serialize(chunk, mode=single_mode))
        else:
            # Multiple modes or subgraphs: astream yields tuples
            async for item in agent.astream(
                graph_input,
                config=runnable_config,
                stream_mode=lg_modes,
                subgraphs=stream_subgraphs,
            ):
                if record.abort_event.is_set():
                    logger.info("Run %s abort requested — stopping", run_id)
                    break

                mode, chunk = _unpack_stream_item(item, lg_modes, stream_subgraphs)
                if mode is None:
                    continue

                sse_event = _lg_mode_to_sse_event(mode)
                await bridge.publish(run_id, sse_event, serialize(chunk, mode=mode))

        # 8. Final status
        if record.abort_event.is_set():
            action = record.abort_action
            if action == "rollback":
                await run_manager.set_status(run_id, RunStatus.error, error="Rolled back by user")
                try:
                    await _rollback_to_pre_run_checkpoint(
                        checkpointer=checkpointer,
                        thread_id=thread_id,
                        run_id=run_id,
                        pre_run_checkpoint_id=pre_run_checkpoint_id,
                        pre_run_snapshot=pre_run_snapshot,
                        snapshot_capture_failed=snapshot_capture_failed,
                    )
                    logger.info("Run %s rolled back to pre-run checkpoint %s", run_id, pre_run_checkpoint_id)
                except Exception:
                    logger.warning("Failed to rollback checkpoint for run %s", run_id, exc_info=True)
            else:
                await run_manager.set_status(run_id, RunStatus.interrupted)
        else:
            await run_manager.set_status(run_id, RunStatus.success)

    except asyncio.CancelledError:
        # Deliberately NOT re-raised: task cancellation is the normal abort
        # path here (RunManager.cancel calls task.cancel()), and the finally
        # block below must still publish the end sentinel.
        action = record.abort_action
        if action == "rollback":
            await run_manager.set_status(run_id, RunStatus.error, error="Rolled back by user")
            try:
                await _rollback_to_pre_run_checkpoint(
                    checkpointer=checkpointer,
                    thread_id=thread_id,
                    run_id=run_id,
                    pre_run_checkpoint_id=pre_run_checkpoint_id,
                    pre_run_snapshot=pre_run_snapshot,
                    snapshot_capture_failed=snapshot_capture_failed,
                )
                logger.info("Run %s was cancelled and rolled back", run_id)
            except Exception:
                logger.warning("Run %s cancellation rollback failed", run_id, exc_info=True)
        else:
            await run_manager.set_status(run_id, RunStatus.interrupted)
            logger.info("Run %s was cancelled", run_id)

    except Exception as exc:
        error_msg = f"{exc}"
        logger.exception("Run %s failed: %s", run_id, error_msg)
        await run_manager.set_status(run_id, RunStatus.error, error=error_msg)
        await bridge.publish(
            run_id,
            "error",
            {
                "message": error_msg,
                "name": type(exc).__name__,
            },
        )

    finally:
        await bridge.publish_end(run_id)
        # NOTE(review): the created task's reference is not retained; per
        # asyncio docs it could be garbage-collected before running —
        # consider keeping a reference (e.g. a module-level set). Confirm
        # whether bridge.cleanup tolerates that.
        asyncio.create_task(bridge.cleanup(run_id, delay=60))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _call_checkpointer_method(checkpointer: Any, async_name: str, sync_name: str, *args: Any, **kwargs: Any) -> Any:
|
||||
"""Call a checkpointer method, supporting async and sync variants."""
|
||||
method = getattr(checkpointer, async_name, None) or getattr(checkpointer, sync_name, None)
|
||||
if method is None:
|
||||
raise AttributeError(f"Missing checkpointer method: {async_name}/{sync_name}")
|
||||
result = method(*args, **kwargs)
|
||||
if inspect.isawaitable(result):
|
||||
return await result
|
||||
return result
|
||||
|
||||
|
||||
async def _rollback_to_pre_run_checkpoint(
    *,
    checkpointer: Any,
    thread_id: str,
    run_id: str,
    pre_run_checkpoint_id: str | None,
    pre_run_snapshot: dict[str, Any] | None,
    snapshot_capture_failed: bool,
) -> None:
    """Restore thread state to the checkpoint snapshot captured before run start.

    Args:
        checkpointer: LangGraph checkpointer (async or sync method variants);
            ``None`` makes the whole rollback a logged no-op.
        thread_id: Thread whose state is being restored.
        run_id: Run being rolled back (used for logging and error messages).
        pre_run_checkpoint_id: Checkpoint id captured before the run, if any.
        pre_run_snapshot: Deep-copied ``checkpoint`` / ``metadata`` /
            ``pending_writes`` captured before the run; ``None`` means the
            thread had no checkpoint, so rollback wipes the thread.
        snapshot_capture_failed: True when the pre-run capture raised; a
            restore would then write garbage, so rollback is skipped.

    Raises:
        RuntimeError: when the checkpointer returns malformed data while
            restoring (surfaced to the caller's rollback error handling).
    """
    if checkpointer is None:
        logger.info("Run %s rollback requested but no checkpointer is configured", run_id)
        return

    if snapshot_capture_failed:
        logger.warning("Run %s rollback skipped: pre-run checkpoint snapshot capture failed", run_id)
        return

    # No pre-run checkpoint at all: the thread was brand new, so "rollback"
    # means deleting the thread entirely.
    if pre_run_snapshot is None:
        await _call_checkpointer_method(checkpointer, "adelete_thread", "delete_thread", thread_id)
        logger.info("Run %s rollback reset thread %s to empty state", run_id, thread_id)
        return

    checkpoint_to_restore = None
    metadata_to_restore: dict[str, Any] = {}
    checkpoint_ns = ""
    checkpoint = pre_run_snapshot.get("checkpoint")
    if not isinstance(checkpoint, dict):
        logger.warning("Run %s rollback skipped: invalid pre-run checkpoint snapshot", run_id)
        return
    checkpoint_to_restore = checkpoint
    # Backfill the checkpoint id from the separately-captured value if the
    # snapshot itself lacks one; without an id the restore cannot proceed.
    if checkpoint_to_restore.get("id") is None and pre_run_checkpoint_id is not None:
        checkpoint_to_restore = {**checkpoint_to_restore, "id": pre_run_checkpoint_id}
    if checkpoint_to_restore.get("id") is None:
        logger.warning("Run %s rollback skipped: pre-run checkpoint has no checkpoint id", run_id)
        return
    metadata = pre_run_snapshot.get("metadata", {})
    metadata_to_restore = metadata if isinstance(metadata, dict) else {}
    raw_checkpoint_ns = pre_run_snapshot.get("checkpoint_ns")
    checkpoint_ns = raw_checkpoint_ns if isinstance(raw_checkpoint_ns, str) else ""

    channel_versions = checkpoint_to_restore.get("channel_versions")
    new_versions = dict(channel_versions) if isinstance(channel_versions, dict) else {}

    # Write the snapshot back as a fresh checkpoint via aput/put.
    restore_config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": checkpoint_ns}}
    restored_config = await _call_checkpointer_method(
        checkpointer,
        "aput",
        "put",
        restore_config,
        checkpoint_to_restore,
        metadata_to_restore if isinstance(metadata_to_restore, dict) else {},
        new_versions,
    )
    # Validate the returned config before trusting it for put_writes below.
    if not isinstance(restored_config, dict):
        raise RuntimeError(f"Run {run_id} rollback restore returned invalid config: expected dict")
    restored_configurable = restored_config.get("configurable", {})
    if not isinstance(restored_configurable, dict):
        raise RuntimeError(f"Run {run_id} rollback restore returned invalid config payload")
    restored_checkpoint_id = restored_configurable.get("checkpoint_id")
    if not restored_checkpoint_id:
        raise RuntimeError(f"Run {run_id} rollback restore did not return checkpoint_id")

    pending_writes = pre_run_snapshot.get("pending_writes", [])
    if not pending_writes:
        return

    # Re-attach the pending writes that existed before the run, grouped by
    # producing task, since put_writes is called once per task below.
    writes_by_task: dict[str, list[tuple[str, Any]]] = {}
    for item in pending_writes:
        if not isinstance(item, (tuple, list)) or len(item) != 3:
            raise RuntimeError(f"Run {run_id} rollback failed: pending_write is not a 3-tuple: {item!r}")
        task_id, channel, value = item
        if not isinstance(channel, str):
            raise RuntimeError(f"Run {run_id} rollback failed: pending_write has non-string channel: task_id={task_id!r}, channel={channel!r}")
        writes_by_task.setdefault(str(task_id), []).append((channel, value))

    for task_id, writes in writes_by_task.items():
        await _call_checkpointer_method(
            checkpointer,
            "aput_writes",
            "put_writes",
            restored_config,
            writes,
            task_id=task_id,
        )
|
||||
|
||||
|
||||
def _lg_mode_to_sse_event(mode: str) -> str:
|
||||
"""Map LangGraph internal stream_mode name to SSE event name.
|
||||
|
||||
LangGraph's ``astream(stream_mode="messages")`` produces message
|
||||
tuples. The SSE protocol calls this ``messages-tuple`` when the
|
||||
client explicitly requests it, but the default SSE event name used
|
||||
by LangGraph Platform is simply ``"messages"``.
|
||||
"""
|
||||
# All LG modes map 1:1 to SSE event names — "messages" stays "messages"
|
||||
return mode
|
||||
|
||||
|
||||
def _unpack_stream_item(
|
||||
item: Any,
|
||||
lg_modes: list[str],
|
||||
stream_subgraphs: bool,
|
||||
) -> tuple[str | None, Any]:
|
||||
"""Unpack a multi-mode or subgraph stream item into (mode, chunk).
|
||||
|
||||
Returns ``(None, None)`` if the item cannot be parsed.
|
||||
"""
|
||||
if stream_subgraphs:
|
||||
if isinstance(item, tuple) and len(item) == 3:
|
||||
_ns, mode, chunk = item
|
||||
return str(mode), chunk
|
||||
if isinstance(item, tuple) and len(item) == 2:
|
||||
mode, chunk = item
|
||||
return str(mode), chunk
|
||||
return None, None
|
||||
|
||||
if isinstance(item, tuple) and len(item) == 2:
|
||||
mode, chunk = item
|
||||
return str(mode), chunk
|
||||
|
||||
# Fallback: single-element output from first mode
|
||||
return lg_modes[0] if lg_modes else None, item
|
||||
@@ -0,0 +1,78 @@
|
||||
"""Canonical serialization for LangChain / LangGraph objects.
|
||||
|
||||
Provides a single source of truth for converting LangChain message
|
||||
objects, Pydantic models, and LangGraph state dicts into plain
|
||||
JSON-serialisable Python structures.
|
||||
|
||||
Consumers: ``deerflow.runtime.runs.worker`` (SSE publishing) and
|
||||
``app.gateway.routers.threads`` (REST responses).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def serialize_lc_object(obj: Any) -> Any:
    """Recursively serialize a LangChain object to a JSON-serialisable structure.

    Handling order:
    1. ``None`` and JSON scalars pass through unchanged.
    2. Dicts are rebuilt with recursively serialized values.
    3. Lists, tuples, sets, and frozensets become lists of serialized items
       (JSON has no tuple/set types).
    4. Pydantic v2 (``model_dump``) then v1 (``dict``) models are dumped;
       a failing dump falls through to the next strategy.
    5. Anything else is stringified as a last resort.

    Fix: sets/frozensets previously fell through to ``str(obj)`` and were
    emitted as strings like ``"{'x'}"``; they are now serialized as lists.
    """
    if obj is None:
        return None
    if isinstance(obj, (str, int, float, bool)):
        return obj
    if isinstance(obj, dict):
        return {k: serialize_lc_object(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set, frozenset)):
        return [serialize_lc_object(item) for item in obj]
    # Pydantic v2
    if hasattr(obj, "model_dump"):
        try:
            return obj.model_dump()
        except Exception:
            pass
    # Pydantic v1 / older objects
    if hasattr(obj, "dict"):
        try:
            return obj.dict()
        except Exception:
            pass
    # Last resort
    try:
        return str(obj)
    except Exception:
        return repr(obj)
|
||||
|
||||
|
||||
def serialize_channel_values(channel_values: dict[str, Any]) -> dict[str, Any]:
    """Serialize channel values, stripping internal LangGraph keys.

    Internal keys (``__pregel_*`` and ``__interrupt__``) are dropped to
    match what the LangGraph Platform API returns; every remaining value
    is passed through :func:`serialize_lc_object`.
    """

    def _is_internal(key: str) -> bool:
        # Internal bookkeeping channels never leave the gateway.
        return key.startswith("__pregel_") or key == "__interrupt__"

    return {
        key: serialize_lc_object(value)
        for key, value in channel_values.items()
        if not _is_internal(key)
    }
|
||||
|
||||
|
||||
def serialize_messages_tuple(obj: Any) -> Any:
    """Serialize a messages-mode ``(chunk, metadata)`` pair.

    The message chunk is recursively serialized; metadata passes through
    when it is already a dict and is replaced with ``{}`` otherwise.
    Non-pair inputs fall back to plain recursive serialization.
    """
    if not (isinstance(obj, tuple) and len(obj) == 2):
        return serialize_lc_object(obj)
    chunk, metadata = obj
    safe_metadata = metadata if isinstance(metadata, dict) else {}
    return [serialize_lc_object(chunk), safe_metadata]
|
||||
|
||||
|
||||
def serialize(obj: Any, *, mode: str = "") -> Any:
    """Serialize LangChain objects with mode-specific handling.

    * ``"messages"`` — *obj* is a ``(message_chunk, metadata_dict)`` pair
    * ``"values"`` — *obj* is the full state dict; ``__pregel_*`` keys stripped
    * anything else — generic recursive ``model_dump()`` / ``dict()`` fallback
    """
    if mode == "messages":
        return serialize_messages_tuple(obj)
    if mode == "values" and isinstance(obj, dict):
        return serialize_channel_values(obj)
    # Non-dict "values" payloads and every other mode share the generic path.
    return serialize_lc_object(obj)
|
||||
@@ -0,0 +1,31 @@
|
||||
"""Store provider for the DeerFlow runtime.
|
||||
|
||||
Re-exports the public API of both the async provider (for long-running
|
||||
servers) and the sync provider (for CLI tools and the embedded client).
|
||||
|
||||
Async usage (FastAPI lifespan)::
|
||||
|
||||
from deerflow.runtime.store import make_store
|
||||
|
||||
async with make_store() as store:
|
||||
app.state.store = store
|
||||
|
||||
Sync usage (CLI / DeerFlowClient)::
|
||||
|
||||
from deerflow.runtime.store import get_store, store_context
|
||||
|
||||
store = get_store() # singleton
|
||||
with store_context() as store: ... # one-shot
|
||||
"""
|
||||
|
||||
from .async_provider import make_store
|
||||
from .provider import get_store, reset_store, store_context
|
||||
|
||||
__all__ = [
|
||||
# async
|
||||
"make_store",
|
||||
# sync
|
||||
"get_store",
|
||||
"reset_store",
|
||||
"store_context",
|
||||
]
|
||||
@@ -0,0 +1,28 @@
|
||||
"""Shared SQLite connection utilities for store and checkpointer providers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pathlib
|
||||
|
||||
from deerflow.config.paths import resolve_path
|
||||
|
||||
|
||||
def resolve_sqlite_conn_str(raw: str) -> str:
    """Return a SQLite connection string ready for store/checkpointer backends.

    ``":memory:"`` and ``file:`` URIs are SQLite-native specifiers rather
    than filesystem paths, so they pass through untouched; every other
    value is treated as a path and resolved to an absolute string via
    :func:`resolve_path`.
    """
    is_sqlite_special = raw == ":memory:" or raw.startswith("file:")
    if is_sqlite_special:
        return raw
    return str(resolve_path(raw))
|
||||
|
||||
|
||||
def ensure_sqlite_parent_dir(conn_str: str) -> None:
    """Create the parent directory for a SQLite filesystem path.

    No-op for in-memory databases (``":memory:"``) and ``file:`` URIs,
    which do not correspond to a plain filesystem path.
    """
    if conn_str == ":memory:" or conn_str.startswith("file:"):
        return
    parent = pathlib.Path(conn_str).parent
    parent.mkdir(parents=True, exist_ok=True)
|
||||
@@ -0,0 +1,113 @@
|
||||
"""Async Store factory — backend mirrors the configured checkpointer.
|
||||
|
||||
The store and checkpointer share the same ``checkpointer`` section in
|
||||
*config.yaml* so they always use the same persistence backend:
|
||||
|
||||
- ``type: memory`` → :class:`langgraph.store.memory.InMemoryStore`
|
||||
- ``type: sqlite`` → :class:`langgraph.store.sqlite.aio.AsyncSqliteStore`
|
||||
- ``type: postgres`` → :class:`langgraph.store.postgres.aio.AsyncPostgresStore`
|
||||
|
||||
Usage (e.g. FastAPI lifespan)::
|
||||
|
||||
from deerflow.runtime.store import make_store
|
||||
|
||||
async with make_store() as store:
|
||||
app.state.store = store
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from langgraph.store.base import BaseStore
|
||||
|
||||
from deerflow.config.app_config import get_app_config
|
||||
from deerflow.runtime.store.provider import POSTGRES_CONN_REQUIRED, POSTGRES_STORE_INSTALL, SQLITE_STORE_INSTALL, ensure_sqlite_parent_dir, resolve_sqlite_conn_str
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal backend factory
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@contextlib.asynccontextmanager
async def _async_store(config) -> AsyncIterator[BaseStore]:
    """Construct the async Store backend selected by *config* and tear it down on exit.

    ``config`` is a :class:`deerflow.config.checkpointer_config.CheckpointerConfig`
    instance — the same object used by the checkpointer factory, so store and
    checkpointer always agree on the persistence backend.
    """
    backend = config.type

    if backend == "memory":
        from langgraph.store.memory import InMemoryStore

        logger.info("Store: using InMemoryStore (in-process, not persistent)")
        yield InMemoryStore()
    elif backend == "sqlite":
        try:
            from langgraph.store.sqlite.aio import AsyncSqliteStore
        except ImportError as exc:
            raise ImportError(SQLITE_STORE_INSTALL) from exc

        conn_str = resolve_sqlite_conn_str(config.connection_string or "store.db")
        ensure_sqlite_parent_dir(conn_str)

        async with AsyncSqliteStore.from_conn_string(conn_str) as store:
            # setup() creates the schema on first use; idempotent afterwards.
            await store.setup()
            logger.info("Store: using AsyncSqliteStore (%s)", conn_str)
            yield store
    elif backend == "postgres":
        try:
            from langgraph.store.postgres.aio import AsyncPostgresStore  # type: ignore[import]
        except ImportError as exc:
            raise ImportError(POSTGRES_STORE_INSTALL) from exc

        if not config.connection_string:
            raise ValueError(POSTGRES_CONN_REQUIRED)

        async with AsyncPostgresStore.from_conn_string(config.connection_string) as store:
            await store.setup()
            logger.info("Store: using AsyncPostgresStore")
            yield store
    else:
        raise ValueError(f"Unknown store backend type: {config.type!r}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public async context manager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@contextlib.asynccontextmanager
async def make_store() -> AsyncIterator[BaseStore]:
    """Yield a Store whose backend matches the configured checkpointer.

    Reads the same ``checkpointer`` section of *config.yaml* consumed by
    :func:`deerflow.agents.checkpointer.async_provider.make_checkpointer`, so
    both singletons always use the same persistence technology::

        async with make_store() as store:
            app.state.store = store

    Yields an :class:`~langgraph.store.memory.InMemoryStore` when no
    ``checkpointer`` section is configured (emits a WARNING in that case).
    """
    app_config = get_app_config()
    checkpointer_cfg = app_config.checkpointer

    if checkpointer_cfg is None:
        from langgraph.store.memory import InMemoryStore

        logger.warning("No 'checkpointer' section in config.yaml — using InMemoryStore for the store. Thread list will be lost on server restart. Configure a sqlite or postgres backend for persistence.")
        yield InMemoryStore()
    else:
        async with _async_store(checkpointer_cfg) as store:
            yield store
|
||||
@@ -0,0 +1,188 @@
|
||||
"""Sync Store factory.
|
||||
|
||||
Provides a **sync singleton** and a **sync context manager** for CLI tools
|
||||
and the embedded :class:`~deerflow.client.DeerFlowClient`.
|
||||
|
||||
The backend mirrors the configured checkpointer so that both always use the
|
||||
same persistence technology. Supported backends: memory, sqlite, postgres.
|
||||
|
||||
Usage::
|
||||
|
||||
from deerflow.runtime.store.provider import get_store, store_context
|
||||
|
||||
# Singleton — reused across calls, closed on process exit
|
||||
store = get_store()
|
||||
|
||||
# One-shot — fresh connection, closed on block exit
|
||||
with store_context() as store:
|
||||
store.put(("ns",), "key", {"value": 1})
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
from collections.abc import Iterator
|
||||
|
||||
from langgraph.store.base import BaseStore
|
||||
|
||||
from deerflow.config.app_config import get_app_config
|
||||
from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Error message constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Install hints raised as ImportError messages when an optional backend
# dependency is missing, plus the config-validation message for postgres.
# Also imported by the async factory (async_provider.py), so they stay in sync
# between the sync and async code paths.
SQLITE_STORE_INSTALL = "langgraph-checkpoint-sqlite is required for the SQLite store. Install it with: uv add langgraph-checkpoint-sqlite"
POSTGRES_STORE_INSTALL = "langgraph-checkpoint-postgres is required for the PostgreSQL store. Install it with: uv add langgraph-checkpoint-postgres psycopg[binary] psycopg-pool"
POSTGRES_CONN_REQUIRED = "checkpointer.connection_string is required for the postgres backend"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sync factory
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _sync_store_cm(config) -> Iterator[BaseStore]:
    """Create a sync Store for *config* and tear it down on block exit.

    ``config`` is a
    :class:`~deerflow.config.checkpointer_config.CheckpointerConfig` instance —
    the same object used by the checkpointer factory, keeping both backends
    aligned.
    """
    backend = config.type

    if backend == "memory":
        from langgraph.store.memory import InMemoryStore

        logger.info("Store: using InMemoryStore (in-process, not persistent)")
        yield InMemoryStore()
    elif backend == "sqlite":
        try:
            from langgraph.store.sqlite import SqliteStore
        except ImportError as exc:
            raise ImportError(SQLITE_STORE_INSTALL) from exc

        conn_str = resolve_sqlite_conn_str(config.connection_string or "store.db")
        ensure_sqlite_parent_dir(conn_str)

        with SqliteStore.from_conn_string(conn_str) as store:
            # setup() creates the schema on first use; idempotent afterwards.
            store.setup()
            logger.info("Store: using SqliteStore (%s)", conn_str)
            yield store
    elif backend == "postgres":
        try:
            from langgraph.store.postgres import PostgresStore  # type: ignore[import]
        except ImportError as exc:
            raise ImportError(POSTGRES_STORE_INSTALL) from exc

        if not config.connection_string:
            raise ValueError(POSTGRES_CONN_REQUIRED)

        with PostgresStore.from_conn_string(config.connection_string) as store:
            store.setup()
            logger.info("Store: using PostgresStore")
            yield store
    else:
        raise ValueError(f"Unknown store backend type: {config.type!r}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sync singleton
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Sync singleton state. `_store` caches the live Store instance; `_store_ctx`
# holds the open context manager whose __exit__ closes the backend connection.
_store: BaseStore | None = None
_store_ctx = None  # open context manager keeping the connection alive
|
||||
|
||||
|
||||
def get_store() -> BaseStore:
    """Return the global sync Store singleton, creating it on first call.

    Returns an :class:`~langgraph.store.memory.InMemoryStore` when no
    checkpointer is configured in *config.yaml* (emits a WARNING in that case).

    Raises:
        ImportError: If the required package for the configured backend is not installed.
        ValueError: If ``connection_string`` is missing for a backend that requires it.
    """
    global _store, _store_ctx

    # Fast path: singleton already built.
    if _store is not None:
        return _store

    # Lazily load app config, mirroring the checkpointer singleton pattern so
    # that tests that set the global checkpointer config explicitly remain isolated.
    # NOTE(review): `_app_config` is read as a snapshot at import time here —
    # it is only consulted once, immediately below, to decide whether the app
    # config has ever been loaded; verify this matches the checkpointer module.
    from deerflow.config.app_config import _app_config
    from deerflow.config.checkpointer_config import get_checkpointer_config

    config = get_checkpointer_config()

    if config is None and _app_config is None:
        # Best-effort bootstrap: loading config.yaml may populate the global
        # checkpointer config. A missing file is tolerated (falls through to
        # the InMemoryStore branch below).
        try:
            get_app_config()
        except FileNotFoundError:
            pass
        config = get_checkpointer_config()

    if config is None:
        from langgraph.store.memory import InMemoryStore

        logger.warning("No 'checkpointer' section in config.yaml — using InMemoryStore for the store. Thread list will be lost on server restart. Configure a sqlite or postgres backend for persistence.")
        _store = InMemoryStore()
        return _store

    # Enter the backend context manager by hand and keep it open for the life
    # of the process; reset_store() is responsible for calling __exit__.
    _store_ctx = _sync_store_cm(config)
    _store = _store_ctx.__enter__()
    return _store
|
||||
|
||||
|
||||
def reset_store() -> None:
    """Tear down the sync singleton so the next :func:`get_store` rebuilds it.

    Closes any open backend connection and clears the cached instance.
    Useful in tests or after a configuration change.
    """
    global _store, _store_ctx
    open_ctx = _store_ctx
    if open_ctx is not None:
        try:
            open_ctx.__exit__(None, None, None)
        except Exception:
            # Cleanup failures must not mask the reset itself.
            logger.warning("Error during store cleanup", exc_info=True)
        _store_ctx = None
    _store = None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sync context manager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@contextlib.contextmanager
def store_context() -> Iterator[BaseStore]:
    """Create a fresh Store for one ``with`` block and clean up on exit.

    Unlike :func:`get_store`, nothing is cached — every ``with`` block opens
    and closes its own connection, giving deterministic cleanup for CLI
    scripts and tests::

        with store_context() as store:
            store.put(("threads",), thread_id, {...})

    Yields an :class:`~langgraph.store.memory.InMemoryStore` when no
    checkpointer is configured in *config.yaml*.
    """
    checkpointer_cfg = get_app_config().checkpointer

    if checkpointer_cfg is None:
        from langgraph.store.memory import InMemoryStore

        logger.warning("No 'checkpointer' section in config.yaml — using InMemoryStore for the store. Thread list will be lost on server restart. Configure a sqlite or postgres backend for persistence.")
        yield InMemoryStore()
    else:
        with _sync_store_cm(checkpointer_cfg) as store:
            yield store
|
||||
@@ -0,0 +1,21 @@
|
||||
"""Stream bridge — decouples agent workers from SSE endpoints.
|
||||
|
||||
A ``StreamBridge`` sits between the background task that runs an agent
|
||||
(producer) and the HTTP endpoint that pushes Server-Sent Events to
|
||||
the client (consumer). This package provides an abstract protocol
|
||||
(:class:`StreamBridge`) plus a default in-memory implementation backed
|
||||
by :mod:`asyncio.Queue`.
|
||||
"""
|
||||
|
||||
from .async_provider import make_stream_bridge
|
||||
from .base import END_SENTINEL, HEARTBEAT_SENTINEL, StreamBridge, StreamEvent
|
||||
from .memory import MemoryStreamBridge
|
||||
|
||||
__all__ = [
|
||||
"END_SENTINEL",
|
||||
"HEARTBEAT_SENTINEL",
|
||||
"MemoryStreamBridge",
|
||||
"StreamBridge",
|
||||
"StreamEvent",
|
||||
"make_stream_bridge",
|
||||
]
|
||||
@@ -0,0 +1,52 @@
|
||||
"""Async stream bridge factory.
|
||||
|
||||
Provides an **async context manager** aligned with
|
||||
:func:`deerflow.agents.checkpointer.async_provider.make_checkpointer`.
|
||||
|
||||
Usage (e.g. FastAPI lifespan)::
|
||||
|
||||
from deerflow.runtime.stream_bridge import make_stream_bridge
|
||||
|
||||
async with make_stream_bridge() as bridge:
|
||||
app.state.stream_bridge = bridge
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from deerflow.config.stream_bridge_config import get_stream_bridge_config
|
||||
|
||||
from .base import StreamBridge
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@contextlib.asynccontextmanager
async def make_stream_bridge(config=None) -> AsyncIterator[StreamBridge]:
    """Async context manager that yields a :class:`StreamBridge`.

    Falls back to :class:`MemoryStreamBridge` when neither the *config*
    argument nor the global configuration provides a backend.
    """
    effective = config if config is not None else get_stream_bridge_config()

    if effective is None or effective.type == "memory":
        from deerflow.runtime.stream_bridge.memory import MemoryStreamBridge

        queue_maxsize = 256 if effective is None else effective.queue_maxsize
        bridge = MemoryStreamBridge(queue_maxsize=queue_maxsize)
        logger.info("Stream bridge initialised: memory (queue_maxsize=%d)", queue_maxsize)
        try:
            yield bridge
        finally:
            # Always release per-run state, even if the caller raised.
            await bridge.close()
    elif effective.type == "redis":
        raise NotImplementedError("Redis stream bridge planned for Phase 2")
    else:
        raise ValueError(f"Unknown stream bridge type: {effective.type!r}")
|
||||
@@ -0,0 +1,72 @@
|
||||
"""Abstract stream bridge protocol.
|
||||
|
||||
StreamBridge decouples agent workers (producers) from SSE endpoints
|
||||
(consumers), aligning with LangGraph Platform's Queue + StreamManager
|
||||
architecture.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
from collections.abc import AsyncIterator
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class StreamEvent:
    """Single immutable stream event.

    Attributes:
        id: Monotonically increasing event ID (used as SSE ``id:`` field,
            supports ``Last-Event-ID`` reconnection). The module's sentinel
            events use an empty string here.
        event: SSE event name, e.g. ``"metadata"``, ``"updates"``,
            ``"events"``, ``"error"``, ``"end"``.
        data: JSON-serialisable payload.
    """

    id: str
    event: str
    data: Any
|
||||
|
||||
|
||||
# Out-of-band marker events yielded by subscribe() implementations — a
# heartbeat when the stream is idle, and an end-of-stream marker after the
# producer calls publish_end(). Compared by identity (`is`), hence module-level
# singletons with an empty id.
HEARTBEAT_SENTINEL = StreamEvent(id="", event="__heartbeat__", data=None)
END_SENTINEL = StreamEvent(id="", event="__end__", data=None)
|
||||
|
||||
|
||||
class StreamBridge(abc.ABC):
    """Abstract base for stream bridges.

    Producers call :meth:`publish` / :meth:`publish_end`; consumers iterate
    :meth:`subscribe`. Implementations own event retention and transport.
    """

    @abc.abstractmethod
    async def publish(self, run_id: str, event: str, data: Any) -> None:
        """Enqueue a single event for *run_id* (producer side)."""

    @abc.abstractmethod
    async def publish_end(self, run_id: str) -> None:
        """Signal that no more events will be produced for *run_id*."""

    @abc.abstractmethod
    def subscribe(
        self,
        run_id: str,
        *,
        last_event_id: str | None = None,
        heartbeat_interval: float = 15.0,
    ) -> AsyncIterator[StreamEvent]:
        """Async iterator that yields events for *run_id* (consumer side).

        Declared as a plain ``def`` returning an async iterator; concrete
        implementations may be ``async def`` generators — the consumer-side
        calling convention is the same either way.

        Yields :data:`HEARTBEAT_SENTINEL` when no event arrives within
        *heartbeat_interval* seconds. Yields :data:`END_SENTINEL` once
        the producer calls :meth:`publish_end`.
        """

    @abc.abstractmethod
    async def cleanup(self, run_id: str, *, delay: float = 0) -> None:
        """Release resources associated with *run_id*.

        If *delay* > 0 the implementation should wait before releasing,
        giving late subscribers a chance to drain remaining events.
        """

    async def close(self) -> None:
        """Release backend resources. Default is a no-op."""
|
||||
@@ -0,0 +1,133 @@
|
||||
"""In-memory stream bridge backed by an in-process event log."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import AsyncIterator
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from .base import END_SENTINEL, HEARTBEAT_SENTINEL, StreamBridge, StreamEvent
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class _RunStream:
    """Mutable per-run state: retained event log plus wakeup signalling."""

    # Retained events, oldest first; events[0] sits at absolute offset `start_offset`.
    events: list[StreamEvent] = field(default_factory=list)
    # Guards events/ended/start_offset and wakes subscribers blocked in wait().
    condition: asyncio.Condition = field(default_factory=asyncio.Condition)
    # Set once the producer calls publish_end(); subscribers drain then stop.
    ended: bool = False
    # Absolute offset of events[0]; grows as old events are evicted.
    start_offset: int = 0
|
||||
|
||||
|
||||
class MemoryStreamBridge(StreamBridge):
    """Per-run in-memory event log implementation.

    Events are retained for a bounded window (``queue_maxsize`` entries) per
    run so late subscribers and reconnecting clients can replay buffered
    events from ``Last-Event-ID``. Single-process only: producers and
    consumers must share one event loop.
    """

    def __init__(self, *, queue_maxsize: int = 256) -> None:
        self._maxsize = queue_maxsize  # max retained events per run
        self._streams: dict[str, _RunStream] = {}  # run_id -> log + state
        self._counters: dict[str, int] = {}  # run_id -> events ever published

    # -- helpers ---------------------------------------------------------------

    def _get_or_create_stream(self, run_id: str) -> _RunStream:
        """Return the stream for *run_id*, creating empty state on first use."""
        if run_id not in self._streams:
            self._streams[run_id] = _RunStream()
            self._counters[run_id] = 0
        return self._streams[run_id]

    def _next_id(self, run_id: str) -> str:
        """Return the next event ID as ``"<unix-ms>-<seq>"``.

        The per-run sequence number keeps IDs unique and increasing within a
        single bridge instance even when several events share a millisecond.
        """
        self._counters[run_id] = self._counters.get(run_id, 0) + 1
        ts = int(time.time() * 1000)
        seq = self._counters[run_id] - 1
        return f"{ts}-{seq}"

    def _resolve_start_offset(self, stream: _RunStream, last_event_id: str | None) -> int:
        """Map ``Last-Event-ID`` to the absolute offset of the next event to send.

        Falls back to the earliest retained event when the ID is absent or has
        been evicted from the bounded buffer.
        """
        if last_event_id is None:
            return stream.start_offset

        for index, entry in enumerate(stream.events):
            if entry.id == last_event_id:
                # Resume just past the last event the client already received.
                return stream.start_offset + index + 1

        if stream.events:
            logger.warning(
                "last_event_id=%s not found in retained buffer; replaying from earliest retained event",
                last_event_id,
            )
        return stream.start_offset

    # -- StreamBridge API ------------------------------------------------------

    async def publish(self, run_id: str, event: str, data: Any) -> None:
        """Append one event to the run's log and wake all waiting subscribers."""
        stream = self._get_or_create_stream(run_id)
        entry = StreamEvent(id=self._next_id(run_id), event=event, data=data)
        async with stream.condition:
            stream.events.append(entry)
            if len(stream.events) > self._maxsize:
                # Enforce the retention cap; advance start_offset so the
                # absolute offsets held by subscribers stay meaningful.
                overflow = len(stream.events) - self._maxsize
                del stream.events[:overflow]
                stream.start_offset += overflow
            stream.condition.notify_all()

    async def publish_end(self, run_id: str) -> None:
        """Mark the run's stream as complete and wake all waiting subscribers."""
        stream = self._get_or_create_stream(run_id)
        async with stream.condition:
            stream.ended = True
            stream.condition.notify_all()

    async def subscribe(
        self,
        run_id: str,
        *,
        last_event_id: str | None = None,
        heartbeat_interval: float = 15.0,
    ) -> AsyncIterator[StreamEvent]:
        """Yield events for *run_id*, heartbeating while the log is idle.

        Replays retained events after *last_event_id* first, then follows the
        live log. Yields :data:`HEARTBEAT_SENTINEL` whenever no event arrives
        within *heartbeat_interval* seconds, and finishes with
        :data:`END_SENTINEL` once the producer has called :meth:`publish_end`
        and the buffer is drained.
        """
        stream = self._get_or_create_stream(run_id)
        async with stream.condition:
            next_offset = self._resolve_start_offset(stream, last_event_id)

        while True:
            async with stream.condition:
                if next_offset < stream.start_offset:
                    # The retention cap evicted events this subscriber never saw.
                    logger.warning(
                        "subscriber for run %s fell behind retained buffer; resuming from offset %s",
                        run_id,
                        stream.start_offset,
                    )
                    next_offset = stream.start_offset

                local_index = next_offset - stream.start_offset
                if 0 <= local_index < len(stream.events):
                    entry = stream.events[local_index]
                    next_offset += 1
                elif stream.ended:
                    entry = END_SENTINEL
                else:
                    try:
                        # wait() releases the lock until publish()/publish_end()
                        # notifies, re-acquiring it before returning.
                        await asyncio.wait_for(stream.condition.wait(), timeout=heartbeat_interval)
                    except (TimeoutError, asyncio.TimeoutError):
                        # asyncio.TimeoutError only became an alias of the
                        # builtin TimeoutError in Python 3.11; catching both
                        # keeps the heartbeat from killing the subscription on
                        # older interpreters.
                        entry = HEARTBEAT_SENTINEL
                    else:
                        continue

            # Yield outside the lock so a slow consumer never blocks producers.
            if entry is END_SENTINEL:
                yield END_SENTINEL
                return
            yield entry

    async def cleanup(self, run_id: str, *, delay: float = 0) -> None:
        """Drop all state for *run_id*, optionally after *delay* seconds.

        NOTE(review): a subscriber already blocked on the removed stream keeps
        the stale ``_RunStream`` alive and heartbeats until its consumer
        disconnects — confirm callers always ``publish_end`` before cleanup.
        """
        if delay > 0:
            await asyncio.sleep(delay)
        self._streams.pop(run_id, None)
        self._counters.pop(run_id, None)

    async def close(self) -> None:
        """Drop all per-run state (streams and ID counters)."""
        self._streams.clear()
        self._counters.clear()
|
||||
Reference in New Issue
Block a user