Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection hardening: - New deerflow.security package: content_delimiter, html_cleaner, sanitizer (8 layers — invisible chars, control chars, symbols, NFC, PUA, tag chars, horizontal whitespace collapse with newline/tab preservation, length cap) - New deerflow.community.searx package: web_search, web_fetch, image_search backed by a private SearX instance, every external string sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>> delimiters - All native community web providers (ddg_search, tavily, exa, firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail stubs that raise NativeWebToolDisabledError at import time, so a misconfigured tool.use path fails loud rather than silently falling back to unsanitized output - Native client back-doors (jina_client.py, infoquest_client.py) stubbed too - Native-tool tests quarantined under tests/_disabled_native/ (collect_ignore_glob via local conftest.py) - Sanitizer Layer 7 fix: only collapse horizontal whitespace, preserve newlines and tabs so list/table structure survives - Hardened runtime config.yaml references only the searx-backed tools - Factory overlay (backend/) kept in sync with deer-flow tree as a reference / source See HARDENING.md for the full audit trail and verification steps.
56 lines
1.8 KiB
Python
56 lines
1.8 KiB
Python
from typing import Annotated, NotRequired, TypedDict
|
|
|
|
from langchain.agents import AgentState
|
|
|
|
|
|
class SandboxState(TypedDict):
    """State describing the execution sandbox attached to a thread."""

    # Identifier of the provisioned sandbox; absent or None when no
    # sandbox has been created for the thread yet.
    sandbox_id: NotRequired[str | None]
|
|
|
|
|
|
class ThreadDataState(TypedDict):
    """Filesystem paths for a thread's working data.

    Every field is optional; absent or None presumably means that
    location has not been provisioned yet — confirm with the code that
    populates this state.
    """

    # Root working directory for the thread.
    workspace_path: NotRequired[str | None]
    # Directory for uploaded files (presumably user uploads — verify against caller).
    uploads_path: NotRequired[str | None]
    # Directory where generated outputs are written.
    outputs_path: NotRequired[str | None]
|
|
|
|
|
|
class ViewedImageData(TypedDict):
    """Payload for a single viewed image, keyed by image path in ThreadState."""

    # Base64-encoded image bytes.
    base64: str
    # MIME type of the image (e.g. "image/png") — assumed; confirm with producer.
    mime_type: str
|
|
|
|
|
|
def merge_artifacts(existing: list[str] | None, new: list[str] | None) -> list[str]:
|
|
"""Reducer for artifacts list - merges and deduplicates artifacts."""
|
|
if existing is None:
|
|
return new or []
|
|
if new is None:
|
|
return existing
|
|
# Use dict.fromkeys to deduplicate while preserving order
|
|
return list(dict.fromkeys(existing + new))
|
|
|
|
|
|
def merge_viewed_images(existing: dict[str, ViewedImageData] | None, new: dict[str, ViewedImageData] | None) -> dict[str, ViewedImageData]:
    """Reducer for viewed_images dict - merges image dictionaries.

    Combines the images already in state with those from the latest
    update; on key collisions the update wins.

    Special case: an explicitly empty update ({}) clears all existing
    images, which lets middlewares reset viewed_images after processing.
    """
    # Guard order matters: when both sides are None the first branch
    # must win so the reducer yields {} rather than None.
    if existing is None:
        return new or {}
    if new is None:
        return existing
    # Empty (but non-None) update is the "clear everything" signal.
    if not new:
        return {}
    # Shallow-merge; entries in `new` override same-key entries.
    merged = dict(existing)
    merged.update(new)
    return merged
|
|
|
|
|
|
class ThreadState(AgentState):
    """Agent state for a single conversation thread.

    Extends the base AgentState with sandbox/workspace bookkeeping and
    two reducer-managed collections (artifacts, viewed_images).
    """

    # Execution sandbox attached to this thread, if provisioned.
    sandbox: NotRequired[SandboxState | None]
    # Filesystem layout (workspace/uploads/outputs paths) for the thread.
    thread_data: NotRequired[ThreadDataState | None]
    # Human-readable thread title, if one has been generated.
    title: NotRequired[str | None]
    # Merged and deduplicated via merge_artifacts on each state update.
    artifacts: Annotated[list[str], merge_artifacts]
    # Current todo items; element structure not constrained here — TODO confirm shape.
    todos: NotRequired[list | None]
    # Uploaded-file records; dict schema is defined by the producer — verify against caller.
    uploaded_files: NotRequired[list[dict] | None]
    # image_path -> {base64, mime_type}; merged via merge_viewed_images
    # (an empty-dict update clears the mapping).
    viewed_images: Annotated[dict[str, ViewedImageData], merge_viewed_images]
|