Initial commit: hardened DeerFlow factory

Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection
hardening:

- New deerflow.security package: content_delimiter, html_cleaner,
  sanitizer (8 layers — invisible chars, control chars, symbols, NFC,
  PUA, tag chars, horizontal whitespace collapse with newline/tab
  preservation, length cap)
- New deerflow.community.searx package: web_search, web_fetch,
  image_search backed by a private SearX instance, every external
  string sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>>
  delimiters
- All native community web providers (ddg_search, tavily, exa,
  firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail
  stubs that raise NativeWebToolDisabledError at import time, so a
  misconfigured tool.use path fails loudly rather than silently falling
  back to unsanitized output (the stub pattern is sketched below)
- Native client back-doors (jina_client.py, infoquest_client.py)
  stubbed too
- Native-tool tests quarantined under tests/_disabled_native/
  (collect_ignore_glob via local conftest.py; a conftest sketch follows
  the commit message)
- Sanitizer Layer 7 fix: only collapse horizontal whitespace, preserve
  newlines and tabs so list/table structure survives
- Hardened runtime config.yaml references only the searx-backed tools
- Factory overlay (backend/) kept in sync with deer-flow tree as a
  reference / source
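
The stub pattern is roughly the following. This is a minimal sketch, not the
actual stub file from this commit (the stubs are not shown in this excerpt);
the module docstring, error message, and the location of the exception class
are assumptions.

"""ddg_search stub (sketch): the native provider is disabled in the hardened build."""


class NativeWebToolDisabledError(RuntimeError):
    """Raised when a disabled native web provider is imported."""


# Fail at import time so a misconfigured tool.use path errors out immediately
# instead of silently falling back to unsanitized output.
# (Sketch only: in the real tree the exception class presumably lives in a
# shared module rather than being redefined in every stub.)
raise NativeWebToolDisabledError(
    "ddg_search is disabled in the hardened factory; use the searx-backed "
    "web_search / web_fetch tools instead."
)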

See HARDENING.md for the full audit trail and verification steps.
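
The quarantine under tests/_disabled_native/ relies on pytest's
collect_ignore_glob hook. A minimal sketch of the local conftest.py follows
(the exact glob pattern is an assumption; the real file is not shown in this
excerpt):

"""tests/conftest.py (sketch): keep quarantined native-tool tests out of collection."""

# pytest reads this module-level list during collection and skips matching
# paths relative to this conftest, so the disabled native-tool tests never
# import the hard-fail stubs.
collect_ignore_glob = ["_disabled_native/*"]
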
commit 6de0bf9f5b
2026-04-12 14:23:57 +02:00
889 changed files with 173052 additions and 0 deletions

@@ -0,0 +1 @@
"""Hardened SearX provider for DeerFlow."""

@@ -0,0 +1,98 @@
"""Hardened SearX web search and fetch tools."""
import json
import os
from urllib.parse import quote
import httpx
from langchain.tools import tool
from deerflow.config import get_app_config
from deerflow.security.content_delimiter import wrap_untrusted_content
from deerflow.security.sanitizer import sanitizer
from deerflow.security.html_cleaner import extract_secure_text
def _get_searx_config() -> dict:
"""Get SearX configuration from app config."""
config = get_app_config().get_tool_config("web_search")
return {
"url": config.model_extra.get("searx_url", "http://localhost:8888"),
"max_results": config.model_extra.get("max_results", 10),
}
@tool("web_search", parse_docstring=True)
def web_search_tool(query: str, max_results: int = 10) -> str:
"""Search the web using hardened SearX instance.
All results are sanitized against prompt injection attacks.
Args:
query: Search keywords
max_results: Maximum results to return (default 10)
"""
cfg = _get_searx_config()
searx_url = cfg["url"]
# URL-safe encoding
encoded_query = quote(query)
try:
response = httpx.get(
f"{searx_url}/search",
params={
"q": encoded_query,
"format": "json",
"max_results": min(max_results, cfg["max_results"]),
},
timeout=30.0
)
response.raise_for_status()
data = response.json()
except Exception as e:
return wrap_untrusted_content({"error": f"Search failed: {e}"})
# Sanitize and limit results
results = []
for r in data.get("results", [])[:max_results]:
results.append({
"title": sanitizer.sanitize(r.get("title", "")),
"url": r.get("url", ""), # Keep URL intact
"content": sanitizer.sanitize(r.get("content", ""), max_length=500),
})
output = {
"query": query,
"total_results": len(results),
"results": results,
}
# Wrap with security delimiters
return wrap_untrusted_content(output)
@tool("web_fetch", parse_docstring=True)
async def web_fetch_tool(url: str, max_chars: int = 10000) -> str:
"""Fetch web page content with security hardening.
Dangerous HTML elements are stripped and content is sanitized.
Args:
url: URL to fetch
max_chars: Maximum characters to return (default 10000)
"""
try:
async with httpx.AsyncClient() as client:
response = await client.get(url, timeout=30.0)
response.raise_for_status()
html = response.text
except Exception as e:
return wrap_untrusted_content({"error": f"Fetch failed: {e}"})
# Extract text and sanitize
raw_text = extract_secure_text(html)
clean_text = sanitizer.sanitize(raw_text, max_length=max_chars)
# Wrap with security delimiters
return wrap_untrusted_content(clean_text)
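
For orientation, a hedged usage sketch (assumes a SearX instance is reachable
at the configured searx_url; the query string below is only an example):

# Illustrative invocation of the LangChain tool defined above.
delimited = web_search_tool.invoke({"query": "prompt injection defenses", "max_results": 5})
# The returned string is a JSON payload wrapped in
# <<<EXTERNAL_UNTRUSTED_CONTENT>>> ... <<<END_EXTERNAL_UNTRUSTED_CONTENT>>>
# delimiters, even on errors, which come back as {"error": ...} inside them.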

@@ -0,0 +1,44 @@
"""Content delimiter wrapper for safe LLM prompt integration."""
from typing import Union
import json
# OpenClaw-style delimiters
START_DELIMITER = "<<<EXTERNAL_UNTRUSTED_CONTENT>>>"
END_DELIMITER = "<<<END_EXTERNAL_UNTRUSTED_CONTENT>>>"
def wrap_untrusted_content(content: Union[str, dict, list]) -> str:
"""Wrap external content with safety delimiters.
This creates a semantic boundary between system instructions
and untrusted external data, helping prevent prompt injection.
Args:
content: Raw content (string, dict, or list)
Returns:
Delimited string for LLM consumption
"""
if isinstance(content, (dict, list)):
text = json.dumps(content, indent=2, ensure_ascii=False)
else:
text = str(content)
return f"{START_DELIMITER}\n{text}\n{END_DELIMITER}"
def unwrap_trusted_content(delimited: str) -> str:
"""Extract content from delimiters (for testing/debugging).
Args:
delimited: Content wrapped in delimiters
Returns:
Raw content string
"""
lines = delimited.split('\n')
if lines[0] == START_DELIMITER and lines[-1] == END_DELIMITER:
return '\n'.join(lines[1:-1])
return delimited
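
As a quick illustration of the boundary this produces (output shown for the
module above; the dict is only an example):

from deerflow.security.content_delimiter import wrap_untrusted_content

print(wrap_untrusted_content({"title": "Example"}))
# <<<EXTERNAL_UNTRUSTED_CONTENT>>>
# {
#   "title": "Example"
# }
# <<<END_EXTERNAL_UNTRUSTED_CONTENT>>>

Downstream prompts can then tell the model to treat anything between these
markers as data to analyze, never as instructions to follow.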

@@ -0,0 +1,63 @@
"""HTML-to-text extraction with security-focused stripping."""
from html.parser import HTMLParser
from typing import Optional
class SecureTextExtractor(HTMLParser):
"""Extract visible text while stripping potentially dangerous elements.
Based on OpenClaw's fetch.sh implementation.
"""
DANGEROUS_TAGS = {
'script', 'style', 'noscript',
'header', 'footer', 'nav', 'aside',
'iframe', 'object', 'embed', 'form',
}
def __init__(self):
super().__init__()
self.text = []
self.skip_depth = 0
def handle_starttag(self, tag, attrs):
if tag in self.DANGEROUS_TAGS:
self.skip_depth += 1
def handle_endtag(self, tag):
if tag in self.DANGEROUS_TAGS and self.skip_depth > 0:
self.skip_depth -= 1
def handle_data(self, data):
if self.skip_depth == 0:
self.text.append(data)
def get_text(self) -> str:
return ' '.join(self.text)
def extract_secure_text(html: str, max_chars: Optional[int] = None) -> str:
"""Extract clean text from HTML.
Args:
html: Raw HTML content
max_chars: Optional maximum length
Returns:
Clean text without dangerous elements
"""
extractor = SecureTextExtractor()
extractor.feed(html)
text = extractor.get_text()
# Collapse whitespace
import re
text = re.sub(r'[ \t]+', ' ', text)
text = re.sub(r'\n{3,}', '\n\n', text)
text = text.strip()
if max_chars and len(text) > max_chars:
text = text[:max_chars-3] + '...'
return text

@@ -0,0 +1,94 @@
"""Prompt injection hardening sanitizer based on OpenClaw patterns."""
import re
import unicodedata
from typing import Optional
class PromptInjectionSanitizer:
"""Sanitizes external content for safe LLM consumption."""
# Zero-width and invisible characters (OpenClaw pattern)
INVISIBLE_CHARS = [
'\u200b', '\u200c', '\u200d', '\u200e', '\u200f', # Zero-width spaces
'\u2060', '\u2061', '\u2062', '\u2063', '\u2064', # Word joiners
'\ufeff', '\ufffe', # BOM
'\u00ad', # Soft hyphen
'\u034f', # Combining grapheme
'\u061c', # Arabic letter mark
'\u115f', '\u1160', # Hangul fillers
'\u17b4', '\u17b5', # Khmer vowels
'\u180e', # Mongolian separator
'\u3164', # Hangul filler
'\uffa0', # Halfwidth Hangul
]
def sanitize(self, text: str, max_length: Optional[int] = None) -> str:
"""Apply all sanitization layers.
Args:
text: Raw text to sanitize
max_length: Optional maximum length (with ellipsis)
Returns:
Sanitized text safe for LLM prompts
"""
if not text:
return ''
# Layer 1: Remove invisible/zero-width characters
text = self._remove_invisible(text)
# Layer 2: Remove control characters (except \n, \t)
text = self._remove_control_chars(text)
# Layer 3: Remove symbols (So, Sk categories)
text = self._remove_symbols(text)
# Layer 4: Normalize Unicode (NFC)
text = unicodedata.normalize('NFC', text)
# Layer 5: Remove Private Use Area
text = self._remove_pua(text)
# Layer 6: Remove tag characters
text = self._remove_tag_chars(text)
# Layer 7: Collapse horizontal whitespace; preserve \n and \t so that
# list/table structure from web pages survives. Also collapse runs of
# 3+ blank lines down to a single blank line.
text = re.sub(r"[ \u00a0\u2000-\u200a\u202f\u205f\u3000]+", " ", text)
text = re.sub(r"\n{3,}", "\n\n", text)
text = text.strip()
# Layer 8: Length limiting
if max_length and len(text) > max_length:
text = text[:max_length-3] + '...'
return text
def _remove_invisible(self, text: str) -> str:
for char in self.INVISIBLE_CHARS:
text = text.replace(char, '')
return text
def _remove_control_chars(self, text: str) -> str:
return ''.join(c for c in text
if unicodedata.category(c) != 'Cc' or c in '\n\t')
def _remove_symbols(self, text: str) -> str:
return ''.join(c for c in text
if unicodedata.category(c) not in ('So', 'Sk'))
def _remove_pua(self, text: str) -> str:
return ''.join(c for c in text
if not (0xE000 <= ord(c) <= 0xF8FF
or 0xF0000 <= ord(c) <= 0x10FFFF))
def _remove_tag_chars(self, text: str) -> str:
return ''.join(c for c in text
if not (0xE0000 <= ord(c) <= 0xE007F))
# Global instance
sanitizer = PromptInjectionSanitizer()

@@ -0,0 +1,15 @@
"""Integration tests for hardened web tools."""
import pytest
from deerflow.community.searx.tools import web_search_tool
class TestHardenedSearxSearch:
"""Test hardened SearX search against prompt injection."""
def test_search_results_are_delimited(self):
"""Results must be wrapped in security delimiters."""
result = web_search_tool("test query")
assert "<<<EXTERNAL_UNTRUSTED_CONTENT>>>" in result
assert "<<<END_EXTERNAL_UNTRUSTED_CONTENT>>>" in result

@@ -0,0 +1,15 @@
"""Tests for HTML cleaner."""
import pytest
from deerflow.security.html_cleaner import extract_secure_text
def test_html_scripts_are_removed():
"""Script tags must be stripped."""
html = "<p>Hello</p><script>alert('xss')</script><p>World</p>"
result = extract_secure_text(html)
assert "script" not in result.lower()
assert "alert" not in result
assert "Hello" in result
assert "World" in result

@@ -0,0 +1,72 @@
"""Tests for prompt injection sanitizer."""
import pytest
from deerflow.security.sanitizer import PromptInjectionSanitizer
class TestPromptInjectionSanitizer:
"""Test cases based on OpenClaw patterns."""
def test_removes_zero_width_spaces(self):
"""Zero-width characters are common steganography vectors."""
sanitizer = PromptInjectionSanitizer()
text = "Hello\u200bWorld\u200c" # ZWSP and ZWNJ
result = sanitizer.sanitize(text)
assert "\u200b" not in result
assert "\u200c" not in result
assert result == "HelloWorld"
def test_removes_control_chars(self):
"""Control chars can disrupt prompt parsing."""
sanitizer = PromptInjectionSanitizer()
text = "Hello\x00World\x01Test"
result = sanitizer.sanitize(text)
assert "\x00" not in result
assert "\x01" not in result
assert "Hello" in result
def test_preserves_newlines_and_tabs(self):
"""Structural characters should be preserved."""
sanitizer = PromptInjectionSanitizer()
text = "Line1\nLine2\tTabbed"
result = sanitizer.sanitize(text)
assert "\n" in result
assert "\t" in result
def test_truncates_long_content(self):
"""Length limiting prevents context overflow."""
sanitizer = PromptInjectionSanitizer()
text = "A" * 1000
result = sanitizer.sanitize(text, max_length=100)
assert len(result) == 100
assert result.endswith("...")
def test_handles_pua_characters(self):
"""Private Use Area chars can encode hidden data."""
sanitizer = PromptInjectionSanitizer()
text = "Hello\uE000World" # PUA start
result = sanitizer.sanitize(text)
assert "\uE000" not in result
class TestContentDelimiter:
"""Test delimiter wrapping."""
def test_wraps_dict_content(self):
from deerflow.security.content_delimiter import wrap_untrusted_content
content = {"title": "Test", "url": "http://example.com"}
result = wrap_untrusted_content(content)
assert "<<<EXTERNAL_UNTRUSTED_CONTENT>>>" in result
assert "<<<END_EXTERNAL_UNTRUSTED_CONTENT>>>" in result
assert "Test" in result
def test_wraps_string_content(self):
from deerflow.security.content_delimiter import wrap_untrusted_content
content = "Raw text from web"
result = wrap_untrusted_content(content)
assert "<<<EXTERNAL_UNTRUSTED_CONTENT>>>" in result
assert "Raw text from web" in result