Files
DATA 6de0bf9f5b Initial commit: hardened DeerFlow factory
Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection
hardening:

- New deerflow.security package: content_delimiter, html_cleaner,
  sanitizer (8 layers — invisible chars, control chars, symbols, NFC,
  PUA, tag chars, horizontal whitespace collapse with newline/tab
  preservation, length cap)
- New deerflow.community.searx package: web_search, web_fetch,
  image_search backed by a private SearX instance, every external
  string sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>>
  delimiters
- All native community web providers (ddg_search, tavily, exa,
  firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail
  stubs that raise NativeWebToolDisabledError at import time, so a
  misconfigured tool.use path fails loud rather than silently falling
  back to unsanitized output
- Native client back-doors (jina_client.py, infoquest_client.py)
  stubbed too
- Native-tool tests quarantined under tests/_disabled_native/
  (collect_ignore_glob via local conftest.py)
- Sanitizer Layer 7 fix: only collapse horizontal whitespace, preserve
  newlines and tabs so list/table structure survives
- Hardened runtime config.yaml references only the searx-backed tools
- Factory overlay (backend/) kept in sync with deer-flow tree as a
  reference / source

See HARDENING.md for the full audit trail and verification steps.
2026-04-12 14:23:57 +02:00

84 lines
2.8 KiB
Python

import logging
import re
import subprocess
from urllib.parse import urljoin
from markdownify import markdownify as md
from readabilipy import simple_json_from_html_string
logger = logging.getLogger(__name__)
class Article:
    """A fetched web page: a title plus raw HTML, renderable as markdown
    or as multimodal chat-message parts.

    The page URL is used only to resolve relative image links in
    :meth:`to_message`.
    """

    # Absolute URL of the page; base for resolving relative image srcs.
    url: str

    def __init__(self, title: str, html_content: str, url: str = ""):
        """Store the page title, raw HTML, and (optionally) its URL.

        Fix: ``url`` was previously only a class-level annotation and never
        assigned, so ``to_message()`` raised AttributeError unless callers
        set ``article.url`` themselves. Defaulting to "" is safe because
        ``urljoin("", x) == x``, and callers that assign the attribute
        afterwards are unaffected.
        """
        self.title = title
        self.html_content = html_content
        self.url = url

    def to_markdown(self, including_title: bool = True) -> str:
        """Render the article as markdown, optionally prefixed with an H1 title."""
        markdown = ""
        if including_title:
            markdown += f"# {self.title}\n\n"
        if self.html_content is None or not str(self.html_content).strip():
            markdown += "*No content available*\n"
        else:
            markdown += md(self.html_content)
        return markdown

    def to_message(self) -> list[dict]:
        """Split the markdown into alternating text / image_url message parts.

        Relative image URLs are resolved against ``self.url``. Always returns
        at least one part: a fallback text entry when nothing survives.
        """
        image_pattern = r"!\[.*?\]\((.*?)\)"
        content: list[dict[str, str]] = []
        markdown = self.to_markdown()
        if not markdown.strip():
            return [{"type": "text", "text": "No content available"}]
        # re.split with one capture group yields [text, url, text, url, ...]:
        # odd indices are the captured image URLs.
        parts = re.split(image_pattern, markdown)
        for i, part in enumerate(parts):
            if i % 2 == 1:
                image_url = urljoin(self.url, part.strip())
                content.append({"type": "image_url", "image_url": {"url": image_url}})
            else:
                text_part = part.strip()
                if text_part:
                    content.append({"type": "text", "text": text_part})
        # Guard: an all-whitespace split result still gets a fallback message.
        if not content:
            content = [{"type": "text", "text": "No content available"}]
        return content
class ReadabilityExtractor:
    """Turn raw HTML into an :class:`Article` via readabilipy.

    Prefers the Node Readability.js extractor and transparently falls back
    to readabilipy's pure-Python parser when the Node toolchain is absent
    or crashes.
    """

    def extract_article(self, html: str) -> Article:
        """Parse *html* and return an Article; Node failures never propagate."""
        try:
            parsed = simple_json_from_html_string(html, use_readability=True)
        except (subprocess.CalledProcessError, FileNotFoundError) as exc:
            # Surface any captured Node stderr in the warning, decoded if raw bytes.
            raw_stderr = getattr(exc, "stderr", None)
            if isinstance(raw_stderr, bytes):
                raw_stderr = raw_stderr.decode(errors="replace")
            detail = ""
            if isinstance(raw_stderr, str) and raw_stderr.strip():
                detail = f"; stderr={raw_stderr.strip()}"
            logger.warning(
                "Readability.js extraction failed with %s%s; falling back to pure-Python extraction",
                type(exc).__name__,
                detail,
                exc_info=True,
            )
            # Second attempt: readabilipy's pure-Python path, no Node required.
            parsed = simple_json_from_html_string(html, use_readability=False)

        # Substitute placeholders for missing/blank fields so downstream
        # markdown rendering always has something to show.
        content = parsed.get("content")
        if not content or not str(content).strip():
            content = "No content could be extracted from this page"
        title = parsed.get("title")
        if not title or not str(title).strip():
            title = "Untitled"
        return Article(title=title, html_content=content)