Initial commit: hardened DeerFlow factory
Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection hardening:

- New deerflow.security package: content_delimiter, html_cleaner, sanitizer (8 layers: invisible chars, control chars, symbols, NFC normalization, PUA chars, tag chars, horizontal whitespace collapse with newline/tab preservation, length cap). A sketch of the whitespace and delimiter handling follows this list.
- New deerflow.community.searx package: web_search, web_fetch, image_search backed by a private SearX instance; every external string is sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>> delimiters.
- All native community web providers (ddg_search, tavily, exa, firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail stubs that raise NativeWebToolDisabledError at import time, so a misconfigured tool.use path fails loud rather than silently falling back to unsanitized output (stub pattern sketched below).
- Native client back-doors (jina_client.py, infoquest_client.py) stubbed as well.
- Native-tool tests quarantined under tests/_disabled_native/ (collect_ignore_glob via a local conftest.py).
- Sanitizer Layer 7 fix: collapse only horizontal whitespace, preserving newlines and tabs so list/table structure survives.
- The hardened runtime config.yaml references only the searx-backed tools.
- The factory overlay (backend/) is kept in sync with the deer-flow tree as a reference/source.

See HARDENING.md for the full audit trail and verification steps.
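As a rough illustration of the Layer 7 fix and the delimiter wrapping, here is a minimal sketch. The function names and the closing marker are assumptions for illustration, not code from the committed deerflow.security package, which implements eight layers with its own internals:

```python
import re

# Hypothetical sketch only: names and the closing delimiter are assumptions,
# not the committed deerflow.security code.
_HORIZONTAL_WS = re.compile(r"[^\S\n\t]+")  # any whitespace except newline and tab


def collapse_horizontal_whitespace(text: str) -> str:
    """Layer 7: collapse runs of horizontal whitespace to a single space,
    preserving newlines and tabs so list/table structure survives."""
    return _HORIZONTAL_WS.sub(" ", text)


def wrap_untrusted(text: str) -> str:
    """Delimit external content so prompts can tell it apart from instructions."""
    return (
        "<<<EXTERNAL_UNTRUSTED_CONTENT>>>\n"
        f"{collapse_horizontal_whitespace(text)}\n"
        "<<<END_EXTERNAL_UNTRUSTED_CONTENT>>>"  # closing marker name is assumed
    )
```

With this sketch, `"a \u00a0 b\nc\td"` becomes `"a b\nc\td"` before wrapping: the run of spaces and the non-breaking space collapse, while the newline and tab survive.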
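And a sketch of the hard-fail stub pattern for the disabled native providers. The module body shown is an assumption of shape, not the committed stub text; the exception is inlined to keep the sketch self-contained, though in the real tree it presumably lives in a shared module:

```python
# Hypothetical stub body (e.g. for ddg_search); the committed stubs may word
# things differently.
class NativeWebToolDisabledError(ImportError):
    """Importing a disabled native web tool is a hard error."""


raise NativeWebToolDisabledError(
    "ddg_search is disabled by the prompt-injection hardening; "
    "use the searx-backed web_search instead."
)
```

The test quarantine itself is standard pytest: a conftest.py next to the test tree sets `collect_ignore_glob` so the disabled suites are never collected:

```python
# tests/conftest.py
collect_ignore_glob = ["_disabled_native/*"]
```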
deer-flow/backend/packages/harness/deerflow/models/factory.py (new file)
@@ -0,0 +1,123 @@
import logging

from langchain.chat_models import BaseChatModel

from deerflow.config import get_app_config
from deerflow.reflection import resolve_class
from deerflow.tracing import build_tracing_callbacks

logger = logging.getLogger(__name__)


def _deep_merge_dicts(base: dict | None, override: dict) -> dict:
    """Recursively merge two dictionaries without mutating the inputs."""
    merged = dict(base or {})
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = _deep_merge_dicts(merged[key], value)
        else:
            merged[key] = value
    return merged
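
# Illustrative aside, not part of the vendored file: the merge recurses into
# nested dicts and copies at each merged level, so neither input is mutated:
#   _deep_merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}}) -> {"a": {"x": 1, "y": 2}}
#   _deep_merge_dicts(None, {"k": 1}) -> {"k": 1}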


def _vllm_disable_chat_template_kwargs(chat_template_kwargs: dict) -> dict:
    """Build the disable payload for vLLM/Qwen chat template kwargs."""
    disable_kwargs: dict[str, bool] = {}
    if "thinking" in chat_template_kwargs:
        disable_kwargs["thinking"] = False
    if "enable_thinking" in chat_template_kwargs:
        disable_kwargs["enable_thinking"] = False
    return disable_kwargs
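
# Illustrative aside, not part of the vendored file: only keys the user actually
# configured get flipped off, so nothing is injected for unrelated kwargs:
#   _vllm_disable_chat_template_kwargs({"enable_thinking": True}) -> {"enable_thinking": False}
#   _vllm_disable_chat_template_kwargs({"top_k": 20}) -> {}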


def create_chat_model(name: str | None = None, thinking_enabled: bool = False, **kwargs) -> BaseChatModel:
    """Create a chat model instance from the config.

    Args:
        name: The name of the model to create. If None, the first model in the config is used.
        thinking_enabled: Whether to apply the model's thinking-mode settings.
        **kwargs: Extra constructor arguments; these override settings from the config.

    Returns:
        A chat model instance.
    """
    config = get_app_config()
    if name is None:
        name = config.models[0].name
    model_config = config.get_model_config(name)
    if model_config is None:
        raise ValueError(f"Model {name} not found in config")
    model_class = resolve_class(model_config.use, BaseChatModel)
    model_settings_from_config = model_config.model_dump(
        exclude_none=True,
        exclude={
            "use",
            "name",
            "display_name",
            "description",
            "supports_thinking",
            "supports_reasoning_effort",
            "when_thinking_enabled",
            "when_thinking_disabled",
            "thinking",
            "supports_vision",
        },
    )
    # Compute effective when_thinking_enabled by merging in the `thinking` shortcut field.
    # The `thinking` shortcut is equivalent to setting when_thinking_enabled["thinking"].
    has_thinking_settings = (model_config.when_thinking_enabled is not None) or (model_config.thinking is not None)
    effective_wte: dict = dict(model_config.when_thinking_enabled) if model_config.when_thinking_enabled else {}
    if model_config.thinking is not None:
        merged_thinking = {**(effective_wte.get("thinking") or {}), **model_config.thinking}
        effective_wte = {**effective_wte, "thinking": merged_thinking}
    if thinking_enabled and has_thinking_settings:
        if not model_config.supports_thinking:
            raise ValueError(f"Model {name} does not support thinking. Set `supports_thinking` to true in `config.yaml` to enable thinking.")
        if effective_wte:
            model_settings_from_config.update(effective_wte)
    if not thinking_enabled:
        if model_config.when_thinking_disabled is not None:
            # User-provided disable settings take full precedence.
            model_settings_from_config.update(model_config.when_thinking_disabled)
        elif has_thinking_settings and effective_wte.get("extra_body", {}).get("thinking", {}).get("type"):
            # OpenAI-compatible gateway: thinking is nested under extra_body.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"thinking": {"type": "disabled"}},
            )
            model_settings_from_config["reasoning_effort"] = "minimal"
        elif has_thinking_settings and (
            disable_chat_template_kwargs := _vllm_disable_chat_template_kwargs(
                effective_wte.get("extra_body", {}).get("chat_template_kwargs") or {}
            )
        ):
            # vLLM uses chat template kwargs to switch thinking on/off.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"chat_template_kwargs": disable_chat_template_kwargs},
            )
        elif has_thinking_settings and effective_wte.get("thinking", {}).get("type"):
            # Native langchain_anthropic: thinking is a direct constructor parameter.
            model_settings_from_config["thinking"] = {"type": "disabled"}
    if not model_config.supports_reasoning_effort:
        kwargs.pop("reasoning_effort", None)
        model_settings_from_config.pop("reasoning_effort", None)

    # For Codex Responses API models: map thinking mode to reasoning_effort.
    from deerflow.models.openai_codex_provider import CodexChatModel

    if issubclass(model_class, CodexChatModel):
        # The ChatGPT Codex endpoint currently rejects max_tokens/max_output_tokens.
        model_settings_from_config.pop("max_tokens", None)

        # Use the explicit reasoning_effort from the frontend if provided (low/medium/high/xhigh).
        explicit_effort = kwargs.pop("reasoning_effort", None)
        if not thinking_enabled:
            model_settings_from_config["reasoning_effort"] = "none"
        elif explicit_effort in ("low", "medium", "high", "xhigh"):
            model_settings_from_config["reasoning_effort"] = explicit_effort
        elif "reasoning_effort" not in model_settings_from_config:
            model_settings_from_config["reasoning_effort"] = "medium"

    model_instance = model_class(**{**model_settings_from_config, **kwargs})

    callbacks = build_tracing_callbacks()
    if callbacks:
        existing_callbacks = model_instance.callbacks or []
        model_instance.callbacks = [*existing_callbacks, *callbacks]
        logger.debug(f"Tracing attached to model '{name}' with providers={len(callbacks)}")
    return model_instance
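For orientation, a hypothetical call site for the factory above; the model name and config entry are illustrative, not taken from the committed config.yaml:

```python
# Assuming a config.yaml entry along these lines (illustrative only):
#   models:
#     - name: qwen3
#       use: langchain_openai.ChatOpenAI
#       supports_thinking: true
#       when_thinking_enabled:
#         extra_body:
#           chat_template_kwargs:
#             enable_thinking: true
from deerflow.models.factory import create_chat_model

# thinking_enabled=True merges when_thinking_enabled into the constructor kwargs;
# with thinking_enabled=False the vLLM branch would instead send
# chat_template_kwargs={"enable_thinking": False}.
model = create_chat_model("qwen3", thinking_enabled=True)
reply = model.invoke("Summarize the hardening changes in one paragraph.")
```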