Files
deerflow-factory/deer-flow/backend/tests/test_vllm_provider.py
DATA 6de0bf9f5b Initial commit: hardened DeerFlow factory
Vendored deer-flow upstream (bytedance/deer-flow) plus prompt-injection
hardening:

- New deerflow.security package: content_delimiter, html_cleaner,
  sanitizer (8 layers — invisible chars, control chars, symbols, NFC,
  PUA, tag chars, horizontal whitespace collapse with newline/tab
  preservation, length cap)
- New deerflow.community.searx package: web_search, web_fetch,
  image_search backed by a private SearX instance, every external
  string sanitized and wrapped in <<<EXTERNAL_UNTRUSTED_CONTENT>>>
  delimiters
- All native community web providers (ddg_search, tavily, exa,
  firecrawl, jina_ai, infoquest, image_search) replaced with hard-fail
  stubs that raise NativeWebToolDisabledError at import time, so a
  misconfigured tool.use path fails loud rather than silently falling
  back to unsanitized output
- Native client back-doors (jina_client.py, infoquest_client.py)
  stubbed too
- Native-tool tests quarantined under tests/_disabled_native/
  (collect_ignore_glob via local conftest.py)
- Sanitizer Layer 7 fix: only collapse horizontal whitespace, preserve
  newlines and tabs so list/table structure survives
- Hardened runtime config.yaml references only the searx-backed tools
- Factory overlay (backend/) kept in sync with deer-flow tree as a
  reference / source

See HARDENING.md for the full audit trail and verification steps.
2026-04-12 14:23:57 +02:00

139 lines
4.4 KiB
Python

from __future__ import annotations
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from deerflow.models.vllm_provider import VllmChatModel
def _make_model() -> VllmChatModel:
    """Build a VllmChatModel pointed at a dummy local vLLM endpoint.

    The credentials and URL are placeholders: the tests below only
    exercise payload/response shaping, never a live server.
    """
    settings = {
        "model": "Qwen/QwQ-32B",
        "api_key": "dummy",
        "base_url": "http://localhost:8000/v1",
    }
    return VllmChatModel(**settings)
def test_vllm_provider_restores_reasoning_in_request_payload():
    """Reasoning stashed in `additional_kwargs` must survive into the
    OpenAI-style request payload alongside the tool calls."""
    tool_call = {"name": "bash", "args": {"cmd": "pwd"}, "id": "tool-1", "type": "tool_call"}
    history = [
        AIMessage(
            content="",
            tool_calls=[tool_call],
            additional_kwargs={"reasoning": "Need to inspect the workspace first."},
        ),
        HumanMessage(content="Continue"),
    ]

    payload = _make_model()._get_request_payload(history)

    first = payload["messages"][0]
    assert first["role"] == "assistant"
    assert first["reasoning"] == "Need to inspect the workspace first."
    assert first["tool_calls"][0]["function"]["name"] == "bash"
def test_vllm_provider_normalizes_legacy_thinking_kwarg_to_enable_thinking():
    """A legacy `thinking` chat-template kwarg is rewritten to the
    vLLM-native `enable_thinking` key in the outgoing payload."""
    legacy_model = VllmChatModel(
        model="qwen3",
        api_key="dummy",
        base_url="http://localhost:8000/v1",
        extra_body={"chat_template_kwargs": {"thinking": True}},
    )

    payload = legacy_model._get_request_payload([HumanMessage(content="Hello")])

    template_kwargs = payload["extra_body"]["chat_template_kwargs"]
    assert template_kwargs == {"enable_thinking": True}
def test_vllm_provider_preserves_explicit_enable_thinking_kwarg():
    """An explicit `enable_thinking` kwarg — and any unrelated keys — must
    pass through the payload builder untouched."""
    explicit_kwargs = {"enable_thinking": False, "foo": "bar"}
    model = VllmChatModel(
        model="qwen3",
        api_key="dummy",
        base_url="http://localhost:8000/v1",
        extra_body={"chat_template_kwargs": explicit_kwargs},
    )

    payload = model._get_request_payload([HumanMessage(content="Hello")])

    expected = {"enable_thinking": False, "foo": "bar"}
    assert payload["extra_body"]["chat_template_kwargs"] == expected
def test_vllm_provider_preserves_reasoning_in_chat_result():
    """A non-streaming response's `reasoning` field must land in the
    result message under both `reasoning` and `reasoning_content`."""
    reasoning_text = "I compared the two numbers directly."
    raw_response = {
        "model": "Qwen/QwQ-32B",
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": "42",
                    "reasoning": reasoning_text,
                },
                "finish_reason": "stop",
            }
        ],
        "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
    }

    result = _make_model()._create_chat_result(raw_response)

    message = result.generations[0].message
    assert message.additional_kwargs["reasoning"] == reasoning_text
    assert message.additional_kwargs["reasoning_content"] == reasoning_text
def test_vllm_provider_preserves_reasoning_in_streaming_chunks():
    """Streaming deltas carrying `reasoning` must propagate it into the
    chunk's `additional_kwargs` under both reasoning keys, without
    disturbing the regular content."""
    reasoning_text = "First, call the weather tool."
    delta = {
        "role": "assistant",
        "reasoning": reasoning_text,
        "content": "Calling tool...",
    }
    stream_event = {
        "model": "Qwen/QwQ-32B",
        "choices": [{"delta": delta, "finish_reason": None}],
    }

    chunk = _make_model()._convert_chunk_to_generation_chunk(
        stream_event,
        AIMessageChunk,
        {},
    )

    assert chunk is not None
    extras = chunk.message.additional_kwargs
    assert extras["reasoning"] == reasoning_text
    assert extras["reasoning_content"] == reasoning_text
    assert chunk.message.content == "Calling tool..."
def test_vllm_provider_preserves_empty_reasoning_values_in_streaming_chunks():
    """An empty-string `reasoning` delta is kept verbatim under
    `reasoning`, but must NOT be mirrored into `reasoning_content`."""
    delta = {
        "role": "assistant",
        "reasoning": "",
        "content": "Still replying...",
    }
    stream_event = {
        "model": "Qwen/QwQ-32B",
        "choices": [{"delta": delta, "finish_reason": None}],
    }

    chunk = _make_model()._convert_chunk_to_generation_chunk(
        stream_event,
        AIMessageChunk,
        {},
    )

    assert chunk is not None
    extras = chunk.message.additional_kwargs
    assert "reasoning" in extras
    assert extras["reasoning"] == ""
    assert "reasoning_content" not in extras
    assert chunk.message.content == "Still replying..."