//! harn_vm/llm/mock.rs — mock LLM provider, fixture record/replay, and
//! CLI-installed mock plumbing for testing without API keys.

use std::cell::RefCell;
use std::collections::BTreeSet;

use super::api::LlmResult;
use crate::orchestration::ToolCallRecord;
use crate::value::{ErrorCategory, VmError};

/// LLM replay mode: controls whether provider calls run normally, are
/// recorded to fixture files, or are served back from fixtures.
///
/// Derives `Eq` (not just `PartialEq`) for consistency with
/// `CliLlmMockMode` below; it is a plain unit enum, so full equality
/// is well-defined.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LlmReplayMode {
    /// Normal operation: no fixture recording or replay.
    Off,
    /// Save each response as a fixture (see `save_fixture`).
    Record,
    /// Serve responses from previously recorded fixtures.
    Replay,
}

/// Mode of the CLI-level mock harness. Variant set mirrors
/// [`LlmReplayMode`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CliLlmMockMode {
    /// No CLI mocks installed.
    Off,
    /// Real provider results are captured for later replay.
    Record,
    /// Installed CLI mocks answer instead of the provider.
    Replay,
}

/// Categorized error injected by a mock. When present, the mock
/// short-circuits the provider call and surfaces as
/// `VmError::CategorizedError`, so `llm_call` throws and
/// `llm_call_safe` populates its `error` envelope.
#[derive(Clone)]
pub struct MockError {
    /// Error classification surfaced to the caller.
    pub category: ErrorCategory,
    /// Human-readable error text; `mock_error_to_vm_error` may append
    /// a synthetic `retry-after:` line to it (see below).
    pub message: String,
    /// Optional hint echoed into the error message as a synthetic
    /// `retry-after:` header so the existing `extract_retry_after_ms`
    /// parser recovers it — matches how real provider errors embed
    /// the value. Lets tests assert that `e.retry_after_ms` flows
    /// end-to-end on the thrown dict.
    pub retry_after_ms: Option<u64>,
}

/// One scripted LLM response (or error) used in place of a real
/// provider call. Matched FIFO when `match_pattern` is `None`,
/// otherwise by glob against the flattened prompt text.
#[derive(Clone)]
pub struct LlmMock {
    /// Assistant text returned as `LlmResult::text`.
    pub text: String,
    /// Tool calls to return; normalized in `build_mock_result` unless
    /// `blocks` is `Some` (then passed through verbatim).
    pub tool_calls: Vec<serde_json::Value>,
    pub match_pattern: Option<String>, // None = FIFO (consumed), Some = glob (reusable)
    /// When `true`, a pattern mock is removed from the queue on its
    /// first match instead of being reusable.
    pub consume_on_match: bool,
    /// Token-count overrides; defaults are synthesized in
    /// `build_mock_result` when `None`.
    pub input_tokens: Option<i64>,
    pub output_tokens: Option<i64>,
    pub cache_read_tokens: Option<i64>,
    pub cache_write_tokens: Option<i64>,
    pub thinking: Option<String>,
    pub thinking_summary: Option<String>,
    pub stop_reason: Option<String>,
    /// Model name echoed on the result.
    pub model: String,
    /// Provider name; `None` defaults to "mock".
    pub provider: Option<String>,
    /// Pre-built response blocks; when `Some`, `build_mock_result`
    /// uses these and `tool_calls` as-is instead of synthesizing.
    pub blocks: Option<Vec<serde_json::Value>>,
    pub logprobs: Vec<serde_json::Value>,
    /// When `Some`, this mock synthesizes an error instead of an
    /// `LlmResult`. `text`/`tool_calls` are ignored for error mocks.
    pub error: Option<MockError>,
}

/// Snapshot of the arguments of one mock LLM call, recorded so tests
/// can inspect requests (`get_llm_mock_calls`) and so the unified
/// tape can build a request digest.
#[derive(Clone)]
pub(crate) struct LlmMockCall {
    pub messages: Vec<serde_json::Value>,
    pub system: Option<String>,
    pub tools: Option<Vec<serde_json::Value>>,
    pub tool_choice: Option<serde_json::Value>,
    /// `ThinkingConfig` serialized to JSON (see `record_llm_mock_call`).
    pub thinking: serde_json::Value,
}

/// One saved mock scope: (pending mocks, recorded calls, prompt-cache keys).
type LlmMockScope = (Vec<LlmMock>, Vec<LlmMockCall>, BTreeSet<String>);

thread_local! {
    // Fixture record/replay mode and the directory fixtures live in.
    static LLM_REPLAY_MODE: RefCell<LlmReplayMode> = const { RefCell::new(LlmReplayMode::Off) };
    static LLM_FIXTURE_DIR: RefCell<String> = const { RefCell::new(String::new()) };
    // Tool-call records drained via `drain_tool_recordings`.
    static TOOL_RECORDINGS: RefCell<Vec<ToolCallRecord>> = const { RefCell::new(Vec::new()) };
    // Builtin mocks registered via `push_llm_mock` (`llm_mock()` in scripts).
    static LLM_MOCKS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    // CLI-installed mock harness; checked before builtin mocks.
    static CLI_LLM_MOCK_MODE: RefCell<CliLlmMockMode> = const { RefCell::new(CliLlmMockMode::Off) };
    static CLI_LLM_MOCKS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    static CLI_LLM_RECORDINGS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    // Arguments of every mock call, oldest first.
    static LLM_MOCK_CALLS: RefCell<Vec<LlmMockCall>> = const { RefCell::new(Vec::new()) };
    // Keys of prompts already seen by the simulated prompt cache.
    static LLM_PROMPT_CACHE: RefCell<BTreeSet<String>> = const { RefCell::new(BTreeSet::new()) };
    // Stack backing `push_llm_mock_scope` / `pop_llm_mock_scope`.
    static LLM_MOCK_SCOPES: RefCell<Vec<LlmMockScope>> = const { RefCell::new(Vec::new()) };
}

85pub(crate) fn push_llm_mock(mock: LlmMock) {
86    LLM_MOCKS.with(|v| v.borrow_mut().push(mock));
87}
88
89pub(crate) fn get_llm_mock_calls() -> Vec<LlmMockCall> {
90    LLM_MOCK_CALLS.with(|v| v.borrow().clone())
91}
92
93pub(crate) fn builtin_llm_mock_active() -> bool {
94    LLM_MOCKS.with(|v| !v.borrow().is_empty())
95}
96
97pub(crate) fn reset_llm_mock_state() {
98    LLM_MOCKS.with(|v| v.borrow_mut().clear());
99    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Off);
100    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
101    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
102    LLM_MOCK_CALLS.with(|v| v.borrow_mut().clear());
103    LLM_PROMPT_CACHE.with(|v| v.borrow_mut().clear());
104    LLM_MOCK_SCOPES.with(|v| v.borrow_mut().clear());
105}
106
107/// Save the current builtin LLM mock queue and recorded-calls list, then
108/// start a fresh empty scope. Paired with `pop_llm_mock_scope`. Backs
109/// the `with_llm_mocks` helper in `std/testing` so tests reliably
110/// roll back to the prior state, including when the body throws.
111pub(crate) fn push_llm_mock_scope() {
112    let mocks = LLM_MOCKS.with(|v| std::mem::take(&mut *v.borrow_mut()));
113    let calls = LLM_MOCK_CALLS.with(|v| std::mem::take(&mut *v.borrow_mut()));
114    let cache = LLM_PROMPT_CACHE.with(|v| std::mem::take(&mut *v.borrow_mut()));
115    LLM_MOCK_SCOPES.with(|v| v.borrow_mut().push((mocks, calls, cache)));
116}
117
118/// Restore the most recently pushed builtin LLM mock scope. Returns
119/// `false` when there is nothing to pop, so the builtin can surface a
120/// clear "imbalanced scope" error rather than silently corrupting
121/// state. CLI-installed mocks are intentionally untouched: they are an
122/// outer harness and should not flicker on each per-test scope swap.
123pub(crate) fn pop_llm_mock_scope() -> bool {
124    let entry = LLM_MOCK_SCOPES.with(|v| v.borrow_mut().pop());
125    match entry {
126        Some((mocks, calls, cache)) => {
127            LLM_MOCKS.with(|v| *v.borrow_mut() = mocks);
128            LLM_MOCK_CALLS.with(|v| *v.borrow_mut() = calls);
129            LLM_PROMPT_CACHE.with(|v| *v.borrow_mut() = cache);
130            true
131        }
132        None => false,
133    }
134}
135
136pub fn clear_cli_llm_mock_mode() {
137    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Off);
138    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
139    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
140}
141
142pub fn install_cli_llm_mocks(mocks: Vec<LlmMock>) {
143    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Replay);
144    CLI_LLM_MOCKS.with(|v| *v.borrow_mut() = mocks);
145    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
146}
147
148pub fn enable_cli_llm_mock_recording() {
149    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Record);
150    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
151    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
152}
153
154pub fn take_cli_llm_recordings() -> Vec<LlmMock> {
155    CLI_LLM_RECORDINGS.with(|v| std::mem::take(&mut *v.borrow_mut()))
156}
157
158pub(crate) fn cli_llm_mock_replay_active() -> bool {
159    CLI_LLM_MOCK_MODE.with(|v| *v.borrow() == CliLlmMockMode::Replay)
160}
161
162fn record_llm_mock_call(
163    messages: &[serde_json::Value],
164    system: Option<&str>,
165    native_tools: Option<&[serde_json::Value]>,
166    tool_choice: Option<&serde_json::Value>,
167    thinking: &super::api::ThinkingConfig,
168) {
169    LLM_MOCK_CALLS.with(|v| {
170        v.borrow_mut().push(LlmMockCall {
171            messages: messages.to_vec(),
172            system: system.map(|s| s.to_string()),
173            tools: native_tools.map(|t| t.to_vec()),
174            tool_choice: tool_choice.cloned(),
175            thinking: serde_json::to_value(thinking).unwrap_or_else(|_| {
176                serde_json::json!({
177                    "mode": "disabled"
178                })
179            }),
180        });
181    });
182}
183
184/// Build an LlmResult from a matched mock.
185fn build_mock_result(mock: &LlmMock, last_msg_len: usize) -> LlmResult {
186    let (tool_calls, blocks) = if let Some(blocks) = &mock.blocks {
187        (mock.tool_calls.clone(), blocks.clone())
188    } else {
189        let mut blocks = Vec::new();
190
191        if !mock.text.is_empty() {
192            blocks.push(serde_json::json!({
193                "type": "output_text",
194                "text": mock.text,
195                "visibility": "public",
196            }));
197        }
198
199        let mut tool_calls = Vec::new();
200        for (i, tc) in mock.tool_calls.iter().enumerate() {
201            let id = format!("mock_call_{}", i + 1);
202            let name = tc.get("name").and_then(|n| n.as_str()).unwrap_or("unknown");
203            let arguments = tc
204                .get("arguments")
205                .cloned()
206                .unwrap_or(serde_json::json!({}));
207            tool_calls.push(serde_json::json!({
208                "id": id,
209                "type": "tool_call",
210                "name": name,
211                "arguments": arguments,
212            }));
213            blocks.push(serde_json::json!({
214                "type": "tool_call",
215                "id": id,
216                "name": name,
217                "arguments": arguments,
218                "visibility": "internal",
219            }));
220        }
221
222        (tool_calls, blocks)
223    };
224
225    LlmResult {
226        text: mock.text.clone(),
227        tool_calls,
228        input_tokens: mock.input_tokens.unwrap_or(last_msg_len as i64),
229        output_tokens: mock.output_tokens.unwrap_or(30),
230        cache_read_tokens: mock.cache_read_tokens.unwrap_or(0),
231        cache_write_tokens: mock.cache_write_tokens.unwrap_or(0),
232        model: mock.model.clone(),
233        provider: mock.provider.clone().unwrap_or_else(|| "mock".to_string()),
234        thinking: mock.thinking.clone(),
235        thinking_summary: mock.thinking_summary.clone(),
236        stop_reason: mock.stop_reason.clone(),
237        blocks,
238        logprobs: mock.logprobs.clone(),
239    }
240}
241
/// Multi-segment glob match: split on `*` and check segments appear in order.
/// Handles `*`, `prefix*`, `*suffix`, `*contains*`, `pre*mid*suf`, etc.
/// The first segment anchors at the start, the last at the end, and
/// middle segments are located greedily left-to-right.
fn mock_glob_match(pattern: &str, text: &str) -> bool {
    if pattern == "*" {
        return true;
    }
    if !pattern.contains('*') {
        // No wildcard at all: exact match only.
        return pattern == text;
    }
    let segments: Vec<&str> = pattern.split('*').collect();
    let last = segments.len() - 1;
    let mut rest = text;
    for (pos, segment) in segments.iter().enumerate() {
        if segment.is_empty() {
            continue; // adjacent or edge '*' — matches anything, including "".
        }
        if pos == 0 {
            // Leading segment must anchor at the start of the text.
            match rest.strip_prefix(segment) {
                Some(tail) => rest = tail,
                None => return false,
            }
        } else if pos == last {
            // Trailing segment must anchor at the end of what's left.
            if !rest.ends_with(segment) {
                return false;
            }
            rest = "";
        } else if let Some(at) = rest.find(segment) {
            rest = &rest[at + segment.len()..];
        } else {
            return false;
        }
    }
    true
}

277fn collect_mock_match_strings(value: &serde_json::Value, out: &mut Vec<String>) {
278    match value {
279        serde_json::Value::String(text) if !text.is_empty() => out.push(text.clone()),
280        serde_json::Value::String(_) => {}
281        serde_json::Value::Array(items) => {
282            for item in items {
283                collect_mock_match_strings(item, out);
284            }
285        }
286        serde_json::Value::Object(map) => {
287            for value in map.values() {
288                collect_mock_match_strings(value, out);
289            }
290        }
291        _ => {}
292    }
293}
294
295fn mock_match_text(messages: &[serde_json::Value]) -> String {
296    let mut parts = Vec::new();
297    for message in messages {
298        collect_mock_match_strings(message, &mut parts);
299    }
300    parts.join("\n")
301}
302
303fn mock_last_prompt_text(messages: &[serde_json::Value]) -> String {
304    for message in messages.iter().rev() {
305        let Some(content) = message.get("content") else {
306            continue;
307        };
308        let mut parts = Vec::new();
309        collect_mock_match_strings(content, &mut parts);
310        let text = parts.join("\n");
311        if !text.trim().is_empty() {
312            return text;
313        }
314    }
315    String::new()
316}
317
318fn mock_prompt_cache_key(
319    model: &str,
320    messages: &[serde_json::Value],
321    system: Option<&str>,
322) -> String {
323    serde_json::to_string(&serde_json::json!({
324        "model": model,
325        "system": system,
326        "messages": messages,
327    }))
328    .unwrap_or_default()
329}
330
331fn apply_mock_prompt_cache(result: &mut LlmResult, cache_key: &str) {
332    if result.cache_read_tokens > 0 || result.cache_write_tokens > 0 {
333        return;
334    }
335    let cache_tokens = result.input_tokens.max(0);
336    if cache_tokens == 0 {
337        return;
338    }
339    let cache_hit = LLM_PROMPT_CACHE.with(|cache| {
340        let mut cache = cache.borrow_mut();
341        if cache.contains(cache_key) {
342            true
343        } else {
344            cache.insert(cache_key.to_string());
345            false
346        }
347    });
348    if cache_hit {
349        result.cache_read_tokens = cache_tokens;
350    } else {
351        result.cache_write_tokens = cache_tokens;
352    }
353}
354
355/// Convert a mock's `error` payload into the `VmError` that the
356/// provider path would have raised, so classification, retry, and
357/// `error_category` all behave identically to a real failure.
358fn mock_error_to_vm_error(err: &MockError) -> VmError {
359    // Embed `retry_after_ms` as a synthetic `retry-after:` header on
360    // the message so `agent_observe::extract_retry_after_ms` — the
361    // same parser that handles real HTTP 429s — surfaces the value
362    // on the caller's thrown dict. Keeps the mock path byte-for-byte
363    // compatible with a real rate-limit response.
364    let message = match err.retry_after_ms {
365        Some(ms) => {
366            let secs = (ms as f64 / 1000.0).max(0.0);
367            let sep = if err.message.is_empty() || err.message.ends_with('\n') {
368                ""
369            } else {
370                "\n"
371            };
372            format!("{}{sep}retry-after: {secs}\n", err.message)
373        }
374        None => err.message.clone(),
375    };
376    VmError::CategorizedError {
377        message,
378        category: err.category.clone(),
379    }
380}
381
382/// Try to find and return a matching mock response. Returns
383/// `Some(Ok(LlmResult))` on a text/tool_call match, `Some(Err(VmError))`
384/// on an error-mock match, and `None` to fall through to default.
385fn try_match_mock_queue(
386    mocks: &mut Vec<LlmMock>,
387    match_text: &str,
388) -> Option<Result<LlmResult, VmError>> {
389    if let Some(idx) = mocks.iter().position(|m| m.match_pattern.is_none()) {
390        let mock = mocks.remove(idx);
391        return Some(match &mock.error {
392            Some(err) => Err(mock_error_to_vm_error(err)),
393            None => Ok(build_mock_result(&mock, match_text.len())),
394        });
395    }
396
397    for idx in 0..mocks.len() {
398        let mock = &mocks[idx];
399        if let Some(ref pattern) = mock.match_pattern {
400            if mock_glob_match(pattern, match_text) {
401                if mock.consume_on_match {
402                    let mock = mocks.remove(idx);
403                    return Some(match &mock.error {
404                        Some(err) => Err(mock_error_to_vm_error(err)),
405                        None => Ok(build_mock_result(&mock, match_text.len())),
406                    });
407                }
408                return Some(match &mock.error {
409                    Some(err) => Err(mock_error_to_vm_error(err)),
410                    None => Ok(build_mock_result(mock, match_text.len())),
411                });
412            }
413        }
414    }
415
416    None
417}
418
419fn try_match_builtin_mock(match_text: &str) -> Option<Result<LlmResult, VmError>> {
420    LLM_MOCKS.with(|mocks| try_match_mock_queue(&mut mocks.borrow_mut(), match_text))
421}
422
423fn try_match_cli_mock(match_text: &str) -> Option<Result<LlmResult, VmError>> {
424    CLI_LLM_MOCKS.with(|mocks| try_match_mock_queue(&mut mocks.borrow_mut(), match_text))
425}
426
427pub(crate) fn record_cli_llm_result(result: &LlmResult) {
428    record_unified_tape_llm_call(result);
429    if !CLI_LLM_MOCK_MODE.with(|mode| *mode.borrow() == CliLlmMockMode::Record) {
430        return;
431    }
432    CLI_LLM_RECORDINGS.with(|recordings| {
433        recordings.borrow_mut().push(LlmMock {
434            text: result.text.clone(),
435            tool_calls: result.tool_calls.clone(),
436            match_pattern: None,
437            consume_on_match: false,
438            input_tokens: Some(result.input_tokens),
439            output_tokens: Some(result.output_tokens),
440            cache_read_tokens: Some(result.cache_read_tokens),
441            cache_write_tokens: Some(result.cache_write_tokens),
442            thinking: result.thinking.clone(),
443            thinking_summary: result.thinking_summary.clone(),
444            stop_reason: result.stop_reason.clone(),
445            model: result.model.clone(),
446            provider: Some(result.provider.clone()),
447            blocks: Some(result.blocks.clone()),
448            logprobs: result.logprobs.clone(),
449            error: None,
450        });
451    });
452}
453
454/// Append an `LlmCall` record to the unified-tape recorder when one is
455/// active. The request digest is built from the most recently recorded
456/// `LlmMockCall` so the same hashing surface used for fixture matching
457/// drives the fidelity oracle's request comparison; falls back to a
458/// hash of the response text alone when no matching call is on record
459/// (e.g. when `record_llm_mock_call` was bypassed).
460fn record_unified_tape_llm_call(result: &LlmResult) {
461    if crate::testbench::tape::active_recorder().is_none() {
462        return;
463    }
464    let response_json = serde_json::to_vec(result).unwrap_or_else(|_| Vec::new());
465    let request_digest = LLM_MOCK_CALLS
466        .with(|calls| calls.borrow().last().cloned())
467        .map(|call| {
468            let serialized = serde_json::to_vec(&serde_json::json!({
469                "messages": call.messages,
470                "system": call.system,
471                "tools": call.tools,
472                "tool_choice": call.tool_choice,
473                "thinking": call.thinking,
474                "model": result.model,
475            }))
476            .unwrap_or_default();
477            crate::testbench::tape::content_hash(&serialized)
478        })
479        .unwrap_or_else(|| {
480            // Fall back to hashing the response — keeps fidelity comparable
481            // across runs even when the request surface wasn't captured.
482            crate::testbench::tape::content_hash(result.text.as_bytes())
483        });
484    crate::testbench::tape::with_active_recorder(|recorder| {
485        let response = recorder.payload_from_bytes(response_json);
486        Some(crate::testbench::tape::TapeRecordKind::LlmCall {
487            request_digest,
488            response,
489        })
490    });
491}
492
493fn unmatched_cli_prompt_error(match_text: &str) -> VmError {
494    let mut snippet: String = match_text.chars().take(200).collect();
495    if match_text.chars().count() > 200 {
496        snippet.push_str("...");
497    }
498    VmError::Runtime(format!("No --llm-mock fixture matched prompt: {snippet:?}"))
499}
500
501/// Set LLM replay mode (record/replay) and fixture directory.
502pub fn set_replay_mode(mode: LlmReplayMode, fixture_dir: &str) {
503    LLM_REPLAY_MODE.with(|v| *v.borrow_mut() = mode);
504    LLM_FIXTURE_DIR.with(|v| *v.borrow_mut() = fixture_dir.to_string());
505}
506
507pub(crate) fn get_replay_mode() -> LlmReplayMode {
508    LLM_REPLAY_MODE.with(|v| *v.borrow())
509}
510
511pub(crate) fn get_fixture_dir() -> String {
512    LLM_FIXTURE_DIR.with(|v| v.borrow().clone())
513}
514
/// Hash a request for fixture file naming using canonical JSON serialization.
///
/// NOTE(review): `DefaultHasher`'s algorithm is not guaranteed stable
/// across Rust releases, so fixture filenames may change after a
/// toolchain upgrade — acceptable for regenerable fixtures, but worth
/// confirming that is the intent.
pub(crate) fn fixture_hash(
    model: &str,
    messages: &[serde_json::Value],
    system: Option<&str>,
) -> String {
    use std::hash::{Hash, Hasher};
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    // Hash order (model, messages, system) is part of the on-disk
    // fixture-naming contract — do not reorder these calls.
    model.hash(&mut hasher);
    // Canonical JSON hashing is stable across Debug-format changes.
    serde_json::to_string(messages)
        .unwrap_or_default()
        .hash(&mut hasher);
    system.hash(&mut hasher);
    // 16 lowercase hex digits (64-bit) used as the fixture file stem.
    format!("{:016x}", hasher.finish())
}

532pub(crate) fn save_fixture(hash: &str, result: &LlmResult) {
533    let dir = get_fixture_dir();
534    if dir.is_empty() {
535        return;
536    }
537    let _ = std::fs::create_dir_all(&dir);
538    let path = format!("{dir}/{hash}.json");
539    let json = serde_json::json!({
540        "text": result.text,
541        "tool_calls": result.tool_calls,
542        "input_tokens": result.input_tokens,
543        "output_tokens": result.output_tokens,
544        "cache_read_tokens": result.cache_read_tokens,
545        "cache_write_tokens": result.cache_write_tokens,
546        "cache_creation_input_tokens": result.cache_write_tokens,
547        "model": result.model,
548        "provider": result.provider,
549        "thinking": result.thinking,
550        "thinking_summary": result.thinking_summary,
551        "stop_reason": result.stop_reason,
552        "blocks": result.blocks,
553        "logprobs": result.logprobs,
554    });
555    let _ = std::fs::write(
556        &path,
557        serde_json::to_string_pretty(&json).unwrap_or_default(),
558    );
559}
560
561pub(crate) fn load_fixture(hash: &str) -> Option<LlmResult> {
562    let dir = get_fixture_dir();
563    if dir.is_empty() {
564        return None;
565    }
566    let path = format!("{dir}/{hash}.json");
567    let content = std::fs::read_to_string(&path).ok()?;
568    let json: serde_json::Value = serde_json::from_str(&content).ok()?;
569    Some(LlmResult {
570        text: json["text"].as_str().unwrap_or("").to_string(),
571        tool_calls: json["tool_calls"].as_array().cloned().unwrap_or_default(),
572        input_tokens: json["input_tokens"].as_i64().unwrap_or(0),
573        output_tokens: json["output_tokens"].as_i64().unwrap_or(0),
574        cache_read_tokens: json["cache_read_tokens"].as_i64().unwrap_or(0),
575        cache_write_tokens: json["cache_write_tokens"]
576            .as_i64()
577            .or_else(|| json["cache_creation_input_tokens"].as_i64())
578            .unwrap_or(0),
579        model: json["model"].as_str().unwrap_or("").to_string(),
580        provider: json["provider"].as_str().unwrap_or("mock").to_string(),
581        thinking: json["thinking"].as_str().map(|s| s.to_string()),
582        thinking_summary: json["thinking_summary"].as_str().map(|s| s.to_string()),
583        stop_reason: json["stop_reason"].as_str().map(|s| s.to_string()),
584        blocks: json["blocks"].as_array().cloned().unwrap_or_default(),
585        logprobs: json["logprobs"].as_array().cloned().unwrap_or_default(),
586    })
587}
588
589/// Generate stub argument values for required parameters in a tool schema.
590/// This makes mock tool calls realistic — a real model would always fill
591/// required fields, so the mock should too.
592fn mock_required_args(tool_schema: &serde_json::Value) -> serde_json::Value {
593    let mut args = serde_json::Map::new();
594    // Anthropic: {name, input_schema: {properties, required}}
595    // OpenAI:    {function: {name, parameters: {properties, required}}}
596    // Harn VM:   {parameters: {name: {type, required}}}  (from tool_define)
597    let input_schema = tool_schema
598        .get("input_schema")
599        .or_else(|| tool_schema.get("inputSchema"))
600        .or_else(|| {
601            tool_schema
602                .get("function")
603                .and_then(|f| f.get("parameters"))
604        })
605        .or_else(|| tool_schema.get("parameters"));
606    let Some(schema) = input_schema else {
607        return serde_json::Value::Object(args);
608    };
609    let required: std::collections::BTreeSet<String> = schema
610        .get("required")
611        .and_then(|r| r.as_array())
612        .map(|arr| {
613            arr.iter()
614                .filter_map(|v| v.as_str().map(|s| s.to_string()))
615                .collect()
616        })
617        .unwrap_or_default();
618    if let Some(props) = schema.get("properties").and_then(|p| p.as_object()) {
619        for (name, prop) in props {
620            if !required.contains(name) {
621                continue;
622            }
623            let ty = prop
624                .get("type")
625                .and_then(|t| t.as_str())
626                .unwrap_or("string");
627            let placeholder = match ty {
628                "integer" => serde_json::json!(0),
629                "number" => serde_json::json!(0.0),
630                "boolean" => serde_json::json!(false),
631                "array" => serde_json::json!([]),
632                "object" => serde_json::json!({}),
633                _ => serde_json::json!(""),
634            };
635            args.insert(name.clone(), placeholder);
636        }
637    }
638    serde_json::Value::Object(args)
639}
640
641/// Mock LLM provider -- deterministic responses for testing without API keys.
642/// When configurable mocks have been registered via `llm_mock()`, those are
643/// checked first (FIFO queue, then pattern matching). Falls through to the
644/// default deterministic behavior when no mocks match.
645pub(crate) fn mock_llm_response(
646    messages: &[serde_json::Value],
647    system: Option<&str>,
648    native_tools: Option<&[serde_json::Value]>,
649    tool_choice: Option<&serde_json::Value>,
650    thinking: &super::api::ThinkingConfig,
651    model: &str,
652    cache: bool,
653) -> Result<LlmResult, VmError> {
654    record_llm_mock_call(messages, system, native_tools, tool_choice, thinking);
655
656    let match_text = mock_match_text(messages);
657    let prompt_text = mock_last_prompt_text(messages);
658    let cache_key = mock_prompt_cache_key(model, messages, system);
659
660    if let Some(matched) = try_match_cli_mock(&match_text) {
661        return matched.map(|mut result| {
662            if cache {
663                apply_mock_prompt_cache(&mut result, &cache_key);
664            }
665            result
666        });
667    }
668
669    if let Some(matched) = try_match_builtin_mock(&match_text) {
670        return matched.map(|mut result| {
671            if cache {
672                apply_mock_prompt_cache(&mut result, &cache_key);
673            }
674            result
675        });
676    }
677
678    if cli_llm_mock_replay_active() {
679        return Err(unmatched_cli_prompt_error(&match_text));
680    }
681
682    // Generate a mock tool call for the first tool, filling required
683    // params with placeholders so the call passes schema validation.
684    if let Some(tools) = native_tools {
685        if let Some(first_tool) = tools.first() {
686            let tool_name = first_tool
687                .get("name")
688                .or_else(|| first_tool.get("function").and_then(|f| f.get("name")))
689                .and_then(|n| n.as_str())
690                .unwrap_or("unknown");
691            let mock_args = mock_required_args(first_tool);
692            let mut result = LlmResult {
693                text: String::new(),
694                tool_calls: vec![serde_json::json!({
695                        "id": "mock_call_1",
696                        "type": "tool_call",
697                        "name": tool_name,
698                "arguments": mock_args
699                })],
700                input_tokens: prompt_text.len() as i64,
701                output_tokens: 20,
702                cache_read_tokens: 0,
703                cache_write_tokens: 0,
704                model: model.to_string(),
705                provider: "mock".to_string(),
706                thinking: None,
707                thinking_summary: None,
708                stop_reason: None,
709                blocks: vec![serde_json::json!({
710                    "type": "tool_call",
711                    "id": "mock_call_1",
712                    "name": tool_name,
713                    "arguments": mock_args,
714                    "visibility": "internal",
715                })],
716                logprobs: Vec::new(),
717            };
718            if cache {
719                apply_mock_prompt_cache(&mut result, &cache_key);
720            }
721            return Ok(result);
722        }
723    }
724
725    // Preserve the historical auto-complete behavior for tagged text-tool
726    // prompts only. Bare `##DONE##` in no-tool/native prompts changes
727    // loop semantics by completing runs that used to exhaust budget unless
728    // a fixture explicitly returned the sentinel.
729    let tagged_done = system.is_some_and(|s| s.contains("<done>"));
730
731    let prose_body = if prompt_text.is_empty() {
732        "Mock LLM response".to_string()
733    } else {
734        let word_count = prompt_text.split_whitespace().count();
735        format!(
736            "Mock response to {word_count}-word prompt: {}",
737            prompt_text.chars().take(100).collect::<String>()
738        )
739    };
740    let response = if tagged_done {
741        format!("<assistant_prose>{prose_body}</assistant_prose>\n<done>##DONE##</done>")
742    } else {
743        prose_body
744    };
745
746    let mut result = LlmResult {
747        text: response.clone(),
748        tool_calls: vec![],
749        input_tokens: prompt_text.len() as i64,
750        output_tokens: 30,
751        cache_read_tokens: 0,
752        cache_write_tokens: 0,
753        model: model.to_string(),
754        provider: "mock".to_string(),
755        thinking: None,
756        thinking_summary: None,
757        stop_reason: None,
758        blocks: vec![serde_json::json!({
759            "type": "output_text",
760            "text": response,
761            "visibility": "public",
762        })],
763        logprobs: Vec::new(),
764    };
765    if cache {
766        apply_mock_prompt_cache(&mut result, &cache_key);
767    }
768    Ok(result)
769}
770
771/// Take all recorded tool calls, leaving the buffer empty.
772pub fn drain_tool_recordings() -> Vec<ToolCallRecord> {
773    TOOL_RECORDINGS.with(|v| std::mem::take(&mut *v.borrow_mut()))
774}