Skip to main content

harn_vm/llm/
mock.rs

1use std::cell::RefCell;
2use std::collections::BTreeSet;
3
4use super::api::LlmResult;
5use crate::orchestration::ToolCallRecord;
6use crate::value::{ErrorCategory, VmError};
7
/// LLM replay mode.
///
/// `Record` captures real provider results as fixture files,
/// `Replay` serves them back, and `Off` disables both. Stored
/// per-thread via `LLM_REPLAY_MODE` and set with `set_replay_mode`.
// `Eq` added for consistency with `CliLlmMockMode`: derived equality
// on a fieldless enum is total, so `PartialEq` without `Eq` was an
// omission (clippy: derive_partial_eq_without_eq).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LlmReplayMode {
    Off,
    Record,
    Replay,
}
15
/// Mode of the CLI-installed mock harness (the `--llm-mock` layer).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CliLlmMockMode {
    /// No CLI harness installed.
    Off,
    /// Serve installed fixtures; unmatched prompts become errors.
    Replay,
    /// Let real calls through and capture their results as fixtures.
    Record,
}
22
/// Categorized error injected by a mock. When present, the mock
/// short-circuits the provider call and surfaces as
/// `VmError::CategorizedError`, so `llm_call` throws and
/// `llm_call_safe` populates its `error` envelope.
#[derive(Clone)]
pub struct MockError {
    /// Classification carried into `VmError::CategorizedError`
    /// (see `mock_error_to_vm_error`).
    pub category: ErrorCategory,
    /// Human-readable error text. `mock_error_to_vm_error` may append
    /// a synthetic `retry-after:` line to it (see below).
    pub message: String,
    /// Optional hint echoed into the error message as a synthetic
    /// `retry-after:` header so the existing `extract_retry_after_ms`
    /// parser recovers it — matches how real provider errors embed
    /// the value. Lets tests assert that `e.retry_after_ms` flows
    /// end-to-end on the thrown dict.
    pub retry_after_ms: Option<u64>,
}
38
/// One queued mock LLM response (or injected error). Matched against
/// the incoming prompt either FIFO (no pattern) or by glob pattern —
/// see `try_match_mock_queue`.
#[derive(Clone)]
pub struct LlmMock {
    /// Assistant text returned as `LlmResult::text`.
    pub text: String,
    /// Tool calls. Normalized into `{id, type, name, arguments}` by
    /// `build_mock_result` unless `blocks` is provided, in which case
    /// they pass through verbatim.
    pub tool_calls: Vec<serde_json::Value>,
    pub match_pattern: Option<String>, // None = FIFO (consumed), Some = glob (reusable)
    /// For pattern mocks: remove from the queue after the first match.
    pub consume_on_match: bool,
    // Token accounting overrides; `build_mock_result` synthesizes
    // defaults when these are `None`.
    pub input_tokens: Option<i64>,
    pub output_tokens: Option<i64>,
    pub cache_read_tokens: Option<i64>,
    pub cache_write_tokens: Option<i64>,
    pub thinking: Option<String>,
    pub thinking_summary: Option<String>,
    pub stop_reason: Option<String>,
    pub model: String,
    /// Defaults to `"mock"` when `None` (see `build_mock_result`).
    pub provider: Option<String>,
    /// When `Some`, used verbatim as the result's blocks instead of
    /// synthesizing `output_text`/`tool_call` blocks.
    pub blocks: Option<Vec<serde_json::Value>>,
    pub logprobs: Vec<serde_json::Value>,
    /// When `Some`, this mock synthesizes an error instead of an
    /// `LlmResult`. `text`/`tool_calls` are ignored for error mocks.
    pub error: Option<MockError>,
}
60
/// Snapshot of one `mock_llm_response` invocation, recorded so tests
/// can assert on exactly what the "model" was asked
/// (see `record_llm_mock_call` / `get_llm_mock_calls`).
#[derive(Clone)]
pub(crate) struct LlmMockCall {
    pub messages: Vec<serde_json::Value>,
    pub system: Option<String>,
    pub tools: Option<Vec<serde_json::Value>>,
    pub tool_choice: Option<serde_json::Value>,
    /// `ThinkingConfig` serialized to JSON; falls back to
    /// `{"mode": "disabled"}` if serialization fails.
    pub thinking: serde_json::Value,
}
69
/// Saved state for one builtin mock scope:
/// (mock queue, recorded calls, prompt-cache keys).
type LlmMockScope = (Vec<LlmMock>, Vec<LlmMockCall>, BTreeSet<String>);

thread_local! {
    // Record/replay fixture mode and directory (`set_replay_mode`).
    static LLM_REPLAY_MODE: RefCell<LlmReplayMode> = const { RefCell::new(LlmReplayMode::Off) };
    static LLM_FIXTURE_DIR: RefCell<String> = const { RefCell::new(String::new()) };
    // Tool-call recordings, drained by `drain_tool_recordings`.
    static TOOL_RECORDINGS: RefCell<Vec<ToolCallRecord>> = const { RefCell::new(Vec::new()) };
    // Builtin mock queue (`push_llm_mock` / `llm_mock()` builtin).
    static LLM_MOCKS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    // CLI-installed mock harness: mode, replay queue, and recordings.
    static CLI_LLM_MOCK_MODE: RefCell<CliLlmMockMode> = const { RefCell::new(CliLlmMockMode::Off) };
    static CLI_LLM_MOCKS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    static CLI_LLM_RECORDINGS: RefCell<Vec<LlmMock>> = const { RefCell::new(Vec::new()) };
    // Every mock call observed, for assertions (`get_llm_mock_calls`).
    static LLM_MOCK_CALLS: RefCell<Vec<LlmMockCall>> = const { RefCell::new(Vec::new()) };
    // Request keys that have "written" the simulated prompt cache.
    static LLM_PROMPT_CACHE: RefCell<BTreeSet<String>> = const { RefCell::new(BTreeSet::new()) };
    // Stack of saved scopes for push/pop_llm_mock_scope.
    static LLM_MOCK_SCOPES: RefCell<Vec<LlmMockScope>> = const { RefCell::new(Vec::new()) };
}
84
85pub(crate) fn push_llm_mock(mock: LlmMock) {
86    LLM_MOCKS.with(|v| v.borrow_mut().push(mock));
87}
88
89pub(crate) fn get_llm_mock_calls() -> Vec<LlmMockCall> {
90    LLM_MOCK_CALLS.with(|v| v.borrow().clone())
91}
92
93pub(crate) fn builtin_llm_mock_active() -> bool {
94    LLM_MOCKS.with(|v| !v.borrow().is_empty())
95}
96
97pub(crate) fn reset_llm_mock_state() {
98    LLM_MOCKS.with(|v| v.borrow_mut().clear());
99    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Off);
100    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
101    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
102    LLM_MOCK_CALLS.with(|v| v.borrow_mut().clear());
103    LLM_PROMPT_CACHE.with(|v| v.borrow_mut().clear());
104    LLM_MOCK_SCOPES.with(|v| v.borrow_mut().clear());
105}
106
107/// Save the current builtin LLM mock queue and recorded-calls list, then
108/// start a fresh empty scope. Paired with `pop_llm_mock_scope`. Backs
109/// the `with_llm_mocks` helper in `std/testing` so tests reliably
110/// roll back to the prior state, including when the body throws.
111pub(crate) fn push_llm_mock_scope() {
112    let mocks = LLM_MOCKS.with(|v| std::mem::take(&mut *v.borrow_mut()));
113    let calls = LLM_MOCK_CALLS.with(|v| std::mem::take(&mut *v.borrow_mut()));
114    let cache = LLM_PROMPT_CACHE.with(|v| std::mem::take(&mut *v.borrow_mut()));
115    LLM_MOCK_SCOPES.with(|v| v.borrow_mut().push((mocks, calls, cache)));
116}
117
118/// Restore the most recently pushed builtin LLM mock scope. Returns
119/// `false` when there is nothing to pop, so the builtin can surface a
120/// clear "imbalanced scope" error rather than silently corrupting
121/// state. CLI-installed mocks are intentionally untouched: they are an
122/// outer harness and should not flicker on each per-test scope swap.
123pub(crate) fn pop_llm_mock_scope() -> bool {
124    let entry = LLM_MOCK_SCOPES.with(|v| v.borrow_mut().pop());
125    match entry {
126        Some((mocks, calls, cache)) => {
127            LLM_MOCKS.with(|v| *v.borrow_mut() = mocks);
128            LLM_MOCK_CALLS.with(|v| *v.borrow_mut() = calls);
129            LLM_PROMPT_CACHE.with(|v| *v.borrow_mut() = cache);
130            true
131        }
132        None => false,
133    }
134}
135
136pub fn clear_cli_llm_mock_mode() {
137    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Off);
138    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
139    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
140}
141
142pub fn install_cli_llm_mocks(mocks: Vec<LlmMock>) {
143    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Replay);
144    CLI_LLM_MOCKS.with(|v| *v.borrow_mut() = mocks);
145    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
146}
147
148pub fn enable_cli_llm_mock_recording() {
149    CLI_LLM_MOCK_MODE.with(|v| *v.borrow_mut() = CliLlmMockMode::Record);
150    CLI_LLM_MOCKS.with(|v| v.borrow_mut().clear());
151    CLI_LLM_RECORDINGS.with(|v| v.borrow_mut().clear());
152}
153
154pub fn take_cli_llm_recordings() -> Vec<LlmMock> {
155    CLI_LLM_RECORDINGS.with(|v| std::mem::take(&mut *v.borrow_mut()))
156}
157
158pub(crate) fn cli_llm_mock_replay_active() -> bool {
159    CLI_LLM_MOCK_MODE.with(|v| *v.borrow() == CliLlmMockMode::Replay)
160}
161
162fn record_llm_mock_call(
163    messages: &[serde_json::Value],
164    system: Option<&str>,
165    native_tools: Option<&[serde_json::Value]>,
166    tool_choice: Option<&serde_json::Value>,
167    thinking: &super::api::ThinkingConfig,
168) {
169    LLM_MOCK_CALLS.with(|v| {
170        v.borrow_mut().push(LlmMockCall {
171            messages: messages.to_vec(),
172            system: system.map(|s| s.to_string()),
173            tools: native_tools.map(|t| t.to_vec()),
174            tool_choice: tool_choice.cloned(),
175            thinking: serde_json::to_value(thinking).unwrap_or_else(|_| {
176                serde_json::json!({
177                    "mode": "disabled"
178                })
179            }),
180        });
181    });
182}
183
184/// Build an LlmResult from a matched mock.
185fn build_mock_result(mock: &LlmMock, last_msg_len: usize) -> LlmResult {
186    let (tool_calls, blocks) = if let Some(blocks) = &mock.blocks {
187        (mock.tool_calls.clone(), blocks.clone())
188    } else {
189        let mut blocks = Vec::new();
190
191        if !mock.text.is_empty() {
192            blocks.push(serde_json::json!({
193                "type": "output_text",
194                "text": mock.text,
195                "visibility": "public",
196            }));
197        }
198
199        let mut tool_calls = Vec::new();
200        for (i, tc) in mock.tool_calls.iter().enumerate() {
201            let id = format!("mock_call_{}", i + 1);
202            let name = tc.get("name").and_then(|n| n.as_str()).unwrap_or("unknown");
203            let arguments = tc
204                .get("arguments")
205                .cloned()
206                .unwrap_or(serde_json::json!({}));
207            tool_calls.push(serde_json::json!({
208                "id": id,
209                "type": "tool_call",
210                "name": name,
211                "arguments": arguments,
212            }));
213            blocks.push(serde_json::json!({
214                "type": "tool_call",
215                "id": id,
216                "name": name,
217                "arguments": arguments,
218                "visibility": "internal",
219            }));
220        }
221
222        (tool_calls, blocks)
223    };
224
225    LlmResult {
226        text: mock.text.clone(),
227        tool_calls,
228        input_tokens: mock.input_tokens.unwrap_or(last_msg_len as i64),
229        output_tokens: mock.output_tokens.unwrap_or(30),
230        cache_read_tokens: mock.cache_read_tokens.unwrap_or(0),
231        cache_write_tokens: mock.cache_write_tokens.unwrap_or(0),
232        model: mock.model.clone(),
233        provider: mock.provider.clone().unwrap_or_else(|| "mock".to_string()),
234        thinking: mock.thinking.clone(),
235        thinking_summary: mock.thinking_summary.clone(),
236        stop_reason: mock.stop_reason.clone(),
237        blocks,
238        logprobs: mock.logprobs.clone(),
239    }
240}
241
/// Multi-segment glob match: split on `*` and check segments appear in order.
/// Handles `*`, `prefix*`, `*suffix`, `*contains*`, `pre*mid*suf`, etc.
fn mock_glob_match(pattern: &str, text: &str) -> bool {
    if pattern == "*" {
        return true;
    }
    // No wildcard at all: exact comparison.
    if !pattern.contains('*') {
        return pattern == text;
    }
    let segments: Vec<&str> = pattern.split('*').collect();
    let last = segments.len() - 1;
    let mut rest = text;
    for (pos, segment) in segments.iter().enumerate() {
        // Empty segments come from leading/trailing/adjacent stars.
        if segment.is_empty() {
            continue;
        }
        if pos == 0 {
            // First segment must anchor at the start.
            let Some(tail) = rest.strip_prefix(segment) else {
                return false;
            };
            rest = tail;
        } else if pos == last {
            // Last segment must anchor at the end of what remains.
            if !rest.ends_with(segment) {
                return false;
            }
            rest = "";
        } else if let Some(found) = rest.find(segment) {
            // Middle segments match greedily-leftmost, in order.
            rest = &rest[found + segment.len()..];
        } else {
            return false;
        }
    }
    true
}
276
277fn collect_mock_match_strings(value: &serde_json::Value, out: &mut Vec<String>) {
278    match value {
279        serde_json::Value::String(text) if !text.is_empty() => out.push(text.clone()),
280        serde_json::Value::String(_) => {}
281        serde_json::Value::Array(items) => {
282            for item in items {
283                collect_mock_match_strings(item, out);
284            }
285        }
286        serde_json::Value::Object(map) => {
287            for value in map.values() {
288                collect_mock_match_strings(value, out);
289            }
290        }
291        _ => {}
292    }
293}
294
295fn mock_match_text(messages: &[serde_json::Value]) -> String {
296    let mut parts = Vec::new();
297    for message in messages {
298        collect_mock_match_strings(message, &mut parts);
299    }
300    parts.join("\n")
301}
302
303fn mock_last_prompt_text(messages: &[serde_json::Value]) -> String {
304    for message in messages.iter().rev() {
305        let Some(content) = message.get("content") else {
306            continue;
307        };
308        let mut parts = Vec::new();
309        collect_mock_match_strings(content, &mut parts);
310        let text = parts.join("\n");
311        if !text.trim().is_empty() {
312            return text;
313        }
314    }
315    String::new()
316}
317
318fn mock_prompt_cache_key(
319    model: &str,
320    messages: &[serde_json::Value],
321    system: Option<&str>,
322) -> String {
323    serde_json::to_string(&serde_json::json!({
324        "model": model,
325        "system": system,
326        "messages": messages,
327    }))
328    .unwrap_or_default()
329}
330
331fn apply_mock_prompt_cache(result: &mut LlmResult, cache_key: &str) {
332    if result.cache_read_tokens > 0 || result.cache_write_tokens > 0 {
333        return;
334    }
335    let cache_tokens = result.input_tokens.max(0);
336    if cache_tokens == 0 {
337        return;
338    }
339    let cache_hit = LLM_PROMPT_CACHE.with(|cache| {
340        let mut cache = cache.borrow_mut();
341        if cache.contains(cache_key) {
342            true
343        } else {
344            cache.insert(cache_key.to_string());
345            false
346        }
347    });
348    if cache_hit {
349        result.cache_read_tokens = cache_tokens;
350    } else {
351        result.cache_write_tokens = cache_tokens;
352    }
353}
354
355/// Convert a mock's `error` payload into the `VmError` that the
356/// provider path would have raised, so classification, retry, and
357/// `error_category` all behave identically to a real failure.
358fn mock_error_to_vm_error(err: &MockError) -> VmError {
359    // Embed `retry_after_ms` as a synthetic `retry-after:` header on
360    // the message so `agent_observe::extract_retry_after_ms` — the
361    // same parser that handles real HTTP 429s — surfaces the value
362    // on the caller's thrown dict. Keeps the mock path byte-for-byte
363    // compatible with a real rate-limit response.
364    let message = match err.retry_after_ms {
365        Some(ms) => {
366            let secs = (ms as f64 / 1000.0).max(0.0);
367            let sep = if err.message.is_empty() || err.message.ends_with('\n') {
368                ""
369            } else {
370                "\n"
371            };
372            format!("{}{sep}retry-after: {secs}\n", err.message)
373        }
374        None => err.message.clone(),
375    };
376    VmError::CategorizedError {
377        message,
378        category: err.category.clone(),
379    }
380}
381
382/// Try to find and return a matching mock response. Returns
383/// `Some(Ok(LlmResult))` on a text/tool_call match, `Some(Err(VmError))`
384/// on an error-mock match, and `None` to fall through to default.
385fn try_match_mock_queue(
386    mocks: &mut Vec<LlmMock>,
387    match_text: &str,
388) -> Option<Result<LlmResult, VmError>> {
389    if let Some(idx) = mocks.iter().position(|m| m.match_pattern.is_none()) {
390        let mock = mocks.remove(idx);
391        return Some(match &mock.error {
392            Some(err) => Err(mock_error_to_vm_error(err)),
393            None => Ok(build_mock_result(&mock, match_text.len())),
394        });
395    }
396
397    for idx in 0..mocks.len() {
398        let mock = &mocks[idx];
399        if let Some(ref pattern) = mock.match_pattern {
400            if mock_glob_match(pattern, match_text) {
401                if mock.consume_on_match {
402                    let mock = mocks.remove(idx);
403                    return Some(match &mock.error {
404                        Some(err) => Err(mock_error_to_vm_error(err)),
405                        None => Ok(build_mock_result(&mock, match_text.len())),
406                    });
407                }
408                return Some(match &mock.error {
409                    Some(err) => Err(mock_error_to_vm_error(err)),
410                    None => Ok(build_mock_result(mock, match_text.len())),
411                });
412            }
413        }
414    }
415
416    None
417}
418
419fn try_match_builtin_mock(match_text: &str) -> Option<Result<LlmResult, VmError>> {
420    LLM_MOCKS.with(|mocks| try_match_mock_queue(&mut mocks.borrow_mut(), match_text))
421}
422
423fn try_match_cli_mock(match_text: &str) -> Option<Result<LlmResult, VmError>> {
424    CLI_LLM_MOCKS.with(|mocks| try_match_mock_queue(&mut mocks.borrow_mut(), match_text))
425}
426
427pub(crate) fn record_cli_llm_result(result: &LlmResult) {
428    if !CLI_LLM_MOCK_MODE.with(|mode| *mode.borrow() == CliLlmMockMode::Record) {
429        return;
430    }
431    CLI_LLM_RECORDINGS.with(|recordings| {
432        recordings.borrow_mut().push(LlmMock {
433            text: result.text.clone(),
434            tool_calls: result.tool_calls.clone(),
435            match_pattern: None,
436            consume_on_match: false,
437            input_tokens: Some(result.input_tokens),
438            output_tokens: Some(result.output_tokens),
439            cache_read_tokens: Some(result.cache_read_tokens),
440            cache_write_tokens: Some(result.cache_write_tokens),
441            thinking: result.thinking.clone(),
442            thinking_summary: result.thinking_summary.clone(),
443            stop_reason: result.stop_reason.clone(),
444            model: result.model.clone(),
445            provider: Some(result.provider.clone()),
446            blocks: Some(result.blocks.clone()),
447            logprobs: result.logprobs.clone(),
448            error: None,
449        });
450    });
451}
452
453fn unmatched_cli_prompt_error(match_text: &str) -> VmError {
454    let mut snippet: String = match_text.chars().take(200).collect();
455    if match_text.chars().count() > 200 {
456        snippet.push_str("...");
457    }
458    VmError::Runtime(format!("No --llm-mock fixture matched prompt: {snippet:?}"))
459}
460
461/// Set LLM replay mode (record/replay) and fixture directory.
462pub fn set_replay_mode(mode: LlmReplayMode, fixture_dir: &str) {
463    LLM_REPLAY_MODE.with(|v| *v.borrow_mut() = mode);
464    LLM_FIXTURE_DIR.with(|v| *v.borrow_mut() = fixture_dir.to_string());
465}
466
467pub(crate) fn get_replay_mode() -> LlmReplayMode {
468    LLM_REPLAY_MODE.with(|v| *v.borrow())
469}
470
471pub(crate) fn get_fixture_dir() -> String {
472    LLM_FIXTURE_DIR.with(|v| v.borrow().clone())
473}
474
475/// Hash a request for fixture file naming using canonical JSON serialization.
476pub(crate) fn fixture_hash(
477    model: &str,
478    messages: &[serde_json::Value],
479    system: Option<&str>,
480) -> String {
481    use std::hash::{Hash, Hasher};
482    let mut hasher = std::collections::hash_map::DefaultHasher::new();
483    model.hash(&mut hasher);
484    // Canonical JSON hashing is stable across Debug-format changes.
485    serde_json::to_string(messages)
486        .unwrap_or_default()
487        .hash(&mut hasher);
488    system.hash(&mut hasher);
489    format!("{:016x}", hasher.finish())
490}
491
492pub(crate) fn save_fixture(hash: &str, result: &LlmResult) {
493    let dir = get_fixture_dir();
494    if dir.is_empty() {
495        return;
496    }
497    let _ = std::fs::create_dir_all(&dir);
498    let path = format!("{dir}/{hash}.json");
499    let json = serde_json::json!({
500        "text": result.text,
501        "tool_calls": result.tool_calls,
502        "input_tokens": result.input_tokens,
503        "output_tokens": result.output_tokens,
504        "cache_read_tokens": result.cache_read_tokens,
505        "cache_write_tokens": result.cache_write_tokens,
506        "cache_creation_input_tokens": result.cache_write_tokens,
507        "model": result.model,
508        "provider": result.provider,
509        "thinking": result.thinking,
510        "thinking_summary": result.thinking_summary,
511        "stop_reason": result.stop_reason,
512        "blocks": result.blocks,
513        "logprobs": result.logprobs,
514    });
515    let _ = std::fs::write(
516        &path,
517        serde_json::to_string_pretty(&json).unwrap_or_default(),
518    );
519}
520
521pub(crate) fn load_fixture(hash: &str) -> Option<LlmResult> {
522    let dir = get_fixture_dir();
523    if dir.is_empty() {
524        return None;
525    }
526    let path = format!("{dir}/{hash}.json");
527    let content = std::fs::read_to_string(&path).ok()?;
528    let json: serde_json::Value = serde_json::from_str(&content).ok()?;
529    Some(LlmResult {
530        text: json["text"].as_str().unwrap_or("").to_string(),
531        tool_calls: json["tool_calls"].as_array().cloned().unwrap_or_default(),
532        input_tokens: json["input_tokens"].as_i64().unwrap_or(0),
533        output_tokens: json["output_tokens"].as_i64().unwrap_or(0),
534        cache_read_tokens: json["cache_read_tokens"].as_i64().unwrap_or(0),
535        cache_write_tokens: json["cache_write_tokens"]
536            .as_i64()
537            .or_else(|| json["cache_creation_input_tokens"].as_i64())
538            .unwrap_or(0),
539        model: json["model"].as_str().unwrap_or("").to_string(),
540        provider: json["provider"].as_str().unwrap_or("mock").to_string(),
541        thinking: json["thinking"].as_str().map(|s| s.to_string()),
542        thinking_summary: json["thinking_summary"].as_str().map(|s| s.to_string()),
543        stop_reason: json["stop_reason"].as_str().map(|s| s.to_string()),
544        blocks: json["blocks"].as_array().cloned().unwrap_or_default(),
545        logprobs: json["logprobs"].as_array().cloned().unwrap_or_default(),
546    })
547}
548
549/// Generate stub argument values for required parameters in a tool schema.
550/// This makes mock tool calls realistic — a real model would always fill
551/// required fields, so the mock should too.
552fn mock_required_args(tool_schema: &serde_json::Value) -> serde_json::Value {
553    let mut args = serde_json::Map::new();
554    // Anthropic: {name, input_schema: {properties, required}}
555    // OpenAI:    {function: {name, parameters: {properties, required}}}
556    // Harn VM:   {parameters: {name: {type, required}}}  (from tool_define)
557    let input_schema = tool_schema
558        .get("input_schema")
559        .or_else(|| tool_schema.get("inputSchema"))
560        .or_else(|| {
561            tool_schema
562                .get("function")
563                .and_then(|f| f.get("parameters"))
564        })
565        .or_else(|| tool_schema.get("parameters"));
566    let Some(schema) = input_schema else {
567        return serde_json::Value::Object(args);
568    };
569    let required: std::collections::BTreeSet<String> = schema
570        .get("required")
571        .and_then(|r| r.as_array())
572        .map(|arr| {
573            arr.iter()
574                .filter_map(|v| v.as_str().map(|s| s.to_string()))
575                .collect()
576        })
577        .unwrap_or_default();
578    if let Some(props) = schema.get("properties").and_then(|p| p.as_object()) {
579        for (name, prop) in props {
580            if !required.contains(name) {
581                continue;
582            }
583            let ty = prop
584                .get("type")
585                .and_then(|t| t.as_str())
586                .unwrap_or("string");
587            let placeholder = match ty {
588                "integer" => serde_json::json!(0),
589                "number" => serde_json::json!(0.0),
590                "boolean" => serde_json::json!(false),
591                "array" => serde_json::json!([]),
592                "object" => serde_json::json!({}),
593                _ => serde_json::json!(""),
594            };
595            args.insert(name.clone(), placeholder);
596        }
597    }
598    serde_json::Value::Object(args)
599}
600
601/// Mock LLM provider -- deterministic responses for testing without API keys.
602/// When configurable mocks have been registered via `llm_mock()`, those are
603/// checked first (FIFO queue, then pattern matching). Falls through to the
604/// default deterministic behavior when no mocks match.
605pub(crate) fn mock_llm_response(
606    messages: &[serde_json::Value],
607    system: Option<&str>,
608    native_tools: Option<&[serde_json::Value]>,
609    tool_choice: Option<&serde_json::Value>,
610    thinking: &super::api::ThinkingConfig,
611    model: &str,
612    cache: bool,
613) -> Result<LlmResult, VmError> {
614    record_llm_mock_call(messages, system, native_tools, tool_choice, thinking);
615
616    let match_text = mock_match_text(messages);
617    let prompt_text = mock_last_prompt_text(messages);
618    let cache_key = mock_prompt_cache_key(model, messages, system);
619
620    if let Some(matched) = try_match_cli_mock(&match_text) {
621        return matched.map(|mut result| {
622            if cache {
623                apply_mock_prompt_cache(&mut result, &cache_key);
624            }
625            result
626        });
627    }
628
629    if let Some(matched) = try_match_builtin_mock(&match_text) {
630        return matched.map(|mut result| {
631            if cache {
632                apply_mock_prompt_cache(&mut result, &cache_key);
633            }
634            result
635        });
636    }
637
638    if cli_llm_mock_replay_active() {
639        return Err(unmatched_cli_prompt_error(&match_text));
640    }
641
642    // Generate a mock tool call for the first tool, filling required
643    // params with placeholders so the call passes schema validation.
644    if let Some(tools) = native_tools {
645        if let Some(first_tool) = tools.first() {
646            let tool_name = first_tool
647                .get("name")
648                .or_else(|| first_tool.get("function").and_then(|f| f.get("name")))
649                .and_then(|n| n.as_str())
650                .unwrap_or("unknown");
651            let mock_args = mock_required_args(first_tool);
652            let mut result = LlmResult {
653                text: String::new(),
654                tool_calls: vec![serde_json::json!({
655                        "id": "mock_call_1",
656                        "type": "tool_call",
657                        "name": tool_name,
658                "arguments": mock_args
659                })],
660                input_tokens: prompt_text.len() as i64,
661                output_tokens: 20,
662                cache_read_tokens: 0,
663                cache_write_tokens: 0,
664                model: model.to_string(),
665                provider: "mock".to_string(),
666                thinking: None,
667                thinking_summary: None,
668                stop_reason: None,
669                blocks: vec![serde_json::json!({
670                    "type": "tool_call",
671                    "id": "mock_call_1",
672                    "name": tool_name,
673                    "arguments": mock_args,
674                    "visibility": "internal",
675                })],
676                logprobs: Vec::new(),
677            };
678            if cache {
679                apply_mock_prompt_cache(&mut result, &cache_key);
680            }
681            return Ok(result);
682        }
683    }
684
685    // Preserve the historical auto-complete behavior for tagged text-tool
686    // prompts only. Bare `##DONE##` in no-tool/native prompts changes
687    // loop semantics by completing runs that used to exhaust budget unless
688    // a fixture explicitly returned the sentinel.
689    let tagged_done = system.is_some_and(|s| s.contains("<done>"));
690
691    let prose_body = if prompt_text.is_empty() {
692        "Mock LLM response".to_string()
693    } else {
694        let word_count = prompt_text.split_whitespace().count();
695        format!(
696            "Mock response to {word_count}-word prompt: {}",
697            prompt_text.chars().take(100).collect::<String>()
698        )
699    };
700    let response = if tagged_done {
701        format!("<assistant_prose>{prose_body}</assistant_prose>\n<done>##DONE##</done>")
702    } else {
703        prose_body
704    };
705
706    let mut result = LlmResult {
707        text: response.clone(),
708        tool_calls: vec![],
709        input_tokens: prompt_text.len() as i64,
710        output_tokens: 30,
711        cache_read_tokens: 0,
712        cache_write_tokens: 0,
713        model: model.to_string(),
714        provider: "mock".to_string(),
715        thinking: None,
716        thinking_summary: None,
717        stop_reason: None,
718        blocks: vec![serde_json::json!({
719            "type": "output_text",
720            "text": response,
721            "visibility": "public",
722        })],
723        logprobs: Vec::new(),
724    };
725    if cache {
726        apply_mock_prompt_cache(&mut result, &cache_key);
727    }
728    Ok(result)
729}
730
731/// Take all recorded tool calls, leaving the buffer empty.
732pub fn drain_tool_recordings() -> Vec<ToolCallRecord> {
733    TOOL_RECORDINGS.with(|v| std::mem::take(&mut *v.borrow_mut()))
734}