// shadow_core/replay/backend.rs
//! The [`LlmBackend`] trait — one `complete` call maps a `chat_request`
//! payload to a `chat_response` payload.
//!
//! Keeping the trait payload-in / payload-out (rather than record-in /
//! record-out) lets the engine own the envelope (ts, parent, id) while the
//! backend only implements the LLM call itself. See SPEC.md §10
//! lifecycle.

use async_trait::async_trait;
use serde_json::Value;
use thiserror::Error;

13/// Errors a backend may return.
14#[derive(Debug, Error)]
15pub enum LlmError {
16 /// The backend has no recorded response for this request (MockLlm strict).
17 #[error("no recorded response for request id {0}\nhint: either re-record the baseline or run with --backend live")]
18 MissingResponse(String),
19
20 /// An I/O failure while talking to a live backend.
21 #[error(
22 "io error talking to LLM: {0}\nhint: check network connectivity and provider credentials"
23 )]
24 Io(String),
25
26 /// The request payload failed shape validation.
27 #[error("invalid request payload: {0}\nhint: check that the payload matches SPEC §4.1")]
28 BadRequest(String),
29
30 /// The backend is misconfigured.
31 #[error(
32 "backend misconfigured: {0}\nhint: see the backend's documentation for required fields"
33 )]
34 Config(String),
35}
36
37/// Pluggable LLM backend for replay.
38#[async_trait]
39pub trait LlmBackend: Send + Sync {
40 /// Given a `chat_request` payload, return the corresponding
41 /// `chat_response` payload.
42 async fn complete(&self, request: &Value) -> Result<Value, LlmError>;
43
44 /// Stable identifier for this backend (e.g. `"mock"`, `"anthropic"`).
45 /// Propagated into replay summaries.
46 fn id(&self) -> &str;
47}