// zeph_llm/provider.rs

1// SPDX-FileCopyrightText: 2026 Andrei G <bug-ops>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4use std::future::Future;
5use std::pin::Pin;
6use std::{
7    any::TypeId,
8    collections::HashMap,
9    sync::{LazyLock, Mutex},
10};
11
12use futures_core::Stream;
13use serde::{Deserialize, Serialize};
14
15use zeph_common::ToolName;
16
17pub use zeph_common::ToolDefinition;
18
19use crate::embed::owned_strs;
20use crate::error::LlmError;
21
22static SCHEMA_CACHE: LazyLock<Mutex<HashMap<TypeId, (serde_json::Value, String)>>> =
23    LazyLock::new(|| Mutex::new(HashMap::new()));
24
25/// Return the JSON schema value and pretty-printed string for type `T`, cached by `TypeId`.
26///
27/// # Errors
28///
29/// Returns an error if schema serialization fails.
30pub(crate) fn cached_schema<T: schemars::JsonSchema + 'static>()
31-> Result<(serde_json::Value, String), crate::LlmError> {
32    let type_id = TypeId::of::<T>();
33    if let Ok(cache) = SCHEMA_CACHE.lock()
34        && let Some(entry) = cache.get(&type_id)
35    {
36        return Ok(entry.clone());
37    }
38    let schema = schemars::schema_for!(T);
39    let value = serde_json::to_value(&schema)
40        .map_err(|e| crate::LlmError::StructuredParse(e.to_string()))?;
41    let pretty = serde_json::to_string_pretty(&schema)
42        .map_err(|e| crate::LlmError::StructuredParse(e.to_string()))?;
43    if let Ok(mut cache) = SCHEMA_CACHE.lock() {
44        cache.insert(type_id, (value.clone(), pretty.clone()));
45    }
46    Ok((value, pretty))
47}
48
/// Extract the short (unqualified) type name for schema prompts and tool names.
///
/// Yields the final `::`-separated segment of [`std::any::type_name::<T>()`].
/// `type_name` never returns an empty string and `rsplit` on a non-empty string
/// always produces at least one element, so the `"Output"` fallback is
/// unreachable in practice; it is kept purely for defensive clarity.
///
/// # Examples
///
/// ```
/// struct MyOutput;
/// // short_type_name::<MyOutput>() returns "MyOutput"
/// ```
pub(crate) fn short_type_name<T: ?Sized>() -> &'static str {
    let full = std::any::type_name::<T>();
    match full.rsplit("::").next() {
        Some(segment) => segment,
        None => "Output",
    }
}
68
/// Per-call extras returned alongside the chat response by [`LlmProvider::chat_with_extras`].
///
/// Always paired 1:1 with a single response — no shared state, no races possible.
/// All optional fields default to `None` so providers that do not expose the
/// underlying API (e.g. Claude, Gemini) can simply return the default.
///
/// Marked `#[non_exhaustive]` so future fields (e.g. `cached_tokens`) can be added
/// without breaking match sites.
#[non_exhaustive]
#[derive(Debug, Clone, Default)]
pub struct ChatExtras {
    /// Mean negative log-probability of the generated tokens, when the provider
    /// was configured to request `logprobs` and the API supplied them.
    ///
    /// Lower = more confident. Typical range: `[0.0, ~6.0]` for natural-language tokens.
    pub entropy: Option<f64>,
}

impl ChatExtras {
    /// Build a `ChatExtras` carrying the given entropy value.
    ///
    /// Used by `MockProvider` (test-only, enabled via `testing` feature) and OpenAI/Ollama providers.
    ///
    /// # Examples
    ///
    /// ```
    /// use zeph_llm::provider::ChatExtras;
    ///
    /// let extras = ChatExtras::with_entropy(0.9);
    /// assert_eq!(extras.entropy, Some(0.9));
    /// ```
    #[must_use]
    pub fn with_entropy(entropy: f64) -> Self {
        // Start from the defaults so any future `#[non_exhaustive]` fields stay `None`.
        let mut extras = Self::default();
        extras.entropy = Some(entropy);
        extras
    }
}
107
/// A chunk from an LLM streaming response.
///
/// Consumers should match all variants: future providers may emit non-`Content` chunks
/// that callers must not silently drop (e.g. thinking blocks that must be echoed back).
#[derive(Debug, Clone)]
pub enum StreamChunk {
    /// Regular response text.
    Content(String),
    /// Internal reasoning/thinking token (e.g. Claude extended thinking, `OpenAI` reasoning).
    Thinking(String),
    /// Server-side compaction summary (Claude compact-2026-01-12 beta).
    /// Delivered when the Claude API automatically summarizes conversation history.
    Compaction(String),
    /// One or more tool calls from the model received during streaming.
    ToolUse(Vec<ToolUseRequest>),
}
124
/// Boxed stream of typed chunks from an LLM provider.
///
/// Obtain via [`LlmProvider::chat_stream`]. Drive the stream with
/// `futures::StreamExt::next` or `tokio_stream::StreamExt::next`.
/// Each item is a `Result` so mid-stream transport errors surface to the consumer.
pub type ChatStream = Pin<Box<dyn Stream<Item = Result<StreamChunk, LlmError>> + Send>>;
130
/// Structured tool invocation request from the model.
///
/// Returned by [`LlmProvider::chat_with_tools`] when the model decides to call one or
/// more tools. The caller is responsible for executing the tool and returning results
/// via a [`MessagePart::ToolResult`] in the next turn.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolUseRequest {
    /// Opaque call identifier assigned by the model; must be echoed in `ToolResult.tool_use_id`.
    pub id: String,
    /// Name of the tool to invoke, matching a [`ToolDefinition::name`].
    pub name: ToolName,
    /// JSON arguments the model wants to pass to the tool.
    pub input: serde_json::Value,
}
145
/// Thinking block returned by Claude when extended or adaptive thinking is enabled.
///
/// Both variants must be echoed verbatim in the next turn's `assistant` message so
/// the API can correctly attribute reasoning across turns. Never modify or discard
/// these blocks between turns.
#[derive(Debug, Clone)]
pub enum ThinkingBlock {
    /// Visible reasoning token with its cryptographic signature.
    Thinking {
        /// The reasoning text produced by the model.
        thinking: String,
        /// Cryptographic signature the API uses to validate the block on echo-back.
        signature: String,
    },
    /// Redacted reasoning block (API-side privacy redaction). Preserved as opaque data.
    Redacted {
        /// Opaque redacted payload; never inspect or modify.
        data: String,
    },
}
158
/// Marker injected into `ChatResponse::Text` when the LLM response was cut off by the
/// token limit. Consumers can detect this substring to signal `MaxTokens` stop reason.
pub const MAX_TOKENS_TRUNCATION_MARKER: &str = "max_tokens limit reached";
162
/// Response from [`LlmProvider::chat_with_tools`].
///
/// When the model returns `ToolUse`, the caller must:
/// 1. Execute each tool in `tool_calls`.
/// 2. Append an `assistant` message with the original `tool_calls` and any `thinking_blocks`.
/// 3. Append a `user` message containing [`MessagePart::ToolResult`] entries.
/// 4. Call `chat_with_tools` again to continue the conversation.
#[derive(Debug, Clone)]
pub enum ChatResponse {
    /// Model produced text output only.
    Text(String),
    /// Model requests one or more tool invocations.
    ToolUse {
        /// Any text the model emitted before/alongside tool calls.
        text: Option<String>,
        /// The tool invocations requested by the model, in emission order.
        tool_calls: Vec<ToolUseRequest>,
        /// Thinking blocks from the model (empty when thinking is disabled).
        /// Must be preserved verbatim in multi-turn requests.
        thinking_blocks: Vec<ThinkingBlock>,
    },
}
184
/// Boxed future returning an embedding vector, returned by [`EmbedFn`].
pub type EmbedFuture = Pin<Box<dyn Future<Output = Result<Vec<f32>, LlmError>> + Send>>;

/// A Send + Sync closure that embeds a text slice into a vector.
///
/// Obtain a provider-backed `EmbedFn` via [`crate::any::AnyProvider::embed_fn`].
/// The closure captures an `Arc`-wrapped provider clone, so it is cheap to clone.
pub type EmbedFn = Box<dyn Fn(&str) -> EmbedFuture + Send + Sync>;

/// Sender for emitting human-readable status events (retries, fallbacks) to the UI layer.
///
/// When set on a provider, the provider sends short strings such as
/// `"Retrying after rate limit…"` or `"Falling back to secondary provider"`.
/// The TUI consumes these to show real-time activity spinners.
pub type StatusTx = tokio::sync::mpsc::UnboundedSender<String>;
200
201/// Best-effort fallback for debug dump request payloads when a provider does not expose
202/// its concrete API request body.
203#[must_use]
204pub fn default_debug_request_json(
205    messages: &[Message],
206    tools: &[ToolDefinition],
207) -> serde_json::Value {
208    serde_json::json!({
209        "model": serde_json::Value::Null,
210        "max_tokens": serde_json::Value::Null,
211        "messages": serde_json::to_value(messages).unwrap_or(serde_json::Value::Array(vec![])),
212        "tools": serde_json::to_value(tools).unwrap_or(serde_json::Value::Array(vec![])),
213        "temperature": serde_json::Value::Null,
214        "cache_control": serde_json::Value::Null,
215    })
216}
217
/// Partial LLM generation parameter overrides for experiment variation injection.
///
/// Applied by the experiment engine to clone-and-patch a provider before evaluation,
/// so each variation is scored with its specific generation parameters.
///
/// Only `Some` fields are applied; `None` fields leave the provider's configured
/// defaults unchanged. Not all providers support all fields — unsupported fields
/// are silently ignored by each backend.
#[derive(Debug, Clone, Default)]
pub struct GenerationOverrides {
    /// Sampling temperature in `[0.0, 2.0]`. Lower = more deterministic.
    pub temperature: Option<f64>,
    /// Nucleus sampling probability in `[0.0, 1.0]`.
    pub top_p: Option<f64>,
    /// Top-K sampling cutoff (number of top tokens to consider).
    pub top_k: Option<usize>,
    /// Penalty for tokens that have already appeared (OpenAI-compatible providers).
    pub frequency_penalty: Option<f64>,
    /// Penalty for topics the model has already covered (OpenAI-compatible providers).
    pub presence_penalty: Option<f64>,
}
239
/// Message role in a conversation.
///
/// Determines how each message is presented to the model:
/// - `System` — global instructions prepended before the conversation
/// - `User` — human turn input
/// - `Assistant` — previous model output (used for multi-turn context)
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// Global instructions prepended before the conversation.
    System,
    /// Human turn input.
    User,
    /// Previous model output, replayed for multi-turn context.
    Assistant,
}
253
/// A typed content part within a [`Message`].
///
/// Messages may contain zero or more parts that represent heterogeneous content:
/// plain text, tool invocations, memory recall fragments, images, and internal
/// protocol blocks (thinking, compaction). Most providers flatten these into a single
/// string before sending; Claude encodes them as structured content blocks.
///
/// # Ordering invariants
///
/// - `ToolUse` parts must precede their corresponding `ToolResult` parts.
/// - `ThinkingBlock` / `RedactedThinkingBlock` parts must be preserved verbatim in
///   multi-turn requests so the API can correctly attribute reasoning.
/// - `Compaction` parts must be preserved verbatim; the API uses them to prune
///   prior history on subsequent turns (Claude compact-2026-01-12 beta).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
pub enum MessagePart {
    /// Plain assistant or user text.
    Text { text: String },
    /// Output from a tool execution, optionally compacted.
    ToolOutput {
        /// Name of the tool that produced this output.
        tool_name: zeph_common::ToolName,
        /// The tool's output body (may be empty after pruning).
        body: String,
        /// Unix timestamp (seconds) when this output was compacted, if it was.
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compacted_at: Option<i64>,
    },
    /// Memory recall fragment injected by the agent's semantic memory layer.
    Recall { text: String },
    /// Repository or file code context injected by the code indexing layer.
    CodeContext { text: String },
    /// Compaction summary replacing pruned conversation history.
    Summary { text: String },
    /// Cross-session memory fragment carried over from a previous conversation.
    CrossSession { text: String },
    /// Model-initiated tool invocation. Pairs with a subsequent [`MessagePart::ToolResult`].
    ToolUse {
        /// Opaque call id assigned by the model; echoed back in `ToolResult.tool_use_id`.
        id: String,
        /// Name of the tool being invoked.
        name: String,
        /// JSON arguments for the tool.
        input: serde_json::Value,
    },
    /// Tool execution result returned to the model after a [`MessagePart::ToolUse`].
    ToolResult {
        /// The `id` of the `ToolUse` part this result answers.
        tool_use_id: String,
        /// Tool output (or error text when `is_error` is set).
        content: String,
        /// Whether the tool execution failed.
        #[serde(default)]
        is_error: bool,
    },
    /// Inline image payload (vision input). Boxed to keep the enum small.
    Image(Box<ImageData>),
    /// Claude thinking block — must be preserved verbatim in multi-turn requests.
    ThinkingBlock { thinking: String, signature: String },
    /// Claude redacted thinking block — preserved as-is in multi-turn requests.
    RedactedThinkingBlock { data: String },
    /// Claude server-side compaction block — must be preserved verbatim in multi-turn requests
    /// so the API can correctly prune prior history on the next turn.
    Compaction { summary: String },
}
311
312impl MessagePart {
313    /// Return the plain text content if this part is a text-like variant (`Text`, `Recall`,
314    /// `CodeContext`, `Summary`, `CrossSession`), `None` otherwise.
315    #[must_use]
316    pub fn as_plain_text(&self) -> Option<&str> {
317        match self {
318            Self::Text { text }
319            | Self::Recall { text }
320            | Self::CodeContext { text }
321            | Self::Summary { text }
322            | Self::CrossSession { text } => Some(text.as_str()),
323            _ => None,
324        }
325    }
326
327    /// Return the image data if this part is an `Image` variant, `None` otherwise.
328    #[must_use]
329    pub fn as_image(&self) -> Option<&ImageData> {
330        if let Self::Image(img) = self {
331            Some(img)
332        } else {
333            None
334        }
335    }
336}
337
#[derive(Clone, Debug, Serialize, Deserialize)]
/// Raw image payload for vision-capable providers.
///
/// The `data` field is serialized as a Base64 string. `mime_type` must be a valid
/// image MIME type supported by the target provider (e.g. `"image/png"`, `"image/jpeg"`).
pub struct ImageData {
    /// Raw image bytes; serialized/deserialized as Base64 via `serde_bytes_base64`.
    #[serde(with = "serde_bytes_base64")]
    pub data: Vec<u8>,
    /// MIME type describing `data` (e.g. `"image/png"`).
    pub mime_type: String,
}
348
349mod serde_bytes_base64 {
350    use base64::{Engine, engine::general_purpose::STANDARD};
351    use serde::{Deserialize, Deserializer, Serializer};
352
353    pub fn serialize<S>(bytes: &[u8], s: S) -> Result<S::Ok, S::Error>
354    where
355        S: Serializer,
356    {
357        s.serialize_str(&STANDARD.encode(bytes))
358    }
359
360    pub fn deserialize<'de, D>(d: D) -> Result<Vec<u8>, D::Error>
361    where
362        D: Deserializer<'de>,
363    {
364        let s = String::deserialize(d)?;
365        STANDARD.decode(&s).map_err(serde::de::Error::custom)
366    }
367}
368
/// Visibility of a message to agent and user.
///
/// Replaces the former `(agent_visible: bool, user_visible: bool)` pair, which
/// allowed the semantically invalid `(false, false)` combination. Every variant
/// guarantees at least one consumer can see the message.
///
/// # Examples
///
/// ```
/// use zeph_llm::provider::MessageVisibility;
///
/// let v = MessageVisibility::AgentOnly;
/// assert!(v.is_agent_visible());
/// assert!(!v.is_user_visible());
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum MessageVisibility {
    /// Visible to both the agent (LLM context) and the user (conversation log).
    Both,
    /// Visible to the agent only (e.g. compaction summaries, internal context).
    AgentOnly,
    /// Visible to the user only (e.g. compacted originals shown in history).
    UserOnly,
}
394
395impl MessageVisibility {
396    /// Returns `true` if this message should be included in the LLM request context.
397    #[must_use]
398    pub fn is_agent_visible(self) -> bool {
399        matches!(self, MessageVisibility::Both | MessageVisibility::AgentOnly)
400    }
401
402    /// Returns `true` if this message should appear in the user-facing conversation log.
403    #[must_use]
404    pub fn is_user_visible(self) -> bool {
405        matches!(self, MessageVisibility::Both | MessageVisibility::UserOnly)
406    }
407}
408
409impl Default for MessageVisibility {
410    /// Defaults to [`Both`](MessageVisibility::Both) — visible to agent and user.
411    fn default() -> Self {
412        MessageVisibility::Both
413    }
414}
415
416impl MessageVisibility {
417    /// Serialize to the SQLite/PostgreSQL text value stored in the `visibility` column.
418    #[must_use]
419    pub fn as_db_str(self) -> &'static str {
420        match self {
421            MessageVisibility::Both => "both",
422            MessageVisibility::AgentOnly => "agent_only",
423            MessageVisibility::UserOnly => "user_only",
424        }
425    }
426
427    /// Deserialize from the SQLite/PostgreSQL text value stored in the `visibility` column.
428    ///
429    /// Unknown values (e.g. from a future migration) default to `Both` for safety.
430    #[must_use]
431    pub fn from_db_str(s: &str) -> Self {
432        match s {
433            "agent_only" => MessageVisibility::AgentOnly,
434            "user_only" => MessageVisibility::UserOnly,
435            _ => MessageVisibility::Both,
436        }
437    }
438}
439
/// Per-message visibility and metadata controlling agent context and user display.
///
/// Constructors [`agent_only`](Self::agent_only), [`user_only`](Self::user_only),
/// and [`focus_pinned`](Self::focus_pinned) cover the most common combinations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MessageMetadata {
    /// Who can see this message.
    pub visibility: MessageVisibility,
    /// Unix timestamp (seconds) when this message was compacted, if applicable.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compacted_at: Option<i64>,
    /// Pre-computed tool pair summary, applied lazily when context pressure rises.
    /// Stored on the tool response message; cleared after application.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deferred_summary: Option<String>,
    /// When true, this message is excluded from all compaction passes (soft pruning,
    /// hard summarization, sidequest eviction). Used for the Focus Knowledge block (#1850).
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub focus_pinned: bool,
    /// Unique marker UUID set when `start_focus` begins a session. Used by `complete_focus`
    /// to locate the checkpoint without relying on a fragile raw index.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub focus_marker_id: Option<uuid::Uuid>,
    /// `SQLite` row ID for this message. Populated when loading from DB or after persisting.
    /// Never serialized — always re-populated from the database on load.
    #[serde(skip)]
    pub db_id: Option<i64>,
}
468
469impl Default for MessageMetadata {
470    fn default() -> Self {
471        Self {
472            visibility: MessageVisibility::Both,
473            compacted_at: None,
474            deferred_summary: None,
475            focus_pinned: false,
476            focus_marker_id: None,
477            db_id: None,
478        }
479    }
480}
481
482impl MessageMetadata {
483    /// Message visible only to the agent (e.g. compaction summary).
484    #[must_use]
485    pub fn agent_only() -> Self {
486        Self {
487            visibility: MessageVisibility::AgentOnly,
488            compacted_at: None,
489            deferred_summary: None,
490            focus_pinned: false,
491            focus_marker_id: None,
492            db_id: None,
493        }
494    }
495
496    /// Message visible only to the user (e.g. compacted original).
497    #[must_use]
498    pub fn user_only() -> Self {
499        Self {
500            visibility: MessageVisibility::UserOnly,
501            compacted_at: None,
502            deferred_summary: None,
503            focus_pinned: false,
504            focus_marker_id: None,
505            db_id: None,
506        }
507    }
508
509    /// Pinned Knowledge block — excluded from all compaction passes.
510    #[must_use]
511    pub fn focus_pinned() -> Self {
512        Self {
513            visibility: MessageVisibility::AgentOnly,
514            compacted_at: None,
515            deferred_summary: None,
516            focus_pinned: true,
517            focus_marker_id: None,
518            db_id: None,
519        }
520    }
521}
522
/// A single message in a conversation.
///
/// Each message has a [`Role`], a flat `content` string (used when sending to providers
/// that do not support structured parts), and an optional list of [`MessagePart`]s for
/// providers that accept heterogeneous content blocks (e.g. Claude).
///
/// The `content` field is kept in sync with `parts` via [`Message::rebuild_content`].
/// When building messages from structured parts, always use [`Message::from_parts`] —
/// it populates both `parts` and `content`.
///
/// # Examples
///
/// ```
/// use zeph_llm::provider::{Message, MessagePart, Role};
///
/// // Simple text-only message
/// let msg = Message::from_legacy(Role::User, "What is Rust?");
/// assert_eq!(msg.to_llm_content(), "What is Rust?");
///
/// // Structured message with parts
/// let parts = vec![
///     MessagePart::Text { text: "Explain this code.".into() },
/// ];
/// let msg = Message::from_parts(Role::User, parts);
/// assert!(!msg.parts.is_empty());
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Message {
    /// Who authored this message (system / user / assistant).
    pub role: Role,
    /// Flat text representation of this message, derived from `parts` when structured.
    pub content: String,
    /// Structured content blocks; empty for legacy flat messages.
    #[serde(default)]
    pub parts: Vec<MessagePart>,
    /// Visibility and persistence metadata; defaults to visible-to-both.
    #[serde(default)]
    pub metadata: MessageMetadata,
}
559
560impl Default for Message {
561    fn default() -> Self {
562        Self {
563            role: Role::User,
564            content: String::new(),
565            parts: vec![],
566            metadata: MessageMetadata::default(),
567        }
568    }
569}
570
571impl Message {
572    /// Create a simple text-only message without structured parts.
573    ///
574    /// Use this constructor for system prompts, plain user turns, and assistant
575    /// messages produced by providers that return a raw string.
576    #[must_use]
577    pub fn from_legacy(role: Role, content: impl Into<String>) -> Self {
578        Self {
579            role,
580            content: content.into(),
581            parts: vec![],
582            metadata: MessageMetadata::default(),
583        }
584    }
585
586    /// Create a message from structured parts, deriving the flat `content` automatically.
587    ///
588    /// Prefer this constructor when the message contains tool invocations, images,
589    /// or other non-text content that providers need to render as separate content blocks.
590    #[must_use]
591    pub fn from_parts(role: Role, parts: Vec<MessagePart>) -> Self {
592        let content = Self::flatten_parts(&parts);
593        Self {
594            role,
595            content,
596            parts,
597            metadata: MessageMetadata::default(),
598        }
599    }
600
601    /// Return the flat text content of this message, suitable for providers that do
602    /// not support structured content blocks.
603    #[must_use]
604    pub fn to_llm_content(&self) -> &str {
605        &self.content
606    }
607
608    /// Re-synchronize `content` from `parts` after in-place mutation.
609    pub fn rebuild_content(&mut self) {
610        if !self.parts.is_empty() {
611            self.content = Self::flatten_parts(&self.parts);
612        }
613    }
614
615    fn flatten_parts(parts: &[MessagePart]) -> String {
616        use std::fmt::Write;
617        let mut out = String::new();
618        for part in parts {
619            match part {
620                MessagePart::Text { text }
621                | MessagePart::Recall { text }
622                | MessagePart::CodeContext { text }
623                | MessagePart::Summary { text }
624                | MessagePart::CrossSession { text } => out.push_str(text),
625                MessagePart::ToolOutput {
626                    tool_name,
627                    body,
628                    compacted_at,
629                } => {
630                    if compacted_at.is_some() {
631                        if body.is_empty() {
632                            let _ = write!(out, "[tool output: {tool_name}] (pruned)");
633                        } else {
634                            let _ = write!(out, "[tool output: {tool_name}] {body}");
635                        }
636                    } else {
637                        let _ = write!(out, "[tool output: {tool_name}]\n```\n{body}\n```");
638                    }
639                }
640                MessagePart::ToolUse { id, name, .. } => {
641                    let _ = write!(out, "[tool_use: {name}({id})]");
642                }
643                MessagePart::ToolResult {
644                    tool_use_id,
645                    content,
646                    ..
647                } => {
648                    let _ = write!(out, "[tool_result: {tool_use_id}]\n{content}");
649                }
650                MessagePart::Image(img) => {
651                    let _ = write!(out, "[image: {}, {} bytes]", img.mime_type, img.data.len());
652                }
653                // Thinking and compaction blocks are internal API metadata — not rendered in text.
654                MessagePart::ThinkingBlock { .. }
655                | MessagePart::RedactedThinkingBlock { .. }
656                | MessagePart::Compaction { .. } => {}
657            }
658        }
659        out
660    }
661}
662
663/// Core abstraction for all LLM inference backends.
664///
665/// Every backend — `Ollama`, `Claude`, `OpenAI`, `Gemini`, `Candle` — implements this trait.
666/// The [`crate::any::AnyProvider`] enum erases the concrete type so callers can
667/// hold any backend behind a single type, and [`crate::router::RouterProvider`]
668/// implements this trait to multiplex across multiple backends.
669///
670/// # Required methods
671///
672/// Implementors must provide: [`chat`](Self::chat), [`chat_stream`](Self::chat_stream),
673/// [`supports_streaming`](Self::supports_streaming), [`embed`](Self::embed),
674/// [`supports_embeddings`](Self::supports_embeddings), and [`name`](Self::name).
675///
676/// # Optional methods
677///
678/// All other methods have default implementations that are safe to accept:
679/// - [`context_window`](Self::context_window) — returns `None`
680/// - [`embed_batch`](Self::embed_batch) — sequential fallback via [`embed`](Self::embed)
681/// - [`chat_with_tools`](Self::chat_with_tools) — falls back to [`chat`](Self::chat)
682/// - [`chat_typed`](Self::chat_typed) — schema-prompt injection + retry
683/// - [`supports_vision`](Self::supports_vision) — returns `false`
684/// - [`supports_tool_use`](Self::supports_tool_use) — returns `true`
685///
686/// # Examples
687///
688/// ```rust,no_run
689/// use zeph_llm::provider::{LlmProvider, Message, Role, ChatStream};
690/// use zeph_llm::LlmError;
691///
692/// struct EchoProvider;
693///
694/// impl LlmProvider for EchoProvider {
695///     async fn chat(&self, messages: &[Message]) -> Result<String, LlmError> {
696///         Ok(messages.last().map(|m| m.content.clone()).unwrap_or_default())
697///     }
698///
699///     async fn chat_stream(&self, messages: &[Message]) -> Result<ChatStream, LlmError> {
700///         use zeph_llm::provider::StreamChunk;
701///         let text = self.chat(messages).await?;
702///         Ok(Box::pin(tokio_stream::once(Ok(StreamChunk::Content(text)))))
703///     }
704///
705///     fn supports_streaming(&self) -> bool { true }
706///
707///     async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
708///         Err(LlmError::EmbedUnsupported { provider: "echo".into() })
709///     }
710///
711///     fn supports_embeddings(&self) -> bool { false }
712///
713///     fn name(&self) -> &str { "echo" }
714/// }
715/// ```
716pub trait LlmProvider: Send + Sync {
717    /// Report the model's context window size in tokens.
718    ///
719    /// Returns `None` if unknown. Used for auto-budget calculation.
720    fn context_window(&self) -> Option<usize> {
721        None
722    }
723
724    /// Send messages to the LLM and return the assistant response.
725    ///
726    /// # Errors
727    ///
728    /// Returns an error if the provider fails to communicate or the response is invalid.
729    fn chat(&self, messages: &[Message]) -> impl Future<Output = Result<String, LlmError>> + Send;
730
731    /// Send messages and return a stream of response chunks.
732    ///
733    /// # Errors
734    ///
735    /// Returns an error if the provider fails to communicate or the response is invalid.
736    fn chat_stream(
737        &self,
738        messages: &[Message],
739    ) -> impl Future<Output = Result<ChatStream, LlmError>> + Send;
740
741    /// Whether this provider supports native streaming.
742    fn supports_streaming(&self) -> bool;
743
744    /// Generate an embedding vector from text.
745    ///
746    /// # Errors
747    ///
748    /// Returns an error if the provider does not support embeddings or the request fails.
749    fn embed(&self, text: &str) -> impl Future<Output = Result<Vec<f32>, LlmError>> + Send;
750
751    /// Embed multiple texts in a single API call.
752    ///
753    /// Default implementation calls [`embed`][Self::embed] sequentially for each input.
754    /// Providers with native batch APIs should override this.
755    ///
756    /// # Errors
757    ///
758    /// Returns an error if any embedding fails. On native batch backends the entire batch
759    /// fails atomically; on the sequential fallback the first error aborts.
760    fn embed_batch(
761        &self,
762        texts: &[&str],
763    ) -> impl Future<Output = Result<Vec<Vec<f32>>, LlmError>> + Send {
764        let owned = owned_strs(texts);
765        async move {
766            let mut results = Vec::with_capacity(owned.len());
767            for text in &owned {
768                results.push(self.embed(text).await?);
769            }
770            Ok(results)
771        }
772    }
773
774    /// Whether this provider supports embedding generation.
775    fn supports_embeddings(&self) -> bool;
776
777    /// Provider name for logging and identification.
778    fn name(&self) -> &str;
779
780    /// Model identifier string (e.g. `gpt-4o-mini`, `claude-sonnet-4-6`).
781    /// Used by cost-estimation heuristics. Returns `""` when not applicable.
782    #[allow(clippy::unnecessary_literal_bound)]
783    fn model_identifier(&self) -> &str {
784        ""
785    }
786
787    /// Whether this provider supports image input (vision).
788    fn supports_vision(&self) -> bool {
789        false
790    }
791
792    /// Whether this provider supports native `tool_use` / function calling.
793    fn supports_tool_use(&self) -> bool {
794        true
795    }
796
797    /// Send messages with tool definitions, returning a structured response.
798    ///
799    /// Default: falls back to `chat()` and wraps the result in `ChatResponse::Text`.
800    ///
801    /// # Errors
802    ///
803    /// Returns an error if the provider fails to communicate or the response is invalid.
804    fn chat_with_tools(
805        &self,
806        messages: &[Message],
807        _tools: &[ToolDefinition],
808    ) -> impl std::future::Future<Output = Result<ChatResponse, LlmError>> + Send {
809        let msgs = messages.to_vec();
810        async move { Ok(ChatResponse::Text(self.chat(&msgs).await?)) }
811    }
812
    /// Return the cache usage from the last API call, if available.
    /// Returns `(cache_creation_tokens, cache_read_tokens)`.
    ///
    /// Defaults to `None` for providers without prompt-cache accounting.
    fn last_cache_usage(&self) -> Option<(u64, u64)> {
        None
    }

    /// Return token counts from the last API call, if available.
    /// Returns `(input_tokens, output_tokens)`.
    ///
    /// Defaults to `None` for providers that do not report usage.
    fn last_usage(&self) -> Option<(u64, u64)> {
        None
    }

    /// Return the compaction summary from the most recent API call, if a server-side
    /// compaction occurred (Claude compact-2026-01-12 beta). Clears the stored value.
    ///
    /// Defaults to `None`; only providers implementing that beta override this.
    fn take_compaction_summary(&self) -> Option<String> {
        None
    }
830
831    /// Send messages and return the assistant response together with per-call extras.
832    ///
833    /// Default implementation calls [`chat`][Self::chat] and returns [`ChatExtras::default()`],
834    /// keeping every existing implementor source-compatible at zero cost.
835    ///
836    /// Providers that support logprobs (`OpenAI`, `Compatible`, `Ollama`) override this to
837    /// populate [`ChatExtras::entropy`] with the mean negative log-probability.
838    ///
839    /// `CoE` is the only caller of this method; the canonical entry point for the agent
840    /// loop remains [`chat`][Self::chat].
841    ///
842    /// # Errors
843    ///
844    /// Same as [`chat`][Self::chat].
845    fn chat_with_extras(
846        &self,
847        messages: &[Message],
848    ) -> impl Future<Output = Result<(String, ChatExtras), LlmError>> + Send {
849        let msgs = messages.to_vec();
850        async move { Ok((self.chat(&msgs).await?, ChatExtras::default())) }
851    }
852
    /// Return the request payload that will be sent to the provider, for debug dumps.
    ///
    /// Implementations should mirror the provider's request body as closely as practical.
    /// The default delegates to the shared `default_debug_request_json` helper and
    /// ignores the `_stream` flag.
    #[must_use]
    fn debug_request_json(
        &self,
        messages: &[Message],
        tools: &[ToolDefinition],
        _stream: bool,
    ) -> serde_json::Value {
        default_debug_request_json(messages, tools)
    }
865
866    /// Return the list of model identifiers this provider can serve.
867    /// Default: empty (provider does not advertise models).
868    fn list_models(&self) -> Vec<String> {
869        vec![]
870    }
871
    /// Whether this provider supports native structured output.
    ///
    /// Defaults to `false`; providers with a native JSON-schema response mode override.
    fn supports_structured_output(&self) -> bool {
        false
    }
876
877    /// Send messages and parse the response into a typed value `T`.
878    ///
879    /// Default implementation injects JSON schema into the system prompt and retries once
880    /// on parse failure. Providers with native structured output should override this.
881    #[allow(async_fn_in_trait)]
882    async fn chat_typed<T>(&self, messages: &[Message]) -> Result<T, LlmError>
883    where
884        T: serde::de::DeserializeOwned + schemars::JsonSchema + 'static,
885        Self: Sized,
886    {
887        let (_, schema_json) = cached_schema::<T>()?;
888        let type_name = short_type_name::<T>();
889
890        let mut augmented = messages.to_vec();
891        let instruction = format!(
892            "Respond with a valid JSON object matching this schema. \
893             Output ONLY the JSON, no markdown fences or extra text.\n\n\
894             Type: {type_name}\nSchema:\n```json\n{schema_json}\n```"
895        );
896        augmented.insert(0, Message::from_legacy(Role::System, instruction));
897
898        let raw = self.chat(&augmented).await?;
899        let cleaned = strip_json_fences(&raw);
900        match serde_json::from_str::<T>(cleaned) {
901            Ok(val) => Ok(val),
902            Err(first_err) => {
903                augmented.push(Message::from_legacy(Role::Assistant, &raw));
904                augmented.push(Message::from_legacy(
905                    Role::User,
906                    format!(
907                        "Your response was not valid JSON. Error: {first_err}. \
908                         Please output ONLY valid JSON matching the schema."
909                    ),
910                ));
911                let retry_raw = self.chat(&augmented).await?;
912                let retry_cleaned = strip_json_fences(&retry_raw);
913                serde_json::from_str::<T>(retry_cleaned).map_err(|e| {
914                    LlmError::StructuredParse(format!("parse failed after retry: {e}"))
915                })
916            }
917        }
918    }
919}
920
/// Strip one outer markdown code fence (```json or ```) from LLM output.
///
/// Each fence is removed at most once, and a closing fence is only trimmed
/// when an opening fence was present — so plain JSON whose string values
/// happen to end with triple backticks is returned untouched (fixes the
/// repeated-`trim_end_matches` over-trimming flagged in review R2).
fn strip_json_fences(s: &str) -> &str {
    let trimmed = s.trim();
    // Try the more specific "```json" opener first, then a bare "```".
    let opened = trimmed
        .strip_prefix("```json")
        .or_else(|| trimmed.strip_prefix("```"));
    match opened {
        // Opening fence found: drop one matching closing fence, if any.
        Some(body) => body.strip_suffix("```").unwrap_or(body).trim(),
        // No fence: return the trimmed input as-is.
        None => trimmed,
    }
}
931
932#[cfg(test)]
933mod tests {
934    use tokio_stream::StreamExt;
935
936    use super::*;
937
    // Minimal provider stub: echoes a canned response; streaming and
    // embeddings are reported unsupported but still served.
    struct StubProvider {
        response: String,
    }

    impl LlmProvider for StubProvider {
        async fn chat(&self, _messages: &[Message]) -> Result<String, LlmError> {
            Ok(self.response.clone())
        }

        async fn chat_stream(&self, messages: &[Message]) -> Result<ChatStream, LlmError> {
            // Wrap the full chat() reply in a single-chunk stream.
            let response = self.chat(messages).await?;
            Ok(Box::pin(tokio_stream::once(Ok(StreamChunk::Content(
                response,
            )))))
        }

        fn supports_streaming(&self) -> bool {
            false
        }

        async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
            Ok(vec![0.1, 0.2, 0.3])
        }

        fn supports_embeddings(&self) -> bool {
            false
        }

        fn name(&self) -> &'static str {
            "stub"
        }
    }
970
    // --- Default trait-method behaviour exercised through StubProvider ---

    #[test]
    fn context_window_default_returns_none() {
        let provider = StubProvider {
            response: String::new(),
        };
        assert!(provider.context_window().is_none());
    }

    #[test]
    fn supports_streaming_default_returns_false() {
        let provider = StubProvider {
            response: String::new(),
        };
        assert!(!provider.supports_streaming());
    }

    #[tokio::test]
    async fn chat_stream_default_yields_single_chunk() {
        let provider = StubProvider {
            response: "hello world".into(),
        };
        let messages = vec![Message {
            role: Role::User,
            content: "test".into(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        }];

        let mut stream = provider.chat_stream(&messages).await.unwrap();
        // Exactly one Content chunk carrying the full reply, then end of stream.
        let chunk = stream.next().await.unwrap().unwrap();
        assert!(matches!(chunk, StreamChunk::Content(s) if s == "hello world"));
        assert!(stream.next().await.is_none());
    }
1004
    #[tokio::test]
    async fn chat_stream_default_propagates_chat_error() {
        // Provider whose chat() always fails; chat_stream delegates to chat(),
        // so the error must surface from chat_stream as well.
        struct FailProvider;

        impl LlmProvider for FailProvider {
            async fn chat(&self, _messages: &[Message]) -> Result<String, LlmError> {
                Err(LlmError::Unavailable)
            }

            async fn chat_stream(&self, messages: &[Message]) -> Result<ChatStream, LlmError> {
                let response = self.chat(messages).await?;
                Ok(Box::pin(tokio_stream::once(Ok(StreamChunk::Content(
                    response,
                )))))
            }

            fn supports_streaming(&self) -> bool {
                false
            }

            async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
                Err(LlmError::Unavailable)
            }

            fn supports_embeddings(&self) -> bool {
                false
            }

            fn name(&self) -> &'static str {
                "fail"
            }
        }

        let provider = FailProvider;
        let messages = vec![Message {
            role: Role::User,
            content: "test".into(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        }];

        let result = provider.chat_stream(&messages).await;
        assert!(result.is_err());
        if let Err(e) = result {
            assert!(e.to_string().contains("provider unavailable"));
        }
    }
1052
    #[tokio::test]
    async fn stub_provider_embed_returns_vector() {
        let provider = StubProvider {
            response: String::new(),
        };
        let embedding = provider.embed("test").await.unwrap();
        assert_eq!(embedding, vec![0.1, 0.2, 0.3]);
    }

    #[tokio::test]
    async fn fail_provider_embed_propagates_error() {
        // Provider whose embed() reports EmbedUnsupported; checks the error text.
        struct FailProvider;

        impl LlmProvider for FailProvider {
            async fn chat(&self, _messages: &[Message]) -> Result<String, LlmError> {
                Err(LlmError::Unavailable)
            }

            async fn chat_stream(&self, messages: &[Message]) -> Result<ChatStream, LlmError> {
                let response = self.chat(messages).await?;
                Ok(Box::pin(tokio_stream::once(Ok(StreamChunk::Content(
                    response,
                )))))
            }

            fn supports_streaming(&self) -> bool {
                false
            }

            async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
                Err(LlmError::EmbedUnsupported {
                    provider: "fail".into(),
                })
            }

            fn supports_embeddings(&self) -> bool {
                false
            }

            fn name(&self) -> &'static str {
                "fail"
            }
        }

        let provider = FailProvider;
        let result = provider.embed("test").await;
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("embedding not supported")
        );
    }
1107
    // --- Role / Message serde and std-trait coverage ---

    #[test]
    fn role_serialization() {
        let system = Role::System;
        let user = Role::User;
        let assistant = Role::Assistant;

        // Roles serialize as lowercase JSON strings.
        assert_eq!(serde_json::to_string(&system).unwrap(), "\"system\"");
        assert_eq!(serde_json::to_string(&user).unwrap(), "\"user\"");
        assert_eq!(serde_json::to_string(&assistant).unwrap(), "\"assistant\"");
    }

    #[test]
    fn role_deserialization() {
        let system: Role = serde_json::from_str("\"system\"").unwrap();
        let user: Role = serde_json::from_str("\"user\"").unwrap();
        let assistant: Role = serde_json::from_str("\"assistant\"").unwrap();

        assert_eq!(system, Role::System);
        assert_eq!(user, Role::User);
        assert_eq!(assistant, Role::Assistant);
    }

    #[test]
    fn message_clone() {
        let msg = Message {
            role: Role::User,
            content: "test".into(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        };
        let cloned = msg.clone();
        assert_eq!(cloned.role, msg.role);
        assert_eq!(cloned.content, msg.content);
    }

    #[test]
    fn message_debug() {
        let msg = Message {
            role: Role::Assistant,
            content: "response".into(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        };
        let debug = format!("{msg:?}");
        assert!(debug.contains("Assistant"));
        assert!(debug.contains("response"));
    }

    #[test]
    fn message_serialization() {
        let msg = Message {
            role: Role::User,
            content: "hello".into(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert!(json.contains("\"role\":\"user\""));
        assert!(json.contains("\"content\":\"hello\""));
    }
1168
    // --- MessagePart serde and Message construction helpers ---

    #[test]
    fn message_part_serde_round_trip() {
        let parts = vec![
            MessagePart::Text {
                text: "hello".into(),
            },
            MessagePart::ToolOutput {
                tool_name: "bash".into(),
                body: "output".into(),
                compacted_at: None,
            },
            MessagePart::Recall {
                text: "recall".into(),
            },
            MessagePart::CodeContext {
                text: "code".into(),
            },
            MessagePart::Summary {
                text: "summary".into(),
            },
        ];
        let json = serde_json::to_string(&parts).unwrap();
        let deserialized: Vec<MessagePart> = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.len(), 5);
    }

    #[test]
    fn from_legacy_creates_empty_parts() {
        let msg = Message::from_legacy(Role::User, "hello");
        assert_eq!(msg.role, Role::User);
        assert_eq!(msg.content, "hello");
        assert!(msg.parts.is_empty());
        assert_eq!(msg.to_llm_content(), "hello");
    }

    #[test]
    fn from_parts_flattens_content() {
        let msg = Message::from_parts(
            Role::System,
            vec![MessagePart::Recall {
                text: "recalled data".into(),
            }],
        );
        assert_eq!(msg.content, "recalled data");
        assert_eq!(msg.to_llm_content(), "recalled data");
        assert_eq!(msg.parts.len(), 1);
    }

    #[test]
    fn from_parts_tool_output_format() {
        let msg = Message::from_parts(
            Role::User,
            vec![MessagePart::ToolOutput {
                tool_name: "bash".into(),
                body: "hello world".into(),
                compacted_at: None,
            }],
        );
        // Flattening labels tool output with a "[tool output: <name>]" header.
        assert!(msg.content.contains("[tool output: bash]"));
        assert!(msg.content.contains("hello world"));
    }

    #[test]
    fn message_deserializes_without_parts() {
        // `parts` is optional in the wire format and defaults to empty.
        let json = r#"{"role":"user","content":"hello"}"#;
        let msg: Message = serde_json::from_str(json).unwrap();
        assert_eq!(msg.content, "hello");
        assert!(msg.parts.is_empty());
    }
1238
    // --- Flattening of compacted (pruned) tool output ---

    #[test]
    fn flatten_skips_compacted_tool_output_empty_body() {
        // When compacted_at is set and body is empty, renders "(pruned)".
        let msg = Message::from_parts(
            Role::User,
            vec![
                MessagePart::Text {
                    text: "prefix ".into(),
                },
                MessagePart::ToolOutput {
                    tool_name: "bash".into(),
                    body: String::new(),
                    compacted_at: Some(1234),
                },
                MessagePart::Text {
                    text: " suffix".into(),
                },
            ],
        );
        assert!(msg.content.contains("(pruned)"));
        assert!(msg.content.contains("prefix "));
        assert!(msg.content.contains(" suffix"));
    }

    #[test]
    fn flatten_compacted_tool_output_with_reference_renders_body() {
        // When compacted_at is set and body contains a reference notice, renders the body.
        let ref_notice = "[tool output pruned; full content at /tmp/overflow/big.txt]";
        let msg = Message::from_parts(
            Role::User,
            vec![MessagePart::ToolOutput {
                tool_name: "bash".into(),
                body: ref_notice.into(),
                compacted_at: Some(1234),
            }],
        );
        assert!(msg.content.contains(ref_notice));
        assert!(!msg.content.contains("(pruned)"));
    }

    #[test]
    fn rebuild_content_syncs_after_mutation() {
        let mut msg = Message::from_parts(
            Role::User,
            vec![MessagePart::ToolOutput {
                tool_name: "bash".into(),
                body: "original".into(),
                compacted_at: None,
            }],
        );
        assert!(msg.content.contains("original"));

        if let MessagePart::ToolOutput {
            ref mut compacted_at,
            ref mut body,
            ..
        } = msg.parts[0]
        {
            *compacted_at = Some(999);
            body.clear(); // simulate pruning: body cleared, no overflow notice
        }
        // `content` is a cached flattening of `parts`; it must be rebuilt after mutation.
        msg.rebuild_content();

        assert!(msg.content.contains("(pruned)"));
        assert!(!msg.content.contains("original"));
    }
1305
    // --- ToolUse / ToolResult part serde ---

    #[test]
    fn message_part_tool_use_serde_round_trip() {
        let part = MessagePart::ToolUse {
            id: "toolu_123".into(),
            name: "bash".into(),
            input: serde_json::json!({"command": "ls"}),
        };
        let json = serde_json::to_string(&part).unwrap();
        let deserialized: MessagePart = serde_json::from_str(&json).unwrap();
        if let MessagePart::ToolUse { id, name, input } = deserialized {
            assert_eq!(id, "toolu_123");
            assert_eq!(name, "bash");
            assert_eq!(input["command"], "ls");
        } else {
            panic!("expected ToolUse");
        }
    }

    #[test]
    fn message_part_tool_result_serde_round_trip() {
        let part = MessagePart::ToolResult {
            tool_use_id: "toolu_123".into(),
            content: "file1.rs\nfile2.rs".into(),
            is_error: false,
        };
        let json = serde_json::to_string(&part).unwrap();
        let deserialized: MessagePart = serde_json::from_str(&json).unwrap();
        if let MessagePart::ToolResult {
            tool_use_id,
            content,
            is_error,
        } = deserialized
        {
            assert_eq!(tool_use_id, "toolu_123");
            assert_eq!(content, "file1.rs\nfile2.rs");
            assert!(!is_error);
        } else {
            panic!("expected ToolResult");
        }
    }

    #[test]
    fn message_part_tool_result_is_error_default() {
        // `is_error` is optional on the wire and defaults to false.
        let json = r#"{"kind":"tool_result","tool_use_id":"id","content":"err"}"#;
        let part: MessagePart = serde_json::from_str(json).unwrap();
        if let MessagePart::ToolResult { is_error, .. } = part {
            assert!(!is_error);
        } else {
            panic!("expected ToolResult");
        }
    }
1357
    #[test]
    fn chat_response_construction() {
        let text = ChatResponse::Text("hello".into());
        assert!(matches!(text, ChatResponse::Text(s) if s == "hello"));

        let tool_use = ChatResponse::ToolUse {
            text: Some("I'll run that".into()),
            tool_calls: vec![ToolUseRequest {
                id: "1".into(),
                name: "bash".into(),
                input: serde_json::json!({}),
            }],
            thinking_blocks: vec![],
        };
        assert!(matches!(tool_use, ChatResponse::ToolUse { .. }));
    }

    #[test]
    fn flatten_parts_tool_use() {
        let msg = Message::from_parts(
            Role::Assistant,
            vec![MessagePart::ToolUse {
                id: "t1".into(),
                name: "bash".into(),
                input: serde_json::json!({"command": "ls"}),
            }],
        );
        // Flattening renders tool use as "[tool_use: <name>(<id>)]".
        assert!(msg.content.contains("[tool_use: bash(t1)]"));
    }

    #[test]
    fn flatten_parts_tool_result() {
        let msg = Message::from_parts(
            Role::User,
            vec![MessagePart::ToolResult {
                tool_use_id: "t1".into(),
                content: "output here".into(),
                is_error: false,
            }],
        );
        assert!(msg.content.contains("[tool_result: t1]"));
        assert!(msg.content.contains("output here"));
    }

    #[test]
    fn tool_definition_serde_round_trip() {
        let def = ToolDefinition {
            name: "bash".into(),
            description: "Execute a shell command".into(),
            parameters: serde_json::json!({"type": "object"}),
            output_schema: None,
        };
        let json = serde_json::to_string(&def).unwrap();
        let deserialized: ToolDefinition = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.name, "bash");
        assert_eq!(deserialized.description, "Execute a shell command");
    }

    #[tokio::test]
    async fn chat_with_tools_default_delegates_to_chat() {
        let provider = StubProvider {
            response: "hello".into(),
        };
        let messages = vec![Message::from_legacy(Role::User, "test")];
        // Default impl ignores the (empty) tool list and wraps chat() output in Text.
        let result = provider.chat_with_tools(&messages, &[]).await.unwrap();
        assert!(matches!(result, ChatResponse::Text(s) if s == "hello"));
    }

    #[test]
    fn tool_output_compacted_at_serde_default() {
        // `compacted_at` is optional on the wire and defaults to None.
        let json = r#"{"kind":"tool_output","tool_name":"bash","body":"out"}"#;
        let part: MessagePart = serde_json::from_str(json).unwrap();
        if let MessagePart::ToolOutput { compacted_at, .. } = part {
            assert!(compacted_at.is_none());
        } else {
            panic!("expected ToolOutput");
        }
    }
1436
    // --- M27: strip_json_fences tests ---

    #[test]
    fn strip_json_fences_plain_json() {
        assert_eq!(strip_json_fences(r#"{"a": 1}"#), r#"{"a": 1}"#);
    }

    #[test]
    fn strip_json_fences_with_json_fence() {
        assert_eq!(strip_json_fences("```json\n{\"a\": 1}\n```"), r#"{"a": 1}"#);
    }

    #[test]
    fn strip_json_fences_with_plain_fence() {
        assert_eq!(strip_json_fences("```\n{\"a\": 1}\n```"), r#"{"a": 1}"#);
    }

    #[test]
    fn strip_json_fences_whitespace() {
        assert_eq!(strip_json_fences("  \n  "), "");
    }

    #[test]
    fn strip_json_fences_empty() {
        assert_eq!(strip_json_fences(""), "");
    }

    #[test]
    fn strip_json_fences_outer_whitespace() {
        assert_eq!(
            strip_json_fences("  ```json\n{\"a\": 1}\n```  "),
            r#"{"a": 1}"#
        );
    }

    #[test]
    fn strip_json_fences_only_opening_fence() {
        // An unterminated fence still yields the inner JSON.
        assert_eq!(strip_json_fences("```json\n{\"a\": 1}"), r#"{"a": 1}"#);
    }
1476
    // --- M27: chat_typed tests ---

    #[derive(Debug, serde::Deserialize, schemars::JsonSchema, PartialEq)]
    struct TestOutput {
        value: String,
    }

    // Provider that replays a scripted list of chat() results, in order.
    struct SequentialStub {
        responses: std::sync::Mutex<Vec<Result<String, LlmError>>>,
    }

    impl SequentialStub {
        fn new(responses: Vec<Result<String, LlmError>>) -> Self {
            Self {
                responses: std::sync::Mutex::new(responses),
            }
        }
    }

    impl LlmProvider for SequentialStub {
        async fn chat(&self, _messages: &[Message]) -> Result<String, LlmError> {
            // Pop the next scripted response; error out when the script runs dry.
            let mut responses = self.responses.lock().unwrap();
            if responses.is_empty() {
                return Err(LlmError::Other("no more responses".into()));
            }
            responses.remove(0)
        }

        async fn chat_stream(&self, messages: &[Message]) -> Result<ChatStream, LlmError> {
            let response = self.chat(messages).await?;
            Ok(Box::pin(tokio_stream::once(Ok(StreamChunk::Content(
                response,
            )))))
        }

        fn supports_streaming(&self) -> bool {
            false
        }

        async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
            Err(LlmError::EmbedUnsupported {
                provider: "sequential-stub".into(),
            })
        }

        fn supports_embeddings(&self) -> bool {
            false
        }

        fn name(&self) -> &'static str {
            "sequential-stub"
        }
    }
1530
    #[tokio::test]
    async fn chat_typed_happy_path() {
        let provider = StubProvider {
            response: r#"{"value": "hello"}"#.into(),
        };
        let messages = vec![Message::from_legacy(Role::User, "test")];
        let result: TestOutput = provider.chat_typed(&messages).await.unwrap();
        assert_eq!(
            result,
            TestOutput {
                value: "hello".into()
            }
        );
    }

    #[tokio::test]
    async fn chat_typed_retry_succeeds() {
        // First reply is garbage, the second parses — chat_typed retries exactly once.
        let provider = SequentialStub::new(vec![
            Ok("not valid json".into()),
            Ok(r#"{"value": "ok"}"#.into()),
        ]);
        let messages = vec![Message::from_legacy(Role::User, "test")];
        let result: TestOutput = provider.chat_typed(&messages).await.unwrap();
        assert_eq!(result, TestOutput { value: "ok".into() });
    }

    #[tokio::test]
    async fn chat_typed_both_fail() {
        let provider = SequentialStub::new(vec![Ok("bad json".into()), Ok("still bad".into())]);
        let messages = vec![Message::from_legacy(Role::User, "test")];
        let result = provider.chat_typed::<TestOutput>(&messages).await;
        let err = result.unwrap_err();
        assert!(err.to_string().contains("parse failed after retry"));
    }

    #[tokio::test]
    async fn chat_typed_chat_error_propagates() {
        // Transport errors are not retried; they bubble up unchanged.
        let provider = SequentialStub::new(vec![Err(LlmError::Unavailable)]);
        let messages = vec![Message::from_legacy(Role::User, "test")];
        let result = provider.chat_typed::<TestOutput>(&messages).await;
        assert!(matches!(result, Err(LlmError::Unavailable)));
    }

    #[tokio::test]
    async fn chat_typed_strips_fences() {
        let provider = StubProvider {
            response: "```json\n{\"value\": \"fenced\"}\n```".into(),
        };
        let messages = vec![Message::from_legacy(Role::User, "test")];
        let result: TestOutput = provider.chat_typed(&messages).await.unwrap();
        assert_eq!(
            result,
            TestOutput {
                value: "fenced".into()
            }
        );
    }

    #[test]
    fn supports_structured_output_default_false() {
        let provider = StubProvider {
            response: String::new(),
        };
        assert!(!provider.supports_structured_output());
    }

    #[test]
    fn structured_parse_error_display() {
        let err = LlmError::StructuredParse("test error".into());
        assert_eq!(
            err.to_string(),
            "structured output parse failed: test error"
        );
    }
1605
    // --- Image parts and vision capability ---

    #[test]
    fn message_part_image_roundtrip_json() {
        let part = MessagePart::Image(Box::new(ImageData {
            data: vec![1, 2, 3, 4],
            mime_type: "image/jpeg".into(),
        }));
        let json = serde_json::to_string(&part).unwrap();
        let decoded: MessagePart = serde_json::from_str(&json).unwrap();
        match decoded {
            MessagePart::Image(img) => {
                assert_eq!(img.data, vec![1, 2, 3, 4]);
                assert_eq!(img.mime_type, "image/jpeg");
            }
            _ => panic!("expected Image variant"),
        }
    }

    #[test]
    fn flatten_parts_includes_image_placeholder() {
        let msg = Message::from_parts(
            Role::User,
            vec![
                MessagePart::Text {
                    text: "see this".into(),
                },
                MessagePart::Image(Box::new(ImageData {
                    data: vec![0u8; 100],
                    mime_type: "image/png".into(),
                })),
            ],
        );
        let content = msg.to_llm_content();
        assert!(content.contains("see this"));
        // Image bytes never appear inline; only a mime-typed placeholder does.
        assert!(content.contains("[image: image/png"));
    }

    #[test]
    fn supports_vision_default_false() {
        let provider = StubProvider {
            response: String::new(),
        };
        assert!(!provider.supports_vision());
    }
1649
    // --- MessageMetadata visibility ---

    #[test]
    fn message_metadata_default_both_visible() {
        let m = MessageMetadata::default();
        assert!(m.visibility.is_agent_visible());
        assert!(m.visibility.is_user_visible());
        assert_eq!(m.visibility, MessageVisibility::Both);
        assert!(m.compacted_at.is_none());
    }

    #[test]
    fn message_metadata_agent_only() {
        let m = MessageMetadata::agent_only();
        assert!(m.visibility.is_agent_visible());
        assert!(!m.visibility.is_user_visible());
        assert_eq!(m.visibility, MessageVisibility::AgentOnly);
    }

    #[test]
    fn message_metadata_user_only() {
        let m = MessageMetadata::user_only();
        assert!(!m.visibility.is_agent_visible());
        assert!(m.visibility.is_user_visible());
        assert_eq!(m.visibility, MessageVisibility::UserOnly);
    }

    #[test]
    fn message_metadata_serde_default() {
        // Missing metadata on the wire defaults to Both-visible.
        let json = r#"{"role":"user","content":"hello"}"#;
        let msg: Message = serde_json::from_str(json).unwrap();
        assert!(msg.metadata.visibility.is_agent_visible());
        assert!(msg.metadata.visibility.is_user_visible());
    }

    #[test]
    fn message_metadata_round_trip() {
        let msg = Message {
            role: Role::User,
            content: "test".into(),
            parts: vec![],
            metadata: MessageMetadata::agent_only(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        let decoded: Message = serde_json::from_str(&json).unwrap();
        assert!(decoded.metadata.visibility.is_agent_visible());
        assert!(!decoded.metadata.visibility.is_user_visible());
        assert_eq!(decoded.metadata.visibility, MessageVisibility::AgentOnly);
    }
1697
1698    #[test]
1699    fn message_part_compaction_round_trip() {
1700        let part = MessagePart::Compaction {
1701            summary: "Context was summarized.".to_owned(),
1702        };
1703        let json = serde_json::to_string(&part).unwrap();
1704        let decoded: MessagePart = serde_json::from_str(&json).unwrap();
1705        assert!(
1706            matches!(decoded, MessagePart::Compaction { summary } if summary == "Context was summarized.")
1707        );
1708    }
1709
1710    #[test]
1711    fn flatten_parts_compaction_contributes_no_text() {
1712        // MessagePart::Compaction must not appear in the flattened content string
1713        // (it's metadata-only; the summary is stored on the Message separately).
1714        let parts = vec![
1715            MessagePart::Text {
1716                text: "Hello".to_owned(),
1717            },
1718            MessagePart::Compaction {
1719                summary: "Summary".to_owned(),
1720            },
1721        ];
1722        let msg = Message::from_parts(Role::Assistant, parts);
1723        // Only the Text part should appear in content.
1724        assert_eq!(msg.content.trim(), "Hello");
1725    }
1726
1727    #[test]
1728    fn stream_chunk_compaction_variant() {
1729        let chunk = StreamChunk::Compaction("A summary".to_owned());
1730        assert!(matches!(chunk, StreamChunk::Compaction(s) if s == "A summary"));
1731    }
1732
1733    #[test]
1734    fn short_type_name_extracts_last_segment() {
1735        struct MyOutput;
1736        assert_eq!(short_type_name::<MyOutput>(), "MyOutput");
1737    }
1738
1739    #[test]
1740    fn short_type_name_primitive_returns_full_name() {
1741        // Primitives have no "::" in their type_name — rsplit returns the full name.
1742        assert_eq!(short_type_name::<u32>(), "u32");
1743        assert_eq!(short_type_name::<bool>(), "bool");
1744    }
1745
1746    #[test]
1747    fn short_type_name_nested_path_returns_last() {
1748        // Use a type whose path contains "::" segments.
1749        assert_eq!(
1750            short_type_name::<std::collections::HashMap<u32, u32>>(),
1751            "HashMap<u32, u32>"
1752        );
1753    }
1754
1755    // Regression test for #2257: `MessagePart::Summary` must serialize to the
1756    // internally-tagged format `{"kind":"summary","text":"..."}` and round-trip correctly.
1757    #[test]
1758    fn summary_roundtrip() {
1759        let part = MessagePart::Summary {
1760            text: "hello".to_string(),
1761        };
1762        let json = serde_json::to_string(&part).expect("serialization must not fail");
1763        assert!(
1764            json.contains("\"kind\":\"summary\""),
1765            "must use internally-tagged format, got: {json}"
1766        );
1767        assert!(
1768            !json.contains("\"Summary\""),
1769            "must not use externally-tagged format, got: {json}"
1770        );
1771        let decoded: MessagePart =
1772            serde_json::from_str(&json).expect("deserialization must not fail");
1773        match decoded {
1774            MessagePart::Summary { text } => assert_eq!(text, "hello"),
1775            other => panic!("expected MessagePart::Summary, got {other:?}"),
1776        }
1777    }
1778
1779    #[tokio::test]
1780    async fn embed_batch_default_empty_returns_empty() {
1781        let provider = StubProvider {
1782            response: String::new(),
1783        };
1784        let result = provider.embed_batch(&[]).await.unwrap();
1785        assert!(result.is_empty());
1786    }
1787
1788    #[tokio::test]
1789    async fn embed_batch_default_calls_embed_sequentially() {
1790        let provider = StubProvider {
1791            response: String::new(),
1792        };
1793        let texts = ["hello", "world", "foo"];
1794        let result = provider.embed_batch(&texts).await.unwrap();
1795        assert_eq!(result.len(), 3);
1796        // StubProvider::embed always returns [0.1, 0.2, 0.3]
1797        for vec in &result {
1798            assert_eq!(vec, &[0.1_f32, 0.2, 0.3]);
1799        }
1800    }
1801
1802    #[test]
1803    fn message_visibility_db_roundtrip_both() {
1804        assert_eq!(MessageVisibility::Both.as_db_str(), "both");
1805        assert_eq!(
1806            MessageVisibility::from_db_str("both"),
1807            MessageVisibility::Both
1808        );
1809    }
1810
1811    #[test]
1812    fn message_visibility_db_roundtrip_agent_only() {
1813        assert_eq!(MessageVisibility::AgentOnly.as_db_str(), "agent_only");
1814        assert_eq!(
1815            MessageVisibility::from_db_str("agent_only"),
1816            MessageVisibility::AgentOnly
1817        );
1818    }
1819
1820    #[test]
1821    fn message_visibility_db_roundtrip_user_only() {
1822        assert_eq!(MessageVisibility::UserOnly.as_db_str(), "user_only");
1823        assert_eq!(
1824            MessageVisibility::from_db_str("user_only"),
1825            MessageVisibility::UserOnly
1826        );
1827    }
1828
1829    #[test]
1830    fn message_visibility_from_db_str_unknown_defaults_to_both() {
1831        assert_eq!(
1832            MessageVisibility::from_db_str("unknown_future_value"),
1833            MessageVisibility::Both
1834        );
1835        assert_eq!(MessageVisibility::from_db_str(""), MessageVisibility::Both);
1836    }
1837}