// walrus_core/model/mod.rs

//! Unified LLM interface types and traits.
//!
//! Provides the shared types used across all LLM providers:
//! `Message`, `Response`, `StreamChunk`, `Tool`, `Request`, and the `Model` trait.

use anyhow::Result;
use compact_str::CompactString;
use futures_core::Stream;
pub use limits::default_context_limit;
pub use message::{Message, MessageBuilder, Role, estimate_tokens};
pub use request::Request;
pub use response::{
    Choice, CompletionMeta, CompletionTokensDetails, Delta, FinishReason, Response, Usage,
};
pub use stream::StreamChunk;
pub use tool::{FunctionCall, Tool, ToolCall, ToolChoice};

mod limits;
mod message;
mod request;
mod response;
mod stream;
mod tool;

/// Unified LLM provider trait.
///
/// Abstracts any LLM provider — single-backend (DeepSeek, Claude) or
/// multi-model registry (ProviderManager). All implementations take
/// `&Request` directly; no associated config type.
///
/// Constructors are inherent methods on each provider — never called
/// polymorphically.
pub trait Model: Sized + Clone {
    /// Send a chat completion request and resolve to the complete
    /// (non-streaming) [`Response`].
    ///
    /// The returned future is `Send`, so implementations can be awaited
    /// from multi-threaded executors.
    // NOTE(review): `Future` is used unqualified — relies on the std prelude
    // (edition 2024) or an out-of-view `use std::future::Future`; confirm.
    fn send(&self, request: &Request) -> impl Future<Output = Result<Response>> + Send;

    /// Stream a chat completion response as incremental [`StreamChunk`]s.
    ///
    /// Takes `Request` by value (unlike [`Model::send`]) — presumably so the
    /// returned stream can own the request across yields; TODO confirm.
    fn stream(&self, request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send;

    /// Resolve the context limit for a model name.
    ///
    /// Default implementation uses the static prefix-matching map
    /// ([`default_context_limit`]); providers with exact per-model metadata
    /// may override.
    fn context_limit(&self, model: &str) -> usize {
        default_context_limit(model)
    }

    /// Get the active/default model name.
    fn active_model(&self) -> CompactString;
}
51/// `()` as a no-op Model for testing (panics on send/stream).
52impl Model for () {
53    async fn send(&self, _request: &Request) -> Result<Response> {
54        panic!("NoopModel::send called — not intended for real LLM calls");
55    }
56
57    #[allow(unreachable_code)]
58    fn stream(&self, _request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send {
59        panic!("NoopModel::stream called — not intended for real LLM calls");
60        async_stream::stream! {
61            yield Err(anyhow::anyhow!("not implemented"));
62        }
63    }
64
65    fn context_limit(&self, _model: &str) -> usize {
66        0
67    }
68
69    fn active_model(&self) -> CompactString {
70        CompactString::new("")
71    }
72}