// crabtalk_core/model/mod.rs

1//! Unified LLM interface types and traits.
2//!
3//! Provides the shared types used across all LLM providers:
4//! `Message`, `Response`, `StreamChunk`, `Tool`, `Request`, and the `Model` trait.
5
6use anyhow::Result;
7use futures_core::Stream;
8pub use limits::default_context_limit;
9pub use message::{Message, MessageBuilder, Role, estimate_tokens};
10pub use request::Request;
11pub use response::{
12    Choice, CompletionMeta, CompletionTokensDetails, Delta, FinishReason, Response, Usage,
13};
14pub use stream::StreamChunk;
15pub use tool::{FunctionCall, Tool, ToolCall, ToolChoice};
16
17mod limits;
18mod message;
19mod request;
20mod response;
21mod stream;
22mod tool;
23
24#[cfg(any(test, feature = "test-utils"))]
25pub mod test_model;
26
/// Unified LLM provider trait.
///
/// Abstracts any LLM provider — single-backend (Claude, GPT) or
/// multi-model registry (ProviderRegistry). All implementations take
/// `&Request` directly; no associated config type.
///
/// Constructors are inherent methods on each provider — never called
/// polymorphically.
pub trait Model: Sized + Clone {
    /// Send a chat completion request.
    ///
    /// Resolves to the complete [`Response`], or an error if the
    /// provider call fails.
    fn send(&self, request: &Request) -> impl Future<Output = Result<Response>> + Send;

    /// Stream a chat completion response.
    ///
    /// Takes `request` by value so the returned stream can own it
    /// (the `impl Stream` carries no borrow of the caller's data).
    /// Each item is a [`StreamChunk`]; an `Err` item signals a
    /// mid-stream failure.
    fn stream(&self, request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send;

    /// Resolve the context limit for a model name.
    ///
    /// Default implementation uses the static prefix-matching map
    /// ([`default_context_limit`]); providers with richer model
    /// metadata may override this.
    fn context_limit(&self, model: &str) -> usize {
        default_context_limit(model)
    }

    /// Get the active/default model name.
    fn active_model(&self) -> String;
}
52
53/// `()` as a no-op Model for testing (panics on send/stream).
54impl Model for () {
55    async fn send(&self, _request: &Request) -> Result<Response> {
56        panic!("NoopModel::send called — not intended for real LLM calls");
57    }
58
59    #[allow(unreachable_code)]
60    fn stream(&self, _request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send {
61        panic!("NoopModel::stream called — not intended for real LLM calls");
62        async_stream::stream! {
63            yield Err(anyhow::anyhow!("not implemented"));
64        }
65    }
66
67    fn context_limit(&self, _model: &str) -> usize {
68        0
69    }
70
71    fn active_model(&self) -> String {
72        String::new()
73    }
74}