// crabtalk_core/model/mod.rs
//! Unified LLM interface types and traits.
//!
//! Provides the shared types used across all LLM providers:
//! `Message`, `Response`, `StreamChunk`, `Tool`, `Request`, and the `Model` trait.

use std::future::Future;

use anyhow::Result;
use futures_core::Stream;

pub use limits::default_context_limit;
pub use message::{Message, MessageBuilder, Role, estimate_tokens};
pub use request::Request;
pub use response::{
    Choice, CompletionMeta, CompletionTokensDetails, Delta, FinishReason, Response, Usage,
};
pub use stream::StreamChunk;
pub use tool::{FunctionCall, Tool, ToolCall, ToolChoice};

mod limits;
mod message;
mod request;
mod response;
mod stream;
mod tool;
23
24/// Unified LLM provider trait.
25///
26/// Abstracts any LLM provider — single-backend (Claude, GPT) or
27/// multi-model registry (ProviderRegistry). All implementations take
28/// `&Request` directly; no associated config type.
29///
30/// Constructors are inherent methods on each provider — never called
31/// polymorphically.
32pub trait Model: Sized + Clone {
33    /// Send a chat completion request.
34    fn send(&self, request: &Request) -> impl Future<Output = Result<Response>> + Send;
35
36    /// Stream a chat completion response.
37    fn stream(&self, request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send;
38
39    /// Resolve the context limit for a model name.
40    ///
41    /// Default implementation uses the static prefix-matching map.
42    fn context_limit(&self, model: &str) -> usize {
43        default_context_limit(model)
44    }
45
46    /// Get the active/default model name.
47    fn active_model(&self) -> String;
48}
49
50/// `()` as a no-op Model for testing (panics on send/stream).
51impl Model for () {
52    async fn send(&self, _request: &Request) -> Result<Response> {
53        panic!("NoopModel::send called — not intended for real LLM calls");
54    }
55
56    #[allow(unreachable_code)]
57    fn stream(&self, _request: Request) -> impl Stream<Item = Result<StreamChunk>> + Send {
58        panic!("NoopModel::stream called — not intended for real LLM calls");
59        async_stream::stream! {
60            yield Err(anyhow::anyhow!("not implemented"));
61        }
62    }
63
64    fn context_limit(&self, _model: &str) -> usize {
65        0
66    }
67
68    fn active_model(&self) -> String {
69        String::new()
70    }
71}