// multi_llm/lib.rs

1//! # multi-llm
2//!
3//! Unified multi-provider LLM client with support for OpenAI, Anthropic, Ollama, and LMStudio.
4//!
5//! ## Key Features
6//!
7//! - **Multiple Providers**: Seamless switching between LLM providers
8//! - **Unified Messages**: Provider-agnostic message architecture with caching hints
9//! - **Prompt Caching**: Native support for Anthropic prompt caching
10//! - **Tool Calling**: First-class function/tool calling support
11//! - **Resilience**: Built-in retry logic, rate limiting, and error handling
12//!
13//! ## Example
14//!
15//! ```rust,no_run
16//! use multi_llm::{UnifiedLLMClient, LLMConfig, OpenAIConfig, DefaultLLMParams, UnifiedMessage};
17//!
18//! # async fn example() -> anyhow::Result<()> {
19//! let config = LLMConfig {
20//!     provider: Box::new(OpenAIConfig {
21//!         api_key: Some("your-api-key".to_string()),
22//!         base_url: "https://api.openai.com".to_string(),
23//!         default_model: "gpt-4".to_string(),
24//!         max_context_tokens: 128_000,
25//!         retry_policy: Default::default(),
26//!     }),
27//!     default_params: DefaultLLMParams::default(),
28//! };
29//!
30//! let client = UnifiedLLMClient::from_config(config)?;
31//! let messages = vec![UnifiedMessage::user("Hello, how are you?")];
32//! // Use client.execute_llm(...) for actual requests
33//! # Ok(())
34//! # }
35//! ```
36
37// Allow missing errors documentation - errors are self-documenting via type signatures
38#![allow(clippy::missing_errors_doc)]
39// Allow unreachable in provider clone - all types are covered but compiler can't verify
40#![allow(clippy::unreachable)]
41
42// =============================================================================
43// Module declarations
44// =============================================================================
45
46// Public modules - flattened structure matching DESIGN.md
47pub mod client;
48pub mod config;
49pub mod error;
50pub mod messages;
51pub mod provider;
52pub mod providers;
53
54// Internal modules
55pub(crate) mod internals;
56pub(crate) mod logging;
57
58#[cfg(test)]
59pub mod tests;
60
61// =============================================================================
62// Public API re-exports (~28 types as per issue #4)
63// =============================================================================
64
65// Client
66pub use client::UnifiedLLMClient;
67
68// Configuration
69pub use config::{
70    AnthropicConfig, DefaultLLMParams, LLMConfig, LMStudioConfig, OllamaConfig, OpenAIConfig,
71    ProviderConfig,
72};
73
74// Errors
75pub use error::{LlmError, LlmResult};
76
77// Messages - the core unified message architecture
78pub use messages::{
79    CacheType, MessageAttributes, MessageCategory, MessageContent, MessageRole, UnifiedLLMRequest,
80    UnifiedMessage,
81};
82
83// Provider trait and types
84pub use provider::{
85    LlmProvider, RequestConfig, Response, ResponseFormat, TokenUsage, Tool, ToolCall,
86    ToolCallingRound, ToolChoice, ToolResult,
87};
88
89// Providers
90pub use providers::{AnthropicProvider, LMStudioProvider, OllamaProvider, OpenAIProvider};
91
92// Token counting (from internals, re-exported for public use)
93pub use internals::tokens::{
94    AnthropicTokenCounter, OpenAITokenCounter, TokenCounter, TokenCounterFactory,
95};
96
97// Retry policy (from internals, re-exported for public use)
98pub use internals::retry::RetryPolicy;
99
100// Event types - only available with "events" feature
101#[cfg(feature = "events")]
102pub use internals::events::{event_types, BusinessEvent, EventScope};
103#[cfg(feature = "events")]
104pub use provider::LLMBusinessEvent;
105
106// =============================================================================
107// Helper macro for handling response types with/without events feature
108// =============================================================================
109
/// Extract the Response from `execute_llm` results, regardless of the events feature.
///
/// With the `events` feature enabled, `execute_llm` yields
/// `Result<(Response, Vec<LLMBusinessEvent>)>`; without it, the return type is
/// `Result<Response>`. This macro papers over the difference so calling code can
/// be written once and compiled under either configuration.
///
/// # Example
///
/// ```rust,ignore
/// use multi_llm::{unwrap_response, UnifiedLLMClient, LlmProvider};
///
/// let response = unwrap_response!(client.execute_llm(request, None, None).await?);
/// println!("Content: {}", response.content);
/// ```
///
/// # With events feature
///
/// To inspect the emitted events themselves, skip this macro and destructure
/// the tuple directly:
///
/// ```rust,ignore
/// #[cfg(feature = "events")]
/// let (response, events) = client.execute_llm(request, None, None).await?;
/// ```
#[cfg(feature = "events")]
#[macro_export]
macro_rules! unwrap_response {
    ($outcome:expr) => {{
        // Discard the accompanying business events; keep only the response.
        let (response, _discarded_events) = $outcome;
        response
    }};
}
141
/// Extract the Response from `execute_llm` results (non-events version).
///
/// Without the `events` feature, `execute_llm` already returns a bare
/// `Result<Response>`, so this expansion is the input expression unchanged.
/// See the `events`-feature variant for the full documentation.
#[cfg(not(feature = "events"))]
#[macro_export]
macro_rules! unwrap_response {
    ($outcome:expr) => {
        $outcome
    };
}
151}