litellm_rs/
lib.rs

//! # LiteLLM-RS
//!
//! A Rust implementation of Python LiteLLM - call 100+ LLM APIs using the OpenAI format.
//! A high-performance AI gateway with a unified interface across providers.
//!
//! ## Features
//!
//! - **Python LiteLLM Compatible**: Drop-in replacement with the same API design
//! - **OpenAI Compatible**: Full compatibility with the OpenAI API format
//! - **Multi-Provider**: Support for 100+ AI providers (OpenAI, Anthropic, Azure, Google, etc.)
//! - **Unified Interface**: Call any LLM using the same function signature
//! - **High Performance**: Built with Rust and Tokio for maximum throughput
//! - **Intelligent Routing**: Smart load balancing and failover across providers
//! - **Cost Optimization**: Automatic cost tracking and provider selection
//! - **Streaming Support**: Real-time response streaming
//!
//! ## Quick Start - Python LiteLLM Style
//!
//! ```rust,no_run
//! use litellm_rs::{completion, user_message, system_message};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Call OpenAI (the default provider for gpt-* models)
//!     let response = completion(
//!         "gpt-4",
//!         vec![
//!             system_message("You are a helpful assistant."),
//!             user_message("Hello, how are you?"),
//!         ],
//!         None,
//!     ).await?;
//!
//!     println!("Response: {}", response.choices[0].message.content);
//!
//!     // Call Anthropic with an explicit provider prefix
//!     let response = completion(
//!         "anthropic/claude-3-sonnet-20240229",
//!         vec![user_message("What is the capital of France?")],
//!         None,
//!     ).await?;
//!
//!     println!("Claude says: {}", response.choices[0].message.content);
//!
//!     Ok(())
//! }
//! ```
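//!
//! ## Completion Options
//!
//! The third argument to [`completion`] takes an optional [`CompletionOptions`].
//! The sketch below is illustrative only: the `temperature` and `max_tokens`
//! field names are assumptions, not a confirmed API surface, so the example is
//! marked `ignore`. Check the [`CompletionOptions`] docs for the actual fields.
//!
//! ```rust,ignore
//! use litellm_rs::{completion, user_message, CompletionOptions};
//!
//! # async fn demo() -> Result<(), Box<dyn std::error::Error>> {
//! // Hypothetical fields, shown for illustration only.
//! let options = CompletionOptions {
//!     temperature: Some(0.2),
//!     max_tokens: Some(256),
//!     ..Default::default()
//! };
//!
//! let response = completion(
//!     "gpt-4",
//!     vec![user_message("Summarize Rust in one sentence.")],
//!     Some(options),
//! ).await?;
//! println!("{}", response.choices[0].message.content);
//! # Ok(())
//! # }
//! ```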
//!
//! ## Gateway Mode
//!
//! ```rust,no_run
//! use litellm_rs::{Gateway, Config};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let config = Config::from_file("config/gateway.yaml").await?;
//!     let gateway = Gateway::new(config).await?;
//!     gateway.run().await?;
//!     Ok(())
//! }
//! ```
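//!
//! ## Streaming
//!
//! [`completion_stream`] is re-exported for incremental responses. The exact
//! stream item and delta shapes are assumptions here (the sketch mirrors the
//! OpenAI chunk layout and assumes a `futures`-style stream), so the example
//! is marked `ignore`.
//!
//! ```rust,ignore
//! use futures::StreamExt;
//! use litellm_rs::{completion_stream, user_message};
//!
//! # async fn demo() -> Result<(), Box<dyn std::error::Error>> {
//! let mut stream = completion_stream(
//!     "gpt-4",
//!     vec![user_message("Tell me a short story.")],
//!     None,
//! ).await?;
//!
//! // Print text deltas as they arrive; the `delta.content` shape is assumed.
//! while let Some(chunk) = stream.next().await {
//!     if let Some(text) = chunk?.choices[0].delta.content.as_deref() {
//!         print!("{text}");
//!     }
//! }
//! # Ok(())
//! # }
//! ```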

#![allow(missing_docs)]
#![allow(rustdoc::missing_doc_code_examples)]
#![warn(clippy::all)]
#![allow(clippy::module_inception)]

// Module declarations
mod auth;
// The core completion API lives in `core::completion` (re-exported below)
pub mod config;
pub mod core;
mod monitoring;
pub mod sdk; // SDK module
pub mod server;
pub mod services; // Services module
pub mod storage;
pub mod utils;

// Re-export main types
pub use config::Config;
pub use utils::error::{GatewayError, Result};

// Export core completion functionality (Python LiteLLM compatible)
pub use core::completion::{
    Choice, CompletionOptions, CompletionResponse, ContentPart, LiteLLMError, Message, Router,
    Usage, acompletion, assistant_message, completion, completion_stream, system_message,
    user_message,
};

// Export unified type system
pub use core::types::{MessageContent, MessageRole};

// Export core model and provider types
pub use core::models::{RequestContext, openai::*};
pub use core::providers::{
    Provider, ProviderError, ProviderRegistry, ProviderType, UnifiedProviderError,
};

use tracing::info;

/// A minimal LiteLLM Gateway implementation
pub struct Gateway {
    config: Config,
    server: server::HttpServer,
}

impl Gateway {
    /// Create a new gateway instance
    pub async fn new(config: Config) -> Result<Self> {
        info!("Creating new gateway instance");

        // Create HTTP server
        let server = server::HttpServer::new(&config).await?;

        Ok(Self { config, server })
    }

    /// Run the gateway server
    pub async fn run(self) -> Result<()> {
        info!("Starting LiteLLM Gateway");
        info!("Configuration: {:#?}", self.config);

        // Start HTTP server
        self.server.start().await?;

        Ok(())
    }
}

// Version information
/// Current version of the crate
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Name of the crate
pub const NAME: &str = env!("CARGO_PKG_NAME");
/// Description of the crate
pub const DESCRIPTION: &str = env!("CARGO_PKG_DESCRIPTION");

/// Gateway build information
#[derive(Debug, Clone)]
pub struct BuildInfo {
    /// Version number
    pub version: &'static str,
    /// Build timestamp
    pub build_time: &'static str,
    /// Git commit hash
    pub git_hash: &'static str,
    /// Rust version
    pub rust_version: &'static str,
}

impl Default for BuildInfo {
    fn default() -> Self {
        Self {
            version: VERSION,
            build_time: "unknown",
            git_hash: "unknown",
            rust_version: "unknown",
        }
    }
}

/// Returns build information for this crate.
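///
/// Only `version` is populated from Cargo metadata at compile time; the
/// remaining fields default to `"unknown"`.
///
/// # Example
///
/// ```
/// let info = litellm_rs::build_info();
/// assert_eq!(info.version, litellm_rs::VERSION);
/// ```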
pub fn build_info() -> BuildInfo {
    BuildInfo::default()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_build_info() {
        let info = build_info();
        assert!(!info.version.is_empty());
        assert_eq!(info.version, VERSION);
    }

    #[test]
    fn test_constants() {
        // Test that constants are defined and have expected values
        assert_eq!(VERSION, env!("CARGO_PKG_VERSION"));
        assert_eq!(NAME, env!("CARGO_PKG_NAME"));
        assert_eq!(DESCRIPTION, env!("CARGO_PKG_DESCRIPTION"));
    }
}