// litellm_rust/src/lib.rs — crate root.
//! Unified Rust SDK for chat completions, embeddings, images, and video across
//! multiple LLM providers.
//!
//! litellm-rust is a Rust port of [LiteLLM](https://github.com/BerriAI/litellm).
//! It provides a single [`LiteLLM`] client that routes requests to OpenAI-compatible,
//! Anthropic, Gemini, and xAI backends using a `"provider/model"` format.
//!
//! # Quick Start
//!
//! ```rust,no_run
//! use litellm_rust::{LiteLLM, ChatRequest};
//!
//! # async fn run() -> litellm_rust::Result<()> {
//! let client = LiteLLM::new()?;
//! let resp = client
//!     .completion(ChatRequest::new("openai/gpt-4o").message("user", "hello"))
//!     .await?;
//! println!("{}", resp.content);
//! # Ok(())
//! # }
//! ```
//!
//! # Streaming
//!
//! ```rust,no_run
//! use futures_util::StreamExt;
//! use litellm_rust::{LiteLLM, ChatRequest};
//!
//! # async fn run() -> litellm_rust::Result<()> {
//! let client = LiteLLM::new()?;
//! let mut stream = client
//!     .stream_completion(ChatRequest::new("openai/gpt-4o").message("user", "hello"))
//!     .await?;
//! while let Some(chunk) = stream.next().await {
//!     print!("{}", chunk?.content);
//! }
//! # Ok(())
//! # }
//! ```
//!
//! # Supported Providers
//!
//! | Provider | Chat | Streaming | Embeddings | Images | Video |
//! |----------|------|-----------|------------|--------|-------|
//! | OpenAI-compatible | yes | yes | yes | yes | yes |
//! | Anthropic | yes | yes | - | - | - |
//! | Gemini | yes | - | - | yes | yes |
//! | xAI | yes | yes | - | - | - |

50pub mod client;
51pub mod config;
52pub mod error;
53pub mod http;
54pub mod providers;
55pub mod registry;
56pub mod router;
57pub mod stream;
58pub mod types;
59
60pub use client::LiteLLM;
61pub use config::{Config, ProviderConfig, ProviderKind};
62pub use error::{LiteLLMError, Result};
63pub use stream::{ChatStream, ChatStreamChunk};
64pub use types::*;