// llm_cascade/lib.rs
//! Resilient, cascading LLM inference across multiple providers.
//!
//! `llm-cascade` provides automatic failover, circuit breaking, and retry cooldowns
//! when calling LLM APIs. Define ordered provider/model lists in a TOML config;
//! the library tries each entry in sequence, skipping those on cooldown, and returns
//! the first successful response.
//!
//! # Quick start
//!
//! ```rust,no_run
//! use llm_cascade::{run_cascade, load_config, db, Conversation};
//!
//! #[tokio::main]
//! async fn main() {
//!     let config = load_config(&"config.toml".into()).expect("config");
//!     let conn = db::init_db(&config.database.path).expect("db");
//!
//!     let conversation = Conversation::single_user_prompt("What is 2 + 2?");
//!     match run_cascade("my_cascade", &conversation, &config, &conn).await {
//!         Ok(response) => println!("{}", response.text_only()),
//!         Err(e) => eprintln!("All providers failed: {}", e),
//!     }
//! }
//! ```

26pub mod cascade;
27pub mod config;
28pub mod db;
29pub mod error;
30pub mod models;
31pub mod persistence;
32pub mod providers;
33pub mod secrets;
34
35pub use cascade::run_cascade;
36pub use config::{
37 AppConfig, CascadeConfig, CascadeEntry, DatabaseConfig, FailureConfig, ProviderConfig,
38 ProviderType, load_config,
39};
40pub use error::{CascadeError, ProviderError};
41pub use models::{ContentBlock, Conversation, LlmResponse, Message, MessageRole, ToolDefinition};