//! Resilient, cascading LLM inference across multiple providers.
//!
//! `llm-cascade` provides automatic failover, circuit breaking, and retry cooldowns
//! when calling LLM APIs. Define ordered provider/model lists in a TOML config;
//! the library tries each entry in sequence, skipping those on cooldown, and returns
//! the first successful response.
//!
//! # Quick start
//!
//! ```rust,no_run
//! use llm_cascade::{run_cascade, load_config, db, Conversation, Message};
//!
//! #[tokio::main]
//! async fn main() {
//!     let config = load_config(&"config.toml".into()).expect("config");
//!     let conn = db::init_db(&config.database.path).expect("db");
//!
//!     let conversation = Conversation::single_user_prompt("What is 2 + 2?");
//!     match run_cascade("my_cascade", &conversation, &config, &conn).await {
//!         Ok(response) => println!("{}", response.text_only()),
//!         Err(e) => eprintln!("All providers failed: {}", e),
//!     }
//! }
//! ```
pub use run_cascade;
pub use ;
pub use ;
pub use ;