use llmkit::{CompletionRequest, LLMKitClient, Message};
#[tokio::main]
async fn main() -> llmkit::Result<()> {
    /// Print a section header framed above and below by a 50-char '=' rule.
    /// `spaced` prepends a blank line before the top rule (used between sections).
    fn banner(title: &str, spaced: bool) {
        let rule = "=".repeat(50);
        if spaced {
            println!("\n{}", rule);
        } else {
            println!("{}", rule);
        }
        println!("{}", title);
        println!("{}", rule);
    }

    // Run the three examples in sequence; any failure aborts the remainder.
    banner("Example 1: Auto-detect from Environment", false);
    using_from_env().await?;

    banner("Example 2: Switch Between Providers", true);
    switch_between_providers().await?;

    banner("Example 3: Provider Fallback", true);
    provider_fallback().await?;

    Ok(())
}
/// Builds a client from whatever provider credentials are present in the
/// environment, lists the providers that were detected, and issues one
/// completion against an Anthropic model.
async fn using_from_env() -> llmkit::Result<()> {
    let llm = LLMKitClient::builder()
        .with_anthropic_from_env()
        .with_openai_from_env()
        .with_default_retry()
        .build()
        .await?;

    println!("Detected providers: {:?}", llm.providers());

    // Build the request separately from the call for readability.
    let request = CompletionRequest::new(
        "anthropic/claude-sonnet-4-20250514",
        vec![Message::user("Say hello")],
    )
    .with_max_tokens(50);

    let reply = llm.complete(request).await?;
    println!("\nDefault provider response: {}", reply.text_content());
    Ok(())
}
/// Sends the same prompt to each known model in turn, skipping models whose
/// provider is not configured, and prints the answer (or error) per model.
async fn switch_between_providers() -> llmkit::Result<()> {
    let client = LLMKitClient::builder()
        .with_anthropic_from_env()
        .with_openai_from_env()
        .with_default_retry()
        .build()
        .await?;

    let configured = client.providers();
    println!("Available: {:?}\n", configured);

    let question = "What's 2+2? Answer with just the number.";
    for model in ["anthropic/claude-sonnet-4-20250514", "openai/gpt-4o"] {
        // The provider id is the path segment before the '/'.
        let provider = model.split('/').next().unwrap_or("unknown");
        if !configured.contains(&provider) {
            println!("{}: Not configured", provider);
            continue;
        }

        let request =
            CompletionRequest::new(model, vec![Message::user(question)]).with_max_tokens(20);
        match client.complete(request).await {
            Ok(reply) => println!("{}: {}", model, reply.text_content().trim()),
            Err(err) => println!("{}: Error - {}", model, err),
        }
    }
    Ok(())
}
/// Tries each model in priority order, returning after the first success.
/// Models whose provider is not configured are skipped; failures fall
/// through to the next candidate. Returns Ok even if every provider fails
/// (the failure is reported on stdout).
async fn provider_fallback() -> llmkit::Result<()> {
    let client = LLMKitClient::builder()
        .with_anthropic_from_env()
        .with_openai_from_env()
        .with_default_retry()
        .build()
        .await?;

    let model_priority = ["anthropic/claude-sonnet-4-20250514", "openai/gpt-4o"];
    // HashSet for O(1) membership checks per candidate model.
    let available: std::collections::HashSet<_> = client.providers().into_iter().collect();

    for model in model_priority {
        // Provider id is the path segment before the '/'.
        let provider = model.split('/').next().unwrap_or("unknown");
        if !available.contains(provider) {
            println!("Skipping {} (not configured)", model);
            continue;
        }
        print!("Trying {}... ", model);
        match client
            .complete(
                CompletionRequest::new(model, vec![Message::user("What is Python?")])
                    .with_max_tokens(100),
            )
            .await
        {
            Ok(response) => {
                println!("Success!");
                // BUG FIX: the original sliced the String at byte offset 100
                // (`&text[..100.min(len)]`), which panics when byte 100 falls
                // inside a multi-byte UTF-8 sequence — likely for non-ASCII
                // model output. Truncate by characters instead.
                let preview: String = response.text_content().chars().take(100).collect();
                println!("Response: {}...", preview);
                return Ok(());
            }
            Err(e) => {
                println!("Failed: {}", e);
                continue;
            }
        }
    }
    println!("All providers failed!");
    Ok(())
}