use llmkit::{CompletionRequest, LLMKitClient, Message};
/// Example: send one completion request to Anthropic through llmkit and
/// print the model's reply along with stop reason and token accounting.
#[tokio::main]
async fn main() -> llmkit::Result<()> {
    // Client configured from the environment (API key etc.), using the
    // library's default retry policy.
    let llm = LLMKitClient::builder()
        .with_anthropic_from_env()
        .with_default_retry()
        .build()
        .await?;

    // Single-turn conversation; output capped at 100 tokens.
    let messages = vec![Message::user(
        "What is the capital of France? Reply in one word.",
    )];
    let req = CompletionRequest::new("anthropic/claude-sonnet-4-20250514", messages)
        .with_max_tokens(100);

    println!("Sending request...");
    let reply = llm.complete(req).await?;

    // Dump the interesting parts of the response.
    println!("\nResponse: {}", reply.text_content());
    println!("Model: {}", reply.model);
    println!("Stop reason: {:?}", reply.stop_reason);
    println!("\nToken usage:");
    println!(" Input tokens: {}", reply.usage.input_tokens);
    println!(" Output tokens: {}", reply.usage.output_tokens);
    println!(" Total tokens: {}", reply.usage.total_tokens());
    Ok(())
}