use llmkit::{CompletionRequest, LLMKitClient, Message};
#[tokio::main]
async fn main() -> llmkit::Result<()> {
    // Connect to OpenRouter using credentials taken from the environment.
    let llm = LLMKitClient::builder()
        .with_openrouter_from_env()
        .build()
        .await?;

    // Let the user know up front that a reasoning model can be slow.
    println!("Solving with QwQ-32B reasoning model...");
    println!("(This may take a moment)\n");

    // Single-turn word problem aimed at the QwQ-32B reasoning model.
    // The trailing backslashes join the lines into one string; leading
    // whitespace on continuation lines is stripped by the compiler.
    let prompt = Message::user(
        "Solve this step by step: \
         A train travels from City A to City B at 60 mph. \
         Another train leaves City B towards City A at 40 mph at the same time. \
         The cities are 200 miles apart. \
         Where do they meet and after how long?",
    );
    let req = CompletionRequest::new("openrouter/qwen/qwq-32b", vec![prompt])
        .with_system("Think through the problem carefully and show your reasoning.")
        .with_max_tokens(2000);

    let reply = llm.complete(req).await?;

    // Pretty-print the answer between horizontal rules.
    let rule = "=".repeat(50);
    println!("{rule}");
    println!("RESPONSE (with reasoning):");
    println!("{rule}");
    println!("{}", reply.text_content());
    println!("\n{rule}");
    println!("Tokens used: {}", reply.usage.total_tokens());

    Ok(())
}