use grok_api::{ChatMessage, GrokClient, Model, Result};
/// Formats the token-usage summary line (no trailing newline).
///
/// `reasoning` and `cached` are optional counters that are appended only when
/// the API reported them. Generic over `Display` so it works with whatever
/// integer types the project's usage struct declares.
fn format_usage(
    prompt: impl std::fmt::Display,
    completion: impl std::fmt::Display,
    total: impl std::fmt::Display,
    reasoning: Option<impl std::fmt::Display>,
    cached: Option<impl std::fmt::Display>,
) -> String {
    let mut line = format!("📊 Tokens — prompt: {prompt}, completion: {completion}, total: {total}");
    if let Some(r) = reasoning {
        line.push_str(&format!(", reasoning: {r} 🧠"));
    }
    if let Some(c) = cached {
        line.push_str(&format!(", cached: {c} 💰"));
    }
    line
}

/// Demo binary: prints the capabilities of two reasoning models, sends the
/// same hard problem to each, and reports the token usage (including
/// reasoning/cached counts when the API returns them).
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    // Fail fast with an actionable message when the key is absent —
    // this is an example binary, so a panic here is the intended UX.
    let api_key = std::env::var("GROK_API_KEY").expect("GROK_API_KEY environment variable not set");
    let client = GrokClient::new(&api_key)?;

    println!("🧠 Grok API — Reasoning Model Demo\n");

    let reasoning_model = Model::Grok4_20_0309Reasoning;
    let fast_model = Model::Grok4_1FastReasoning;

    // Capability overview for both models under comparison.
    println!("Model capabilities:");
    for model in [reasoning_model, fast_model] {
        println!(
            " {:<36} | reasoning={:<5} | ctx={:>12} | logprobs={}",
            model.as_str(),
            model.is_reasoning_model(),
            model
                .context_window()
                .map(|n| format!("{n}"))
                .unwrap_or_else(|| "N/A".into()),
            model.supports_logprobs(),
        );
    }
    println!();

    println!("── Hard problem: grok-4.20-0309-reasoning ──\n");
    // The `\` line continuations strip the following line's leading
    // whitespace, so the prompt is a single space-separated paragraph.
    let problem = "\
        A Rust program spawns N worker threads. Each thread needs exclusive \
        mutable access to a shared counter, but the main thread must also read \
        the counter after all workers finish. \
        Design the minimal safe solution using only the standard library \
        (no external crates). Explain your choice of synchronisation primitive \
        and show the complete code.";
    println!("Problem:\n{problem}\n");

    let response = client
        .chat_with_history(&[ChatMessage::user(problem)])
        .model(reasoning_model.as_str())
        .max_tokens(1200)
        .send()
        .await?;
    if let Some(content) = response.content() {
        println!("🤖 Response:\n{content}\n");
    }
    let u = &response.usage;
    print!(
        "{}",
        format_usage(
            u.prompt_tokens,
            u.completion_tokens,
            u.total_tokens,
            u.reasoning_tokens,
            u.cached_prompt_tokens,
        )
    );
    println!("\n");

    println!("── Same question: grok-4-1-fast-reasoning (for comparison) ──\n");
    let response = client
        .chat_with_history(&[ChatMessage::user(problem)])
        .model(fast_model.as_str())
        .max_tokens(1200)
        .send()
        .await?;
    if let Some(content) = response.content() {
        println!("🤖 Response:\n{content}\n");
    }
    let u = &response.usage;
    print!(
        "{}",
        format_usage(
            u.prompt_tokens,
            u.completion_tokens,
            u.total_tokens,
            u.reasoning_tokens,
            u.cached_prompt_tokens,
        )
    );
    println!();

    println!("\n✨ Done!");
    Ok(())
}