//! grok_api 0.1.6
//!
//! Rust client library for the Grok AI API (xAI) — see the crate
//! documentation for full details.
//! Reasoning model example — demonstrates grok-4.20-0309-reasoning.
//!
//! Grok 4.20 reasoning models:
//!   • Use internal chain-of-thought for complex problems
//!   • Do NOT support `presence_penalty`, `frequency_penalty`, or `stop`
//!   • Do NOT support `reasoning_effort`
//!   • `logprobs` is silently ignored
//!
//! Run with:
//!   cargo run --example reasoning

use grok_api::{ChatMessage, GrokClient, Model, Result};

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    let api_key = std::env::var("GROK_API_KEY").expect("GROK_API_KEY environment variable not set");

    let client = GrokClient::new(&api_key)?;

    println!("🧠 Grok API — Reasoning Model Demo\n");

    // ── Show model capabilities ───────────────────────────────────────────────
    let reasoning_model = Model::Grok4_20_0309Reasoning;
    let fast_model = Model::Grok4_1FastReasoning;

    println!("Model capabilities:");
    for model in [reasoning_model, fast_model] {
        println!(
            "  {:<36} | reasoning={:<5} | ctx={:>12} | logprobs={}",
            model.as_str(),
            model.is_reasoning_model(),
            model
                .context_window()
                .map(|n| format!("{n}"))
                .unwrap_or_else(|| "N/A".into()),
            model.supports_logprobs(),
        );
    }
    println!();

    // ── Hard reasoning task → grok-4.20-0309-reasoning ───────────────────────
    println!("── Hard problem: grok-4.20-0309-reasoning ──\n");

    let problem = "\
        A Rust program spawns N worker threads. Each thread needs exclusive \
        mutable access to a shared counter, but the main thread must also read \
        the counter after all workers finish. \
        Design the minimal safe solution using only the standard library \
        (no external crates). Explain your choice of synchronisation primitive \
        and show the complete code.";

    println!("Problem:\n{problem}\n");

    // Guard: reasoning models must NOT receive presence/frequency penalties.
    // The `supports_frequency_presence_penalty()` helper makes this safe.
    let builder = client
        .chat_with_history(&[ChatMessage::user(problem)])
        .model(reasoning_model.as_str())
        .max_tokens(1200);

    // Only attach these params for models that support them
    if reasoning_model.supports_frequency_presence_penalty() {
        // (would call .frequency_penalty() / .presence_penalty() here)
        // Not needed for this model — skipped safely.
    }

    let _ = builder; // suppress unused warning if no further chaining needed

    // Re-build cleanly without unsupported params
    let response = client
        .chat_with_history(&[ChatMessage::user(problem)])
        .model(reasoning_model.as_str())
        .max_tokens(1200)
        .send()
        .await?;

    if let Some(content) = response.content() {
        println!("🤖 Response:\n{content}\n");
    }

    let u = &response.usage;
    print!(
        "📊 Tokens — prompt: {}, completion: {}, total: {}",
        u.prompt_tokens, u.completion_tokens, u.total_tokens
    );
    if let Some(r) = u.reasoning_tokens {
        print!(", reasoning: {r} 🧠");
    }
    if let Some(c) = u.cached_prompt_tokens {
        print!(", cached: {c} 💰");
    }
    println!("\n");

    // ── Same question with fast model for comparison ──────────────────────────
    println!("── Same question: grok-4-1-fast-reasoning (for comparison) ──\n");

    let response = client
        .chat_with_history(&[ChatMessage::user(problem)])
        .model(fast_model.as_str())
        .max_tokens(1200)
        .send()
        .await?;

    if let Some(content) = response.content() {
        println!("🤖 Response:\n{content}\n");
    }

    let u = &response.usage;
    print!(
        "📊 Tokens — prompt: {}, completion: {}, total: {}",
        u.prompt_tokens, u.completion_tokens, u.total_tokens
    );
    if let Some(r) = u.reasoning_tokens {
        print!(", reasoning: {r} 🧠");
    }
    if let Some(c) = u.cached_prompt_tokens {
        print!(", cached: {c} 💰");
    }
    println!();

    println!("\n✨ Done!");
    Ok(())
}