codex-cli-sdk 0.0.1

Rust SDK for the OpenAI Codex CLI
//! Local providers — run against lmstudio, ollama, or any other OpenAI-compatible server.
//!
//! Set `local_provider` to the provider name and `model` to the local model ID.
//! Also demonstrates `ReasoningEffort` for models that support it.
//!
//! Prerequisites:
//!   - lmstudio: start the local server, load a model
//!   - ollama:   `ollama serve` and `ollama pull <model>`
//!
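//! For example, to prepare ollama (the model tag matches the one used in the
//! code below):
//!
//! ```bash
//! ollama serve
//! ollama pull deepseek-r1:8b
//! ```
//!
//! Then run the example:
//!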
//! ```bash
//! cargo run --example 07_local_provider
//! ```

use codex_cli_sdk::config::ReasoningEffort;
use codex_cli_sdk::{Codex, CodexConfig, ThreadOptions};

#[tokio::main]
async fn main() -> codex_cli_sdk::Result<()> {
    let codex = Codex::new(CodexConfig::default())?;

    // ── lmstudio ──────────────────────────────────────────────────
    println!("=== lmstudio (qwen2.5-coder-7b) ===");
    let options = ThreadOptions::builder()
        .local_provider("lmstudio")
        .model("qwen2.5-coder-7b-instruct")
        .build();

    let mut thread = codex.start_thread(options);
    let turn = thread
        .run(
            "Write a fizzbuzz implementation in Rust",
            Default::default(),
        )
        .await?;
    println!("{}", turn.final_response);

    // ── ollama ────────────────────────────────────────────────────
    println!("\n=== ollama (deepseek-r1) with high reasoning ===");
    let options = ThreadOptions::builder()
        .local_provider("ollama")
        .model("deepseek-r1:8b")
        .reasoning_effort(ReasoningEffort::High)
        .build();

    let mut thread = codex.start_thread(options);
    let turn = thread
        .run(
            "What is the time complexity of quicksort in the worst case, and why?",
            Default::default(),
        )
        .await?;
    println!("{}", turn.final_response);

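    // Token usage is optional; a local server may not report it.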
    if let Some(usage) = &turn.usage {
        println!(
            "\nTokens: {} in / {} out (cached: {})",
            usage.input_tokens, usage.output_tokens, usage.cached_input_tokens
        );
    }

    Ok(())
}
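
Both sections repeat the same flow: build options, start a thread, run a prompt, print the final response. In application code this is easy to factor into a helper. Below is a minimal sketch built only from the calls exercised above; the `ask_local` name is illustrative, not part of the SDK, and it assumes `final_response` is an owned `String`, as the prints in the example suggest.

```rust
use codex_cli_sdk::{Codex, ThreadOptions};

/// Run one prompt against a local provider and return the final text.
/// Illustrative helper; uses only the API shown in the example above.
async fn ask_local(
    codex: &Codex,
    provider: &str,
    model: &str,
    prompt: &str,
) -> codex_cli_sdk::Result<String> {
    let options = ThreadOptions::builder()
        .local_provider(provider)
        .model(model)
        .build();

    let mut thread = codex.start_thread(options);
    let turn = thread.run(prompt, Default::default()).await?;
    Ok(turn.final_response)
}

// Usage (inside main):
// let answer = ask_local(&codex, "ollama", "deepseek-r1:8b", "Explain ownership").await?;
```

Swapping providers then comes down to changing two arguments rather than rebuilding the options by hand in each section.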