codio 0.1.1

Production-ready commit message generator using a local Ollama LLM
use crate::error::AppError;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;

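/// Default local Ollama endpoint for non-streaming text generation.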
pub const OLLAMA_URL: &str = "http://localhost:11434/api/generate";
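/// Default local Ollama endpoint that lists the installed models.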
pub const OLLAMA_TAGS_URL: &str = "http://localhost:11434/api/tags";

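/// JSON body for Ollama's /api/generate call; `stream: false` requests a single
/// complete response instead of a token stream.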
#[derive(Debug, Serialize)]
struct OllamaGenerateRequest<'a> {
    model: &'a str,
    prompt: &'a str,
    stream: bool,
}

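/// Reply from /api/generate, reduced to the generated text.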
#[derive(Debug, Deserialize)]
struct OllamaGenerateResponse {
    response: String,
}

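/// Reply from /api/tags; `models` falls back to an empty list if the field is missing.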
#[derive(Debug, Deserialize)]
struct OllamaTagsResponse {
    #[serde(default)]
    models: Vec<OllamaModel>,
}

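/// A single installed model as reported by /api/tags; only its name is used here.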
#[derive(Debug, Deserialize)]
struct OllamaModel {
    name: String,
}

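/// Snapshot of the local Ollama instance: whether it answered at all, which
/// models are installed, and whether the selected model is among them.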
#[derive(Debug, Clone)]
pub struct ModelStatus {
    pub reachable: bool,
    pub installed_models: Vec<String>,
    pub selected_model_available: bool,
}

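/// Sends `prompt` to the given `model` through the Ollama generate API at `url`
/// and returns the raw response text. Connection failures and timeouts are mapped
/// to actionable `AppError::Message` hints; non-success statuses and empty
/// responses are also reported as errors.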
pub async fn generate_with_ollama(
    url: &str,
    model: &str,
    prompt: &str,
    verbose_flag: bool,
) -> Result<String, AppError> {
    if verbose_flag {
        eprintln!("[verbose] sending request to Ollama model '{model}' at {url}");
    }

    let client = Client::builder()
        .connect_timeout(Duration::from_secs(5))
        .timeout(Duration::from_secs(90))
        .build()?;

    let response = client
        .post(url)
        .json(&OllamaGenerateRequest {
            model,
            prompt,
            stream: false,
        })
        .send()
        .await;

    let response = match response {
        Ok(resp) => resp,
        Err(err) => {
            if err.is_connect() {
                return Err(AppError::Message(format!(
                    "Failed to connect to Ollama at {url}. Start Ollama with: ollama serve"
                )));
            }
            if err.is_timeout() {
                return Err(AppError::Message(
                    "Ollama request timed out. Try a smaller diff (--max-chars), or use a faster model."
                        .to_string(),
                ));
            }
            return Err(AppError::Http(err));
        }
    };

    if !response.status().is_success() {
        let status = response.status();
        let body = response
            .text()
            .await
            .unwrap_or_else(|_| "<unavailable>".to_string());
        return Err(AppError::Message(format!(
            "Ollama API returned HTTP {status}. Response: {body}"
        )));
    }

    let parsed: OllamaGenerateResponse = response.json().await?;
    if parsed.response.trim().is_empty() {
        return Err(AppError::Message(
            "Ollama returned an empty response. Try again or use a different model.".to_string(),
        ));
    }

    Ok(parsed.response)
}

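/// Probes the local Ollama instance via /api/tags. An unreachable or failing
/// server is reported as `reachable: false` rather than as an error; the selected
/// model counts as available if its name matches exactly or with a `:latest` tag.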
pub async fn query_model_status(selected_model: &str) -> Result<ModelStatus, AppError> {
    let client = Client::builder()
        .connect_timeout(Duration::from_secs(3))
        .timeout(Duration::from_secs(8))
        .build()?;

    let response = client.get(OLLAMA_TAGS_URL).send().await;
    let response = match response {
        Ok(resp) => resp,
        Err(err) => {
            if err.is_connect() || err.is_timeout() {
                return Ok(ModelStatus {
                    reachable: false,
                    installed_models: Vec::new(),
                    selected_model_available: false,
                });
            }
            return Err(AppError::Http(err));
        }
    };

    if !response.status().is_success() {
        return Ok(ModelStatus {
            reachable: false,
            installed_models: Vec::new(),
            selected_model_available: false,
        });
    }

    let parsed: OllamaTagsResponse = response.json().await?;
    let installed_models: Vec<String> = parsed.models.into_iter().map(|m| m.name).collect();
    let latest_alias = format!("{selected_model}:latest");
    let selected_model_available = installed_models
        .iter()
        .any(|name| name == selected_model || name == &latest_alias);

    Ok(ModelStatus {
        reachable: true,
        installed_models,
        selected_model_available,
    })
}
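
// Minimal usage sketch (not part of this module): a hypothetical async caller,
// assuming a tokio runtime and a `prompt` string built elsewhere; the model name
// "llama3" is only an example.
//
//     let model = "llama3";
//     let status = query_model_status(model).await?;
//     if !status.reachable {
//         eprintln!("Ollama is not reachable; start it with: ollama serve");
//     } else if !status.selected_model_available {
//         eprintln!("model '{model}' not installed; found: {:?}", status.installed_models);
//     }
//     let message = generate_with_ollama(OLLAMA_URL, model, &prompt, false).await?;
//     println!("{message}");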