agcodex_ollama/
lib.rs
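
//! Ollama integration: local client, model pulling with progress reporting,
//! and bootstrap helpers for the `--oss` workflow.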

mod client;
mod parser;
mod pull;
mod url;

use agcodex_core::config::Config;
pub use client::OllamaClient;
pub use pull::CliProgressReporter;
pub use pull::PullEvent;
pub use pull::PullProgressReporter;
pub use pull::TuiProgressReporter;

/// Default OSS model to use when `--oss` is passed without an explicit `-m`.
pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b";

/// Prepare the local OSS environment when `--oss` is selected.
///
/// - Ensures a local Ollama server is reachable.
/// - Checks if the model exists locally and pulls it if missing.
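///
/// # Example
///
/// A minimal sketch of a call site (assuming an async context, e.g. under
/// `tokio`, and an already-loaded `Config`):
///
/// ```no_run
/// # async fn run(config: &agcodex_core::config::Config) -> std::io::Result<()> {
/// agcodex_ollama::ensure_oss_ready(config).await?;
/// # Ok(())
/// # }
/// ```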
pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
    // The model to ensure locally; when `--oss` is passed without `-m`,
    // callers default this to `DEFAULT_OSS_MODEL`.
    let model = config.model.as_str();

    // Verify local Ollama is reachable.
    let ollama_client = OllamaClient::try_from_oss_provider(config).await?;

    // If the model is not present locally, pull it.
    match ollama_client.fetch_models().await {
        Ok(models) => {
            if !models.iter().any(|m| m == model) {
                let mut reporter = CliProgressReporter::new();
                ollama_client
                    .pull_with_reporter(model, &mut reporter)
                    .await?;
            }
        }
        Err(err) => {
            // Not fatal; higher layers may still proceed and surface errors later.
            tracing::warn!("Failed to query local models from Ollama: {err}");
        }
    }

    Ok(())
}
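
// A small, self-contained sanity check (a sketch added for illustration):
// `DEFAULT_OSS_MODEL` should stay in `name:tag` form, the shape that
// `ollama pull` accepts.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_oss_model_is_name_tag() {
        let (name, tag) = DEFAULT_OSS_MODEL
            .split_once(':')
            .expect("default OSS model should be in name:tag form");
        assert!(!name.is_empty());
        assert!(!tag.is_empty());
    }
}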