mod client;
mod parser;
mod pull;
mod url;

use agcodex_core::config::Config;
pub use client::OllamaClient;
pub use pull::CliProgressReporter;
pub use pull::PullEvent;
pub use pull::PullProgressReporter;
pub use pull::TuiProgressReporter;

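/// Default model for the local OSS (Ollama) provider.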
pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b";

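/// Ensure the model referenced by `config` is available in the local Ollama
/// instance, pulling it with CLI progress reporting if it is missing.
///
/// If the installed models cannot be listed, a warning is logged and the
/// check is skipped rather than treated as a hard error.
///
/// ```ignore
/// // Hypothetical call site: `config` is an agcodex_core::config::Config.
/// ensure_oss_ready(&config).await?;
/// ```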
pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
    let model = config.model.as_ref();

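    // Connect to the Ollama server behind the built-in OSS provider.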
    let ollama_client = crate::OllamaClient::try_from_oss_provider(config).await?;

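    // Pull the model only if it is not already present locally.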
    match ollama_client.fetch_models().await {
        Ok(models) => {
            if !models.iter().any(|m| m == model) {
                let mut reporter = crate::CliProgressReporter::new();
                ollama_client
                    .pull_with_reporter(model, &mut reporter)
                    .await?;
            }
        }
        Err(err) => {
            // Best-effort check: warn and continue rather than failing hard
            // when the local server cannot be queried.
            tracing::warn!("Failed to query local models from Ollama: {}.", err);
        }
    }

    Ok(())
}