llm_link/llm/models.rs

use super::Client;
use crate::llm::types::Model;
use crate::settings::LlmBackendSettings;
use anyhow::Result;
use llm_connector::Provider;

impl Client {
    /// List available models for the configured backend.
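    ///
    /// Resolution order:
    /// 1. For Ollama, query the local instance for its installed models.
    /// 2. Otherwise, or if the Ollama query fails or returns nothing, read the
    ///    models listed for the provider in the models configuration file.
    /// 3. If the configuration lists no models, fall back to the single model
    ///    named in the backend settings.
    ///
    /// # Examples
    ///
    /// A minimal usage sketch (marked `ignore` so it is not compiled as a
    /// doctest), assuming a `Client` has already been constructed elsewhere and
    /// that `Model::id` is a `String`:
    ///
    /// ```ignore
    /// async fn print_models(client: &Client) -> anyhow::Result<()> {
    ///     for model in client.list_models().await? {
    ///         println!("{}", model.id);
    ///     }
    ///     Ok(())
    /// }
    /// ```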
    pub async fn list_models(&self) -> Result<Vec<Model>> {
        let provider_name = match &self.backend {
            LlmBackendSettings::OpenAI { .. } => "openai",
            LlmBackendSettings::Anthropic { .. } => "anthropic",
            LlmBackendSettings::Zhipu { .. } => "zhipu",
            LlmBackendSettings::Ollama { .. } => "ollama",
            LlmBackendSettings::Aliyun { .. } => "aliyun",
            LlmBackendSettings::Volcengine { .. } => "volcengine",
            LlmBackendSettings::Tencent { .. } => "tencent",
            LlmBackendSettings::Longcat { .. } => "longcat",
        };

        // Special handling for Ollama - get actual installed models
        if provider_name == "ollama" {
            if let Some(ollama_client) = self.llm_client.as_ollama() {
                // Try to get actual installed models from Ollama
                match ollama_client.models().await {
                    Ok(ollama_models) => {
                        let models: Vec<Model> = ollama_models
                            .into_iter()
                            .map(|model_name| Model { id: model_name })
                            .collect();

                        if !models.is_empty() {
                            return Ok(models);
                        }
                    }
                    Err(e) => {
                        eprintln!("Warning: Failed to get Ollama models: {}, falling back to config", e);
                    }
                }
            }
        }

        // For other providers, or if the Ollama API fails, use the configuration file
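        // (the provider names above are assumed to match the keys used by the
        // models configuration file)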
        let model_infos = self.models_config.get_models_for_provider(provider_name);

        // Convert ModelInfo to Model
        let models: Vec<Model> = model_infos
            .into_iter()
            .map(|info| Model { id: info.id })
            .collect();

        // If no models found in config, fall back to current model from backend config
        if models.is_empty() {
            let fallback_model = match &self.backend {
                LlmBackendSettings::OpenAI { model, .. } => model.clone(),
                LlmBackendSettings::Anthropic { model, .. } => model.clone(),
                LlmBackendSettings::Zhipu { model, .. } => model.clone(),
                LlmBackendSettings::Ollama { model, .. } => model.clone(),
                LlmBackendSettings::Aliyun { model, .. } => model.clone(),
                LlmBackendSettings::Volcengine { model, .. } => model.clone(),
                LlmBackendSettings::Tencent { model, .. } => model.clone(),
                LlmBackendSettings::Longcat { model, .. } => model.clone(),
            };

            Ok(vec![Model { id: fallback_model }])
        } else {
            Ok(models)
        }
    }
}