// llm_link/llm/models.rs

use super::Client;
use crate::llm::types::Model;
use crate::settings::LlmBackendSettings;
use anyhow::Result;
use llm_connector::Provider;

7impl Client {
8    /// List available models
9    pub async fn list_models(&self) -> Result<Vec<Model>> {
10        let provider_name = match &self.backend {
11            LlmBackendSettings::OpenAI { .. } => "openai",
12            LlmBackendSettings::Anthropic { .. } => "anthropic",
13            LlmBackendSettings::Zhipu { .. } => "zhipu",
14            LlmBackendSettings::Ollama { .. } => "ollama",
15            LlmBackendSettings::Aliyun { .. } => "aliyun",
16            LlmBackendSettings::Volcengine { .. } => "volcengine",
17            LlmBackendSettings::Tencent { .. } => "tencent",
18            LlmBackendSettings::Longcat { .. } => "longcat",
19            LlmBackendSettings::Moonshot { .. } => "moonshot",
20        };
21
22        // Special handling for Ollama - get actual installed models
23        if provider_name == "ollama" {
24            if let Some(ollama_client) = self.llm_client.as_ollama() {
25                // Try to get actual installed models from Ollama
26                match ollama_client.models().await {
27                    Ok(ollama_models) => {
28                        let models: Vec<Model> = ollama_models.into_iter()
29                            .map(|model_name| Model { id: model_name })
30                            .collect();
31
32                        if !models.is_empty() {
33                            return Ok(models);
34                        }
35                    }
36                    Err(e) => {
37                        eprintln!("Warning: Failed to get Ollama models: {}, falling back to config", e);
38                    }
39                }
40            }
41        }
42
43        // For other providers or if Ollama API fails, use configuration file
44        let model_infos = self.models_config.get_models_for_provider(provider_name);
45
46        // Convert ModelInfo to Model
47        let models: Vec<Model> = model_infos.into_iter().map(|info| Model {
48            id: info.id,
49        }).collect();
50
51        // If no models found in config, fall back to current model from backend config
52        if models.is_empty() {
53            let fallback_model = match &self.backend {
54                LlmBackendSettings::OpenAI { model, .. } => model.clone(),
55                LlmBackendSettings::Anthropic { model, .. } => model.clone(),
56                LlmBackendSettings::Zhipu { model, .. } => model.clone(),
57                LlmBackendSettings::Ollama { model, .. } => model.clone(),
58                LlmBackendSettings::Aliyun { model, .. } => model.clone(),
59                LlmBackendSettings::Volcengine { model, .. } => model.clone(),
60                LlmBackendSettings::Tencent { model, .. } => model.clone(),
61                LlmBackendSettings::Longcat { model, .. } => model.clone(),
62                LlmBackendSettings::Moonshot { model, .. } => model.clone(),
63            };
64
65            Ok(vec![Model { id: fallback_model }])
66        } else {
67            Ok(models)
68        }
69    }
70}
71