use super::Client;
use crate::llm::types::Model;
use crate::settings::LlmBackendSettings;
use anyhow::Result;
use llm_connector::Provider;

impl Client {
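    /// Lists the models available for the configured backend.
    ///
    /// For Ollama the live model list is queried first; if that fails or is
    /// empty, the models config is consulted, and as a last resort the single
    /// model named in the backend settings is returned.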
    pub async fn list_models(&self) -> Result<Vec<Model>> {
        let provider_name = match &self.backend {
            LlmBackendSettings::OpenAI { .. } => "openai",
            LlmBackendSettings::Anthropic { .. } => "anthropic",
            LlmBackendSettings::Zhipu { .. } => "zhipu",
            LlmBackendSettings::Ollama { .. } => "ollama",
            LlmBackendSettings::Aliyun { .. } => "aliyun",
            LlmBackendSettings::Volcengine { .. } => "volcengine",
            LlmBackendSettings::Tencent { .. } => "tencent",
        };

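        // Ollama can report its locally installed models at runtime, so
        // prefer the live list over the static config when it is available.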
        if provider_name == "ollama" {
            if let Some(ollama_client) = self.llm_client.as_ollama() {
                match ollama_client.models().await {
                    Ok(ollama_models) => {
                        let models: Vec<Model> = ollama_models
                            .into_iter()
                            .map(|model_name| Model { id: model_name })
                            .collect();

                        if !models.is_empty() {
                            return Ok(models);
                        }
                    }
                    Err(e) => {
                        eprintln!(
                            "Warning: Failed to get Ollama models: {}, falling back to config",
                            e
                        );
                    }
                }
            }
        }

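        // Look up the curated model list for this provider from the models config.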
        let model_infos = self.models_config.get_models_for_provider(provider_name);

        let models: Vec<Model> = model_infos
            .into_iter()
            .map(|info| Model { id: info.id })
            .collect();

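        // The config had no entries for this provider: fall back to the single
        // model named in the backend settings so the caller always gets at
        // least one entry.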
        if models.is_empty() {
            let fallback_model = match &self.backend {
                LlmBackendSettings::OpenAI { model, .. } => model.clone(),
                LlmBackendSettings::Anthropic { model, .. } => model.clone(),
                LlmBackendSettings::Zhipu { model, .. } => model.clone(),
                LlmBackendSettings::Ollama { model, .. } => model.clone(),
                LlmBackendSettings::Aliyun { model, .. } => model.clone(),
                LlmBackendSettings::Volcengine { model, .. } => model.clone(),
                LlmBackendSettings::Tencent { model, .. } => model.clone(),
            };

            Ok(vec![Model { id: fallback_model }])
        } else {
            Ok(models)
        }
    }
}