// llm_link/llm/mod.rs

1mod chat;
2mod models;
3mod stream;
4mod types;
5
6pub use types::{Model, Response};
7
8use crate::models::ModelsConfig;
9use crate::settings::LlmBackendSettings;
10use anyhow::Result;
11use llm_connector::LlmClient;
12
/// Unified LLM client that wraps llm-connector for all providers.
///
/// Constructed via [`Client::new`] from a single backend configuration;
/// one `Client` instance talks to exactly one provider.
pub struct Client {
    // Backend settings this client was created from (cloned in `new`);
    // kept so callers/methods can inspect provider-specific options later.
    backend: LlmBackendSettings,
    // Underlying llm-connector client selected to match `backend`.
    llm_client: LlmClient,
    // Model metadata loaded via `ModelsConfig::load_with_fallback()`.
    models_config: ModelsConfig,
}
19
20impl Client {
21    /// Create a new client with the specified backend configuration
22    pub fn new(config: &LlmBackendSettings) -> Result<Self> {
23        let llm_client = match config {
24            LlmBackendSettings::OpenAI {
25                api_key, base_url, ..
26            } => {
27                // Use 30 second timeout for better reliability
28                if let Some(base_url) = base_url {
29                    LlmClient::openai_compatible(api_key, base_url, "openai")?
30                } else {
31                    LlmClient::openai(api_key)?
32                }
33            }
34            LlmBackendSettings::Anthropic { api_key, .. } => {
35                // Use 30 second timeout for better reliability
36                LlmClient::anthropic(api_key)?
37            }
38            LlmBackendSettings::Aliyun { api_key, .. } => LlmClient::aliyun(api_key)?,
39            LlmBackendSettings::Zhipu { api_key, .. } => {
40                // Use Zhipu OpenAI compatible mode for better reliability
41                LlmClient::zhipu_openai_compatible(api_key)?
42            }
43            LlmBackendSettings::Volcengine { api_key, .. } => LlmClient::volcengine(api_key)?,
44            LlmBackendSettings::Tencent { api_key, .. } => LlmClient::tencent(api_key)?,
45            LlmBackendSettings::Longcat { api_key, .. } => {
46                // Longcat uses OpenAI compatible API
47                LlmClient::openai_compatible(api_key, "https://api.longcat.chat/v1", "longcat")?
48            }
49            LlmBackendSettings::Ollama { base_url, .. } => {
50                if base_url.is_some() {
51                    // For custom Ollama URLs, we might need to use openai_compatible
52                    // But for now, let's use the standard ollama method
53                    LlmClient::ollama()?
54                } else {
55                    LlmClient::ollama()?
56                }
57            }
58        };
59
60        // Load models configuration
61        let models_config = ModelsConfig::load_with_fallback();
62
63        Ok(Self {
64            backend: config.clone(),
65            llm_client,
66            models_config,
67        })
68    }
69}