// llm_link/api/mod.rs

1pub mod openai;
2pub mod ollama;
3pub mod anthropic;
4pub mod convert;
5pub mod config;
6
7use crate::settings::{Settings, LlmBackendSettings};
8use crate::service::Service as LlmService;
9use crate::models::ModelsConfig;
10use axum::response::Json;
11use axum::extract::State;
12use axum::http::StatusCode;
13use serde_json::json;
14use std::sync::{Arc, RwLock};
15use anyhow::Result;
16
/// Shared application state handed to every axum handler via `State<AppState>`.
///
/// Both fields are wrapped in `Arc<RwLock<…>>` so the state can be cloned
/// cheaply per-request (axum requires `Clone`) while still allowing
/// `update_llm_service` to swap the backend at runtime.
#[derive(Clone)]
pub struct AppState {
    // The active LLM backend service; replaced wholesale on reconfiguration.
    pub llm_service: Arc<RwLock<LlmService>>,
    // The settings the service was built from; kept in sync with `llm_service`.
    pub config: Arc<RwLock<Settings>>,
}
23
24impl AppState {
25    pub fn new(llm_service: LlmService, config: Settings) -> Self {
26        Self {
27            llm_service: Arc::new(RwLock::new(llm_service)),
28            config: Arc::new(RwLock::new(config)),
29        }
30    }
31
32    /// 动态更新 LLM 服务配置
33    ///
34    /// 这个方法允许在运行时更新 LLM 后端配置,而无需重启服务
35    pub fn update_llm_service(&self, new_backend: &LlmBackendSettings) -> Result<()> {
36        // 创建新的 LLM 服务
37        let new_service = LlmService::new(new_backend)?;
38
39        // 更新服务
40        {
41            let mut service = self.llm_service.write()
42                .map_err(|e| anyhow::anyhow!("Failed to acquire write lock for llm_service: {}", e))?;
43            *service = new_service;
44        }
45
46        // 更新配置
47        {
48            let mut config = self.config.write()
49                .map_err(|e| anyhow::anyhow!("Failed to acquire write lock for config: {}", e))?;
50            config.llm_backend = new_backend.clone();
51        }
52
53        Ok(())
54    }
55
56    /// 获取当前配置的副本
57    pub fn get_current_config(&self) -> Result<Settings> {
58        self.config.read()
59            .map_err(|e| anyhow::anyhow!("Failed to acquire read lock for config: {}", e))
60            .map(|config| config.clone())
61    }
62}
63
64/// 健康检查端点
65pub async fn health_check() -> Json<serde_json::Value> {
66    Json(json!({
67        "status": "ok",
68        "service": "llm-link",
69        "version": "0.1.0"
70    }))
71}
72
73/// 调试测试端点
74pub async fn debug_test() -> Json<serde_json::Value> {
75    Json(json!({
76        "debug": "test",
77        "timestamp": "2025-10-15T16:00:00Z"
78    }))
79}
80
81/// 获取完整的 provider 和 model 信息
82pub async fn info(
83    State(state): State<AppState>,
84) -> Result<Json<serde_json::Value>, StatusCode> {
85    let config = state.config.read()
86        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
87    let current_provider = get_provider_name(&config.llm_backend);
88    let current_model = get_current_model(&config.llm_backend);
89    
90    let models_config = ModelsConfig::load_with_fallback();
91
92    // Build supported_providers from the dynamic HashMap
93    let mut supported_providers: Vec<serde_json::Value> = models_config.providers
94        .iter()
95        .map(|(name, provider_models)| {
96            json!({
97                "name": name,
98                "models": provider_models.models,
99            })
100        })
101        .collect();
102
103    // Sort by provider name for consistent output
104    supported_providers.sort_by(|a, b| {
105        a["name"].as_str().unwrap_or("").cmp(b["name"].as_str().unwrap_or(""))
106    });
107
108    let mut api_endpoints = serde_json::Map::with_capacity(3);
109
110    if let Some(ollama_config) = &config.apis.ollama {
111        if ollama_config.enabled {
112            api_endpoints.insert("ollama".to_string(), json!({
113                "path": ollama_config.path,
114                "enabled": true,
115                "auth_required": ollama_config.api_key.is_some(),
116            }));
117        }
118    }
119
120    if let Some(openai_config) = &config.apis.openai {
121        if openai_config.enabled {
122            api_endpoints.insert("openai".to_string(), json!({
123                "path": openai_config.path,
124                "enabled": true,
125                "auth_required": openai_config.api_key.is_some(),
126            }));
127        }
128    }
129
130    if let Some(anthropic_config) = &config.apis.anthropic {
131        if anthropic_config.enabled {
132            api_endpoints.insert("anthropic".to_string(), json!({
133                "path": anthropic_config.path,
134                "enabled": true,
135            }));
136        }
137    }
138
139    let response = json!({
140        "service": "llm-link",
141        "version": "0.3.3",
142        "current_provider": current_provider,
143        "current_model": current_model,
144        "supported_providers": supported_providers,
145        "api_endpoints": api_endpoints,
146    });
147
148    Ok(Json(response))
149}
150
151fn get_provider_name(backend: &LlmBackendSettings) -> &str {
152    match backend {
153        LlmBackendSettings::OpenAI { .. } => "openai",
154        LlmBackendSettings::Anthropic { .. } => "anthropic",
155        LlmBackendSettings::Zhipu { .. } => "zhipu",
156        LlmBackendSettings::Ollama { .. } => "ollama",
157        LlmBackendSettings::Aliyun { .. } => "aliyun",
158        LlmBackendSettings::Volcengine { .. } => "volcengine",
159        LlmBackendSettings::Tencent { .. } => "tencent",
160        LlmBackendSettings::Longcat { .. } => "longcat",
161        LlmBackendSettings::Moonshot { .. } => "moonshot",
162    }
163}
164
165fn get_current_model(backend: &LlmBackendSettings) -> String {
166    match backend {
167        LlmBackendSettings::OpenAI { model, .. } => model.clone(),
168        LlmBackendSettings::Anthropic { model, .. } => model.clone(),
169        LlmBackendSettings::Zhipu { model, .. } => model.clone(),
170        LlmBackendSettings::Ollama { model, .. } => model.clone(),
171        LlmBackendSettings::Aliyun { model, .. } => model.clone(),
172        LlmBackendSettings::Volcengine { model, .. } => model.clone(),
173        LlmBackendSettings::Tencent { model, .. } => model.clone(),
174        LlmBackendSettings::Longcat { model, .. } => model.clone(),
175        LlmBackendSettings::Moonshot { model, .. } => model.clone(),
176    }
177}