//! llm_link/api/mod.rs — API layer: HTTP handlers, shared app state, and
//! per-protocol adapter submodules (openai / ollama / anthropic).

1pub mod openai;
2pub mod ollama;
3pub mod anthropic;
4pub mod convert;
5pub mod config;
6
7use crate::settings::{Settings, LlmBackendSettings};
8use crate::service::Service as LlmService;
9use crate::models::ModelsConfig;
10use axum::response::Json;
11use axum::extract::State;
12use axum::http::StatusCode;
13use serde_json::json;
14use std::sync::{Arc, RwLock};
15use anyhow::Result;
16
/// Shared application state handed to every axum handler.
///
/// Both fields sit behind `Arc<RwLock<…>>` so the active backend service and
/// its settings can be hot-swapped at runtime (see `update_llm_service`)
/// while handlers hold cheap clones of this struct.
#[derive(Clone)]
pub struct AppState {
    // The currently active LLM backend service.
    pub llm_service: Arc<RwLock<LlmService>>,
    // Settings that produced `llm_service`; kept in sync on updates.
    pub config: Arc<RwLock<Settings>>,
}
23
24impl AppState {
25    pub fn new(llm_service: LlmService, config: Settings) -> Self {
26        Self {
27            llm_service: Arc::new(RwLock::new(llm_service)),
28            config: Arc::new(RwLock::new(config)),
29        }
30    }
31
32    /// 动态更新 LLM 服务配置
33    ///
34    /// 这个方法允许在运行时更新 LLM 后端配置,而无需重启服务
35    pub fn update_llm_service(&self, new_backend: &LlmBackendSettings) -> Result<()> {
36        // 创建新的 LLM 服务
37        let new_service = LlmService::new(new_backend)?;
38
39        // 更新服务
40        {
41            let mut service = self.llm_service.write().unwrap();
42            *service = new_service;
43        }
44
45        // 更新配置
46        {
47            let mut config = self.config.write().unwrap();
48            config.llm_backend = new_backend.clone();
49        }
50
51        Ok(())
52    }
53
54    /// 获取当前配置的只读副本
55    pub fn get_current_config(&self) -> Settings {
56        self.config.read().unwrap().clone()
57    }
58}
59
60/// 健康检查端点
61pub async fn health_check() -> Json<serde_json::Value> {
62    Json(json!({
63        "status": "ok",
64        "service": "llm-link",
65        "version": "0.1.0"
66    }))
67}
68
69/// 调试测试端点
70pub async fn debug_test() -> Json<serde_json::Value> {
71    Json(json!({
72        "debug": "test",
73        "timestamp": "2025-10-15T16:00:00Z"
74    }))
75}
76
77/// 获取完整的 provider 和 model 信息
78pub async fn info(
79    State(state): State<AppState>,
80) -> Result<Json<serde_json::Value>, StatusCode> {
81    let config = state.config.read().unwrap();
82    let current_provider = get_provider_name(&config.llm_backend);
83    let current_model = get_current_model(&config.llm_backend);
84    
85    let models_config = ModelsConfig::load_with_fallback();
86    
87    let supported_providers = vec![
88        json!({
89            "name": "openai",
90            "models": models_config.openai.models,
91        }),
92        json!({
93            "name": "anthropic",
94            "models": models_config.anthropic.models,
95        }),
96        json!({
97            "name": "zhipu",
98            "models": models_config.zhipu.models,
99        }),
100        json!({
101            "name": "ollama",
102            "models": models_config.ollama.models,
103        }),
104        json!({
105            "name": "aliyun",
106            "models": models_config.aliyun.models,
107        }),
108        json!({
109            "name": "volcengine",
110            "models": vec![
111                json!({
112                    "id": "ep-20241023xxxxx-xxxxx",
113                    "name": "Doubao Model",
114                    "description": "Volcengine Doubao model endpoint"
115                })
116            ],
117        }),
118        json!({
119            "name": "tencent",
120            "models": vec![
121                json!({
122                    "id": "hunyuan-lite",
123                    "name": "Hunyuan Lite",
124                    "description": "Tencent Hunyuan lite model"
125                })
126            ],
127        }),
128    ];
129
130    let mut api_endpoints = serde_json::Map::new();
131
132    if let Some(ollama_config) = &config.apis.ollama {
133        if ollama_config.enabled {
134            api_endpoints.insert("ollama".to_string(), json!({
135                "path": ollama_config.path,
136                "enabled": true,
137                "auth_required": ollama_config.api_key.is_some(),
138            }));
139        }
140    }
141
142    if let Some(openai_config) = &config.apis.openai {
143        if openai_config.enabled {
144            api_endpoints.insert("openai".to_string(), json!({
145                "path": openai_config.path,
146                "enabled": true,
147                "auth_required": openai_config.api_key.is_some(),
148            }));
149        }
150    }
151
152    if let Some(anthropic_config) = &config.apis.anthropic {
153        if anthropic_config.enabled {
154            api_endpoints.insert("anthropic".to_string(), json!({
155                "path": anthropic_config.path,
156                "enabled": true,
157            }));
158        }
159    }
160
161    let response = json!({
162        "service": "llm-link",
163        "version": "0.2.4",
164        "current_provider": current_provider,
165        "current_model": current_model,
166        "supported_providers": supported_providers,
167        "api_endpoints": api_endpoints,
168    });
169
170    Ok(Json(response))
171}
172
173fn get_provider_name(backend: &LlmBackendSettings) -> &str {
174    match backend {
175        LlmBackendSettings::OpenAI { .. } => "openai",
176        LlmBackendSettings::Anthropic { .. } => "anthropic",
177        LlmBackendSettings::Zhipu { .. } => "zhipu",
178        LlmBackendSettings::Ollama { .. } => "ollama",
179        LlmBackendSettings::Aliyun { .. } => "aliyun",
180        LlmBackendSettings::Volcengine { .. } => "volcengine",
181        LlmBackendSettings::Tencent { .. } => "tencent",
182    }
183}
184
185fn get_current_model(backend: &LlmBackendSettings) -> String {
186    match backend {
187        LlmBackendSettings::OpenAI { model, .. } => model.clone(),
188        LlmBackendSettings::Anthropic { model, .. } => model.clone(),
189        LlmBackendSettings::Zhipu { model, .. } => model.clone(),
190        LlmBackendSettings::Ollama { model, .. } => model.clone(),
191        LlmBackendSettings::Aliyun { model, .. } => model.clone(),
192        LlmBackendSettings::Volcengine { model, .. } => model.clone(),
193        LlmBackendSettings::Tencent { model, .. } => model.clone(),
194    }
195}