//! llm_link/api/mod.rs — HTTP API surface for the llm-link service.

pub mod openai;
pub mod ollama;
pub mod anthropic;
pub mod convert;

use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};

use axum::extract::State;
use axum::http::StatusCode;
use axum::response::Json;
use serde_json::json;

use crate::models::ModelsConfig;
use crate::service::Service as LlmService;
use crate::settings::{LlmBackendSettings, Settings};

15/// 应用状态
16#[derive(Clone)]
17pub struct AppState {
18    pub llm_service: Arc<LlmService>,
19    pub config: Arc<Settings>,
20}
21
22impl AppState {
23    pub fn new(llm_service: LlmService, config: Settings) -> Self {
24        Self {
25            llm_service: Arc::new(llm_service),
26            config: Arc::new(config),
27        }
28    }
29}
30
31/// 健康检查端点
32pub async fn health_check() -> Json<serde_json::Value> {
33    Json(json!({
34        "status": "ok",
35        "service": "llm-link",
36        "version": "0.1.0"
37    }))
38}
39
40/// 调试测试端点
41pub async fn debug_test() -> Json<serde_json::Value> {
42    Json(json!({
43        "debug": "test",
44        "timestamp": "2025-10-15T16:00:00Z"
45    }))
46}
47
48/// 获取完整的 provider 和 model 信息
49pub async fn info(
50    State(state): State<AppState>,
51) -> Result<Json<serde_json::Value>, StatusCode> {
52    let current_provider = get_provider_name(&state.config.llm_backend);
53    let current_model = get_current_model(&state.config.llm_backend);
54    
55    let models_config = ModelsConfig::load_with_fallback();
56    
57    let supported_providers = vec![
58        json!({
59            "name": "openai",
60            "models": models_config.openai.models,
61        }),
62        json!({
63            "name": "anthropic",
64            "models": models_config.anthropic.models,
65        }),
66        json!({
67            "name": "zhipu",
68            "models": models_config.zhipu.models,
69        }),
70        json!({
71            "name": "ollama",
72            "models": models_config.ollama.models,
73        }),
74        json!({
75            "name": "aliyun",
76            "models": models_config.aliyun.models,
77        }),
78        json!({
79            "name": "volcengine",
80            "models": vec![
81                json!({
82                    "id": "ep-20241023xxxxx-xxxxx",
83                    "name": "Doubao Model",
84                    "description": "Volcengine Doubao model endpoint"
85                })
86            ],
87        }),
88        json!({
89            "name": "tencent",
90            "models": vec![
91                json!({
92                    "id": "hunyuan-lite",
93                    "name": "Hunyuan Lite",
94                    "description": "Tencent Hunyuan lite model"
95                })
96            ],
97        }),
98    ];
99
100    let mut api_endpoints = serde_json::Map::new();
101    
102    if let Some(ollama_config) = &state.config.apis.ollama {
103        if ollama_config.enabled {
104            api_endpoints.insert("ollama".to_string(), json!({
105                "path": ollama_config.path,
106                "enabled": true,
107                "auth_required": ollama_config.api_key.is_some(),
108            }));
109        }
110    }
111    
112    if let Some(openai_config) = &state.config.apis.openai {
113        if openai_config.enabled {
114            api_endpoints.insert("openai".to_string(), json!({
115                "path": openai_config.path,
116                "enabled": true,
117                "auth_required": openai_config.api_key.is_some(),
118            }));
119        }
120    }
121    
122    if let Some(anthropic_config) = &state.config.apis.anthropic {
123        if anthropic_config.enabled {
124            api_endpoints.insert("anthropic".to_string(), json!({
125                "path": anthropic_config.path,
126                "enabled": true,
127            }));
128        }
129    }
130
131    let response = json!({
132        "service": "llm-link",
133        "version": "0.2.4",
134        "current_provider": current_provider,
135        "current_model": current_model,
136        "supported_providers": supported_providers,
137        "api_endpoints": api_endpoints,
138    });
139
140    Ok(Json(response))
141}
142
143fn get_provider_name(backend: &LlmBackendSettings) -> &str {
144    match backend {
145        LlmBackendSettings::OpenAI { .. } => "openai",
146        LlmBackendSettings::Anthropic { .. } => "anthropic",
147        LlmBackendSettings::Zhipu { .. } => "zhipu",
148        LlmBackendSettings::Ollama { .. } => "ollama",
149        LlmBackendSettings::Aliyun { .. } => "aliyun",
150        LlmBackendSettings::Volcengine { .. } => "volcengine",
151        LlmBackendSettings::Tencent { .. } => "tencent",
152    }
153}
154
155fn get_current_model(backend: &LlmBackendSettings) -> String {
156    match backend {
157        LlmBackendSettings::OpenAI { model, .. } => model.clone(),
158        LlmBackendSettings::Anthropic { model, .. } => model.clone(),
159        LlmBackendSettings::Zhipu { model, .. } => model.clone(),
160        LlmBackendSettings::Ollama { model, .. } => model.clone(),
161        LlmBackendSettings::Aliyun { model, .. } => model.clone(),
162        LlmBackendSettings::Volcengine { model, .. } => model.clone(),
163        LlmBackendSettings::Tencent { model, .. } => model.clone(),
164    }
165}