pub mod openai;
pub mod ollama;
pub mod anthropic;
pub mod convert;
pub mod config;

use crate::settings::{Settings, LlmBackendSettings};
use crate::service::Service as LlmService;
use crate::models::ModelsConfig;
use axum::response::Json;
use axum::extract::State;
use axum::http::StatusCode;
use serde_json::json;
use std::sync::Arc;
use tokio::sync::RwLock;
use anyhow::Result;

/// Shared application state handed to every axum handler.
///
/// Both fields sit behind `Arc<RwLock<...>>` so the active LLM backend and
/// its settings can be swapped at runtime without restarting the server.
#[derive(Clone)]
pub struct AppState {
    pub llm_service: Arc<RwLock<LlmService>>,
    pub config: Arc<RwLock<Settings>>,
}

impl AppState {
    pub fn new(llm_service: LlmService, config: Settings) -> Self {
        Self {
            llm_service: Arc::new(RwLock::new(llm_service)),
            config: Arc::new(RwLock::new(config)),
        }
    }

    /// Rebuilds the LLM service for `new_backend` and stores the new
    /// settings. The service is constructed before any lock is taken, so a
    /// failing backend leaves the previous service untouched.
    pub async fn update_llm_service(&self, new_backend: &LlmBackendSettings) -> Result<()> {
        let new_service = LlmService::new(new_backend)?;

        // Scope each write guard so the lock is released right after the
        // swap rather than held for the rest of the call.
        {
            let mut service = self.llm_service.write().await;
            *service = new_service;
        }

        {
            let mut config = self.config.write().await;
            config.llm_backend = new_backend.clone();
        }

        Ok(())
    }

    /// Returns a snapshot of the current settings.
    pub async fn get_current_config(&self) -> Result<Settings> {
        let config = self.config.read().await;
        Ok(config.clone())
    }
}
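
// A minimal sketch of how these handlers might be mounted on an axum
// `Router`. The `router` name and the route paths below are illustrative
// assumptions, not something this module prescribes.
pub fn router(state: AppState) -> axum::Router {
    axum::Router::new()
        .route("/health", axum::routing::get(health_check))
        .route("/debug", axum::routing::get(debug_test))
        .route("/info", axum::routing::get(info))
        .with_state(state)
}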

/// Liveness probe. The version is taken from Cargo metadata so it cannot
/// drift from `Cargo.toml`.
pub async fn health_check() -> Json<serde_json::Value> {
    Json(json!({
        "status": "ok",
        "service": "llm-link",
        "version": env!("CARGO_PKG_VERSION")
    }))
}

/// Debug endpoint returning a canned payload. Note that the timestamp is a
/// fixed placeholder, not the current time.
pub async fn debug_test() -> Json<serde_json::Value> {
    Json(json!({
        "debug": "test",
        "timestamp": "2025-10-15T16:00:00Z"
    }))
}

/// Service metadata: the active provider and model, every provider/model
/// combination known from the models config, and which API surfaces are
/// currently exposed.
pub async fn info(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    let config = state.config.read().await;
    let current_provider = get_provider_name(&config.llm_backend);
    let current_model = get_current_model(&config.llm_backend);

    let models_config = ModelsConfig::load_with_fallback();

    let mut supported_providers: Vec<serde_json::Value> = models_config.providers
        .iter()
        .map(|(name, provider_models)| {
            json!({
                "name": name,
                "models": provider_models.models,
            })
        })
        .collect();

    // Sort by provider name so the listing is stable across requests.
    supported_providers.sort_by(|a, b| {
        a["name"].as_str().unwrap_or("").cmp(b["name"].as_str().unwrap_or(""))
    });

    // Only advertise API surfaces that are configured and enabled.
    let mut api_endpoints = serde_json::Map::with_capacity(3);

    if let Some(ollama_config) = &config.apis.ollama {
        if ollama_config.enabled {
            api_endpoints.insert("ollama".to_string(), json!({
                "path": ollama_config.path,
                "enabled": true,
                "auth_required": ollama_config.api_key.is_some(),
            }));
        }
    }

    if let Some(openai_config) = &config.apis.openai {
        if openai_config.enabled {
            api_endpoints.insert("openai".to_string(), json!({
                "path": openai_config.path,
                "enabled": true,
                "auth_required": openai_config.api_key.is_some(),
            }));
        }
    }

    if let Some(anthropic_config) = &config.apis.anthropic {
        if anthropic_config.enabled {
            api_endpoints.insert("anthropic".to_string(), json!({
                "path": anthropic_config.path,
                "enabled": true,
            }));
        }
    }

    let response = json!({
        "service": "llm-link",
        "version": env!("CARGO_PKG_VERSION"),
        "current_provider": current_provider,
        "current_model": current_model,
        "supported_providers": supported_providers,
        "api_endpoints": api_endpoints,
    });

    Ok(Json(response))
}
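
// For reference, a successful response from `info` is shaped roughly like
// the following (all values here are illustrative):
//
// {
//   "service": "llm-link",
//   "version": "0.3.3",
//   "current_provider": "openai",
//   "current_model": "...",
//   "supported_providers": [{ "name": "...", "models": ["..."] }],
//   "api_endpoints": { "openai": { "path": "...", "enabled": true, "auth_required": true } }
// }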

/// Maps a backend settings variant to its canonical provider name.
fn get_provider_name(backend: &LlmBackendSettings) -> &'static str {
    match backend {
        LlmBackendSettings::OpenAI { .. } => "openai",
        LlmBackendSettings::Anthropic { .. } => "anthropic",
        LlmBackendSettings::Zhipu { .. } => "zhipu",
        LlmBackendSettings::Ollama { .. } => "ollama",
        LlmBackendSettings::Aliyun { .. } => "aliyun",
        LlmBackendSettings::Volcengine { .. } => "volcengine",
        LlmBackendSettings::Tencent { .. } => "tencent",
        LlmBackendSettings::Longcat { .. } => "longcat",
        LlmBackendSettings::Moonshot { .. } => "moonshot",
        LlmBackendSettings::Minimax { .. } => "minimax",
    }
}

/// Extracts the configured model name from the active backend settings.
fn get_current_model(backend: &LlmBackendSettings) -> String {
    match backend {
        LlmBackendSettings::OpenAI { model, .. } => model.clone(),
        LlmBackendSettings::Anthropic { model, .. } => model.clone(),
        LlmBackendSettings::Zhipu { model, .. } => model.clone(),
        LlmBackendSettings::Ollama { model, .. } => model.clone(),
        LlmBackendSettings::Aliyun { model, .. } => model.clone(),
        LlmBackendSettings::Volcengine { model, .. } => model.clone(),
        LlmBackendSettings::Tencent { model, .. } => model.clone(),
        LlmBackendSettings::Longcat { model, .. } => model.clone(),
        LlmBackendSettings::Moonshot { model, .. } => model.clone(),
        LlmBackendSettings::Minimax { model, .. } => model.clone(),
    }
}
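
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sanity check for the health endpoint; assumes `tokio` with
    // the "macros" feature is available as a dev-dependency for the test
    // runtime.
    #[tokio::test]
    async fn health_check_reports_ok() {
        let Json(body) = health_check().await;
        assert_eq!(body["status"], "ok");
        assert_eq!(body["service"], "llm-link");
    }
}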