1pub mod openai;
2pub mod ollama;
3pub mod anthropic;
4pub mod convert;
5pub mod config;
6
7use crate::settings::{Settings, LlmBackendSettings};
8use crate::service::Service as LlmService;
9use crate::models::ModelsConfig;
10use axum::response::Json;
11use axum::extract::State;
12use axum::http::StatusCode;
13use serde_json::json;
14use std::sync::{Arc, RwLock};
15use anyhow::Result;
16
/// Shared application state handed to every axum handler.
///
/// Both fields sit behind `Arc<RwLock<…>>` so the active LLM backend and its
/// configuration can be swapped at runtime (see `update_llm_service`) while
/// handlers hold cheap `Clone`s of this struct.
#[derive(Clone)]
pub struct AppState {
    // Currently active LLM backend service; replaced wholesale on backend switch.
    pub llm_service: Arc<RwLock<LlmService>>,
    // Full application settings; kept in sync with `llm_service` on updates.
    pub config: Arc<RwLock<Settings>>,
}
23
24impl AppState {
25 pub fn new(llm_service: LlmService, config: Settings) -> Self {
26 Self {
27 llm_service: Arc::new(RwLock::new(llm_service)),
28 config: Arc::new(RwLock::new(config)),
29 }
30 }
31
32 pub fn update_llm_service(&self, new_backend: &LlmBackendSettings) -> Result<()> {
36 let new_service = LlmService::new(new_backend)?;
38
39 {
41 let mut service = self.llm_service.write().unwrap();
42 *service = new_service;
43 }
44
45 {
47 let mut config = self.config.write().unwrap();
48 config.llm_backend = new_backend.clone();
49 }
50
51 Ok(())
52 }
53
54 pub fn get_current_config(&self) -> Settings {
56 self.config.read().unwrap().clone()
57 }
58}
59
60pub async fn health_check() -> Json<serde_json::Value> {
62 Json(json!({
63 "status": "ok",
64 "service": "llm-link",
65 "version": "0.1.0"
66 }))
67}
68
69pub async fn debug_test() -> Json<serde_json::Value> {
71 Json(json!({
72 "debug": "test",
73 "timestamp": "2025-10-15T16:00:00Z"
74 }))
75}
76
77pub async fn info(
79 State(state): State<AppState>,
80) -> Result<Json<serde_json::Value>, StatusCode> {
81 let config = state.config.read().unwrap();
82 let current_provider = get_provider_name(&config.llm_backend);
83 let current_model = get_current_model(&config.llm_backend);
84
85 let models_config = ModelsConfig::load_with_fallback();
86
87 let mut supported_providers: Vec<serde_json::Value> = models_config.providers
89 .iter()
90 .map(|(name, provider_models)| {
91 json!({
92 "name": name,
93 "models": provider_models.models,
94 })
95 })
96 .collect();
97
98 supported_providers.sort_by(|a, b| {
100 a["name"].as_str().unwrap_or("").cmp(b["name"].as_str().unwrap_or(""))
101 });
102
103 let mut api_endpoints = serde_json::Map::new();
104
105 if let Some(ollama_config) = &config.apis.ollama {
106 if ollama_config.enabled {
107 api_endpoints.insert("ollama".to_string(), json!({
108 "path": ollama_config.path,
109 "enabled": true,
110 "auth_required": ollama_config.api_key.is_some(),
111 }));
112 }
113 }
114
115 if let Some(openai_config) = &config.apis.openai {
116 if openai_config.enabled {
117 api_endpoints.insert("openai".to_string(), json!({
118 "path": openai_config.path,
119 "enabled": true,
120 "auth_required": openai_config.api_key.is_some(),
121 }));
122 }
123 }
124
125 if let Some(anthropic_config) = &config.apis.anthropic {
126 if anthropic_config.enabled {
127 api_endpoints.insert("anthropic".to_string(), json!({
128 "path": anthropic_config.path,
129 "enabled": true,
130 }));
131 }
132 }
133
134 let response = json!({
135 "service": "llm-link",
136 "version": "0.3.2",
137 "current_provider": current_provider,
138 "current_model": current_model,
139 "supported_providers": supported_providers,
140 "api_endpoints": api_endpoints,
141 });
142
143 Ok(Json(response))
144}
145
146fn get_provider_name(backend: &LlmBackendSettings) -> &str {
147 match backend {
148 LlmBackendSettings::OpenAI { .. } => "openai",
149 LlmBackendSettings::Anthropic { .. } => "anthropic",
150 LlmBackendSettings::Zhipu { .. } => "zhipu",
151 LlmBackendSettings::Ollama { .. } => "ollama",
152 LlmBackendSettings::Aliyun { .. } => "aliyun",
153 LlmBackendSettings::Volcengine { .. } => "volcengine",
154 LlmBackendSettings::Tencent { .. } => "tencent",
155 LlmBackendSettings::Longcat { .. } => "longcat",
156 }
157}
158
159fn get_current_model(backend: &LlmBackendSettings) -> String {
160 match backend {
161 LlmBackendSettings::OpenAI { model, .. } => model.clone(),
162 LlmBackendSettings::Anthropic { model, .. } => model.clone(),
163 LlmBackendSettings::Zhipu { model, .. } => model.clone(),
164 LlmBackendSettings::Ollama { model, .. } => model.clone(),
165 LlmBackendSettings::Aliyun { model, .. } => model.clone(),
166 LlmBackendSettings::Volcengine { model, .. } => model.clone(),
167 LlmBackendSettings::Tencent { model, .. } => model.clone(),
168 LlmBackendSettings::Longcat { model, .. } => model.clone(),
169 }
170}