use axum::{extract::State, http::StatusCode, response::Json};
use serde::{Deserialize, Serialize};
use serde_json::json;
use tracing::{info, error};
use std::sync::atomic::{AtomicU64, Ordering};

use crate::api::AppState;

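/// Mask an API key for logging: keys longer than 8 characters keep their
/// first and last four characters with `***` in between; shorter keys are
/// fully masked. Slicing is by byte offset, so keys are assumed to be ASCII.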
fn mask_api_key(api_key: &str) -> String {
    if api_key.len() <= 8 {
        "*".repeat(api_key.len())
    } else {
        format!("{}***{}", &api_key[..4], &api_key[api_key.len() - 4..])
    }
}

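/// Lightweight format check for an API key: provider-specific prefixes and
/// minimum lengths only. This never contacts the provider.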
fn validate_api_key(provider: &str, api_key: &str) -> Result<(), String> {
    if api_key.trim().is_empty() {
        return Err("API key cannot be empty".to_string());
    }

    match provider {
        "openai" => {
            if !api_key.starts_with("sk-") {
                return Err("OpenAI API key should start with 'sk-'".to_string());
            }
        }
        "anthropic" => {
            if !api_key.starts_with("sk-ant-") {
                return Err("Anthropic API key should start with 'sk-ant-'".to_string());
            }
        }
        "zhipu" => {
            if api_key.len() < 10 {
                return Err("Zhipu API key seems too short".to_string());
            }
        }
        "ollama" => {
            // Ollama runs locally and does not require an API key.
        }
        _ => {
            if api_key.len() < 10 {
                return Err("API key seems too short".to_string());
            }
        }
    }

    Ok(())
}

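/// Check that `provider` names one of the supported backends.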
fn validate_provider(provider: &str) -> Result<(), String> {
    match provider {
        "openai" | "anthropic" | "zhipu" | "ollama" | "aliyun" | "volcengine" | "tencent" | "longcat" => Ok(()),
        _ => Err(format!("Unsupported provider: {}", provider)),
    }
}

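/// Process-wide instance identifier, set once at startup via [`init_instance_id`].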
static INSTANCE_ID: AtomicU64 = AtomicU64::new(0);

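/// Initialize [`INSTANCE_ID`] with the current Unix timestamp in seconds.
/// Intended to be called once during startup.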
pub fn init_instance_id() {
    use std::time::{SystemTime, UNIX_EPOCH};
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    INSTANCE_ID.store(timestamp, Ordering::SeqCst);
}

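/// Return the instance identifier, or 0 if [`init_instance_id`] has not run.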
pub fn get_instance_id() -> u64 {
    INSTANCE_ID.load(Ordering::SeqCst)
}

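/// Request body for preparing a restart-based config update; also reused by
/// [`validate_key`] for key validation.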
#[derive(Debug, Deserialize)]
pub struct UpdateConfigRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

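/// Request body for hot-updating an API key (and optional base URL) without
/// changing the configured model.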
#[derive(Debug, Deserialize)]
pub struct UpdateKeyRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub base_url: Option<String>,
}

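/// Request body for switching the active provider. `model`, `api_key`, and
/// `base_url` are optional; missing values fall back to the current config or
/// provider defaults.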
#[derive(Debug, Deserialize)]
pub struct SwitchProviderRequest {
    pub provider: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub api_key: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

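/// Snapshot of the active LLM backend returned by [`get_current_config`].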
#[derive(Debug, Serialize)]
pub struct CurrentConfigResponse {
    pub provider: String,
    pub model: String,
    pub has_api_key: bool,
    pub has_base_url: bool,
    pub supports_hot_reload: bool,
}

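/// Handler reporting the active provider and model, plus whether an API key
/// and base URL are configured.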
pub async fn get_current_config(
    State(state): State<AppState>,
) -> Result<Json<CurrentConfigResponse>, StatusCode> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read().unwrap();
    let (provider, model, has_api_key, has_base_url) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, base_url, .. } => {
            ("openai", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Anthropic { model, .. } => {
            ("anthropic", model.clone(), true, false)
        }
        LlmBackendSettings::Zhipu { model, base_url, .. } => {
            ("zhipu", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Ollama { model, base_url } => {
            ("ollama", model.clone(), false, base_url.is_some())
        }
        LlmBackendSettings::Aliyun { model, .. } => {
            ("aliyun", model.clone(), true, false)
        }
        LlmBackendSettings::Volcengine { model, .. } => {
            ("volcengine", model.clone(), true, false)
        }
        LlmBackendSettings::Tencent { model, .. } => {
            ("tencent", model.clone(), true, false)
        }
        LlmBackendSettings::Longcat { model, .. } => {
            ("longcat", model.clone(), true, false)
        }
    };

    Ok(Json(CurrentConfigResponse {
        provider: provider.to_string(),
        model,
        has_api_key,
        has_base_url,
        supports_hot_reload: true,
    }))
}

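/// Health-check handler reporting status, instance ID, PID, and the active
/// provider and model. Illustrative response shape (field values vary):
///
/// ```json
/// {"status": "ok", "instance_id": 1729650000, "pid": 4242,
///  "provider": "openai", "model": "gpt-4o"}
/// ```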
pub async fn get_health(
    State(state): State<AppState>,
) -> Json<serde_json::Value> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read().unwrap();
    let (provider, model) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, .. } => ("openai", model.clone()),
        LlmBackendSettings::Anthropic { model, .. } => ("anthropic", model.clone()),
        LlmBackendSettings::Zhipu { model, .. } => ("zhipu", model.clone()),
        LlmBackendSettings::Ollama { model, .. } => ("ollama", model.clone()),
        LlmBackendSettings::Aliyun { model, .. } => ("aliyun", model.clone()),
        LlmBackendSettings::Volcengine { model, .. } => ("volcengine", model.clone()),
        LlmBackendSettings::Tencent { model, .. } => ("tencent", model.clone()),
        LlmBackendSettings::Longcat { model, .. } => ("longcat", model.clone()),
    };

    Json(json!({
        "status": "ok",
        "instance_id": get_instance_id(),
        "pid": std::process::id(),
        "provider": provider,
        "model": model,
    }))
}

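/// Prepare a configuration change that requires a restart: resolve a default
/// model for the provider and return the env vars and CLI args to relaunch
/// with. The running service is not modified; the caller is responsible for
/// the restart itself.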
pub async fn update_config_for_restart(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    info!("🔧 Preparing config update for provider: {}", request.provider);

    let default_model = request.model.clone().or_else(|| {
        match request.provider.as_str() {
            "openai" => Some("gpt-4o".to_string()),
            "anthropic" => Some("claude-3-5-sonnet-20241022".to_string()),
            "zhipu" => Some("glm-4-flash".to_string()),
            "ollama" => Some("llama2".to_string()),
            "aliyun" => Some("qwen-turbo".to_string()),
            "volcengine" => Some("ep-20241023xxxxx-xxxxx".to_string()),
            "tencent" => Some("hunyuan-lite".to_string()),
            // Matches the default used by the other handlers in this module.
            "longcat" => Some("LongCat-Flash-Chat".to_string()),
            _ => None,
        }
    });

    let model = match default_model {
        Some(m) => m,
        None => {
            error!("❌ Unknown provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    let mut env_vars = serde_json::Map::new();

    let api_key_var = match request.provider.as_str() {
        "openai" => "OPENAI_API_KEY",
        "anthropic" => "ANTHROPIC_API_KEY",
        "zhipu" => "ZHIPU_API_KEY",
        "aliyun" => "ALIYUN_API_KEY",
        "volcengine" => "VOLCENGINE_API_KEY",
        "tencent" => "TENCENT_API_KEY",
        // Assumed to follow the same naming convention as the other providers.
        "longcat" => "LONGCAT_API_KEY",
        // Ollama requires no API key, so no env var is set for it.
        "ollama" => "",
        _ => return Err(StatusCode::BAD_REQUEST),
    };

    if !api_key_var.is_empty() {
        env_vars.insert(api_key_var.to_string(), json!(request.api_key));
    }

    if let Some(base_url) = request.base_url {
        let base_url_var = match request.provider.as_str() {
            "openai" => "OPENAI_BASE_URL",
            "zhipu" => "ZHIPU_BASE_URL",
            "ollama" => "OLLAMA_BASE_URL",
            _ => "",
        };
        if !base_url_var.is_empty() {
            env_vars.insert(base_url_var.to_string(), json!(base_url));
        }
    }

    info!("✅ Config prepared for restart with provider: {}", request.provider);

    Ok(Json(json!({
        "status": "success",
        "message": format!("Config prepared for provider: {}", request.provider),
        "restart_required": true,
        "current_instance_id": get_instance_id(),
        "env_vars": env_vars,
        "cli_args": {
            "provider": request.provider,
            "model": model,
        }
    })))
}

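/// Validate an API key by constructing a throwaway backend and listing its
/// models. Returns a JSON body with `status` of `valid`, `invalid`, or
/// `error` rather than an HTTP error, so callers can distinguish bad keys
/// from service-construction failures.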
pub async fn validate_key(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!("🔍 Validating API key for provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let model = request.model.clone().unwrap_or_else(|| "test-model".to_string());

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => {
            match service.list_models().await {
                Ok(models) => {
                    info!("✅ API key validated successfully, found {} models", models.len());
                    Ok(Json(json!({
                        "status": "valid",
                        "message": "API key is valid",
                        "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                    })))
                }
                Err(e) => {
                    error!("❌ API key validation failed: {:?}", e);
                    Ok(Json(json!({
                        "status": "invalid",
                        "message": format!("Failed to list models: {}", e),
                    })))
                }
            }
        }
        Err(e) => {
            error!("❌ Failed to create service: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
            })))
        }
    }
}

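/// Return the current process ID so a caller can restart the service.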
pub async fn get_pid() -> Json<serde_json::Value> {
    let pid = std::process::id();

    Json(json!({
        "pid": pid,
        "message": "Use this PID to restart the service"
    }))
}

pub async fn validate_key_for_update(
    State(_state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!("🔍 Validating API key for hot update - provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let model = match request.provider.as_str() {
        "openai" => "gpt-4o".to_string(),
        "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
        "zhipu" => "glm-4-flash".to_string(),
        "ollama" => "llama2".to_string(),
        "aliyun" => "qwen-turbo".to_string(),
        "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
        "tencent" => "hunyuan-lite".to_string(),
        "longcat" => "LongCat-Flash-Chat".to_string(),
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        // Unknown providers were already rejected above; this arm only keeps
        // the match exhaustive.
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => {
            match service.list_models().await {
                Ok(models) => {
                    info!("✅ API key validated successfully for hot update, found {} models", models.len());
                    Ok(Json(json!({
                        "status": "valid",
                        "message": "API key is valid and ready for hot update",
                        "provider": request.provider,
                        "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                        "supports_hot_reload": true,
                    })))
                }
                Err(e) => {
                    error!("❌ API key validation failed for hot update: {:?}", e);
                    Ok(Json(json!({
                        "status": "invalid",
                        "message": format!("Failed to list models: {}", e),
                        "provider": request.provider,
                    })))
                }
            }
        }
        Err(e) => {
            error!("❌ Failed to create service for hot update validation: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
                "provider": request.provider,
            })))
        }
    }
}

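/// Hot-update the API key (and optional base URL) for a provider: keeps the
/// current model when the provider is unchanged, otherwise falls back to a
/// provider-default model, then rebuilds the LLM service in place.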
pub async fn update_key(
    State(state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    if request.provider != "ollama" {
        if let Err(e) = validate_api_key(&request.provider, &request.api_key) {
            error!("❌ Invalid API key format: {}", e);
            return Ok(Json(json!({
                "status": "error",
                "message": format!("Invalid API key format: {}", e),
            })));
        }
    }

    info!("🔧 Updating API key for provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let current_config = state.get_current_config();

    let new_backend = match request.provider.as_str() {
        "openai" => {
            if let crate::settings::LlmBackendSettings::OpenAI { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "gpt-4o".to_string(),
                }
            }
        }
        "anthropic" => {
            if let crate::settings::LlmBackendSettings::Anthropic { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: "claude-3-5-sonnet-20241022".to_string(),
                }
            }
        }
        "zhipu" => {
            if let crate::settings::LlmBackendSettings::Zhipu { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "glm-4-flash".to_string(),
                }
            }
        }
        "aliyun" => {
            if let crate::settings::LlmBackendSettings::Aliyun { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: "qwen-turbo".to_string(),
                }
            }
        }
        "volcengine" => {
            if let crate::settings::LlmBackendSettings::Volcengine { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: "ep-20241023xxxxx-xxxxx".to_string(),
                }
            }
        }
        "tencent" => {
            if let crate::settings::LlmBackendSettings::Tencent { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: "hunyuan-lite".to_string(),
                }
            }
        }
        "longcat" => {
            if let crate::settings::LlmBackendSettings::Longcat { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: "LongCat-Flash-Chat".to_string(),
                }
            }
        }
        "ollama" => {
            if let crate::settings::LlmBackendSettings::Ollama { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: "llama2".to_string(),
                }
            }
        }
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ API key updated successfully for provider: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("API key updated for provider: {}", request.provider),
                "provider": request.provider,
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to update API key: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to update API key: {}", e),
            })))
        }
    }
}

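/// Hot-switch the active provider: reuse the current config's API key when
/// none is supplied, pick a provider-default model when none is given, then
/// rebuild the LLM service in place.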
pub async fn switch_provider(
    State(state): State<AppState>,
    Json(request): Json<SwitchProviderRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    let masked_key = request.api_key.as_ref().map(|k| mask_api_key(k)).unwrap_or_else(|| "none".to_string());
    info!("🔄 Switching to provider: {} (key: {})", request.provider, masked_key);

    let current_config = state.get_current_config();

    let api_key = if let Some(key) = request.api_key {
        key
    } else {
        match request.provider.as_str() {
            "openai" => {
                if let crate::settings::LlmBackendSettings::OpenAI { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for OpenAI and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "anthropic" => {
                if let crate::settings::LlmBackendSettings::Anthropic { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Anthropic and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "zhipu" => {
                if let crate::settings::LlmBackendSettings::Zhipu { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Zhipu and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "aliyun" => {
                if let crate::settings::LlmBackendSettings::Aliyun { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Aliyun and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "volcengine" => {
                if let crate::settings::LlmBackendSettings::Volcengine { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Volcengine and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "tencent" => {
                if let crate::settings::LlmBackendSettings::Tencent { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Tencent and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "longcat" => {
                if let crate::settings::LlmBackendSettings::Longcat { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Longcat and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            // Ollama runs locally and does not use an API key.
            "ollama" => String::new(),
            _ => {
                error!("❌ Unsupported provider: {}", request.provider);
                return Err(StatusCode::BAD_REQUEST);
            }
        }
    };

    let model = request.model.unwrap_or_else(|| {
        match request.provider.as_str() {
            "openai" => "gpt-4o".to_string(),
            "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
            "zhipu" => "glm-4-flash".to_string(),
            "ollama" => "llama2".to_string(),
            "aliyun" => "qwen-turbo".to_string(),
            "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
            "tencent" => "hunyuan-lite".to_string(),
            "longcat" => "LongCat-Flash-Chat".to_string(),
            _ => "default-model".to_string(),
        }
    });

    let new_backend = match request.provider.as_str() {
        "openai" => crate::settings::LlmBackendSettings::OpenAI {
            api_key,
            base_url: request.base_url,
            model,
        },
        "anthropic" => crate::settings::LlmBackendSettings::Anthropic {
            api_key,
            model,
        },
        "zhipu" => crate::settings::LlmBackendSettings::Zhipu {
            api_key,
            base_url: request.base_url,
            model,
        },
        "ollama" => crate::settings::LlmBackendSettings::Ollama {
            base_url: request.base_url,
            model,
        },
        "aliyun" => crate::settings::LlmBackendSettings::Aliyun {
            api_key,
            model,
        },
        "volcengine" => crate::settings::LlmBackendSettings::Volcengine {
            api_key,
            model,
        },
        "tencent" => crate::settings::LlmBackendSettings::Tencent {
            api_key,
            model,
        },
        "longcat" => crate::settings::LlmBackendSettings::Longcat {
            api_key,
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ Provider switched successfully to: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("Provider switched to: {}", request.provider),
                "provider": request.provider,
                "model": new_backend.get_model(),
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to switch provider: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to switch provider: {}", e),
            })))
        }
    }
}

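/// Acknowledge a shutdown request. Note that this handler only logs and
/// responds; actual process termination is expected to happen outside this
/// handler.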
pub async fn shutdown() -> Json<serde_json::Value> {
    info!("🛑 Shutdown requested via API");

    Json(json!({
        "status": "success",
        "message": "Shutdown signal sent. Please restart with new configuration.",
    }))
}