use axum::{extract::State, http::StatusCode, response::Json};
use serde::{Deserialize, Serialize};
use serde_json::json;
use tracing::{info, error};
use std::sync::atomic::{AtomicU64, Ordering};

use crate::api::AppState;

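/// Masks an API key for safe logging: keys of eight characters or fewer are
/// fully masked; longer keys keep only their first and last four characters.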
fn mask_api_key(api_key: &str) -> String {
    if api_key.len() <= 8 {
        "*".repeat(api_key.len())
    } else {
        format!("{}***{}", &api_key[..4], &api_key[api_key.len() - 4..])
    }
}

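/// Lightweight, provider-specific format check for an API key (prefix and
/// length heuristics only; no network request is made).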
fn validate_api_key(provider: &str, api_key: &str) -> Result<(), String> {
    if api_key.trim().is_empty() {
        return Err("API key cannot be empty".to_string());
    }

    match provider {
        "openai" => {
            if !api_key.starts_with("sk-") {
                return Err("OpenAI API key should start with 'sk-'".to_string());
            }
        }
        "anthropic" => {
            if !api_key.starts_with("sk-ant-") {
                return Err("Anthropic API key should start with 'sk-ant-'".to_string());
            }
        }
        "zhipu" => {
            if api_key.len() < 10 {
                return Err("Zhipu API key seems too short".to_string());
            }
        }
        // Ollama runs locally and does not require an API key.
        "ollama" => {}
        _ => {
            if api_key.len() < 10 {
                return Err("API key seems too short".to_string());
            }
        }
    }

    Ok(())
}

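/// Checks that `provider` is one of the supported backend identifiers.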
fn validate_provider(provider: &str) -> Result<(), String> {
    match provider {
        "openai" | "anthropic" | "zhipu" | "ollama" | "aliyun" | "volcengine" | "tencent"
        | "longcat" | "moonshot" => Ok(()),
        _ => Err(format!("Unsupported provider: {}", provider)),
    }
}

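/// Instance identifier for this process, set once at startup to the current
/// Unix timestamp so clients can detect when the service has restarted.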
static INSTANCE_ID: AtomicU64 = AtomicU64::new(0);

pub fn init_instance_id() {
    use std::time::{SystemTime, UNIX_EPOCH};
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    INSTANCE_ID.store(timestamp, Ordering::SeqCst);
}

pub fn get_instance_id() -> u64 {
    INSTANCE_ID.load(Ordering::SeqCst)
}

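/// Request body for preparing a config update that requires a restart.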
#[derive(Debug, Deserialize)]
pub struct UpdateConfigRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

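/// Request body for hot-updating an API key without a restart.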
#[derive(Debug, Deserialize)]
pub struct UpdateKeyRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub base_url: Option<String>,
}

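/// Request body for switching to a different provider at runtime.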
#[derive(Debug, Deserialize)]
pub struct SwitchProviderRequest {
    pub provider: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub api_key: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

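/// Summary of the currently active LLM backend configuration.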
#[derive(Debug, Serialize)]
pub struct CurrentConfigResponse {
    pub provider: String,
    pub model: String,
    pub has_api_key: bool,
    pub has_base_url: bool,
    pub supports_hot_reload: bool,
}

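/// GET handler returning the active provider, model, and whether an API key
/// and base URL are configured. The key itself is never exposed.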
pub async fn get_current_config(
    State(state): State<AppState>,
) -> Result<Json<CurrentConfigResponse>, StatusCode> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read().unwrap();
    let (provider, model, has_api_key, has_base_url) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, base_url, .. } => {
            ("openai", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Anthropic { model, .. } => {
            ("anthropic", model.clone(), true, false)
        }
        LlmBackendSettings::Zhipu { model, base_url, .. } => {
            ("zhipu", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Ollama { model, base_url } => {
            ("ollama", model.clone(), false, base_url.is_some())
        }
        LlmBackendSettings::Aliyun { model, .. } => {
            ("aliyun", model.clone(), true, false)
        }
        LlmBackendSettings::Volcengine { model, .. } => {
            ("volcengine", model.clone(), true, false)
        }
        LlmBackendSettings::Tencent { model, .. } => {
            ("tencent", model.clone(), true, false)
        }
        LlmBackendSettings::Longcat { model, .. } => {
            ("longcat", model.clone(), true, false)
        }
        LlmBackendSettings::Moonshot { model, .. } => {
            ("moonshot", model.clone(), true, false)
        }
    };

    Ok(Json(CurrentConfigResponse {
        provider: provider.to_string(),
        model,
        has_api_key,
        has_base_url,
        supports_hot_reload: true,
    }))
}

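/// Health-check handler reporting status, instance ID, PID, and the active
/// provider/model pair.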
pub async fn get_health(
    State(state): State<AppState>,
) -> Json<serde_json::Value> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read().unwrap();
    let (provider, model) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, .. } => ("openai", model.clone()),
        LlmBackendSettings::Anthropic { model, .. } => ("anthropic", model.clone()),
        LlmBackendSettings::Zhipu { model, .. } => ("zhipu", model.clone()),
        LlmBackendSettings::Ollama { model, .. } => ("ollama", model.clone()),
        LlmBackendSettings::Aliyun { model, .. } => ("aliyun", model.clone()),
        LlmBackendSettings::Volcengine { model, .. } => ("volcengine", model.clone()),
        LlmBackendSettings::Tencent { model, .. } => ("tencent", model.clone()),
        LlmBackendSettings::Longcat { model, .. } => ("longcat", model.clone()),
        LlmBackendSettings::Moonshot { model, .. } => ("moonshot", model.clone()),
    };

    Json(json!({
        "status": "ok",
        "instance_id": get_instance_id(),
        "pid": std::process::id(),
        "provider": provider,
        "model": model,
    }))
}

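/// Prepares environment variables and CLI arguments for a restart-based
/// config update; only providers with known env-var mappings are handled
/// here, and the caller is responsible for restarting the process.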
pub async fn update_config_for_restart(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    info!("🔧 Preparing config update for provider: {}", request.provider);

    let default_model = request.model.clone().or_else(|| {
        match request.provider.as_str() {
            "openai" => Some("gpt-4o".to_string()),
            "anthropic" => Some("claude-3-5-sonnet-20241022".to_string()),
            "zhipu" => Some("glm-4-flash".to_string()),
            "ollama" => Some("llama2".to_string()),
            "aliyun" => Some("qwen-turbo".to_string()),
            "volcengine" => Some("ep-20241023xxxxx-xxxxx".to_string()),
            "tencent" => Some("hunyuan-lite".to_string()),
            _ => None,
        }
    });

    let model = match default_model {
        Some(m) => m,
        None => {
            error!("❌ Unknown provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    let mut env_vars = serde_json::Map::new();

    let api_key_var = match request.provider.as_str() {
        "openai" => "OPENAI_API_KEY",
        "anthropic" => "ANTHROPIC_API_KEY",
        "zhipu" => "ZHIPU_API_KEY",
        "aliyun" => "ALIYUN_API_KEY",
        "volcengine" => "VOLCENGINE_API_KEY",
        "tencent" => "TENCENT_API_KEY",
        // Ollama needs no API key, so no env var is set for it.
        "ollama" => "",
        _ => return Err(StatusCode::BAD_REQUEST),
    };

    if !api_key_var.is_empty() {
        env_vars.insert(api_key_var.to_string(), json!(request.api_key));
    }

    if let Some(base_url) = request.base_url {
        let base_url_var = match request.provider.as_str() {
            "openai" => "OPENAI_BASE_URL",
            "zhipu" => "ZHIPU_BASE_URL",
            "ollama" => "OLLAMA_BASE_URL",
            _ => "",
        };
        if !base_url_var.is_empty() {
            env_vars.insert(base_url_var.to_string(), json!(base_url));
        }
    }

    info!("✅ Config prepared for restart with provider: {}", request.provider);

    Ok(Json(json!({
        "status": "success",
        "message": format!("Config prepared for provider: {}", request.provider),
        "restart_required": true,
        "current_instance_id": get_instance_id(),
        "env_vars": env_vars,
        "cli_args": {
            "provider": request.provider,
            "model": model,
        }
    })))
}

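/// Validates an API key by constructing a temporary service and listing the
/// provider's models; responds with `valid`, `invalid`, or `error`.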
pub async fn validate_key(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!("🔍 Validating API key for provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let model = request.model.clone().unwrap_or_else(|| "test-model".to_string());

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        "moonshot" => LlmBackendSettings::Moonshot {
            api_key: request.api_key.clone(),
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => {
            match service.list_models().await {
                Ok(models) => {
                    info!("✅ API key validated successfully, found {} models", models.len());
                    Ok(Json(json!({
                        "status": "valid",
                        "message": "API key is valid",
                        "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                    })))
                }
                Err(e) => {
                    error!("❌ API key validation failed: {:?}", e);
                    Ok(Json(json!({
                        "status": "invalid",
                        "message": format!("Failed to list models: {}", e),
                    })))
                }
            }
        }
        Err(e) => {
            error!("❌ Failed to create service: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
            })))
        }
    }
}

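/// Returns the current process ID so an external supervisor can restart the
/// service.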
pub async fn get_pid() -> Json<serde_json::Value> {
    let pid = std::process::id();

    Json(json!({
        "pid": pid,
        "message": "Use this PID to restart the service"
    }))
}

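/// Validates an API key against a provider's default model ahead of a hot
/// update, without touching the running configuration.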
pub async fn validate_key_for_update(
    State(_state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!("🔍 Validating API key for hot update - provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let model = match request.provider.as_str() {
        "openai" => "gpt-4o".to_string(),
        "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
        "zhipu" => "glm-4-flash".to_string(),
        "ollama" => "llama2".to_string(),
        "aliyun" => "qwen-turbo".to_string(),
        "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
        "tencent" => "hunyuan-lite".to_string(),
        "longcat" => "LongCat-Flash-Chat".to_string(),
        // Same default model as `update_key` uses for Moonshot.
        "moonshot" => "kimi-k2-turbo-preview".to_string(),
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        "moonshot" => LlmBackendSettings::Moonshot {
            api_key: request.api_key.clone(),
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => {
            match service.list_models().await {
                Ok(models) => {
                    info!("✅ API key validated successfully for hot update, found {} models", models.len());
                    Ok(Json(json!({
                        "status": "valid",
                        "message": "API key is valid and ready for hot update",
                        "provider": request.provider,
                        "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                        "supports_hot_reload": true,
                    })))
                }
                Err(e) => {
                    error!("❌ API key validation failed for hot update: {:?}", e);
                    Ok(Json(json!({
                        "status": "invalid",
                        "message": format!("Failed to list models: {}", e),
                        "provider": request.provider,
                    })))
                }
            }
        }
        Err(e) => {
            error!("❌ Failed to create service for hot update validation: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
                "provider": request.provider,
            })))
        }
    }
}

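/// Hot-updates the API key (and optional base URL) for the given provider,
/// preserving the current model when the provider is already active.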
pub async fn update_key(
    State(state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    if request.provider != "ollama" {
        if let Err(e) = validate_api_key(&request.provider, &request.api_key) {
            error!("❌ Invalid API key format: {}", e);
            return Ok(Json(json!({
                "status": "error",
                "message": format!("Invalid API key format: {}", e),
            })));
        }
    }

    info!("🔧 Updating API key for provider: {} (key: {})", request.provider, mask_api_key(&request.api_key));

    let current_config = state.get_current_config();

    // Keep the currently configured model when the provider is unchanged;
    // otherwise fall back to a default model for the new provider.
    let new_backend = match request.provider.as_str() {
        "openai" => {
            if let crate::settings::LlmBackendSettings::OpenAI { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "gpt-4o".to_string(),
                }
            }
        }
        "anthropic" => {
            if let crate::settings::LlmBackendSettings::Anthropic { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: "claude-3-5-sonnet-20241022".to_string(),
                }
            }
        }
        "zhipu" => {
            if let crate::settings::LlmBackendSettings::Zhipu { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "glm-4-flash".to_string(),
                }
            }
        }
        "aliyun" => {
            if let crate::settings::LlmBackendSettings::Aliyun { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: "qwen-turbo".to_string(),
                }
            }
        }
        "volcengine" => {
            if let crate::settings::LlmBackendSettings::Volcengine { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: "ep-20241023xxxxx-xxxxx".to_string(),
                }
            }
        }
        "tencent" => {
            if let crate::settings::LlmBackendSettings::Tencent { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: "hunyuan-lite".to_string(),
                }
            }
        }
        "longcat" => {
            if let crate::settings::LlmBackendSettings::Longcat { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: "LongCat-Flash-Chat".to_string(),
                }
            }
        }
        "moonshot" => {
            if let crate::settings::LlmBackendSettings::Moonshot { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Moonshot {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Moonshot {
                    api_key: request.api_key.clone(),
                    model: "kimi-k2-turbo-preview".to_string(),
                }
            }
        }
        "ollama" => {
            if let crate::settings::LlmBackendSettings::Ollama { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: "llama2".to_string(),
                }
            }
        }
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ API key updated successfully for provider: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("API key updated for provider: {}", request.provider),
                "provider": request.provider,
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to update API key: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to update API key: {}", e),
            })))
        }
    }
}

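/// Switches the running service to a different provider, reusing the current
/// config's API key when the request omits one.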
pub async fn switch_provider(
    State(state): State<AppState>,
    Json(request): Json<SwitchProviderRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    let masked_key = request.api_key.as_ref().map(|k| mask_api_key(k)).unwrap_or_else(|| "none".to_string());
    info!("🔄 Switching to provider: {} (key: {})", request.provider, masked_key);

    let current_config = state.get_current_config();

    // When no key is supplied, fall back to the key in the current config,
    // but only if the current backend already belongs to the requested provider.
    let api_key = if let Some(key) = request.api_key {
        key
    } else {
        match request.provider.as_str() {
            "openai" => {
                if let crate::settings::LlmBackendSettings::OpenAI { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for OpenAI and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "anthropic" => {
                if let crate::settings::LlmBackendSettings::Anthropic { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Anthropic and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "zhipu" => {
                if let crate::settings::LlmBackendSettings::Zhipu { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Zhipu and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "aliyun" => {
                if let crate::settings::LlmBackendSettings::Aliyun { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Aliyun and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "volcengine" => {
                if let crate::settings::LlmBackendSettings::Volcengine { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Volcengine and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "tencent" => {
                if let crate::settings::LlmBackendSettings::Tencent { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Tencent and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "longcat" => {
                if let crate::settings::LlmBackendSettings::Longcat { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Longcat and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "moonshot" => {
                if let crate::settings::LlmBackendSettings::Moonshot { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Moonshot and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            // Ollama does not use an API key.
            "ollama" => String::new(),
            _ => {
                error!("❌ Unsupported provider: {}", request.provider);
                return Err(StatusCode::BAD_REQUEST);
            }
        }
    };

    let model = request.model.unwrap_or_else(|| {
        match request.provider.as_str() {
            "openai" => "gpt-4o".to_string(),
            "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
            "zhipu" => "glm-4-flash".to_string(),
            "ollama" => "llama2".to_string(),
            "aliyun" => "qwen-turbo".to_string(),
            "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
            "tencent" => "hunyuan-lite".to_string(),
            "longcat" => "LongCat-Flash-Chat".to_string(),
            "moonshot" => "kimi-k2-turbo-preview".to_string(),
            _ => "default-model".to_string(),
        }
    });

    let new_backend = match request.provider.as_str() {
        "openai" => crate::settings::LlmBackendSettings::OpenAI {
            api_key,
            base_url: request.base_url,
            model,
        },
        "anthropic" => crate::settings::LlmBackendSettings::Anthropic {
            api_key,
            model,
        },
        "zhipu" => crate::settings::LlmBackendSettings::Zhipu {
            api_key,
            base_url: request.base_url,
            model,
        },
        "ollama" => crate::settings::LlmBackendSettings::Ollama {
            base_url: request.base_url,
            model,
        },
        "aliyun" => crate::settings::LlmBackendSettings::Aliyun {
            api_key,
            model,
        },
        "volcengine" => crate::settings::LlmBackendSettings::Volcengine {
            api_key,
            model,
        },
        "tencent" => crate::settings::LlmBackendSettings::Tencent {
            api_key,
            model,
        },
        "longcat" => crate::settings::LlmBackendSettings::Longcat {
            api_key,
            model,
        },
        "moonshot" => crate::settings::LlmBackendSettings::Moonshot {
            api_key,
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ Provider switched successfully to: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("Provider switched to: {}", request.provider),
                "provider": request.provider,
                "model": new_backend.get_model(),
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to switch provider: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to switch provider: {}", e),
            })))
        }
    }
}

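/// Shutdown endpoint: acknowledges the request so a supervisor can restart
/// the service with new configuration.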
pub async fn shutdown() -> Json<serde_json::Value> {
    info!("🛑 Shutdown requested via API");

    Json(json!({
        "status": "success",
        "message": "Shutdown signal sent. Please restart with new configuration.",
    }))
}