use axum::{extract::State, http::StatusCode, response::Json};
use serde::{Deserialize, Serialize};
use serde_json::json;
use tracing::{info, error};
use std::sync::atomic::{AtomicU64, Ordering};

use crate::api::AppState;

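/// Masks an API key for logging: keys of 8 characters or fewer are fully
/// masked, longer keys keep only their first and last four characters.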
fn mask_api_key(api_key: &str) -> String {
    if api_key.len() <= 8 {
        "*".repeat(api_key.len())
    } else {
        format!("{}***{}", &api_key[..4], &api_key[api_key.len() - 4..])
    }
}

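/// Lightweight, provider-specific format checks for an API key. This only
/// rejects obviously malformed input; it does not contact the provider.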
fn validate_api_key(provider: &str, api_key: &str) -> Result<(), String> {
    if api_key.trim().is_empty() {
        return Err("API key cannot be empty".to_string());
    }

    match provider {
        "openai" => {
            if !api_key.starts_with("sk-") {
                return Err("OpenAI API key should start with 'sk-'".to_string());
            }
        }
        "anthropic" => {
            if !api_key.starts_with("sk-ant-") {
                return Err("Anthropic API key should start with 'sk-ant-'".to_string());
            }
        }
        "zhipu" => {
            if api_key.len() < 10 {
                return Err("Zhipu API key seems too short".to_string());
            }
        }
        "ollama" => {
            // Ollama runs locally and does not use an API key, so there is nothing to check.
        }
        _ => {
            if api_key.len() < 10 {
                return Err("API key seems too short".to_string());
            }
        }
    }

    Ok(())
}

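/// Checks that `provider` names a backend this service supports.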
fn validate_provider(provider: &str) -> Result<(), String> {
    match provider {
        "openai" | "anthropic" | "zhipu" | "ollama" | "aliyun" | "volcengine" | "tencent"
        | "longcat" | "moonshot" => Ok(()),
        _ => Err(format!("Unsupported provider: {}", provider)),
    }
}

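/// Process-wide instance identifier, set once at startup (see `init_instance_id`).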
static INSTANCE_ID: AtomicU64 = AtomicU64::new(0);

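/// Records the current Unix timestamp as this instance's ID. Intended to be
/// called once during startup so restarts are observable via the health endpoint.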
pub fn init_instance_id() {
    use std::time::{SystemTime, UNIX_EPOCH};
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("System time went backwards")
        .as_secs();
    INSTANCE_ID.store(timestamp, Ordering::SeqCst);
}

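/// Returns the instance ID recorded at startup (0 if `init_instance_id` has not run).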
pub fn get_instance_id() -> u64 {
    INSTANCE_ID.load(Ordering::SeqCst)
}

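/// Request body for config updates and key validation; `model` and `base_url`
/// fall back to provider defaults when omitted.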
#[derive(Debug, Deserialize)]
pub struct UpdateConfigRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

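/// Request body for hot-updating an API key while keeping the current model.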
#[derive(Debug, Deserialize)]
pub struct UpdateKeyRequest {
    pub provider: String,
    pub api_key: String,
    #[serde(default)]
    pub base_url: Option<String>,
}

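/// Request body for switching providers; omitted fields are filled from the
/// current config or provider defaults.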
#[derive(Debug, Deserialize)]
pub struct SwitchProviderRequest {
    pub provider: String,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub api_key: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
}

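/// Snapshot of the active backend configuration. The API key and base URL are
/// reduced to presence flags so secrets are never echoed back.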
#[derive(Debug, Serialize)]
pub struct CurrentConfigResponse {
    pub provider: String,
    pub model: String,
    pub has_api_key: bool,
    pub has_base_url: bool,
    pub supports_hot_reload: bool,
}

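/// Handler returning the active provider, model, and capability flags.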
pub async fn get_current_config(
    State(state): State<AppState>,
) -> Result<Json<CurrentConfigResponse>, StatusCode> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let (provider, model, has_api_key, has_base_url) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, base_url, .. } => {
            ("openai", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Anthropic { model, .. } => {
            ("anthropic", model.clone(), true, false)
        }
        LlmBackendSettings::Zhipu { model, base_url, .. } => {
            ("zhipu", model.clone(), true, base_url.is_some())
        }
        LlmBackendSettings::Ollama { model, base_url } => {
            ("ollama", model.clone(), false, base_url.is_some())
        }
        LlmBackendSettings::Aliyun { model, .. } => {
            ("aliyun", model.clone(), true, false)
        }
        LlmBackendSettings::Volcengine { model, .. } => {
            ("volcengine", model.clone(), true, false)
        }
        LlmBackendSettings::Tencent { model, .. } => {
            ("tencent", model.clone(), true, false)
        }
        LlmBackendSettings::Longcat { model, .. } => {
            ("longcat", model.clone(), true, false)
        }
        LlmBackendSettings::Moonshot { model, .. } => {
            ("moonshot", model.clone(), true, false)
        }
    };

    Ok(Json(CurrentConfigResponse {
        provider: provider.to_string(),
        model,
        has_api_key,
        has_base_url,
        supports_hot_reload: true,
    }))
}

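/// Health-check handler. Includes the instance ID and PID so callers can tell
/// whether the service has restarted since a config change.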
pub async fn get_health(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;

    let config = state.config.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let (provider, model) = match &config.llm_backend {
        LlmBackendSettings::OpenAI { model, .. } => ("openai", model.clone()),
        LlmBackendSettings::Anthropic { model, .. } => ("anthropic", model.clone()),
        LlmBackendSettings::Zhipu { model, .. } => ("zhipu", model.clone()),
        LlmBackendSettings::Ollama { model, .. } => ("ollama", model.clone()),
        LlmBackendSettings::Aliyun { model, .. } => ("aliyun", model.clone()),
        LlmBackendSettings::Volcengine { model, .. } => ("volcengine", model.clone()),
        LlmBackendSettings::Tencent { model, .. } => ("tencent", model.clone()),
        LlmBackendSettings::Longcat { model, .. } => ("longcat", model.clone()),
        LlmBackendSettings::Moonshot { model, .. } => ("moonshot", model.clone()),
    };

    Ok(Json(json!({
        "status": "ok",
        "instance_id": get_instance_id(),
        "pid": std::process::id(),
        "provider": provider,
        "model": model,
    })))
}

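/// Prepares a restart-based config update. Validates the provider, picks a
/// default model when none is given, and returns the environment variables and
/// CLI arguments the caller should apply when restarting the process; nothing
/// in the running instance is modified.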
pub async fn update_config_for_restart(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    info!("🔧 Preparing config update for provider: {}", request.provider);

    let model = if let Some(model) = request.model {
        model
    } else {
        match request.provider.as_str() {
            "openai" => "gpt-4o".to_string(),
            "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
            "zhipu" => "glm-4-flash".to_string(),
            "ollama" => "llama2".to_string(),
            "aliyun" => "qwen-turbo".to_string(),
            "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
            "tencent" => "hunyuan-lite".to_string(),
            _ => {
                error!("❌ Unknown provider: {}", request.provider);
                return Err(StatusCode::BAD_REQUEST);
            }
        }
    };

    let mut env_vars = serde_json::Map::with_capacity(3);

    let api_key_var = match request.provider.as_str() {
        "openai" => "OPENAI_API_KEY",
        "anthropic" => "ANTHROPIC_API_KEY",
        "zhipu" => "ZHIPU_API_KEY",
        "aliyun" => "ALIYUN_API_KEY",
        "volcengine" => "VOLCENGINE_API_KEY",
        "tencent" => "TENCENT_API_KEY",
        // Ollama runs locally and has no key to export.
        "ollama" => "",
        _ => return Err(StatusCode::BAD_REQUEST),
    };

    if !api_key_var.is_empty() {
        env_vars.insert(api_key_var.to_string(), json!(request.api_key));
    }

    if let Some(base_url) = request.base_url {
        let base_url_var = match request.provider.as_str() {
            "openai" => "OPENAI_BASE_URL",
            "zhipu" => "ZHIPU_BASE_URL",
            "ollama" => "OLLAMA_BASE_URL",
            _ => "",
        };
        if !base_url_var.is_empty() {
            env_vars.insert(base_url_var.to_string(), json!(base_url));
        }
    }

    info!("✅ Config prepared for restart with provider: {}", request.provider);

    Ok(Json(json!({
        "status": "success",
        "message": format!("Config prepared for provider: {}", request.provider),
        "restart_required": true,
        "current_instance_id": get_instance_id(),
        "env_vars": env_vars,
        "cli_args": {
            "provider": request.provider,
            "model": model,
        }
    })))
}

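/// Validates an API key by building a throwaway backend and listing its
/// models. Validation outcomes are reported in the JSON body (`valid`,
/// `invalid`, or `error`) rather than as HTTP errors.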
pub async fn validate_key(
    State(_state): State<AppState>,
    Json(request): Json<UpdateConfigRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!(
        "🔍 Validating API key for provider: {} (key: {})",
        request.provider,
        mask_api_key(&request.api_key)
    );

    let model = request.model.unwrap_or_else(|| "test-model".to_string());

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        "moonshot" => LlmBackendSettings::Moonshot {
            api_key: request.api_key.clone(),
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => match service.list_models().await {
            Ok(models) => {
                info!("✅ API key validated successfully, found {} models", models.len());
                Ok(Json(json!({
                    "status": "valid",
                    "message": "API key is valid",
                    "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                })))
            }
            Err(e) => {
                error!("❌ API key validation failed: {:?}", e);
                Ok(Json(json!({
                    "status": "invalid",
                    "message": format!("Failed to list models: {}", e),
                })))
            }
        },
        Err(e) => {
            error!("❌ Failed to create service: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
            })))
        }
    }
}

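/// Returns the current process ID so a caller can restart the service.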
pub async fn get_pid() -> Json<serde_json::Value> {
    let pid = std::process::id();

    Json(json!({
        "pid": pid,
        "message": "Use this PID to restart the service"
    }))
}

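/// Like `validate_key`, but for hot updates: the model is always the provider
/// default, and the response advertises hot-reload support.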
pub async fn validate_key_for_update(
    State(_state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::settings::LlmBackendSettings;
    use crate::service::Service;

    info!(
        "🔍 Validating API key for hot update - provider: {} (key: {})",
        request.provider,
        mask_api_key(&request.api_key)
    );

    let model = match request.provider.as_str() {
        "openai" => "gpt-4o".to_string(),
        "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
        "zhipu" => "glm-4-flash".to_string(),
        "ollama" => "llama2".to_string(),
        "aliyun" => "qwen-turbo".to_string(),
        "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
        "tencent" => "hunyuan-lite".to_string(),
        "longcat" => "LongCat-Flash-Chat".to_string(),
        "moonshot" => "kimi-k2-turbo-preview".to_string(),
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    let test_backend = match request.provider.as_str() {
        "openai" => LlmBackendSettings::OpenAI {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "anthropic" => LlmBackendSettings::Anthropic {
            api_key: request.api_key.clone(),
            model,
        },
        "zhipu" => LlmBackendSettings::Zhipu {
            api_key: request.api_key.clone(),
            base_url: request.base_url.clone(),
            model,
        },
        "ollama" => LlmBackendSettings::Ollama {
            base_url: request.base_url.clone(),
            model,
        },
        "aliyun" => LlmBackendSettings::Aliyun {
            api_key: request.api_key.clone(),
            model,
        },
        "volcengine" => LlmBackendSettings::Volcengine {
            api_key: request.api_key.clone(),
            model,
        },
        "tencent" => LlmBackendSettings::Tencent {
            api_key: request.api_key.clone(),
            model,
        },
        "longcat" => LlmBackendSettings::Longcat {
            api_key: request.api_key.clone(),
            model,
        },
        "moonshot" => LlmBackendSettings::Moonshot {
            api_key: request.api_key.clone(),
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match Service::new(&test_backend) {
        Ok(service) => match service.list_models().await {
            Ok(models) => {
                info!(
                    "✅ API key validated successfully for hot update, found {} models",
                    models.len()
                );
                Ok(Json(json!({
                    "status": "valid",
                    "message": "API key is valid and ready for hot update",
                    "provider": request.provider,
                    "models": models.iter().map(|m| &m.id).collect::<Vec<_>>(),
                    "supports_hot_reload": true,
                })))
            }
            Err(e) => {
                error!("❌ API key validation failed for hot update: {:?}", e);
                Ok(Json(json!({
                    "status": "invalid",
                    "message": format!("Failed to list models: {}", e),
                    "provider": request.provider,
                })))
            }
        },
        Err(e) => {
            error!("❌ Failed to create service for hot update validation: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to create service: {}", e),
                "provider": request.provider,
            })))
        }
    }
}

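/// Hot-updates the API key without a restart. Keeps the currently configured
/// model when the provider matches the running backend, otherwise falls back
/// to that provider's default model.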
pub async fn update_key(
    State(state): State<AppState>,
    Json(request): Json<UpdateKeyRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    if request.provider != "ollama" {
        if let Err(e) = validate_api_key(&request.provider, &request.api_key) {
            error!("❌ Invalid API key format: {}", e);
            return Ok(Json(json!({
                "status": "error",
                "message": format!("Invalid API key format: {}", e),
            })));
        }
    }

    info!(
        "🔧 Updating API key for provider: {} (key: {})",
        request.provider,
        mask_api_key(&request.api_key)
    );

    let current_config = state.get_current_config()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    // Keep the currently configured model when the provider is unchanged;
    // otherwise fall back to a default for the new provider.
    let new_backend = match request.provider.as_str() {
        "openai" => {
            if let crate::settings::LlmBackendSettings::OpenAI { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::OpenAI {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "gpt-4o".to_string(),
                }
            }
        }
        "anthropic" => {
            if let crate::settings::LlmBackendSettings::Anthropic { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Anthropic {
                    api_key: request.api_key.clone(),
                    model: "claude-3-5-sonnet-20241022".to_string(),
                }
            }
        }
        "zhipu" => {
            if let crate::settings::LlmBackendSettings::Zhipu { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Zhipu {
                    api_key: request.api_key.clone(),
                    base_url: request.base_url.clone(),
                    model: "glm-4-flash".to_string(),
                }
            }
        }
        "aliyun" => {
            if let crate::settings::LlmBackendSettings::Aliyun { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Aliyun {
                    api_key: request.api_key.clone(),
                    model: "qwen-turbo".to_string(),
                }
            }
        }
        "volcengine" => {
            if let crate::settings::LlmBackendSettings::Volcengine { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Volcengine {
                    api_key: request.api_key.clone(),
                    model: "ep-20241023xxxxx-xxxxx".to_string(),
                }
            }
        }
        "tencent" => {
            if let crate::settings::LlmBackendSettings::Tencent { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Tencent {
                    api_key: request.api_key.clone(),
                    model: "hunyuan-lite".to_string(),
                }
            }
        }
        "longcat" => {
            if let crate::settings::LlmBackendSettings::Longcat { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Longcat {
                    api_key: request.api_key.clone(),
                    model: "LongCat-Flash-Chat".to_string(),
                }
            }
        }
        "moonshot" => {
            if let crate::settings::LlmBackendSettings::Moonshot { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Moonshot {
                    api_key: request.api_key.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Moonshot {
                    api_key: request.api_key.clone(),
                    model: "kimi-k2-turbo-preview".to_string(),
                }
            }
        }
        "ollama" => {
            if let crate::settings::LlmBackendSettings::Ollama { model, .. } = &current_config.llm_backend {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: model.clone(),
                }
            } else {
                crate::settings::LlmBackendSettings::Ollama {
                    base_url: request.base_url.clone(),
                    model: "llama2".to_string(),
                }
            }
        }
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ API key updated successfully for provider: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("API key updated for provider: {}", request.provider),
                "provider": request.provider,
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to update API key: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to update API key: {}", e),
            })))
        }
    }
}

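/// Hot-switches the active LLM backend. The API key comes from the request or,
/// when the provider is unchanged, from the current config; the model falls
/// back to a provider default.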
pub async fn switch_provider(
    State(state): State<AppState>,
    Json(request): Json<SwitchProviderRequest>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = validate_provider(&request.provider) {
        error!("❌ Invalid provider: {}", e);
        return Err(StatusCode::BAD_REQUEST);
    }

    let masked_key = request.api_key.as_ref().map(|k| mask_api_key(k)).unwrap_or_else(|| "none".to_string());
    info!("🔄 Switching to provider: {} (key: {})", request.provider, masked_key);

    let current_config = state.get_current_config()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    // Use the key from the request if present; otherwise reuse the key from the
    // current config, which only succeeds when the provider is unchanged.
    let api_key = if let Some(key) = request.api_key {
        key
    } else {
        match request.provider.as_str() {
            "openai" => {
                if let crate::settings::LlmBackendSettings::OpenAI { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for OpenAI and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "anthropic" => {
                if let crate::settings::LlmBackendSettings::Anthropic { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Anthropic and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "zhipu" => {
                if let crate::settings::LlmBackendSettings::Zhipu { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Zhipu and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "aliyun" => {
                if let crate::settings::LlmBackendSettings::Aliyun { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Aliyun and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "volcengine" => {
                if let crate::settings::LlmBackendSettings::Volcengine { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Volcengine and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "tencent" => {
                if let crate::settings::LlmBackendSettings::Tencent { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Tencent and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "longcat" => {
                if let crate::settings::LlmBackendSettings::Longcat { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Longcat and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            "moonshot" => {
                if let crate::settings::LlmBackendSettings::Moonshot { api_key, .. } = &current_config.llm_backend {
                    api_key.clone()
                } else {
                    error!("❌ No API key provided for Moonshot and none found in current config");
                    return Err(StatusCode::BAD_REQUEST);
                }
            }
            // Ollama does not use an API key.
            "ollama" => String::new(),
            _ => {
                error!("❌ Unsupported provider: {}", request.provider);
                return Err(StatusCode::BAD_REQUEST);
            }
        }
    };

    let model = request.model.unwrap_or_else(|| {
        match request.provider.as_str() {
            "openai" => "gpt-4o".to_string(),
            "anthropic" => "claude-3-5-sonnet-20241022".to_string(),
            "zhipu" => "glm-4-flash".to_string(),
            "ollama" => "llama2".to_string(),
            "aliyun" => "qwen-turbo".to_string(),
            "volcengine" => "ep-20241023xxxxx-xxxxx".to_string(),
            "tencent" => "hunyuan-lite".to_string(),
            "longcat" => "LongCat-Flash-Chat".to_string(),
            "moonshot" => "kimi-k2-turbo-preview".to_string(),
            _ => "default-model".to_string(),
        }
    });

    let new_backend = match request.provider.as_str() {
        "openai" => crate::settings::LlmBackendSettings::OpenAI {
            api_key,
            base_url: request.base_url,
            model,
        },
        "anthropic" => crate::settings::LlmBackendSettings::Anthropic {
            api_key,
            model,
        },
        "zhipu" => crate::settings::LlmBackendSettings::Zhipu {
            api_key,
            base_url: request.base_url,
            model,
        },
        "ollama" => crate::settings::LlmBackendSettings::Ollama {
            base_url: request.base_url,
            model,
        },
        "aliyun" => crate::settings::LlmBackendSettings::Aliyun {
            api_key,
            model,
        },
        "volcengine" => crate::settings::LlmBackendSettings::Volcengine {
            api_key,
            model,
        },
        "tencent" => crate::settings::LlmBackendSettings::Tencent {
            api_key,
            model,
        },
        "longcat" => crate::settings::LlmBackendSettings::Longcat {
            api_key,
            model,
        },
        "moonshot" => crate::settings::LlmBackendSettings::Moonshot {
            api_key,
            model,
        },
        _ => {
            error!("❌ Unsupported provider: {}", request.provider);
            return Err(StatusCode::BAD_REQUEST);
        }
    };

    match state.update_llm_service(&new_backend) {
        Ok(()) => {
            info!("✅ Provider switched successfully to: {}", request.provider);
            Ok(Json(json!({
                "status": "success",
                "message": format!("Provider switched to: {}", request.provider),
                "provider": request.provider,
                "model": new_backend.get_model(),
                "restart_required": false,
            })))
        }
        Err(e) => {
            error!("❌ Failed to switch provider: {:?}", e);
            Ok(Json(json!({
                "status": "error",
                "message": format!("Failed to switch provider: {}", e),
            })))
        }
    }
}

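/// Acknowledges a shutdown request. Note that this handler only logs and
/// responds; actual process termination is expected to be handled externally.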
pub async fn shutdown() -> Json<serde_json::Value> {
    info!("🛑 Shutdown requested via API");

    Json(json!({
        "status": "success",
        "message": "Shutdown signal sent. Please restart with new configuration.",
    }))
}