//! rush_sync_server/server/shared.rs
//!
//! Process-wide shared state for the server subsystem: the global
//! server context, the persistent server registry, and the proxy
//! manager, plus the lifecycle entry points that use them.
1use crate::core::config::Config;
2use crate::proxy::ProxyManager;
3use crate::server::persistence::ServerRegistry;
4use crate::server::types::{ServerContext, ServerStatus};
5use crate::server::utils::port::is_port_available;
6use std::sync::{Arc, OnceLock};
7
// Lazily-initialized, process-wide singletons. Each is set exactly once
// on first access (via `get_or_init` in the accessors below) and lives
// for the remainder of the process.
static SHARED_CONTEXT: OnceLock<ServerContext> = OnceLock::new();
static PERSISTENT_REGISTRY: OnceLock<ServerRegistry> = OnceLock::new();
static PROXY_MANAGER: OnceLock<Arc<ProxyManager>> = OnceLock::new();
11
12pub fn get_shared_context() -> &'static ServerContext {
13    SHARED_CONTEXT.get_or_init(ServerContext::default)
14}
15
16pub fn get_persistent_registry() -> &'static ServerRegistry {
17    PERSISTENT_REGISTRY.get_or_init(|| match ServerRegistry::new() {
18        Ok(registry) => registry,
19        Err(e) => {
20            log::error!(
21                "Failed to initialize server registry: {}, using fallback",
22                e
23            );
24            ServerRegistry::with_fallback()
25        }
26    })
27}
28
/// Returns the process-wide proxy manager, creating it on first access.
///
/// Initialization blocks the current thread to run the async
/// `Config::load()` synchronously, falling back to the default config
/// on load failure.
///
/// NOTE(review): `tokio::task::block_in_place` panics if called from a
/// current-thread (single-threaded) Tokio runtime, and
/// `Handle::current()` panics when called outside any runtime — so the
/// *first* call to this function must happen on a multi-thread runtime
/// worker. TODO: confirm all call sites satisfy this.
pub fn get_proxy_manager() -> &'static Arc<ProxyManager> {
    PROXY_MANAGER.get_or_init(|| {
        // Load config and create proxy manager
        let config = tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current().block_on(async {
                crate::core::config::Config::load()
                    .await
                    .unwrap_or_default()
            })
        });

        Arc::new(ProxyManager::new(config.proxy))
    })
}
43
44// Start the proxy system
45async fn start_proxy_system(config: &Config) -> crate::core::error::Result<()> {
46    if !config.proxy.enabled {
47        log::info!("Proxy system disabled in config");
48        return Ok(());
49    }
50
51    let proxy_manager = get_proxy_manager();
52
53    // Start proxy server (HTTP + HTTPS)
54    Arc::clone(proxy_manager).start_proxy_server().await?;
55
56    log::info!("Proxy system started:");
57    log::info!(
58        "  HTTP:  http://{{name}}.{}:{}",
59        config.server.production_domain,
60        config.proxy.port
61    );
62
63    let https_port = config.proxy.port + config.proxy.https_port_offset;
64    log::info!(
65        "  HTTPS: https://{{name}}.{}:{}",
66        config.server.production_domain,
67        https_port
68    );
69
70    Ok(())
71}
72
/// Boot-time initialization of the server subsystem.
///
/// Order matters here:
/// 1. Load config and publish it to the web handlers.
/// 2. Reconcile persisted server statuses with reality — a fresh
///    process cannot have live server tasks, so stale `Running` /
///    `Failed` entries are normalized to `Stopped` and saved back.
/// 3. Mirror the persisted registry into the in-memory context.
/// 4. If the proxy is enabled: start the proxy, the HTTP->HTTPS
///    redirect, and (for non-localhost Let's Encrypt setups) the ACME
///    background provisioner.
///
/// Returns an error if config loading, registry I/O, or the context
/// lock fails; proxy/redirect startup failures are logged, not
/// propagated.
pub async fn initialize_server_system() -> crate::core::error::Result<()> {
    let config = Config::load().await?;

    crate::server::handlers::web::set_global_config(config.clone());

    let registry = get_persistent_registry();
    let context = get_shared_context();

    let mut persistent_servers = registry.load_servers().await?;
    let mut corrected_servers = 0;

    // Normalize statuses left over from the previous process run.
    for (_server_id, persistent_info) in persistent_servers.iter_mut() {
        match persistent_info.status {
            ServerStatus::Running => {
                // Port occupied means *something* is listening there,
                // but it cannot be one of our tasks (fresh process).
                if !is_port_available(persistent_info.port, &config.server.bind_address) {
                    log::warn!(
                        "Server {} claims to be running on port {}, but port is occupied",
                        persistent_info.name,
                        persistent_info.port
                    );
                } else {
                    log::info!(
                        "Server {} was running but is no longer active, correcting status",
                        persistent_info.name
                    );
                }
                persistent_info.status = ServerStatus::Stopped;
                corrected_servers += 1;
            }
            ServerStatus::Failed => {
                log::info!(
                    "Server {} was failed, resetting to stopped on restart",
                    persistent_info.name
                );
                persistent_info.status = ServerStatus::Stopped;
                corrected_servers += 1;
            }
            ServerStatus::Stopped => {}
        }
    }

    // Persist the corrections only when something actually changed.
    if corrected_servers > 0 {
        registry.save_servers(&persistent_servers).await?;
        log::info!(
            "Corrected {} server statuses after program restart",
            corrected_servers
        );
    }

    // Mirror the persisted registry into the in-memory server map.
    // Scoped block so the write lock is released before the awaits below.
    {
        let mut servers = context.servers.write().map_err(|e| {
            crate::core::error::AppError::Validation(format!("servers lock poisoned: {}", e))
        })?;
        servers.clear();
        for (id, persistent_info) in persistent_servers.iter() {
            let server_info = crate::server::types::ServerInfo::from(persistent_info.clone());
            servers.insert(id.clone(), server_info);
        }
    }

    log::info!(
        "Server system initialized with {} persistent servers",
        persistent_servers.len()
    );
    log::info!(
        "Server Config: Port Range {}-{}, Max Concurrent: {}, Workers: {}",
        config.server.port_range_start,
        config.server.port_range_end,
        config.server.max_concurrent,
        config.server.workers
    );
    log::info!(
        "Logging Config: Max Size {}MB, Archives: {}, Compression: {}, Request Logging: {}",
        config.logging.max_file_size_mb,
        config.logging.max_archive_files,
        config.logging.compress_archives,
        config.logging.log_requests
    );

    // Auto-start servers are only counted/warned about here; the actual
    // starting happens later in `auto_start_servers()`.
    let auto_start_servers = registry.get_auto_start_servers(&persistent_servers);
    if !auto_start_servers.is_empty() {
        log::info!(
            "Found {} servers marked for auto-start",
            auto_start_servers.len()
        );

        if auto_start_servers.len() > config.server.max_concurrent {
            log::warn!(
                "Auto-start servers ({}) exceed max_concurrent ({}), some will be skipped",
                auto_start_servers.len(),
                config.server.max_concurrent
            );
        }
    }

    // Initialize proxy manager
    if config.proxy.enabled {
        // 1. Start proxy system (HTTP + HTTPS)
        if let Err(e) = start_proxy_system(&config).await {
            log::error!("Failed to start proxy system: {}", e);
        } else {
            // 2. Start HTTP redirect (port 80, serves ACME challenges + redirects to HTTPS)
            if let Err(e) = start_http_redirect_server(&config).await {
                log::warn!("Failed to start HTTP redirect: {}", e);
            }

            // Note: Proxy routes for auto-started servers are registered later
            // by create_web_server() in auto_start_servers(), not here.
        }

        log::info!(
            "  DNS: Point *.{} to this server",
            config.server.production_domain
        );

        // 4. Start ACME background provisioning + auto hot-reload
        // ACME runs AFTER proxy is ready (5s delay), provisions cert, then hot-reloads proxy TLS.
        // No manual restart needed — new connections automatically use the LE certificate.
        if config.server.use_lets_encrypt && config.server.production_domain != "localhost" {
            let cert_dir = crate::core::helpers::get_base_dir()
                .map(|b| b.join(&config.server.cert_dir))
                .unwrap_or_else(|_| std::path::PathBuf::from(&config.server.cert_dir));

            // Collect subdomains only from actually registered servers and proxy routes.
            // Every SAN must have a valid DNS record pointing to this server,
            // otherwise Let's Encrypt HTTP-01 validation fails for the entire certificate.
            // "default", "blog" and "myapp" are built-in proxy routes (served directly, not via add_route).
            let mut subdomains: Vec<String> = vec!["default".to_string(), "blog".to_string(), "myapp".to_string()];
            for (_id, persistent_info) in persistent_servers.iter() {
                if !subdomains.contains(&persistent_info.name) {
                    subdomains.push(persistent_info.name.clone());
                }
            }
            let proxy_manager = get_proxy_manager();
            let routes = proxy_manager.get_routes().await;
            for route in &routes {
                if !subdomains.contains(&route.subdomain) {
                    subdomains.push(route.subdomain.clone());
                }
            }

            crate::server::acme::start_acme_background(
                config.server.production_domain.clone(),
                cert_dir,
                config.server.acme_email.clone(),
                false,
                subdomains,
            );
            log::info!(
                "ACME: Background provisioning + auto hot-reload started for {}",
                config.server.production_domain
            );
        }
    } else {
        log::info!("Reverse Proxy disabled in configuration");
    }

    Ok(())
}
232
233pub async fn persist_server_update(server_id: &str, status: crate::server::types::ServerStatus) {
234    let registry = get_persistent_registry();
235    if let Err(e) = registry.update_server_status(server_id, status).await {
236        log::error!("Failed to persist server status update: {}", e);
237    }
238}
239
240pub async fn shutdown_all_servers_on_exit() -> crate::core::error::Result<()> {
241    let config = Config::load().await.unwrap_or_default();
242    let registry = get_persistent_registry();
243    let context = get_shared_context();
244
245    let server_handles: Vec<_> = {
246        let mut handles = context.handles.write().map_err(|e| {
247            crate::core::error::AppError::Validation(format!("handles lock poisoned: {}", e))
248        })?;
249        handles.drain().collect()
250    };
251
252    log::info!("Shutting down {} active servers...", server_handles.len());
253
254    let shutdown_timeout = std::time::Duration::from_secs(config.server.shutdown_timeout);
255
256    for (server_id, handle) in server_handles {
257        log::info!("Stopping server {}", server_id);
258
259        if tokio::time::timeout(shutdown_timeout, handle.stop(true))
260            .await
261            .is_err()
262        {
263            log::warn!("Server {} shutdown timeout, forcing stop", server_id);
264            handle.stop(false).await;
265        }
266
267        // Persist stopped status
268        let _ = registry
269            .update_server_status(&server_id, ServerStatus::Stopped)
270            .await;
271    }
272
273    // Save analytics data before exit
274    crate::server::analytics::save_analytics_on_shutdown();
275
276    log::info!("Server system shutdown complete");
277    Ok(())
278}
279
280pub async fn validate_server_creation(
281    name: &str,
282    port: Option<u16>,
283) -> crate::core::error::Result<()> {
284    let config = Config::load().await?;
285    let context = get_shared_context();
286    let servers = context.servers.read().map_err(|e| {
287        crate::core::error::AppError::Validation(format!("servers lock poisoned: {}", e))
288    })?;
289
290    if servers.len() >= config.server.max_concurrent {
291        return Err(crate::core::error::AppError::Validation(format!(
292            "Server limit reached: {}/{}. Use 'cleanup' command to remove stopped servers.",
293            servers.len(),
294            config.server.max_concurrent
295        )));
296    }
297
298    if let Some(port) = port {
299        if port < config.server.port_range_start || port > config.server.port_range_end {
300            return Err(crate::core::error::AppError::Validation(format!(
301                "Port {} outside configured range {}-{}",
302                port, config.server.port_range_start, config.server.port_range_end
303            )));
304        }
305    }
306
307    if servers.values().any(|s| s.name == name) {
308        return Err(crate::core::error::AppError::Validation(format!(
309            "Server name '{}' already exists",
310            name
311        )));
312    }
313
314    Ok(())
315}
316
317pub async fn get_server_system_stats() -> serde_json::Value {
318    let config = Config::load().await.unwrap_or_default();
319    let context = get_shared_context();
320    let servers = match context.servers.read() {
321        Ok(s) => s,
322        Err(_) => return serde_json::json!({"error": "lock poisoned"}),
323    };
324
325    let running_count = servers
326        .values()
327        .filter(|s| s.status == ServerStatus::Running)
328        .count();
329    let stopped_count = servers
330        .values()
331        .filter(|s| s.status == ServerStatus::Stopped)
332        .count();
333    let failed_count = servers
334        .values()
335        .filter(|s| s.status == ServerStatus::Failed)
336        .count();
337
338    serde_json::json!({
339        "total_servers": servers.len(),
340        "running": running_count,
341        "stopped": stopped_count,
342        "failed": failed_count,
343        "max_concurrent": config.server.max_concurrent,
344        "utilization_percent": (servers.len() as f64 / config.server.max_concurrent as f64 * 100.0),
345        "port_range": format!("{}-{}", config.server.port_range_start, config.server.port_range_end),
346        "available_ports": config.server.port_range_end - config.server.port_range_start + 1,
347        "proxy": {
348            "enabled": config.proxy.enabled,
349            "http_port": config.proxy.port,
350            "https_port": config.proxy.port + config.proxy.https_port_offset,
351            "redirect_port": if is_port_available(80, "0.0.0.0") { None } else { Some(80) }
352        },
353        "config": {
354            "workers_per_server": config.server.workers,
355            "shutdown_timeout_sec": config.server.shutdown_timeout,
356            "startup_delay_ms": config.server.startup_delay_ms,
357            "logging": {
358                "max_file_size_mb": config.logging.max_file_size_mb,
359                "max_archives": config.logging.max_archive_files,
360                "compression": config.logging.compress_archives,
361                "request_logging": config.logging.log_requests,
362                "security_alerts": config.logging.log_security_alerts,
363                "performance_monitoring": config.logging.log_performance
364            }
365        }
366    })
367}
368
369pub async fn auto_start_servers() -> crate::core::error::Result<Vec<String>> {
370    let config = Config::load().await?;
371    let registry = get_persistent_registry();
372    let ctx = get_shared_context();
373    let auto_start_list = {
374        let servers = registry.load_servers().await?;
375        registry.get_auto_start_servers(&servers)
376    };
377
378    if auto_start_list.is_empty() {
379        return Ok(vec![]);
380    }
381
382    let max_to_start = config.server.max_concurrent.min(auto_start_list.len());
383    let mut started_servers = Vec::new();
384
385    for server in auto_start_list.iter().take(max_to_start) {
386        log::info!(
387            "Auto-starting server: {} on port {}",
388            server.name,
389            server.port
390        );
391
392        // Check port availability
393        if !is_port_available(server.port, &config.server.bind_address) {
394            log::warn!(
395                "Port {} not available for server '{}', skipping",
396                server.port,
397                server.name
398            );
399            continue;
400        }
401
402        let server_info = crate::server::types::ServerInfo::from(server.clone());
403
404        // Actually start the web server (bind port, serve HTTP)
405        match crate::server::handlers::web::create_web_server(ctx, server_info.clone(), &config) {
406            Ok(handle) => {
407                // Store handle
408                if let Ok(mut handles) = ctx.handles.write() {
409                    handles.insert(server_info.id.clone(), handle);
410                }
411
412                // Update status in memory
413                if let Ok(mut servers) = ctx.servers.write() {
414                    if let Some(s) = servers.get_mut(&server_info.id) {
415                        s.status = ServerStatus::Running;
416                    }
417                }
418
419                // Persist status
420                let server_id = server_info.id.clone();
421                tokio::spawn(async move {
422                    persist_server_update(&server_id, ServerStatus::Running).await;
423                });
424
425                started_servers.push(format!("{}:{}", server_info.name, server_info.port));
426                log::info!(
427                    "Server '{}' started on http://{}:{}",
428                    server_info.name,
429                    config.server.bind_address,
430                    server_info.port
431                );
432            }
433            Err(e) => {
434                log::error!(
435                    "Failed to auto-start server '{}': {}",
436                    server.name,
437                    e
438                );
439
440                // Mark as failed
441                let server_id = server_info.id.clone();
442                tokio::spawn(async move {
443                    persist_server_update(&server_id, ServerStatus::Failed).await;
444                });
445            }
446        }
447    }
448
449    if auto_start_list.len() > max_to_start {
450        log::warn!(
451            "Skipped {} auto-start servers due to max_concurrent limit of {}",
452            auto_start_list.len() - max_to_start,
453            config.server.max_concurrent
454        );
455    }
456
457    Ok(started_servers)
458}
459
460async fn start_http_redirect_server(config: &Config) -> crate::core::error::Result<()> {
461    let redirect_port = 80;
462    // In Docker, host port 443 maps to container port 3443.
463    // The redirect must use the EXTERNAL port that clients see (443), not the internal one (3443).
464    // EXTERNAL_HTTPS_PORT env var overrides the computed internal port.
465    let target_https_port = std::env::var("EXTERNAL_HTTPS_PORT")
466        .ok()
467        .and_then(|v| v.parse::<u16>().ok())
468        .unwrap_or_else(|| config.proxy.port + config.proxy.https_port_offset);
469
470    if !crate::server::utils::port::is_port_available(redirect_port, "0.0.0.0") {
471        log::warn!(
472            "Port {} already in use - HTTP redirect disabled",
473            redirect_port
474        );
475        return Ok(());
476    }
477
478    log::info!(
479        "Starting HTTP->HTTPS redirect server on port {}",
480        redirect_port
481    );
482
483    // Use std::thread::spawn to avoid Send requirements on the future
484    std::thread::spawn(move || {
485        // Single-threaded tokio runtime for the redirect server
486        let rt = tokio::runtime::Builder::new_current_thread()
487            .enable_all()
488            .build()
489            .expect("Failed to build single-thread runtime for redirect server");
490
491        rt.block_on(async move {
492            let redirect_server =
493                crate::server::redirect::HttpRedirectServer::new(redirect_port, target_https_port);
494
495            if let Err(e) = redirect_server.run().await {
496                log::error!("HTTP redirect server error: {}", e);
497            }
498        });
499    });
500
501    // Brief wait for startup
502    tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
503    log::info!(
504        "HTTP redirect active: Port {} → HTTPS Port {}",
505        redirect_port,
506        target_https_port
507    );
508
509    Ok(())
510}