// rush_sync_server/server/shared.rs
use crate::core::config::Config;
2use crate::proxy::ProxyManager;
3use crate::server::persistence::ServerRegistry;
4use crate::server::types::{ServerContext, ServerStatus};
5use crate::server::utils::port::is_port_available;
6use std::sync::{Arc, OnceLock};
7
8static SHARED_CONTEXT: OnceLock<ServerContext> = OnceLock::new();
9static PERSISTENT_REGISTRY: OnceLock<ServerRegistry> = OnceLock::new();
10static PROXY_MANAGER: OnceLock<Arc<ProxyManager>> = OnceLock::new();
11
12pub fn get_shared_context() -> &'static ServerContext {
13 SHARED_CONTEXT.get_or_init(ServerContext::default)
14}
15
16pub fn get_persistent_registry() -> &'static ServerRegistry {
17 PERSISTENT_REGISTRY
18 .get_or_init(|| ServerRegistry::new().expect("Failed to initialize server registry"))
19}
20
21pub fn get_proxy_manager() -> &'static Arc<ProxyManager> {
22 PROXY_MANAGER.get_or_init(|| {
23 let config = tokio::task::block_in_place(|| {
25 tokio::runtime::Handle::current().block_on(async {
26 crate::core::config::Config::load()
27 .await
28 .unwrap_or_default()
29 })
30 });
31
32 Arc::new(ProxyManager::new(config.proxy))
33 })
34}
35
36pub async fn initialize_server_system() -> crate::core::error::Result<()> {
37 let config = Config::load().await?;
38 let registry = get_persistent_registry();
39 let context = get_shared_context();
40
41 let mut persistent_servers = registry.load_servers().await?;
42 let mut corrected_servers = 0;
43
44 for (_server_id, persistent_info) in persistent_servers.iter_mut() {
45 if persistent_info.status == ServerStatus::Running {
46 if !is_port_available(persistent_info.port) {
47 log::warn!(
48 "Server {} claims to be running on port {}, but port is occupied",
49 persistent_info.name,
50 persistent_info.port
51 );
52 persistent_info.status = ServerStatus::Failed;
53 corrected_servers += 1;
54 } else {
55 log::info!(
56 "Server {} was running but is no longer active, correcting status",
57 persistent_info.name
58 );
59 persistent_info.status = ServerStatus::Stopped;
60 corrected_servers += 1;
61 }
62 }
63 }
64
65 if corrected_servers > 0 {
66 registry.save_servers(&persistent_servers).await?;
67 log::info!(
68 "Corrected {} server statuses after program restart",
69 corrected_servers
70 );
71 }
72
73 {
74 let mut servers = context.servers.write().unwrap();
75 servers.clear();
76 for (id, persistent_info) in persistent_servers.iter() {
77 let server_info = crate::server::types::ServerInfo::from(persistent_info.clone());
78 servers.insert(id.clone(), server_info);
79 }
80 }
81
82 log::info!(
83 "Server system initialized with {} persistent servers",
84 persistent_servers.len()
85 );
86 log::info!(
87 "Server Config: Port Range {}-{}, Max Concurrent: {}, Workers: {}",
88 config.server.port_range_start,
89 config.server.port_range_end,
90 config.server.max_concurrent,
91 config.server.workers
92 );
93 log::info!(
94 "Logging Config: Max Size {}MB, Archives: {}, Compression: {}, Request Logging: {}",
95 config.logging.max_file_size_mb,
96 config.logging.max_archive_files,
97 config.logging.compress_archives,
98 config.logging.log_requests
99 );
100
101 let auto_start_servers = registry.get_auto_start_servers(&persistent_servers);
102 if !auto_start_servers.is_empty() {
103 log::info!(
104 "Found {} servers marked for auto-start",
105 auto_start_servers.len()
106 );
107
108 if auto_start_servers.len() > config.server.max_concurrent {
109 log::warn!(
110 "Auto-start servers ({}) exceed max_concurrent ({}), some will be skipped",
111 auto_start_servers.len(),
112 config.server.max_concurrent
113 );
114 }
115 }
116
117 if config.proxy.enabled {
119 let proxy_manager = get_proxy_manager();
120
121 if let Err(e) = Arc::clone(proxy_manager).start_proxy_server().await {
123 log::error!("Failed to start proxy server: {}", e);
124 } else {
125 log::info!("Reverse Proxy started on port {}", config.proxy.port);
126 }
127
128 for (_id, persistent_info) in persistent_servers.iter() {
130 if persistent_info.status == ServerStatus::Running {
131 if let Err(e) = proxy_manager
132 .add_route(
133 &persistent_info.name,
134 &persistent_info.id,
135 persistent_info.port,
136 )
137 .await
138 {
139 log::error!(
140 "Failed to register server {} with proxy: {}",
141 persistent_info.name,
142 e
143 );
144 }
145 }
146 }
147 } else {
148 log::info!("Reverse Proxy disabled in configuration");
149 }
150
151 log::info!(
152 "Server system initialized with {} persistent servers",
153 persistent_servers.len()
154 );
155
156 if config.proxy.enabled {
158 log::info!("Proxy Usage:");
159 log::info!(" 1. Start a server: cargo run server create myapp --port 8080");
160 log::info!(" 2. Access via: http://myapp.localhost");
161 log::info!(" 3. Add to /etc/hosts: 127.0.0.1 myapp.localhost");
162 }
163
164 Ok(())
165}
166
167pub async fn persist_server_update(server_id: &str, status: crate::server::types::ServerStatus) {
168 let registry = get_persistent_registry();
169 if let Err(e) = registry.update_server_status(server_id, status).await {
170 log::error!("Failed to persist server status update: {}", e);
171 }
172}
173
174pub async fn shutdown_all_servers_on_exit() -> crate::core::error::Result<()> {
175 let config = Config::load().await.unwrap_or_default();
176 let registry = get_persistent_registry();
177 let context = get_shared_context();
178
179 let server_handles: Vec<_> = {
180 let mut handles = context.handles.write().unwrap();
181 handles.drain().collect()
182 };
183
184 log::info!("Shutting down {} active servers...", server_handles.len());
185
186 let shutdown_timeout = std::time::Duration::from_secs(config.server.shutdown_timeout);
187
188 for (server_id, handle) in server_handles {
189 log::info!("Stopping server {}", server_id);
190
191 if tokio::time::timeout(shutdown_timeout, handle.stop(true))
192 .await
193 .is_err()
194 {
195 log::warn!("Server {} shutdown timeout, forcing stop", server_id);
196 handle.stop(false).await;
197 }
198
199 let _ = registry
201 .update_server_status(&server_id, ServerStatus::Stopped)
202 .await;
203 }
204
205 log::info!("Server system shutdown complete");
206 Ok(())
207}
208
209pub async fn validate_server_creation(
210 name: &str,
211 port: Option<u16>,
212) -> crate::core::error::Result<()> {
213 let config = Config::load().await?;
214 let context = get_shared_context();
215 let servers = context.servers.read().unwrap();
216
217 if servers.len() >= config.server.max_concurrent {
218 return Err(crate::core::error::AppError::Validation(format!(
219 "Server limit reached: {}/{}. Use 'cleanup' command to remove stopped servers.",
220 servers.len(),
221 config.server.max_concurrent
222 )));
223 }
224
225 if let Some(port) = port {
226 if port < config.server.port_range_start || port > config.server.port_range_end {
227 return Err(crate::core::error::AppError::Validation(format!(
228 "Port {} outside configured range {}-{}",
229 port, config.server.port_range_start, config.server.port_range_end
230 )));
231 }
232 }
233
234 if servers.values().any(|s| s.name == name) {
235 return Err(crate::core::error::AppError::Validation(format!(
236 "Server name '{}' already exists",
237 name
238 )));
239 }
240
241 Ok(())
242}
243
244pub async fn get_server_system_stats() -> serde_json::Value {
245 let config = Config::load().await.unwrap_or_default();
246 let context = get_shared_context();
247 let servers = context.servers.read().unwrap();
248
249 let running_count = servers
250 .values()
251 .filter(|s| s.status == ServerStatus::Running)
252 .count();
253 let stopped_count = servers
254 .values()
255 .filter(|s| s.status == ServerStatus::Stopped)
256 .count();
257 let failed_count = servers
258 .values()
259 .filter(|s| s.status == ServerStatus::Failed)
260 .count();
261
262 serde_json::json!({
263 "total_servers": servers.len(),
264 "running": running_count,
265 "stopped": stopped_count,
266 "failed": failed_count,
267 "max_concurrent": config.server.max_concurrent,
268 "utilization_percent": (servers.len() as f64 / config.server.max_concurrent as f64 * 100.0),
269 "port_range": format!("{}-{}", config.server.port_range_start, config.server.port_range_end),
270 "available_ports": config.server.port_range_end - config.server.port_range_start + 1,
271 "config": {
272 "workers_per_server": config.server.workers,
273 "shutdown_timeout_sec": config.server.shutdown_timeout,
274 "startup_delay_ms": config.server.startup_delay_ms,
275 "logging": {
276 "max_file_size_mb": config.logging.max_file_size_mb,
277 "max_archives": config.logging.max_archive_files,
278 "compression": config.logging.compress_archives,
279 "request_logging": config.logging.log_requests,
280 "security_alerts": config.logging.log_security_alerts,
281 "performance_monitoring": config.logging.log_performance
282 }
283 }
284 })
285}
286
287pub async fn auto_start_servers() -> crate::core::error::Result<Vec<String>> {
288 let config = Config::load().await?;
289 let registry = get_persistent_registry();
290 let auto_start_servers = {
291 let servers = registry.load_servers().await?;
292 registry.get_auto_start_servers(&servers)
293 };
294
295 if auto_start_servers.is_empty() {
296 return Ok(vec![]);
297 }
298
299 let max_to_start = config.server.max_concurrent.min(auto_start_servers.len());
300 let mut started_servers = Vec::new();
301
302 for server in auto_start_servers.iter().take(max_to_start) {
303 log::info!(
304 "Auto-starting server: {} on port {}",
305 server.name,
306 server.port
307 );
308 started_servers.push(format!("{}:{}", server.name, server.port));
309 }
310
311 if auto_start_servers.len() > max_to_start {
312 log::warn!(
313 "Skipped {} auto-start servers due to max_concurrent limit of {}",
314 auto_start_servers.len() - max_to_start,
315 config.server.max_concurrent
316 );
317 }
318
319 Ok(started_servers)
320}