qudag_cli/startup.rs

use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::OnceCell;
use tracing::{debug, info, warn};

use crate::config::NodeConfig;
use crate::performance::PerformanceTracker;

/// Lazy-initialized CLI resources
pub struct CliResources {
    pub config: Arc<NodeConfig>,
    pub performance_tracker: Arc<PerformanceTracker>,
}

/// Global CLI resources singleton
static CLI_RESOURCES: OnceCell<CliResources> = OnceCell::const_new();

/// Fast startup optimization
pub struct StartupOptimizer {
    startup_time: Instant,
}

impl Default for StartupOptimizer {
    fn default() -> Self {
        Self::new()
    }
}

impl StartupOptimizer {
    /// Create new startup optimizer
    pub fn new() -> Self {
        Self {
            startup_time: Instant::now(),
        }
    }

    /// Initialize CLI with optimized startup
    pub async fn initialize(&self) -> Result<Arc<CliResources>, StartupError> {
        let init_start = Instant::now();

        // Use OnceCell for singleton initialization
        let resources = CLI_RESOURCES
            .get_or_init(|| async { self.initialize_resources().await })
            .await;

        let init_duration = init_start.elapsed();
        if init_duration > Duration::from_millis(100) {
            warn!(
                "CLI initialization took {:.2}ms",
                init_duration.as_secs_f64() * 1000.0
            );
        } else {
            debug!(
                "CLI initialized in {:.2}ms",
                init_duration.as_secs_f64() * 1000.0
            );
        }

        Ok(Arc::new(CliResources {
            config: resources.config.clone(),
            performance_tracker: resources.performance_tracker.clone(),
        }))
    }

    /// Initialize resources with lazy loading
    async fn initialize_resources(&self) -> CliResources {
        let config_start = Instant::now();

        // Initialize config with defaults (fast path)
        let config = Arc::new(NodeConfig::default());

        debug!(
            "Config loaded in {:.2}ms",
            config_start.elapsed().as_secs_f64() * 1000.0
        );

        // Initialize performance tracker
        let perf_start = Instant::now();
        let performance_tracker = Arc::new(PerformanceTracker::new());

        debug!(
            "Performance tracker initialized in {:.2}ms",
            perf_start.elapsed().as_secs_f64() * 1000.0
        );

        CliResources {
            config,
            performance_tracker,
        }
    }

    /// Fast logging setup with minimal overhead
    pub fn setup_logging(&self) -> Result<(), StartupError> {
        let log_start = Instant::now();

        // Use compact format for better performance; try_init avoids a panic
        // if a global subscriber has already been installed.
        tracing_subscriber::fmt()
            .compact()
            .with_target(false) // Reduce log overhead
            .with_thread_ids(false) // Reduce log overhead
            .with_file(false) // Reduce log overhead for CLI
            .try_init()
            .map_err(|e| StartupError::Logging(e.to_string()))?;

        debug!(
            "Logging setup in {:.2}ms",
            log_start.elapsed().as_secs_f64() * 1000.0
        );
        Ok(())
    }

    /// Pre-warm commonly used components
    pub async fn pre_warm(&self) -> Result<(), StartupError> {
        let warm_start = Instant::now();

        // Pre-allocate common data structures
        let _temp_hashmap: std::collections::HashMap<String, String> =
            std::collections::HashMap::with_capacity(16);

        // Pre-warm tokio runtime
        tokio::task::yield_now().await;

        debug!(
            "Pre-warming completed in {:.2}ms",
            warm_start.elapsed().as_secs_f64() * 1000.0
        );
        Ok(())
    }

    /// Get total startup time
    pub fn get_startup_time(&self) -> Duration {
        self.startup_time.elapsed()
    }
}
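// Usage sketch (illustrative only; the real entry-point wiring lives outside
// this module): configure logging first so later phases can emit diagnostics,
// pre-warm the runtime, then initialize the shared resources exactly once.
#[allow(dead_code)]
async fn example_startup() -> Result<Arc<CliResources>, StartupError> {
    let optimizer = StartupOptimizer::new();
    optimizer.setup_logging()?;
    optimizer.pre_warm().await?;
    let resources = optimizer.initialize().await?;
    debug!("total startup time: {:?}", optimizer.get_startup_time());
    Ok(resources)
}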

/// Optimized command execution with caching
pub struct CommandExecutor {
    resources: Arc<CliResources>,
    command_cache: std::sync::Mutex<lru::LruCache<String, CachedResult>>,
}

#[derive(Clone)]
struct CachedResult {
    result: String,
    timestamp: Instant,
    ttl: Duration,
}

impl CommandExecutor {
    /// Create new command executor
    pub fn new(resources: Arc<CliResources>) -> Self {
        Self {
            resources,
            command_cache: std::sync::Mutex::new(lru::LruCache::new(
                std::num::NonZeroUsize::new(32).unwrap(),
            )),
        }
    }

    /// Execute command with caching
    pub async fn execute_cached<F, R>(
        &self,
        command_key: &str,
        ttl: Duration,
        executor: F,
    ) -> Result<R, Box<dyn std::error::Error + Send + Sync>>
    where
        F: std::future::Future<Output = Result<R, Box<dyn std::error::Error + Send + Sync>>>,
        R: Clone + std::fmt::Debug + serde::Serialize + serde::de::DeserializeOwned,
    {
        let cache_key = command_key.to_string();

        // Check cache first
        if let Some(cached) = self.get_from_cache(&cache_key) {
            if cached.timestamp.elapsed() < cached.ttl {
                debug!("Cache hit for command: {}", command_key);
                return serde_json::from_str(&cached.result)
                    .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
            }
        }

        // Execute command
        let cmd_tracker = self
            .resources
            .performance_tracker
            .start_command(command_key);

        match executor.await {
            Ok(result) => {
                cmd_tracker.complete(true).await;

                // Cache the result
                self.cache_result(&cache_key, &result, ttl)?;

                Ok(result)
            }
            Err(e) => {
                cmd_tracker.complete_with_error("execution_error").await;
                Err(e)
            }
        }
    }

    /// Get result from cache
    fn get_from_cache(&self, key: &str) -> Option<CachedResult> {
        let mut cache = self.command_cache.lock().ok()?;
        cache.get(key).cloned()
    }

    /// Cache execution result
    fn cache_result<R>(
        &self,
        key: &str,
        result: &R,
        ttl: Duration,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>>
    where
        R: serde::Serialize,
    {
        let serialized = serde_json::to_string(result)?;
        let cached_result = CachedResult {
            result: serialized,
            timestamp: Instant::now(),
            ttl,
        };

        if let Ok(mut cache) = self.command_cache.lock() {
            cache.put(key.to_string(), cached_result);
        }

        Ok(())
    }

    /// Clear cache
    pub fn clear_cache(&self) {
        if let Ok(mut cache) = self.command_cache.lock() {
            cache.clear();
        }
    }
}
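// Caching sketch (the "status" command string and its payload are illustrative
// assumptions, not part of this module): identical invocations within the TTL
// are served from the LRU cache instead of re-running the future.
#[allow(dead_code)]
async fn example_cached_status(
    resources: Arc<CliResources>,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    let executor = CommandExecutor::new(resources);
    executor
        .execute_cached("status", Duration::from_secs(5), async {
            // The real handler would query the node; a literal stands in here.
            Ok::<String, Box<dyn std::error::Error + Send + Sync>>("node: running".to_string())
        })
        .await
}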

/// Resource management for efficient cleanup
pub struct ResourceManager {
    cleanup_tasks: Vec<tokio::task::JoinHandle<()>>,
}

impl Default for ResourceManager {
    fn default() -> Self {
        Self::new()
    }
}

impl ResourceManager {
    /// Create new resource manager
    pub fn new() -> Self {
        Self {
            cleanup_tasks: Vec::new(),
        }
    }

    /// Register cleanup task
    pub fn register_cleanup<F>(&mut self, cleanup: F)
    where
        F: std::future::Future<Output = ()> + Send + 'static,
    {
        let handle = tokio::spawn(cleanup);
        self.cleanup_tasks.push(handle);
    }

    /// Shutdown all resources gracefully
    pub async fn shutdown(self) {
        info!("Shutting down CLI resources...");

        // Cancel all cleanup tasks
        for handle in self.cleanup_tasks {
            handle.abort();
        }

        // Give tasks time to clean up
        tokio::time::sleep(Duration::from_millis(100)).await;

        info!("CLI resources shutdown complete");
    }
}
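// Shutdown sketch (the cleanup body is an illustrative assumption): registered
// futures are spawned as background tasks immediately and aborted on shutdown,
// so they should be written to tolerate cancellation.
#[allow(dead_code)]
async fn example_shutdown() {
    let mut manager = ResourceManager::new();
    manager.register_cleanup(async {
        debug!("background maintenance task running");
    });
    manager.shutdown().await;
}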

/// Startup error types
#[derive(Debug, thiserror::Error)]
pub enum StartupError {
    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Logging setup error: {0}")]
    Logging(String),

    #[error("Resource initialization error: {0}")]
    Resource(String),

    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
}

/// Async runtime optimization
pub fn optimize_runtime() -> tokio::runtime::Runtime {
    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(2) // Limit threads for CLI
        .thread_name("qudag-cli")
        .thread_stack_size(2 * 1024 * 1024) // 2MB stack
        .enable_all()
        .build()
        .expect("Failed to create tokio runtime")
}
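// Entry-point sketch (the CLI's real `main` lives outside this module; this is
// an assumption about how the trimmed runtime would be driven): build it once
// and run the async dispatch with block_on instead of using #[tokio::main].
#[allow(dead_code)]
fn example_blocking_entry() {
    let runtime = optimize_runtime();
    runtime.block_on(async {
        // Command dispatch would run here.
        tokio::task::yield_now().await;
    });
}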

/// Memory-efficient command parsing
pub fn optimize_clap_parser() -> clap::Command {
    use clap::{Arg, ArgAction, Command};

    // Pre-allocate command structure for better performance
    Command::new("qudag")
        .version(env!("CARGO_PKG_VERSION"))
        .about("QuDAG node operation and management CLI")
        .arg_required_else_help(true)
        .subcommand_required(true)
        .disable_help_subcommand(true) // Reduce memory usage
        .disable_version_flag(false)
        .subcommands([
            Command::new("start").about("Start the QuDAG node").args([
                Arg::new("data-dir")
                    .long("data-dir")
                    .help("Data directory")
                    .value_name("DIR"),
                Arg::new("port")
                    .long("port")
                    .help("Network port")
                    .value_name("PORT")
                    .value_parser(clap::value_parser!(u16)),
                Arg::new("peers")
                    .long("peers")
                    .help("Initial peers")
                    .value_name("PEERS")
                    .action(ArgAction::Append),
            ]),
            Command::new("stop").about("Stop the QuDAG node"),
            Command::new("status").about("Show node status"),
            Command::new("peer")
                .about("Peer management commands")
                .subcommand_required(true)
                .subcommands([
                    Command::new("list").about("List all peers"),
                    Command::new("add").about("Add a new peer").arg(
                        Arg::new("address")
                            .help("Peer address")
                            .required(true)
                            .value_name("ADDRESS"),
                    ),
                    Command::new("remove").about("Remove a peer").arg(
                        Arg::new("address")
                            .help("Peer address")
                            .required(true)
                            .value_name("ADDRESS"),
                    ),
                ]),
            Command::new("network")
                .about("Network management commands")
                .subcommand_required(true)
                .subcommands([
                    Command::new("stats").about("Display network statistics"),
                    Command::new("test").about("Test network connectivity"),
                ]),
            Command::new("dag").about("DAG visualization").args([
                Arg::new("output")
                    .long("output")
                    .help("Output file")
                    .value_name("FILE"),
                Arg::new("format")
                    .long("format")
                    .help("Output format")
                    .value_name("FORMAT"),
            ]),
        ])
}
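// Parsing sketch (the argument values below are examples only): a dispatcher
// would match on the subcommand name and pull typed values from the matches.
#[allow(dead_code)]
fn example_parse_start_args() {
    let matches = optimize_clap_parser().get_matches_from(["qudag", "start", "--port", "8000"]);
    if let Some(("start", sub)) = matches.subcommand() {
        let port = sub.get_one::<u16>("port").copied();
        debug!("starting node on port {:?}", port);
    }
}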

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_startup_optimizer() {
        let optimizer = StartupOptimizer::new();
        let _resources = optimizer.initialize().await.unwrap();
        assert!(optimizer.get_startup_time() < Duration::from_millis(500));
    }

    #[tokio::test]
    async fn test_command_executor() {
        let optimizer = StartupOptimizer::new();
        let resources = optimizer.initialize().await.unwrap();
        let executor = CommandExecutor::new(resources);

        let result = executor
            .execute_cached("test_command", Duration::from_secs(60), async {
                Ok::<String, Box<dyn std::error::Error + Send + Sync>>("test_result".to_string())
            })
            .await
            .unwrap();

        assert_eq!(result, "test_result");
    }

    #[test]
    fn test_optimized_clap_parser() {
        let cmd = optimize_clap_parser();
        assert_eq!(cmd.get_name(), "qudag");
        assert!(cmd.get_subcommands().count() > 0);
    }
}