1use std::collections::HashMap;
2use std::sync::Arc;
3use std::time::{Duration, Instant};
4use tokio::sync::Mutex;
5use tracing::{info, warn};
6
/// Point-in-time snapshot of everything gathered by a `PerformanceTracker`.
///
/// Produced by `PerformanceTracker::get_metrics`; every field is an owned
/// copy, so the snapshot remains valid while the tracker keeps mutating.
#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
    // Elapsed time since the tracker was constructed, measured at
    // snapshot time (`start_time.elapsed()` in `get_metrics`).
    pub startup_time: Duration,
    // Average execution time per command name.
    pub command_execution_times: HashMap<String, Duration>,
    // Process memory statistics (see `get_memory_usage` for sampling).
    pub memory_usage: MemoryUsage,
    // Aggregate async task counters and duration statistics.
    pub async_task_metrics: AsyncTaskMetrics,
    // Number of recorded errors keyed by error-type string.
    pub error_counts: HashMap<String, usize>,
}
16
/// Process memory statistics tracked over the lifetime of the CLI.
/// All values are in bytes, as returned by `get_memory_usage`.
#[derive(Debug, Clone)]
pub struct MemoryUsage {
    // Sampled once when the tracker is created; baseline for growth.
    pub initial_memory: usize,
    // Highest sample observed so far (never below `initial_memory`).
    pub peak_memory: usize,
    // Most recent sample.
    pub current_memory: usize,
    // NOTE(review): incremented once per `update_memory_usage` call,
    // not per heap allocation — the name overstates what is measured.
    pub allocations: usize,
    // Never updated anywhere in this file; stays 0 as written.
    pub deallocations: usize,
}
26
/// Aggregate counters for commands/tasks tracked by the CLI.
#[derive(Debug, Clone)]
pub struct AsyncTaskMetrics {
    // Tasks started (incremented by `start_command`).
    pub total_tasks: usize,
    // Tasks that finished successfully.
    pub completed_tasks: usize,
    // Tasks that finished with an error.
    pub failed_tasks: usize,
    // Mean duration over all finished tasks.
    pub average_task_duration: Duration,
    // Longest observed task duration.
    pub max_task_duration: Duration,
    // Shortest observed task duration. Initialized to a huge sentinel
    // (`Duration::from_secs(u64::MAX)`) until the first task finishes.
    pub min_task_duration: Duration,
    // Tasks started but not yet recorded as finished.
    pub pending_tasks: usize,
}
38
/// Central collector for CLI performance data.
///
/// Interior state lives behind `Arc<tokio::sync::Mutex<_>>`, so the
/// tracker can be cloned cheaply and shared across async tasks; clones
/// observe and mutate the same underlying counters.
pub struct PerformanceTracker {
    // Creation instant; `get_metrics` derives `startup_time` from it.
    start_time: Instant,
    // Raw per-command duration samples; averaged in `get_metrics`.
    command_times: Arc<Mutex<HashMap<String, Vec<Duration>>>>,
    // Shared memory statistics.
    memory_tracker: Arc<Mutex<MemoryUsage>>,
    // Shared async task counters.
    async_tracker: Arc<Mutex<AsyncTaskMetrics>>,
    // Error counts keyed by error-type string.
    error_tracker: Arc<Mutex<HashMap<String, usize>>>,
}
47
48impl PerformanceTracker {
49 pub fn new() -> Self {
51 let initial_memory = Self::get_memory_usage();
52
53 Self {
54 start_time: Instant::now(),
55 command_times: Arc::new(Mutex::new(HashMap::new())),
56 memory_tracker: Arc::new(Mutex::new(MemoryUsage {
57 initial_memory,
58 peak_memory: initial_memory,
59 current_memory: initial_memory,
60 allocations: 0,
61 deallocations: 0,
62 })),
63 async_tracker: Arc::new(Mutex::new(AsyncTaskMetrics {
64 total_tasks: 0,
65 completed_tasks: 0,
66 failed_tasks: 0,
67 average_task_duration: Duration::from_millis(0),
68 max_task_duration: Duration::from_millis(0),
69 min_task_duration: Duration::from_secs(u64::MAX),
70 pending_tasks: 0,
71 })),
72 error_tracker: Arc::new(Mutex::new(HashMap::new())),
73 }
74 }
75
76 pub fn start_command(self: &Arc<Self>, command: &str) -> CommandTracker {
78 let tracker = Arc::clone(self);
79
80 tokio::spawn({
81 let async_tracker = Arc::clone(&self.async_tracker);
82 async move {
83 let mut tracker = async_tracker.lock().await;
84 tracker.total_tasks += 1;
85 tracker.pending_tasks += 1;
86 }
87 });
88
89 CommandTracker {
90 command: command.to_string(),
91 start_time: Instant::now(),
92 tracker,
93 }
94 }
95
96 pub async fn record_command_completion(
98 &self,
99 command: &str,
100 duration: Duration,
101 success: bool,
102 ) {
103 let mut command_times = self.command_times.lock().await;
105 command_times
106 .entry(command.to_string())
107 .or_insert_with(Vec::new)
108 .push(duration);
109
110 let mut async_tracker = self.async_tracker.lock().await;
112 async_tracker.pending_tasks = async_tracker.pending_tasks.saturating_sub(1);
113
114 if success {
115 async_tracker.completed_tasks += 1;
116 } else {
117 async_tracker.failed_tasks += 1;
118 }
119
120 if duration > async_tracker.max_task_duration {
122 async_tracker.max_task_duration = duration;
123 }
124 if duration < async_tracker.min_task_duration {
125 async_tracker.min_task_duration = duration;
126 }
127
128 let total_completed = async_tracker.completed_tasks + async_tracker.failed_tasks;
130 if total_completed > 0 {
131 let total_duration = command_times.values().flatten().sum::<Duration>();
132 async_tracker.average_task_duration = total_duration / total_completed as u32;
133 }
134 }
135
136 pub async fn record_error(&self, error_type: &str) {
138 let mut error_tracker = self.error_tracker.lock().await;
139 *error_tracker.entry(error_type.to_string()).or_insert(0) += 1;
140 }
141
    /// Samples current process memory and folds it into the shared
    /// `MemoryUsage` stats (current value and running peak).
    pub async fn update_memory_usage(&self) {
        let current_memory = Self::get_memory_usage();
        let mut memory_tracker = self.memory_tracker.lock().await;

        memory_tracker.current_memory = current_memory;
        if current_memory > memory_tracker.peak_memory {
            memory_tracker.peak_memory = current_memory;
        }
        // NOTE(review): counts sampling calls, not heap allocations —
        // the field name overstates what is actually measured.
        memory_tracker.allocations += 1;
    }
153
154 pub async fn get_metrics(&self) -> PerformanceMetrics {
156 let command_times = self.command_times.lock().await;
157 let memory_usage = self.memory_tracker.lock().await;
158 let async_metrics = self.async_tracker.lock().await;
159 let error_counts = self.error_tracker.lock().await;
160
161 let mut command_execution_times = HashMap::new();
163 for (command, times) in command_times.iter() {
164 let avg_time = times.iter().sum::<Duration>() / times.len() as u32;
165 command_execution_times.insert(command.clone(), avg_time);
166 }
167
168 PerformanceMetrics {
169 startup_time: self.start_time.elapsed(),
170 command_execution_times,
171 memory_usage: memory_usage.clone(),
172 async_task_metrics: async_metrics.clone(),
173 error_counts: error_counts.clone(),
174 }
175 }
176
177 fn get_memory_usage() -> usize {
179 #[cfg(target_os = "linux")]
180 {
181 if let Ok(contents) = std::fs::read_to_string("/proc/self/statm") {
182 if let Some(first) = contents.split_whitespace().next() {
183 if let Ok(pages) = first.parse::<usize>() {
184 return pages * 4096; }
186 }
187 }
188 }
189
190 #[cfg(target_os = "macos")]
191 {
192 use std::mem;
194 use std::ptr;
195
196 extern "C" {
197 fn task_info(
198 task: u32,
199 flavor: u32,
200 task_info: *mut u8,
201 task_info_count: *mut u32,
202 ) -> i32;
203 fn mach_task_self() -> u32;
204 }
205
206 const MACH_TASK_BASIC_INFO: u32 = 20;
207
208 #[repr(C)]
209 struct TaskBasicInfo {
210 suspend_count: u32,
211 virtual_size: u64,
212 resident_size: u64,
213 user_time: [u32; 2],
214 system_time: [u32; 2],
215 policy: u32,
216 }
217
218 unsafe {
219 let mut info: TaskBasicInfo = mem::zeroed();
220 let mut count = (mem::size_of::<TaskBasicInfo>() / mem::size_of::<u32>()) as u32;
221
222 if task_info(
223 mach_task_self(),
224 MACH_TASK_BASIC_INFO,
225 &mut info as *mut _ as *mut u8,
226 &mut count,
227 ) == 0
228 {
229 return info.resident_size as usize;
230 }
231 }
232 }
233
234 8 * 1024 * 1024 }
237
238 pub async fn generate_report(&self) -> String {
240 let metrics = self.get_metrics().await;
241 let mut report = String::new();
242
243 report.push_str("=== CLI Performance Report ===\n\n");
244
245 report.push_str(&format!(
247 "Startup Time: {:.2}ms\n",
248 metrics.startup_time.as_secs_f64() * 1000.0
249 ));
250
251 report.push_str("\nCommand Execution Times:\n");
253 for (command, duration) in &metrics.command_execution_times {
254 report.push_str(&format!(
255 " {}: {:.2}ms\n",
256 command,
257 duration.as_secs_f64() * 1000.0
258 ));
259 }
260
261 report.push_str("\nMemory Usage:\n");
263 report.push_str(&format!(
264 " Initial: {:.2} MB\n",
265 metrics.memory_usage.initial_memory as f64 / 1024.0 / 1024.0
266 ));
267 report.push_str(&format!(
268 " Peak: {:.2} MB\n",
269 metrics.memory_usage.peak_memory as f64 / 1024.0 / 1024.0
270 ));
271 report.push_str(&format!(
272 " Current: {:.2} MB\n",
273 metrics.memory_usage.current_memory as f64 / 1024.0 / 1024.0
274 ));
275
276 report.push_str("\nAsync Task Performance:\n");
278 report.push_str(&format!(
279 " Total Tasks: {}\n",
280 metrics.async_task_metrics.total_tasks
281 ));
282 report.push_str(&format!(
283 " Completed: {}\n",
284 metrics.async_task_metrics.completed_tasks
285 ));
286 report.push_str(&format!(
287 " Failed: {}\n",
288 metrics.async_task_metrics.failed_tasks
289 ));
290 report.push_str(&format!(
291 " Pending: {}\n",
292 metrics.async_task_metrics.pending_tasks
293 ));
294 report.push_str(&format!(
295 " Average Duration: {:.2}ms\n",
296 metrics
297 .async_task_metrics
298 .average_task_duration
299 .as_secs_f64()
300 * 1000.0
301 ));
302 report.push_str(&format!(
303 " Max Duration: {:.2}ms\n",
304 metrics.async_task_metrics.max_task_duration.as_secs_f64() * 1000.0
305 ));
306 report.push_str(&format!(
307 " Min Duration: {:.2}ms\n",
308 metrics.async_task_metrics.min_task_duration.as_secs_f64() * 1000.0
309 ));
310
311 if !metrics.error_counts.is_empty() {
313 report.push_str("\nError Statistics:\n");
314 for (error_type, count) in &metrics.error_counts {
315 report.push_str(&format!(" {}: {}\n", error_type, count));
316 }
317 }
318
319 report.push_str("\nPerformance Recommendations:\n");
321
322 if metrics.startup_time > Duration::from_millis(500) {
323 report.push_str(" - Startup time is high (>500ms). Consider lazy loading or reducing dependencies.\n");
324 }
325
326 if let Some(max_cmd_time) = metrics.command_execution_times.values().max() {
327 if *max_cmd_time > Duration::from_millis(100) {
328 report.push_str(" - Some commands take >100ms. Consider async optimization.\n");
329 }
330 }
331
332 let memory_growth = metrics.memory_usage.peak_memory - metrics.memory_usage.initial_memory;
333 if memory_growth > 10 * 1024 * 1024 {
334 report.push_str(" - High memory growth detected. Check for memory leaks.\n");
336 }
337
338 if metrics.async_task_metrics.failed_tasks > 0 {
339 let failure_rate = metrics.async_task_metrics.failed_tasks as f64
340 / metrics.async_task_metrics.total_tasks as f64;
341 if failure_rate > 0.1 {
342 report.push_str(" - High async task failure rate. Improve error handling.\n");
344 }
345 }
346
347 report
348 }
349}
350
/// Clones share state: every `Arc` field is reference-counted, so a
/// clone observes and mutates the same counters as the original.
/// (All fields are `Clone`, so a derive would behave identically; the
/// manual impl just makes the cheap, shared-state semantics explicit.)
impl Clone for PerformanceTracker {
    fn clone(&self) -> Self {
        Self {
            start_time: self.start_time,
            command_times: Arc::clone(&self.command_times),
            memory_tracker: Arc::clone(&self.memory_tracker),
            async_tracker: Arc::clone(&self.async_tracker),
            error_tracker: Arc::clone(&self.error_tracker),
        }
    }
}
362
/// `Default` delegates to `PerformanceTracker::new` so the tracker works
/// with `#[derive(Default)]` containers and `..Default::default()`.
impl Default for PerformanceTracker {
    fn default() -> Self {
        Self::new()
    }
}
368
/// Guard returned by `PerformanceTracker::start_command`; consumed by
/// `complete` / `complete_with_error` to record the command's outcome.
pub struct CommandTracker {
    // Command name, used as the key in the duration table.
    command: String,
    // When tracking began; elapsed time is measured from here.
    start_time: Instant,
    // Shared tracker the results are reported back to.
    tracker: Arc<PerformanceTracker>,
}
375
376impl CommandTracker {
377 pub async fn complete(self, success: bool) {
379 let duration = self.start_time.elapsed();
380
381 self.tracker
382 .record_command_completion(&self.command, duration, success)
383 .await;
384 self.tracker.update_memory_usage().await;
385
386 if duration > Duration::from_millis(100) {
388 warn!(
389 "Command '{}' took {:.2}ms",
390 self.command,
391 duration.as_secs_f64() * 1000.0
392 );
393 } else {
394 info!(
395 "Command '{}' completed in {:.2}ms",
396 self.command,
397 duration.as_secs_f64() * 1000.0
398 );
399 }
400 }
401
402 pub async fn complete_with_error(self, error_type: &str) {
404 self.tracker.record_error(error_type).await;
405 self.complete(false).await;
406 }
407}
408
/// Zero-sized namespace for async execution helpers (batching, retry).
pub struct AsyncOptimizer;
411
412impl AsyncOptimizer {
413 pub async fn batch_execute<F, T>(
415 mut tasks: Vec<F>,
416 ) -> Vec<Result<T, Box<dyn std::error::Error + Send + Sync>>>
417 where
418 F: std::future::Future<Output = Result<T, Box<dyn std::error::Error + Send + Sync>>>
419 + Send
420 + 'static,
421 T: Send + 'static,
422 {
423 let batch_size = std::cmp::min(tasks.len(), 10); let mut results = Vec::with_capacity(tasks.len());
425
426 while !tasks.is_empty() {
427 let chunk_len = batch_size.min(tasks.len());
428 let chunk: Vec<_> = tasks.drain(..chunk_len).collect();
429 let chunk_results = futures::future::join_all(chunk).await;
430 results.extend(chunk_results);
431 }
432
433 results
434 }
435
436 pub async fn execute_with_retry<F, T>(
438 task: F,
439 max_retries: usize,
440 timeout: Duration,
441 ) -> Result<T, Box<dyn std::error::Error + Send + Sync>>
442 where
443 F: Fn() -> std::pin::Pin<
444 Box<
445 dyn std::future::Future<
446 Output = Result<T, Box<dyn std::error::Error + Send + Sync>>,
447 > + Send,
448 >,
449 > + Send
450 + Sync,
451 {
452 for attempt in 0..=max_retries {
453 match tokio::time::timeout(timeout, task()).await {
454 Ok(Ok(result)) => return Ok(result),
455 Ok(Err(e)) => {
456 if attempt == max_retries {
457 return Err(e);
458 }
459 let delay = Duration::from_millis(100 * (1 << attempt));
461 tokio::time::sleep(delay).await;
462 }
463 Err(_) => {
464 if attempt == max_retries {
465 return Err("Task timed out".into());
466 }
467 }
468 }
469 }
470
471 unreachable!()
472 }
473}
474
#[cfg(test)]
mod tests {
    use super::*;

    /// End-to-end smoke test: start a command, let it run briefly,
    /// finish it successfully, and check both the duration table and
    /// the completion counter.
    #[tokio::test]
    async fn test_performance_tracker() {
        let tracker = Arc::new(PerformanceTracker::new());

        let cmd_tracker = tracker.start_command("test_command");
        // Give the command a measurable, non-zero duration.
        tokio::time::sleep(Duration::from_millis(10)).await;
        cmd_tracker.complete(true).await;

        let metrics = tracker.get_metrics().await;
        assert!(metrics.command_execution_times.contains_key("test_command"));
        assert_eq!(metrics.async_task_metrics.completed_tasks, 1);
    }

    /// Three already-ready futures through the batcher: all results
    /// come back and all succeed.
    #[tokio::test]
    async fn test_async_optimizer() {
        use std::pin::Pin;
        // Boxed, pinned trait-object futures so the Vec is homogeneous.
        let tasks: Vec<
            Pin<
                Box<
                    dyn std::future::Future<
                        Output = Result<i32, Box<dyn std::error::Error + Send + Sync>>,
                    > + Send,
                >,
            >,
        > = vec![
            Box::pin(async { Ok::<i32, Box<dyn std::error::Error + Send + Sync>>(1) }),
            Box::pin(async { Ok::<i32, Box<dyn std::error::Error + Send + Sync>>(2) }),
            Box::pin(async { Ok::<i32, Box<dyn std::error::Error + Send + Sync>>(3) }),
        ];

        let results = AsyncOptimizer::batch_execute(tasks).await;
        assert_eq!(results.len(), 3);
        assert!(results.iter().all(|r| r.is_ok()));
    }
514}