scirs2_core/profiling/advanced.rs

//! Advanced profiling capabilities for Beta 2

use crate::profiling::entries::{MemoryEntry, TimingEntry};
use crate::profiling::profiler::Profiler;
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::fs::File;
use std::io::{BufWriter, Write};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

/// Flame graph data structure for visualizing call hierarchies
#[derive(Debug, Clone)]
pub struct FlameGraphNode {
    /// Function name
    pub name: String,
    /// Total execution time
    pub total_time: Duration,
    /// Self execution time (excluding children)
    pub self_time: Duration,
    /// Number of samples
    pub samples: u64,
    /// Child nodes
    pub children: BTreeMap<String, FlameGraphNode>,
    /// Call depth
    pub depth: usize,
}

impl FlameGraphNode {
    /// Create a new flame graph node
    pub fn new(name: String, depth: usize) -> Self {
        Self {
            name,
            total_time: Duration::from_secs(0),
            self_time: Duration::from_secs(0),
            samples: 0,
            children: BTreeMap::new(),
            depth,
        }
    }

    /// Add a sample to this node
    pub fn add_sample(&mut self, duration: Duration) {
        self.total_time += duration;
        self.samples += 1;
    }

    /// Calculate self time by subtracting children's time
    pub fn calculate_self_time(&mut self) {
        let children_time: Duration = self.children.values().map(|child| child.total_time).sum();
        self.self_time = self.total_time.saturating_sub(children_time);

        // Recursively calculate for children
        for child in self.children.values_mut() {
            child.calculate_self_time();
        }
    }
    /// Generate output in the collapsed ("folded") stack format, one
    /// `frame_a;frame_b <self_time_ns>` line per node with nonzero self time,
    /// as consumed by common flame graph tooling.
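    ///
    /// For example, a root node with child `compute` and grandchild
    /// `inner_loop` might produce lines like (names and nanosecond counts
    /// are illustrative):
    ///
    /// ```text
    /// root 1200
    /// root;compute 3400
    /// root;compute;inner_loop 5600
    /// ```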
    pub fn to_flame_graph_format(&self, prefix: &str) -> Vec<String> {
        let mut lines = Vec::new();
        let current_stack = if prefix.is_empty() {
            self.name.clone()
        } else {
            format!("{prefix};{}", self.name)
        };

        if self.self_time.as_nanos() > 0 {
            let nanos = self.self_time.as_nanos();
            lines.push(format!("{current_stack} {nanos}"));
        }

        for child in self.children.values() {
            lines.extend(child.to_flame_graph_format(&current_stack));
        }

        lines
    }
}

/// Flame graph generator
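///
/// # Examples
///
/// A minimal sketch (the module path in the `use` is assumed; calls must be
/// strictly nested, each `start_call` paired with a later `end_call`):
///
/// ```no_run
/// use scirs2_core::profiling::advanced::FlameGraphGenerator; // path assumed
///
/// let mut generator = FlameGraphGenerator::new();
/// generator.start_call("outer");
/// generator.start_call("inner");
/// generator.end_call(); // ends "inner"
/// generator.end_call(); // ends "outer"
///
/// let graph = generator.generate();
/// assert!(graph.children.contains_key("outer"));
///
/// // Write collapsed stacks for external flame graph tooling
/// generator.export_to_file("profile.folded").expect("failed to write flame graph");
/// ```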
#[derive(Debug)]
pub struct FlameGraphGenerator {
    /// Root node of the flame graph
    root: FlameGraphNode,
    /// Current call stack
    call_stack: Vec<String>,
    /// Stack of start times
    time_stack: Vec<Instant>,
}

impl FlameGraphGenerator {
    /// Create a new flame graph generator
    pub fn new() -> Self {
        Self {
            root: FlameGraphNode::new("root".to_string(), 0),
            call_stack: Vec::new(),
            time_stack: Vec::new(),
        }
    }

    /// Start a new function call
    pub fn start_call(&mut self, function_name: &str) {
        self.call_stack.push(function_name.to_string());
        self.time_stack.push(Instant::now());
    }

    /// End the current function call
    pub fn end_call(&mut self) {
        if let (Some(function_name), Some(start_time)) =
            (self.call_stack.pop(), self.time_stack.pop())
        {
            let duration = start_time.elapsed();
            let child_depth = self.call_stack.len() + 1;

            // Walk down the tree through the ancestors still on the stack
            let mut current_node = &mut self.root;
            for (depth, name) in self.call_stack.iter().enumerate() {
                current_node = current_node
                    .children
                    .entry(name.clone())
                    .or_insert_with(|| FlameGraphNode::new(name.clone(), depth + 1));
            }

            // Record the sample on the node of the call that just ended
            current_node
                .children
                .entry(function_name.clone())
                .or_insert_with(|| FlameGraphNode::new(function_name.clone(), child_depth))
                .add_sample(duration);
        }
    }

    /// Generate the flame graph
    pub fn generate(&mut self) -> FlameGraphNode {
        self.root.calculate_self_time();
        self.root.clone()
    }

    /// Export flame graph to file
    pub fn export_to_file(&mut self, path: &str) -> Result<(), std::io::Error> {
        let flame_graph = self.generate();
        let lines = flame_graph.to_flame_graph_format("");

        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        for line in lines {
            writeln!(writer, "{line}")?;
        }

        writer.flush()?;
        Ok(())
    }
}

impl Default for FlameGraphGenerator {
    fn default() -> Self {
        Self::new()
    }
}

/// Performance bottleneck detection configuration
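///
/// # Examples
///
/// A minimal sketch (module path assumed), overriding one field and keeping
/// the rest of the defaults:
///
/// ```no_run
/// use scirs2_core::profiling::advanced::BottleneckConfig; // path assumed
/// use std::time::Duration;
///
/// let config = BottleneckConfig {
///     min_execution_threshold: Duration::from_millis(50),
///     ..Default::default()
/// };
/// assert!(config.enable_suggestions);
/// ```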
#[derive(Debug, Clone)]
pub struct BottleneckConfig {
    /// Minimum execution time threshold (operations slower than this are considered bottlenecks)
    pub min_execution_threshold: Duration,
    /// Memory usage threshold (operations using more memory than this are flagged)
    pub memory_threshold: usize,
    /// CPU usage threshold (0.0 to 1.0)
    pub cpu_threshold: f64,
    /// Minimum number of calls to consider for bottleneck analysis
    pub min_calls: usize,
    /// Enable automatic suggestions
    pub enable_suggestions: bool,
}

impl Default for BottleneckConfig {
    fn default() -> Self {
        Self {
            min_execution_threshold: Duration::from_millis(100),
            memory_threshold: 1024 * 1024, // 1 MB
            cpu_threshold: 0.8,            // 80%
            min_calls: 5,
            enable_suggestions: true,
        }
    }
}

/// Bottleneck detection result
#[derive(Debug, Clone)]
pub struct BottleneckReport {
    /// Operation name
    pub operation: String,
    /// Bottleneck type
    pub bottleneck_type: BottleneckType,
    /// Severity score (0.0 to 1.0, higher is more severe)
    pub severity: f64,
    /// Description of the issue
    pub description: String,
    /// Optimization suggestions
    pub suggestions: Vec<String>,
    /// Performance statistics
    pub stats: PerformanceStats,
}

/// Type of bottleneck detected
#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
    /// Slow execution time
    SlowExecution,
    /// High memory usage
    HighMemoryUsage,
    /// High CPU usage
    HighCpuUsage,
    /// Frequent calls (hot path)
    HotPath,
    /// Memory leaks
    MemoryLeak,
    /// Inefficient algorithm
    InefficientAlgorithm,
}

/// Performance statistics for bottleneck analysis
#[derive(Debug, Clone)]
pub struct PerformanceStats {
    /// Total calls
    pub calls: usize,
    /// Total execution time
    pub total_time: Duration,
    /// Average execution time
    pub avg_time: Duration,
    /// Maximum execution time
    pub max_time: Duration,
    /// Total memory usage
    pub total_memory: usize,
    /// Average memory usage
    pub avg_memory: f64,
    /// Maximum memory usage
    pub max_memory: usize,
    /// CPU utilization
    pub cpu_utilization: f64,
}

/// Automated bottleneck detector
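///
/// # Examples
///
/// A minimal sketch (module paths assumed; the `Profiler` is presumed to have
/// been populated by earlier instrumentation):
///
/// ```no_run
/// use scirs2_core::profiling::advanced::BottleneckDetector; // path assumed
/// use scirs2_core::profiling::profiler::Profiler; // path assumed
///
/// let profiler = Profiler::new();
/// // ... run instrumented code ...
/// let mut detector = BottleneckDetector::default();
/// let reports = detector.analyze(&profiler);
/// detector.print_report(&reports);
/// ```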
#[derive(Debug)]
pub struct BottleneckDetector {
    /// Configuration for detection
    config: BottleneckConfig,
    /// Performance history
    #[allow(dead_code)]
    performance_history: HashMap<String, Vec<PerformanceStats>>,
}

impl BottleneckDetector {
    /// Create a new bottleneck detector
    pub fn new(config: BottleneckConfig) -> Self {
        Self {
            config,
            performance_history: HashMap::new(),
        }
    }

    /// Analyze profiling data for bottlenecks
    pub fn analyze(&mut self, profiler: &Profiler) -> Vec<BottleneckReport> {
        let mut reports = Vec::new();

        // Analyze timing data
        for (operation, timing_entry) in profiler.timings() {
            if timing_entry.calls() >= self.config.min_calls {
                let stats = PerformanceStats {
                    calls: timing_entry.calls(),
                    total_time: timing_entry.total_duration(),
                    avg_time: timing_entry.average_duration(),
                    max_time: timing_entry.max_duration(),
                    total_memory: 0, // Would be populated from memory tracking
                    avg_memory: 0.0,
                    max_memory: 0,
                    cpu_utilization: 0.0, // Would be populated from CPU monitoring
                };

                // Check for slow execution
                if stats.avg_time > self.config.min_execution_threshold {
                    // Severity grows with how far the average exceeds the
                    // threshold, saturating at 1.0 (mirrors the memory check)
                    let severity = (stats.avg_time.as_secs_f64()
                        / (self.config.min_execution_threshold.as_secs_f64() * 2.0))
                        .min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider algorithm optimization".to_string(),
                            "Profile inner functions for specific bottlenecks".to_string(),
                            "Check for unnecessary allocations".to_string(),
                            "Consider parallel processing if applicable".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::SlowExecution,
                        severity,
                        description: format!(
                            "Operation '{}' takes {:.2}ms on average, which exceeds the threshold of {:.2}ms",
                            operation,
                            stats.avg_time.as_secs_f64() * 1000.0,
                            self.config.min_execution_threshold.as_secs_f64() * 1000.0
                        ),
                        suggestions,
                        stats: stats.clone(),
                    });
                }

                // Check for hot paths (frequent calls)
                if stats.calls > 1000 {
                    let severity = (stats.calls as f64 / 10000.0).min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider caching results if applicable".to_string(),
                            "Look for opportunities to batch operations".to_string(),
                            "Profile for micro-optimizations".to_string(),
                            "Consider memoization for pure functions".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::HotPath,
                        severity,
                        description: format!(
                            "Operation '{}' is called {} times, indicating a hot path",
                            operation, stats.calls
                        ),
                        suggestions,
                        stats,
                    });
                }
            }
        }

        // Analyze memory data
        for (operation, memory_entry) in profiler.memory() {
            if memory_entry.allocations() >= self.config.min_calls {
                let avg_memory =
                    memory_entry.total_delta() as f64 / memory_entry.allocations() as f64;

                if memory_entry.max_delta() > self.config.memory_threshold {
                    let severity = (memory_entry.max_delta() as f64
                        / (self.config.memory_threshold as f64 * 2.0))
                        .min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider pre-allocating memory where possible".to_string(),
                            "Look for opportunities to reuse memory".to_string(),
                            "Check for memory leaks".to_string(),
                            "Consider using memory pools".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::HighMemoryUsage,
                        severity,
                        description: format!(
                            "Operation '{}' uses up to {:.2}MB of memory, exceeding threshold of {:.2}MB",
                            operation,
                            memory_entry.max_delta() as f64 / 1024.0 / 1024.0,
                            self.config.memory_threshold as f64 / 1024.0 / 1024.0
                        ),
                        suggestions,
                        stats: PerformanceStats {
                            calls: memory_entry.allocations(),
                            total_time: Duration::from_secs(0),
                            avg_time: Duration::from_secs(0),
                            max_time: Duration::from_secs(0),
                            total_memory: memory_entry.total_delta() as usize,
                            avg_memory,
                            max_memory: memory_entry.max_delta(),
                            cpu_utilization: 0.0,
                        },
                    });
                }
            }
        }

        reports
    }

    /// Print bottleneck report
    pub fn print_report(&self, reports: &[BottleneckReport]) {
        if reports.is_empty() {
            println!("No performance bottlenecks detected.");
            return;
        }

        println!("\n=== Bottleneck Analysis Report ===");

        for report in reports {
            println!("\nšŸ” Operation: {}", report.operation);
            println!("   Type: {:?}", report.bottleneck_type);
            println!("   Severity: {:.1}%", report.severity * 100.0);
            println!("   Description: {}", report.description);

            if !report.suggestions.is_empty() {
                println!("   Suggestions:");
                for suggestion in &report.suggestions {
                    println!("     • {suggestion}");
                }
            }

            println!("   Stats:");
            println!("     • Calls: {}", report.stats.calls);
            if report.stats.total_time.as_nanos() > 0 {
                println!(
                    "     • Avg Time: {:.2}ms",
                    report.stats.avg_time.as_secs_f64() * 1000.0
                );
                println!(
                    "     • Max Time: {:.2}ms",
                    report.stats.max_time.as_secs_f64() * 1000.0
                );
            }
            if report.stats.total_memory > 0 {
                println!(
                    "     • Avg Memory: {:.2}KB",
                    report.stats.avg_memory / 1024.0
                );
                println!(
                    "     • Max Memory: {:.2}KB",
                    report.stats.max_memory as f64 / 1024.0
                );
            }
        }
    }
}

impl Default for BottleneckDetector {
    fn default() -> Self {
        Self::new(BottleneckConfig::default())
    }
}

/// System resource monitor for tracking CPU, memory, and network usage
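///
/// # Examples
///
/// A minimal sketch (module path assumed). Sampling runs on a background
/// thread until `stop` is called:
///
/// ```no_run
/// use scirs2_core::profiling::advanced::SystemResourceMonitor; // path assumed
/// use std::time::Duration;
///
/// let monitor = SystemResourceMonitor::new(Duration::from_millis(100));
/// monitor.start();
/// // ... run the workload under observation ...
/// monitor.stop();
///
/// let stats = monitor.get_stats();
/// println!("avg CPU: {:.1}%", stats.avg_cpu_usage * 100.0);
/// ```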
#[derive(Debug)]
pub struct SystemResourceMonitor {
    /// Monitoring interval
    interval: Duration,
    /// Whether monitoring is active
    active: Arc<std::sync::atomic::AtomicBool>,
    /// CPU usage history
    cpu_history: Arc<std::sync::Mutex<VecDeque<f64>>>,
    /// Memory usage history
    memory_history: Arc<std::sync::Mutex<VecDeque<usize>>>,
    /// Network I/O history (bytes)
    network_history: Arc<std::sync::Mutex<VecDeque<(u64, u64)>>>, // (bytes_in, bytes_out)
}

impl SystemResourceMonitor {
    /// Create a new system resource monitor
    pub fn new(interval: Duration) -> Self {
        Self {
            interval,
            active: Arc::new(std::sync::atomic::AtomicBool::new(false)),
            cpu_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
            memory_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
            network_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
        }
    }

    /// Start monitoring system resources
    pub fn start(&self) {
        self.active.store(true, Ordering::Relaxed);

        let active = self.active.clone();
        let cpu_history = self.cpu_history.clone();
        let memory_history = self.memory_history.clone();
        let network_history = self.network_history.clone();
        let interval = self.interval;

        thread::spawn(move || {
            while active.load(Ordering::Relaxed) {
                // Sample CPU usage
                let cpu_usage = Self::get_cpu_usage();
                if let Ok(mut cpu_hist) = cpu_history.lock() {
                    cpu_hist.push_back(cpu_usage);
                    if cpu_hist.len() > 1000 {
                        cpu_hist.pop_front();
                    }
                }

                // Sample memory usage
                let memory_usage = Self::get_memory_usage();
                if let Ok(mut mem_hist) = memory_history.lock() {
                    mem_hist.push_back(memory_usage);
                    if mem_hist.len() > 1000 {
                        mem_hist.pop_front();
                    }
                }

                // Sample network usage
                let network_usage = Self::get_network_usage();
                if let Ok(mut net_hist) = network_history.lock() {
                    net_hist.push_back(network_usage);
                    if net_hist.len() > 1000 {
                        net_hist.pop_front();
                    }
                }

                thread::sleep(interval);
            }
        });
    }

    /// Stop monitoring
    pub fn stop(&self) {
        self.active.store(false, Ordering::Relaxed);
    }

    /// Get current CPU usage (0.0 to 1.0)
    fn get_cpu_usage() -> f64 {
        // This is a simplified implementation
        // In a real implementation, you would use platform-specific APIs
        #[cfg(target_os = "linux")]
        {
            // On Linux, parse /proc/stat
            0.5 // Placeholder
        }

        #[cfg(target_os = "macos")]
        {
            // On macOS, use host_processor_info
            0.5 // Placeholder
        }

        #[cfg(target_os = "windows")]
        {
            // On Windows, use GetSystemTimes
            0.5 // Placeholder
        }

        #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))]
        {
            0.5 // Fallback placeholder
        }
    }

    /// Get current memory usage in bytes
    fn get_memory_usage() -> usize {
        // Simplified implementation - would use platform-specific APIs
        1024 * 1024 * 512 // 512 MB placeholder
    }

    /// Get current network usage (bytes_in, bytes_out)
    fn get_network_usage() -> (u64, u64) {
        // Simplified implementation - would parse /proc/net/dev on Linux
        (1024, 1024) // Placeholder
    }

    /// Get resource usage statistics
    pub fn get_stats(&self) -> ResourceStats {
        let cpu_hist = self.cpu_history.lock().expect("cpu history mutex poisoned");
        let memory_hist = self.memory_history.lock().expect("memory history mutex poisoned");
        let network_hist = self.network_history.lock().expect("network history mutex poisoned");

        let avg_cpu = if cpu_hist.is_empty() {
            0.0
        } else {
            cpu_hist.iter().sum::<f64>() / cpu_hist.len() as f64
        };

        let max_cpu = cpu_hist.iter().fold(0.0f64, |a, &b| a.max(b));

        let avg_memory = if memory_hist.is_empty() {
            0
        } else {
            memory_hist.iter().sum::<usize>() / memory_hist.len()
        };

        let max_memory = memory_hist.iter().max().copied().unwrap_or(0);

        let total_network_in: u64 = network_hist.iter().map(|(bytes_in, _)| *bytes_in).sum();
        let total_network_out: u64 = network_hist.iter().map(|(_, bytes_out)| *bytes_out).sum();

        ResourceStats {
            avg_cpu_usage: avg_cpu,
            max_cpu_usage: max_cpu,
            avg_memory_usage: avg_memory,
            max_memory_usage: max_memory,
            total_network_in,
            total_network_out,
            sample_count: cpu_hist.len(),
        }
    }
}

impl Default for SystemResourceMonitor {
    fn default() -> Self {
        Self::new(Duration::from_secs(1))
    }
}

/// Resource usage statistics
#[derive(Debug, Clone)]
pub struct ResourceStats {
    /// Average CPU usage (0.0 to 1.0)
    pub avg_cpu_usage: f64,
    /// Maximum CPU usage (0.0 to 1.0)
    pub max_cpu_usage: f64,
    /// Average memory usage (bytes)
    pub avg_memory_usage: usize,
    /// Maximum memory usage (bytes)
    pub max_memory_usage: usize,
    /// Total network bytes received
    pub total_network_in: u64,
    /// Total network bytes sent
    pub total_network_out: u64,
    /// Number of samples collected
    pub sample_count: usize,
}

/// Differential profiler for comparing performance between runs
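///
/// # Examples
///
/// A minimal sketch (module paths assumed) comparing two profiling runs:
///
/// ```no_run
/// use scirs2_core::profiling::advanced::DifferentialProfiler; // path assumed
/// use scirs2_core::profiling::profiler::Profiler; // path assumed
///
/// let profiler = Profiler::new();
/// let mut diff = DifferentialProfiler::new();
///
/// // ... run the original code ...
/// diff.set_baseline(&profiler, Some("before".to_string()));
/// // ... apply an optimization and run again ...
/// diff.set_current(&profiler, Some("after".to_string()));
///
/// if let Some(report) = diff.generate_diff_report() {
///     report.print();
/// }
/// ```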
#[derive(Debug)]
pub struct DifferentialProfiler {
    /// Baseline profiling data
    baseline: Option<ProfileSnapshot>,
    /// Current profiling data
    current: Option<ProfileSnapshot>,
}

/// Snapshot of profiling data at a point in time
#[derive(Debug, Clone)]
pub struct ProfileSnapshot {
    /// Timing data
    pub timings: HashMap<String, TimingEntry>,
    /// Memory data
    pub memory: HashMap<String, MemoryEntry>,
    /// Resource usage at snapshot time
    pub resources: Option<ResourceStats>,
    /// Timestamp of snapshot
    pub timestamp: std::time::Instant,
    /// Optional label for the snapshot
    pub label: Option<String>,
}

impl DifferentialProfiler {
    /// Create a new differential profiler
    pub fn new() -> Self {
        Self {
            baseline: None,
            current: None,
        }
    }

    /// Set the baseline snapshot
    pub fn set_baseline(&mut self, profiler: &Profiler, label: Option<String>) {
        self.baseline = Some(ProfileSnapshot {
            timings: profiler.timings().clone(),
            memory: profiler.memory().clone(),
            resources: None,
            timestamp: std::time::Instant::now(),
            label,
        });
    }

    /// Set the current snapshot
    pub fn set_current(&mut self, profiler: &Profiler, label: Option<String>) {
        self.current = Some(ProfileSnapshot {
            timings: profiler.timings().clone(),
            memory: profiler.memory().clone(),
            resources: None,
            timestamp: std::time::Instant::now(),
            label,
        });
    }

    /// Generate a differential report
    pub fn generate_diff_report(&self) -> Option<DifferentialReport> {
        if let (Some(baseline), Some(current)) = (&self.baseline, &self.current) {
            Some(DifferentialReport::new(baseline, current))
        } else {
            None
        }
    }
}

impl Default for DifferentialProfiler {
    fn default() -> Self {
        Self::new()
    }
}

/// Differential profiling report
#[derive(Debug)]
pub struct DifferentialReport {
    /// Timing differences
    pub timing_diffs: HashMap<String, TimingDiff>,
    /// Memory differences
    pub memory_diffs: HashMap<String, MemoryDiff>,
    /// Overall performance change
    pub overall_change: PerformanceChange,
    /// Report generation timestamp
    pub generated_at: std::time::Instant,
}

impl DifferentialReport {
    /// Create a new differential report
    pub fn new(baseline: &ProfileSnapshot, current: &ProfileSnapshot) -> Self {
        let mut timing_diffs = HashMap::new();
        let mut memory_diffs = HashMap::new();

        // Calculate timing differences
        for (operation, current_timing) in &current.timings {
            if let Some(baseline_timing) = baseline.timings.get(operation) {
                timing_diffs.insert(
                    operation.clone(),
                    TimingDiff::new(baseline_timing, current_timing),
                );
            }
        }

        // Calculate memory differences
        for (operation, current_memory) in &current.memory {
            if let Some(baseline_memory) = baseline.memory.get(operation) {
                memory_diffs.insert(
                    operation.clone(),
                    MemoryDiff::new(baseline_memory, current_memory),
                );
            }
        }

        // Calculate overall performance change
        let overall_change = PerformanceChange::calculate(&timing_diffs, &memory_diffs);

        Self {
            timing_diffs,
            memory_diffs,
            overall_change,
            generated_at: std::time::Instant::now(),
        }
    }

    /// Print the differential report
    pub fn print(&self) {
        println!("\n=== Differential Profiling Report ===");

        if !self.timing_diffs.is_empty() {
            println!("\nTiming Changes:");
            println!(
                "{:<30} {:<15} {:<15} {:<15}",
                "Operation", "Baseline (ms)", "Current (ms)", "Change (%)"
            );
            println!("{}", "-".repeat(80));

            for (operation, diff) in &self.timing_diffs {
                println!(
                    "{:<30} {:<15.2} {:<15.2} {:>+14.1}%",
                    operation,
                    diff.baseline_avg.as_secs_f64() * 1000.0,
                    diff.current_avg.as_secs_f64() * 1000.0,
                    diff.percentage_change
                );
            }
        }

        if !self.memory_diffs.is_empty() {
            println!("\nMemory Changes:");
            println!(
                "{:<30} {:<15} {:<15} {:<15}",
                "Operation", "Baseline (KB)", "Current (KB)", "Change (%)"
            );
            println!("{}", "-".repeat(80));

            for (operation, diff) in &self.memory_diffs {
                println!(
                    "{:<30} {:<15.2} {:<15.2} {:>+14.1}%",
                    operation,
                    diff.baseline_avg / 1024.0,
                    diff.current_avg / 1024.0,
                    diff.percentage_change
                );
            }
        }

        println!("\nOverall Performance:");
        println!(
            "  • Timing Change: {:+.1}%",
            self.overall_change.timing_change
        );
        println!(
            "  • Memory Change: {:+.1}%",
            self.overall_change.memory_change
        );
        println!("  • Recommendation: {}", self.overall_change.recommendation);
    }
}

/// Timing difference between baseline and current
#[derive(Debug)]
pub struct TimingDiff {
    /// Baseline average duration
    pub baseline_avg: Duration,
    /// Current average duration
    pub current_avg: Duration,
    /// Percentage change (positive = slower, negative = faster)
    pub percentage_change: f64,
}

impl TimingDiff {
    /// Create a new timing difference
    pub fn new(baseline: &TimingEntry, current: &TimingEntry) -> Self {
        let baseline_avg = baseline.average_duration();
        let current_avg = current.average_duration();
        let percentage_change = if baseline_avg.as_nanos() > 0 {
            ((current_avg.as_nanos() as f64 - baseline_avg.as_nanos() as f64)
                / baseline_avg.as_nanos() as f64)
                * 100.0
        } else {
            0.0
        };

        Self {
            baseline_avg,
            current_avg,
            percentage_change,
        }
    }
}

/// Memory difference between baseline and current
#[derive(Debug)]
pub struct MemoryDiff {
    /// Baseline average memory usage
    pub baseline_avg: f64,
    /// Current average memory usage
    pub current_avg: f64,
    /// Percentage change (positive = more memory, negative = less memory)
    pub percentage_change: f64,
}

impl MemoryDiff {
    /// Create a new memory difference
    pub fn new(baseline: &MemoryEntry, current: &MemoryEntry) -> Self {
        let baseline_avg = if baseline.allocations() > 0 {
            baseline.total_delta() as f64 / baseline.allocations() as f64
        } else {
            0.0
        };

        let current_avg = if current.allocations() > 0 {
            current.total_delta() as f64 / current.allocations() as f64
        } else {
            0.0
        };

        let percentage_change = if baseline_avg.abs() > 0.0 {
            ((current_avg - baseline_avg) / baseline_avg.abs()) * 100.0
        } else {
            0.0
        };

        Self {
            baseline_avg,
            current_avg,
            percentage_change,
        }
    }
}

/// Overall performance change summary
#[derive(Debug)]
pub struct PerformanceChange {
    /// Overall timing change percentage
    pub timing_change: f64,
    /// Overall memory change percentage
    pub memory_change: f64,
    /// Performance recommendation
    pub recommendation: String,
}

impl PerformanceChange {
    /// Calculate overall performance change
    pub fn calculate(
        timing_diffs: &HashMap<String, TimingDiff>,
        memory_diffs: &HashMap<String, MemoryDiff>,
    ) -> Self {
        let timing_change = if timing_diffs.is_empty() {
            0.0
        } else {
            timing_diffs
                .values()
                .map(|diff| diff.percentage_change)
                .sum::<f64>()
                / timing_diffs.len() as f64
        };

        let memory_change = if memory_diffs.is_empty() {
            0.0
        } else {
            memory_diffs
                .values()
                .map(|diff| diff.percentage_change)
                .sum::<f64>()
                / memory_diffs.len() as f64
        };

        let recommendation = match (timing_change > 5.0, memory_change > 10.0) {
            (true, true) => "Performance degraded significantly in both time and memory. Review recent changes.".to_string(),
            (true, false) => "Execution time increased. Consider profiling hot paths for optimization opportunities.".to_string(),
            (false, true) => "Memory usage increased. Review memory allocation patterns and consider optimization.".to_string(),
            (false, false) => {
                if timing_change < -5.0 || memory_change < -10.0 {
                    "Performance improved! Consider documenting the optimizations made.".to_string()
                } else {
                    "Performance is stable with minimal changes.".to_string()
                }
            }
        };

        Self {
            timing_change,
            memory_change,
            recommendation,
        }
    }
}

/// Performance profiler with export capabilities
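///
/// # Examples
///
/// A minimal sketch (module path assumed):
///
/// ```no_run
/// use scirs2_core::profiling::advanced::ExportableProfiler; // path assumed
///
/// let mut profiler = ExportableProfiler::new();
/// profiler.add_metadata("run".to_string(), "baseline".to_string());
/// // ... run instrumented code via profiler.profiler() ...
/// profiler.export_to_csv("timings.csv").expect("failed to write CSV");
/// ```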
#[derive(Debug)]
pub struct ExportableProfiler {
    /// Base profiler
    profiler: Profiler,
    /// Additional metadata
    metadata: HashMap<String, String>,
}

impl ExportableProfiler {
    /// Create a new exportable profiler
    pub fn new() -> Self {
        Self {
            profiler: Profiler::new(),
            metadata: HashMap::new(),
        }
    }

    /// Add metadata
    pub fn add_metadata(&mut self, key: String, value: String) {
        self.metadata.insert(key, value);
    }

    /// Export profiling data to JSON
    pub fn export_to_json(&self, path: &str) -> Result<(), std::io::Error> {
        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        // NOTE: this uses Debug formatting as a stand-in for real JSON
        // serialization (e.g. via serde); the output is human-readable but
        // not guaranteed to be valid JSON
        let json_data = format!(
            r#"{{
                "metadata": {:#?},
                "timings": {:#?},
                "memory": {:#?}
            }}"#,
            self.metadata,
            self.profiler.timings(),
            self.profiler.memory()
        );

        writer.write_all(json_data.as_bytes())?;
        Ok(())
    }

    /// Export profiling data to CSV
    pub fn export_to_csv(&self, path: &str) -> Result<(), std::io::Error> {
        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        // Write timing data
        writeln!(writer, "Operation,Calls,Total_ms,Average_ms,Max_ms")?;
        for (operation, timing) in self.profiler.timings() {
            writeln!(
                writer,
                "{},{},{:.2},{:.2},{:.2}",
                operation,
                timing.calls(),
                timing.total_duration().as_secs_f64() * 1000.0,
                timing.average_duration().as_secs_f64() * 1000.0,
                timing.max_duration().as_secs_f64() * 1000.0
            )?;
        }

        writer.flush()?;
        Ok(())
    }

    /// Get access to the underlying profiler
    pub fn profiler(&mut self) -> &mut Profiler {
        &mut self.profiler
    }
}

impl Default for ExportableProfiler {
    fn default() -> Self {
        Self::new()
    }
}