use crate::profiling::entries::{MemoryEntry, TimingEntry};
use crate::profiling::profiler::Profiler;
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::fs::File;
use std::io::{BufWriter, Write};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

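/// A node in a flame graph: one frame of a call stack with its accumulated
/// time, sample count, and children keyed by frame name.
///
/// `to_flame_graph_format` emits collapsed-stack output, one
/// `stack;frames value` line per node with nonzero self time, e.g.
/// `root;parse;tokenize 1200000` (the value is self time in nanoseconds),
/// which flamegraph.pl and compatible renderers consume directly.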
#[derive(Debug, Clone)]
pub struct FlameGraphNode {
    pub name: String,
    pub total_time: Duration,
    pub self_time: Duration,
    pub samples: u64,
    pub children: BTreeMap<String, FlameGraphNode>,
    pub depth: usize,
}

impl FlameGraphNode {
    pub fn new(name: String, depth: usize) -> Self {
        Self {
            name,
            total_time: Duration::from_secs(0),
            self_time: Duration::from_secs(0),
            samples: 0,
            children: BTreeMap::new(),
            depth,
        }
    }

    pub fn add_sample(&mut self, duration: Duration) {
        self.total_time += duration;
        self.samples += 1;
    }

    pub fn calculate_self_time(&mut self) {
        let children_time: Duration = self.children.values().map(|child| child.total_time).sum();
        self.self_time = self.total_time.saturating_sub(children_time);

        for child in self.children.values_mut() {
            child.calculate_self_time();
        }
    }

    pub fn to_flame_graph_format(&self, prefix: &str) -> Vec<String> {
        let mut lines = Vec::new();
        let current_stack = if prefix.is_empty() {
            self.name.clone()
        } else {
            format!("{prefix};{}", self.name)
        };

        if self.self_time.as_nanos() > 0 {
            let nanos = self.self_time.as_nanos();
            lines.push(format!("{current_stack} {nanos}"));
        }

        for child in self.children.values() {
            lines.extend(child.to_flame_graph_format(&current_stack));
        }

        lines
    }
}

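/// Builds a flame graph from explicitly bracketed call events: each
/// `start_call` pushes a frame, each `end_call` pops one and records its
/// elapsed time in the tree.
///
/// A minimal usage sketch (operation names and output path are illustrative):
///
/// ```ignore
/// let mut flame = FlameGraphGenerator::new();
/// flame.start_call("request");
/// flame.start_call("parse");
/// // ... timed work ...
/// flame.end_call(); // closes "parse"
/// flame.end_call(); // closes "request"
/// flame.export_to_file("profile.folded")?;
/// ```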
#[derive(Debug)]
pub struct FlameGraphGenerator {
    root: FlameGraphNode,
    call_stack: Vec<String>,
    time_stack: Vec<Instant>,
}

impl FlameGraphGenerator {
    pub fn new() -> Self {
        Self {
            root: FlameGraphNode::new("root".to_string(), 0),
            call_stack: Vec::new(),
            time_stack: Vec::new(),
        }
    }

    pub fn start_call(&mut self, function_name: &str) {
        self.call_stack.push(function_name.to_string());
        self.time_stack.push(Instant::now());
    }

    pub fn end_call(&mut self) {
        if let (Some(function_name), Some(start_time)) =
            (self.call_stack.pop(), self.time_stack.pop())
        {
            let duration = start_time.elapsed();

            // Descend from the root through the remaining ancestors, then into
            // the node for the function that just returned, so the sample is
            // attributed to the popped frame rather than to its parent.
            let mut current_node = &mut self.root;
            for (depth, name) in self.call_stack.iter().enumerate() {
                current_node = current_node
                    .children
                    .entry(name.clone())
                    .or_insert_with(|| FlameGraphNode::new(name.clone(), depth + 1));
            }
            let depth = self.call_stack.len() + 1;
            current_node = current_node
                .children
                .entry(function_name.clone())
                .or_insert_with(|| FlameGraphNode::new(function_name.clone(), depth));

            current_node.add_sample(duration);
        }
    }

    pub fn generate(&mut self) -> FlameGraphNode {
        self.root.calculate_self_time();
        self.root.clone()
    }

    pub fn export_to_file(&mut self, path: &str) -> Result<(), std::io::Error> {
        let flame_graph = self.generate();
        let lines = flame_graph.to_flame_graph_format("");

        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        for line in lines {
            writeln!(writer, "{line}")?;
        }

        writer.flush()?;
        Ok(())
    }
}

impl Default for FlameGraphGenerator {
    fn default() -> Self {
        Self::new()
    }
}

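/// Thresholds that control what `BottleneckDetector` reports. The defaults
/// (100 ms average latency, 1 MiB memory delta, 80% CPU, at least 5 calls)
/// are starting points, not tuned values.
///
/// A sketch of a stricter configuration (the values are illustrative):
///
/// ```ignore
/// let config = BottleneckConfig {
///     min_execution_threshold: Duration::from_millis(10),
///     memory_threshold: 256 * 1024,
///     ..BottleneckConfig::default()
/// };
/// ```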
#[derive(Debug, Clone)]
pub struct BottleneckConfig {
    pub min_execution_threshold: Duration,
    pub memory_threshold: usize,
    pub cpu_threshold: f64,
    pub min_calls: usize,
    pub enable_suggestions: bool,
}

impl Default for BottleneckConfig {
    fn default() -> Self {
        Self {
            min_execution_threshold: Duration::from_millis(100),
            memory_threshold: 1024 * 1024,
            cpu_threshold: 0.8,
            min_calls: 5,
            enable_suggestions: true,
        }
    }
}

#[derive(Debug, Clone)]
pub struct BottleneckReport {
    pub operation: String,
    pub bottleneck_type: BottleneckType,
    pub severity: f64,
    pub description: String,
    pub suggestions: Vec<String>,
    pub stats: PerformanceStats,
}

#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
    SlowExecution,
    HighMemoryUsage,
    HighCpuUsage,
    HotPath,
    MemoryLeak,
    InefficientAlgorithm,
}

#[derive(Debug, Clone)]
pub struct PerformanceStats {
    pub calls: usize,
    pub total_time: Duration,
    pub avg_time: Duration,
    pub max_time: Duration,
    pub total_memory: usize,
    pub avg_memory: f64,
    pub max_memory: usize,
    pub cpu_utilization: f64,
}

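/// Scans a `Profiler`'s timing and memory tables and reports operations that
/// exceed the configured thresholds, together with rough severity scores and
/// generic tuning suggestions.
///
/// A minimal sketch (assumes a populated `Profiler` named `profiler`):
///
/// ```ignore
/// let mut detector = BottleneckDetector::default();
/// let reports = detector.analyze(&profiler);
/// detector.print_report(&reports);
/// ```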
#[derive(Debug)]
pub struct BottleneckDetector {
    config: BottleneckConfig,
    #[allow(dead_code)]
    performance_history: HashMap<String, Vec<PerformanceStats>>,
}

impl BottleneckDetector {
    pub fn new(config: BottleneckConfig) -> Self {
        Self {
            config,
            performance_history: HashMap::new(),
        }
    }

    pub fn analyze(&mut self, profiler: &Profiler) -> Vec<BottleneckReport> {
        let mut reports = Vec::new();

        for (operation, timing_entry) in profiler.timings() {
            if timing_entry.calls() >= self.config.min_calls {
                let stats = PerformanceStats {
                    calls: timing_entry.calls(),
                    total_time: timing_entry.total_duration(),
                    avg_time: timing_entry.average_duration(),
                    max_time: timing_entry.max_duration(),
                    total_memory: 0,
                    avg_memory: 0.0,
                    max_memory: 0,
                    cpu_utilization: 0.0,
                };

                if stats.avg_time > self.config.min_execution_threshold {
                    // Scale severity against twice the threshold (mirroring the
                    // memory check below); dividing by the threshold itself
                    // always yields a ratio above 1.0 inside this branch, which
                    // would pin severity at 1.0.
                    let severity = (stats.avg_time.as_secs_f64()
                        / (self.config.min_execution_threshold.as_secs_f64() * 2.0))
                        .min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider algorithm optimization".to_string(),
                            "Profile inner functions for specific bottlenecks".to_string(),
                            "Check for unnecessary allocations".to_string(),
                            "Consider parallel processing if applicable".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::SlowExecution,
                        severity,
                        description: format!(
                            "Operation '{}' takes {:.2}ms on average, which exceeds the threshold of {:.2}ms",
                            operation,
                            stats.avg_time.as_secs_f64() * 1000.0,
                            self.config.min_execution_threshold.as_secs_f64() * 1000.0
                        ),
                        suggestions,
                        stats: stats.clone(),
                    });
                }

                if stats.calls > 1000 {
                    let severity = (stats.calls as f64 / 10000.0).min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider caching results if applicable".to_string(),
                            "Look for opportunities to batch operations".to_string(),
                            "Profile for micro-optimizations".to_string(),
                            "Consider memoization for pure functions".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::HotPath,
                        severity,
                        description: format!(
                            "Operation '{}' is called {} times, indicating a hot path",
                            operation, stats.calls
                        ),
                        suggestions,
                        stats,
                    });
                }
            }
        }

        for (operation, memory_entry) in profiler.memory() {
            if memory_entry.allocations() >= self.config.min_calls {
                let avg_memory =
                    memory_entry.total_delta() as f64 / memory_entry.allocations() as f64;

                if memory_entry.max_delta() > self.config.memory_threshold {
                    let severity = (memory_entry.max_delta() as f64
                        / (self.config.memory_threshold as f64 * 2.0))
                        .min(1.0);
                    let mut suggestions = Vec::new();

                    if self.config.enable_suggestions {
                        suggestions.extend([
                            "Consider pre-allocating memory where possible".to_string(),
                            "Look for opportunities to reuse memory".to_string(),
                            "Check for memory leaks".to_string(),
                            "Consider using memory pools".to_string(),
                        ]);
                    }

                    reports.push(BottleneckReport {
                        operation: operation.clone(),
                        bottleneck_type: BottleneckType::HighMemoryUsage,
                        severity,
                        description: format!(
                            "Operation '{}' uses up to {:.2}MB of memory, exceeding threshold of {:.2}MB",
                            operation,
                            memory_entry.max_delta() as f64 / 1024.0 / 1024.0,
                            self.config.memory_threshold as f64 / 1024.0 / 1024.0
                        ),
                        suggestions,
                        stats: PerformanceStats {
                            calls: memory_entry.allocations(),
                            total_time: Duration::from_secs(0),
                            avg_time: Duration::from_secs(0),
                            max_time: Duration::from_secs(0),
                            total_memory: memory_entry.total_delta() as usize,
                            avg_memory,
                            max_memory: memory_entry.max_delta(),
                            cpu_utilization: 0.0,
                        },
                    });
                }
            }
        }

        reports
    }

    pub fn print_report(&self, reports: &[BottleneckReport]) {
        if reports.is_empty() {
            println!("No performance bottlenecks detected.");
            return;
        }

        println!("\n=== Bottleneck Analysis Report ===");

        for report in reports {
            println!("\nOperation: {}", report.operation);
            println!("  Type: {:?}", report.bottleneck_type);
            println!("  Severity: {:.1}%", report.severity * 100.0);
            println!("  Description: {}", report.description);

            if !report.suggestions.is_empty() {
                println!("  Suggestions:");
                for suggestion in &report.suggestions {
                    println!("    • {suggestion}");
                }
            }

            println!("  Stats:");
            println!("    • Calls: {}", report.stats.calls);
            if report.stats.total_time.as_nanos() > 0 {
                println!(
                    "    • Avg Time: {:.2}ms",
                    report.stats.avg_time.as_secs_f64() * 1000.0
                );
                println!(
                    "    • Max Time: {:.2}ms",
                    report.stats.max_time.as_secs_f64() * 1000.0
                );
            }
            if report.stats.total_memory > 0 {
                println!(
                    "    • Avg Memory: {:.2}KB",
                    report.stats.avg_memory / 1024.0
                );
                println!(
                    "    • Max Memory: {:.2}KB",
                    report.stats.max_memory as f64 / 1024.0
                );
            }
        }
    }
}

impl Default for BottleneckDetector {
    fn default() -> Self {
        Self::new(BottleneckConfig::default())
    }
}

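/// Samples CPU, memory, and network usage on a background thread at a fixed
/// interval, keeping the most recent 1000 samples of each series. Note that
/// the platform probes (`get_cpu_usage` and friends) are currently stubs
/// that return fixed placeholder values.
///
/// A minimal usage sketch (the interval and printout are illustrative):
///
/// ```ignore
/// let monitor = SystemResourceMonitor::new(Duration::from_millis(250));
/// monitor.start();
/// // ... run the workload under observation ...
/// monitor.stop();
/// let stats = monitor.get_stats();
/// println!("avg CPU: {:.1}%", stats.avg_cpu_usage * 100.0);
/// ```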
#[derive(Debug)]
pub struct SystemResourceMonitor {
    interval: Duration,
    active: Arc<std::sync::atomic::AtomicBool>,
    cpu_history: Arc<std::sync::Mutex<VecDeque<f64>>>,
    memory_history: Arc<std::sync::Mutex<VecDeque<usize>>>,
    network_history: Arc<std::sync::Mutex<VecDeque<(u64, u64)>>>,
}

impl SystemResourceMonitor {
    pub fn new(interval: Duration) -> Self {
        Self {
            interval,
            active: Arc::new(std::sync::atomic::AtomicBool::new(false)),
            cpu_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
            memory_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
            network_history: Arc::new(std::sync::Mutex::new(VecDeque::new())),
        }
    }

    pub fn start(&self) {
        self.active.store(true, Ordering::Relaxed);

        let active = self.active.clone();
        let cpu_history = self.cpu_history.clone();
        let memory_history = self.memory_history.clone();
        let network_history = self.network_history.clone();
        let interval = self.interval;

        thread::spawn(move || {
            while active.load(Ordering::Relaxed) {
                let cpu_usage = Self::get_cpu_usage();
                if let Ok(mut cpu_hist) = cpu_history.lock() {
                    cpu_hist.push_back(cpu_usage);
                    if cpu_hist.len() > 1000 {
                        cpu_hist.pop_front();
                    }
                }

                let memory_usage = Self::get_memory_usage();
                if let Ok(mut mem_hist) = memory_history.lock() {
                    mem_hist.push_back(memory_usage);
                    if mem_hist.len() > 1000 {
                        mem_hist.pop_front();
                    }
                }

                let network_usage = Self::get_network_usage();
                if let Ok(mut net_hist) = network_history.lock() {
                    net_hist.push_back(network_usage);
                    if net_hist.len() > 1000 {
                        net_hist.pop_front();
                    }
                }

                thread::sleep(interval);
            }
        });
    }

    pub fn stop(&self) {
        self.active.store(false, Ordering::Relaxed);
    }

    fn get_cpu_usage() -> f64 {
        // Placeholder: real per-platform sampling (e.g. reading /proc/stat on
        // Linux) is not implemented yet, so every platform reports a fixed 50%.
        #[cfg(target_os = "linux")]
        {
            0.5
        }

        #[cfg(target_os = "macos")]
        {
            0.5
        }

        #[cfg(target_os = "windows")]
        {
            0.5
        }

        #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))]
        {
            0.5
        }
    }

    fn get_memory_usage() -> usize {
        // Placeholder: reports a fixed 512 MiB until a real probe is wired in.
        1024 * 1024 * 512
    }

    fn get_network_usage() -> (u64, u64) {
        // Placeholder: fixed (bytes_in, bytes_out) until a real probe exists.
        (1024, 1024)
    }

    pub fn get_stats(&self) -> ResourceStats {
        let cpu_hist = self.cpu_history.lock().expect("cpu history mutex poisoned");
        let memory_hist = self
            .memory_history
            .lock()
            .expect("memory history mutex poisoned");
        let network_hist = self
            .network_history
            .lock()
            .expect("network history mutex poisoned");

        let avg_cpu = if cpu_hist.is_empty() {
            0.0
        } else {
            cpu_hist.iter().sum::<f64>() / cpu_hist.len() as f64
        };

        let max_cpu = cpu_hist.iter().fold(0.0f64, |a, &b| a.max(b));

        let avg_memory = if memory_hist.is_empty() {
            0
        } else {
            memory_hist.iter().sum::<usize>() / memory_hist.len()
        };

        let max_memory = memory_hist.iter().max().copied().unwrap_or(0);

        let total_network_in: u64 = network_hist.iter().map(|(bytes_in, _)| *bytes_in).sum();
        let total_network_out: u64 = network_hist.iter().map(|(_, bytes_out)| *bytes_out).sum();

        ResourceStats {
            avg_cpu_usage: avg_cpu,
            max_cpu_usage: max_cpu,
            avg_memory_usage: avg_memory,
            max_memory_usage: max_memory,
            total_network_in,
            total_network_out,
            sample_count: cpu_hist.len(),
        }
    }
}

impl Default for SystemResourceMonitor {
    fn default() -> Self {
        Self::new(Duration::from_secs(1))
    }
}

#[derive(Debug, Clone)]
pub struct ResourceStats {
    pub avg_cpu_usage: f64,
    pub max_cpu_usage: f64,
    pub avg_memory_usage: usize,
    pub max_memory_usage: usize,
    pub total_network_in: u64,
    pub total_network_out: u64,
    pub sample_count: usize,
}

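/// Compares two profiling snapshots, a baseline and a current run, and
/// produces per-operation timing and memory diffs.
///
/// A minimal sketch (assumes the same `Profiler` is re-populated between the
/// two snapshots):
///
/// ```ignore
/// let mut diff = DifferentialProfiler::new();
/// diff.set_baseline(&profiler, Some("before".to_string()));
/// // ... apply an optimization and re-run the workload ...
/// diff.set_current(&profiler, Some("after".to_string()));
/// if let Some(report) = diff.generate_diff_report() {
///     report.print();
/// }
/// ```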
#[derive(Debug)]
pub struct DifferentialProfiler {
    baseline: Option<ProfileSnapshot>,
    current: Option<ProfileSnapshot>,
}

#[derive(Debug, Clone)]
pub struct ProfileSnapshot {
    pub timings: HashMap<String, TimingEntry>,
    pub memory: HashMap<String, MemoryEntry>,
    pub resources: Option<ResourceStats>,
    pub timestamp: std::time::Instant,
    pub label: Option<String>,
}

impl DifferentialProfiler {
    pub fn new() -> Self {
        Self {
            baseline: None,
            current: None,
        }
    }

    pub fn set_baseline(&mut self, profiler: &Profiler, label: Option<String>) {
        self.baseline = Some(ProfileSnapshot {
            timings: profiler.timings().clone(),
            memory: profiler.memory().clone(),
            resources: None,
            timestamp: std::time::Instant::now(),
            label,
        });
    }

    pub fn set_current(&mut self, profiler: &Profiler, label: Option<String>) {
        self.current = Some(ProfileSnapshot {
            timings: profiler.timings().clone(),
            memory: profiler.memory().clone(),
            resources: None,
            timestamp: std::time::Instant::now(),
            label,
        });
    }

    pub fn generate_diff_report(&self) -> Option<DifferentialReport> {
        if let (Some(baseline), Some(current)) = (&self.baseline, &self.current) {
            Some(DifferentialReport::new(baseline, current))
        } else {
            None
        }
    }
}

impl Default for DifferentialProfiler {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug)]
pub struct DifferentialReport {
    pub timing_diffs: HashMap<String, TimingDiff>,
    pub memory_diffs: HashMap<String, MemoryDiff>,
    pub overall_change: PerformanceChange,
    pub generated_at: std::time::Instant,
}

impl DifferentialReport {
    pub fn new(baseline: &ProfileSnapshot, current: &ProfileSnapshot) -> Self {
        let mut timing_diffs = HashMap::new();
        let mut memory_diffs = HashMap::new();

        for (operation, current_timing) in &current.timings {
            if let Some(baseline_timing) = baseline.timings.get(operation) {
                timing_diffs.insert(
                    operation.clone(),
                    TimingDiff::new(baseline_timing, current_timing),
                );
            }
        }

        for (operation, current_memory) in &current.memory {
            if let Some(baseline_memory) = baseline.memory.get(operation) {
                memory_diffs.insert(
                    operation.clone(),
                    MemoryDiff::new(baseline_memory, current_memory),
                );
            }
        }

        let overall_change = PerformanceChange::calculate(&timing_diffs, &memory_diffs);

        Self {
            timing_diffs,
            memory_diffs,
            overall_change,
            generated_at: std::time::Instant::now(),
        }
    }

    pub fn print(&self) {
        println!("\n=== Differential Profiling Report ===");

        if !self.timing_diffs.is_empty() {
            println!("\nTiming Changes:");
            println!(
                "{:<30} {:<15} {:<15} {:<15}",
                "Operation", "Baseline (ms)", "Current (ms)", "Change (%)"
            );
            println!("{}", "-".repeat(80));

            for (operation, diff) in &self.timing_diffs {
                println!(
                    "{:<30} {:<15.2} {:<15.2} {:>+14.1}%",
                    operation,
                    diff.baseline_avg.as_secs_f64() * 1000.0,
                    diff.current_avg.as_secs_f64() * 1000.0,
                    diff.percentage_change
                );
            }
        }

        if !self.memory_diffs.is_empty() {
            println!("\nMemory Changes:");
            println!(
                "{:<30} {:<15} {:<15} {:<15}",
                "Operation", "Baseline (KB)", "Current (KB)", "Change (%)"
            );
            println!("{}", "-".repeat(80));

            for (operation, diff) in &self.memory_diffs {
                println!(
                    "{:<30} {:<15.2} {:<15.2} {:>+14.1}%",
                    operation,
                    diff.baseline_avg / 1024.0,
                    diff.current_avg / 1024.0,
                    diff.percentage_change
                );
            }
        }

        println!("\nOverall Performance:");
        println!(
            "  • Timing Change: {:+.1}%",
            self.overall_change.timing_change
        );
        println!(
            "  • Memory Change: {:+.1}%",
            self.overall_change.memory_change
        );
        println!("  • Recommendation: {}", self.overall_change.recommendation);
    }
}

#[derive(Debug)]
pub struct TimingDiff {
    pub baseline_avg: Duration,
    pub current_avg: Duration,
    pub percentage_change: f64,
}

impl TimingDiff {
    pub fn new(baseline: &TimingEntry, current: &TimingEntry) -> Self {
        let baseline_avg = baseline.average_duration();
        let current_avg = current.average_duration();
        let percentage_change = if baseline_avg.as_nanos() > 0 {
            ((current_avg.as_nanos() as f64 - baseline_avg.as_nanos() as f64)
                / baseline_avg.as_nanos() as f64)
                * 100.0
        } else {
            0.0
        };

        Self {
            baseline_avg,
            current_avg,
            percentage_change,
        }
    }
}

#[derive(Debug)]
pub struct MemoryDiff {
    pub baseline_avg: f64,
    pub current_avg: f64,
    pub percentage_change: f64,
}

impl MemoryDiff {
    pub fn new(baseline: &MemoryEntry, current: &MemoryEntry) -> Self {
        let baseline_avg = if baseline.allocations() > 0 {
            baseline.total_delta() as f64 / baseline.allocations() as f64
        } else {
            0.0
        };

        let current_avg = if current.allocations() > 0 {
            current.total_delta() as f64 / current.allocations() as f64
        } else {
            0.0
        };

        let percentage_change = if baseline_avg.abs() > 0.0 {
            ((current_avg - baseline_avg) / baseline_avg.abs()) * 100.0
        } else {
            0.0
        };

        Self {
            baseline_avg,
            current_avg,
            percentage_change,
        }
    }
}

#[derive(Debug)]
pub struct PerformanceChange {
    pub timing_change: f64,
    pub memory_change: f64,
    pub recommendation: String,
}

impl PerformanceChange {
    pub fn calculate(
        timing_diffs: &HashMap<String, TimingDiff>,
        memory_diffs: &HashMap<String, MemoryDiff>,
    ) -> Self {
        let timing_change = if timing_diffs.is_empty() {
            0.0
        } else {
            timing_diffs
                .values()
                .map(|diff| diff.percentage_change)
                .sum::<f64>()
                / timing_diffs.len() as f64
        };

        let memory_change = if memory_diffs.is_empty() {
            0.0
        } else {
            memory_diffs
                .values()
                .map(|diff| diff.percentage_change)
                .sum::<f64>()
                / memory_diffs.len() as f64
        };

        let recommendation = match (timing_change > 5.0, memory_change > 10.0) {
            (true, true) => "Performance degraded significantly in both time and memory. Review recent changes.".to_string(),
            (true, false) => "Execution time increased. Consider profiling hot paths for optimization opportunities.".to_string(),
            (false, true) => "Memory usage increased. Review memory allocation patterns and consider optimization.".to_string(),
            (false, false) => {
                if timing_change < -5.0 || memory_change < -10.0 {
                    "Performance improved! Consider documenting the optimizations made.".to_string()
                } else {
                    "Performance is stable with minimal changes.".to_string()
                }
            }
        };

        Self {
            timing_change,
            memory_change,
            recommendation,
        }
    }
}

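/// Wraps a `Profiler` together with free-form metadata and adds JSON and CSV
/// export.
///
/// A minimal sketch (paths and metadata are illustrative):
///
/// ```ignore
/// let mut exportable = ExportableProfiler::new();
/// exportable.add_metadata("run".to_string(), "nightly".to_string());
/// // ... record timings through exportable.profiler() ...
/// exportable.export_to_csv("timings.csv")?;
/// ```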
#[derive(Debug)]
pub struct ExportableProfiler {
    profiler: Profiler,
    metadata: HashMap<String, String>,
}

impl ExportableProfiler {
    pub fn new() -> Self {
        Self {
            profiler: Profiler::new(),
            metadata: HashMap::new(),
        }
    }

    pub fn add_metadata(&mut self, key: String, value: String) {
        self.metadata.insert(key, value);
    }

    pub fn export_to_json(&self, path: &str) -> Result<(), std::io::Error> {
        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        // Note: this writes the collections with `{:#?}` Debug formatting,
        // which is JSON-like but not guaranteed to be valid JSON. A serde
        // serializer would be needed for strict output.
        let json_data = format!(
            r#"{{
    "metadata": {:#?},
    "timings": {:#?},
    "memory": {:#?}
}}"#,
            self.metadata,
            self.profiler.timings(),
            self.profiler.memory()
        );

        writer.write_all(json_data.as_bytes())?;
        writer.flush()?;
        Ok(())
    }

    pub fn export_to_csv(&self, path: &str) -> Result<(), std::io::Error> {
        let file = File::create(path)?;
        let mut writer = BufWriter::new(file);

        writeln!(writer, "Operation,Calls,Total_ms,Average_ms,Max_ms")?;
        for (operation, timing) in self.profiler.timings() {
            writeln!(
                writer,
                "{},{},{:.2},{:.2},{:.2}",
                operation,
                timing.calls(),
                timing.total_duration().as_secs_f64() * 1000.0,
                timing.average_duration().as_secs_f64() * 1000.0,
                timing.max_duration().as_secs_f64() * 1000.0
            )?;
        }

        writer.flush()?;
        Ok(())
    }

    pub fn profiler(&mut self) -> &mut Profiler {
        &mut self.profiler
    }
}

impl Default for ExportableProfiler {
    fn default() -> Self {
        Self::new()
    }
}
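
// Smoke tests for the pieces of this module that have no external
// dependencies; they exercise only behavior defined above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn flame_graph_self_time_subtracts_children() {
        let mut root = FlameGraphNode::new("root".to_string(), 0);
        root.add_sample(Duration::from_millis(100));
        let child = root
            .children
            .entry("child".to_string())
            .or_insert_with(|| FlameGraphNode::new("child".to_string(), 1));
        child.add_sample(Duration::from_millis(40));

        root.calculate_self_time();
        assert_eq!(root.self_time, Duration::from_millis(60));
    }

    #[test]
    fn generator_attributes_samples_to_nested_calls() {
        let mut generator = FlameGraphGenerator::new();
        generator.start_call("outer");
        generator.start_call("inner");
        generator.end_call(); // closes "inner"
        generator.end_call(); // closes "outer"

        let graph = generator.generate();
        let outer = graph.children.get("outer").expect("outer node exists");
        assert_eq!(outer.samples, 1);
        assert!(outer.children.contains_key("inner"));
    }

    #[test]
    fn stable_performance_yields_neutral_recommendation() {
        let change = PerformanceChange::calculate(&HashMap::new(), &HashMap::new());
        assert_eq!(change.timing_change, 0.0);
        assert_eq!(change.memory_change, 0.0);
        assert!(change.recommendation.contains("stable"));
    }
}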