1use std::collections::HashMap;
6
7use super::functions::profiler_now_ns;
8
/// One labelled item on a profiling timeline.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TimelineEntry {
    /// Absolute timestamp of the event, in nanoseconds.
    pub timestamp_ns: u64,
    /// Human-readable description of the event.
    pub label: String,
    /// Event length in nanoseconds (0 for instantaneous events).
    pub duration_ns: u64,
    /// Coarse grouping key (e.g. "function", "memory", "gc").
    pub category: String,
}
impl TimelineEntry {
    /// Builds an entry, taking owned copies of the label and category.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, label: &str, duration_ns: u64, category: &str) -> Self {
        TimelineEntry {
            label: label.to_owned(),
            category: category.to_owned(),
            timestamp_ns,
            duration_ns,
        }
    }
}
34#[allow(dead_code)]
36pub struct ComprehensiveProfilingReport {
37 pub event_report: ProfileReport,
39 pub memory_profile: MemoryProfile,
41 pub flat_profile: Vec<(String, usize)>,
43 pub cumulative_profile: Vec<(String, usize)>,
45 pub gc_summary: String,
47}
48impl ComprehensiveProfilingReport {
49 #[allow(dead_code)]
51 pub fn build(session: &ProfilingSession) -> Self {
52 let event_report = session.profiler.generate_report();
53 let memory_profile = session.profiler.memory_profile();
54 let flat_profile = session.sampler.flat_profile();
55 let cumulative_profile = session.sampler.cumulative_profile();
56 let gc_summary = format!(
57 "GC cycles: {}, total alloc: {} bytes",
58 event_report.gc_cycles, memory_profile.total_allocs,
59 );
60 Self {
61 event_report,
62 memory_profile,
63 flat_profile,
64 cumulative_profile,
65 gc_summary,
66 }
67 }
68 #[allow(dead_code)]
70 pub fn to_text(&self) -> String {
71 let mut out = self.event_report.to_text();
72 out.push('\n');
73 out.push_str(&self.memory_profile.to_text());
74 out.push('\n');
75 out.push_str(&self.gc_summary);
76 out.push('\n');
77 if !self.flat_profile.is_empty() {
78 out.push_str("\nFlat profile:\n");
79 for (name, count) in &self.flat_profile {
80 out.push_str(&format!(" {:40} {}\n", name, count));
81 }
82 }
83 out
84 }
85}
86#[allow(dead_code)]
88pub struct GcProfiler {
89 records: Vec<GcCollectionRecord>,
90}
91impl GcProfiler {
92 #[allow(dead_code)]
94 pub fn new() -> Self {
95 Self {
96 records: Vec::new(),
97 }
98 }
99 #[allow(dead_code)]
101 pub fn record(&mut self, collected: usize, live: usize, pause_ns: u64) {
102 let ts = profiler_now_ns();
103 self.records
104 .push(GcCollectionRecord::new(ts, collected, live, pause_ns));
105 }
106 #[allow(dead_code)]
108 pub fn collection_count(&self) -> usize {
109 self.records.len()
110 }
111 #[allow(dead_code)]
113 pub fn total_collected(&self) -> usize {
114 self.records.iter().map(|r| r.collected).sum()
115 }
116 #[allow(dead_code)]
118 pub fn avg_pause_ns(&self) -> f64 {
119 if self.records.is_empty() {
120 0.0
121 } else {
122 let total: u64 = self.records.iter().map(|r| r.pause_ns).sum();
123 total as f64 / self.records.len() as f64
124 }
125 }
126 #[allow(dead_code)]
128 pub fn max_pause_ns(&self) -> u64 {
129 self.records.iter().map(|r| r.pause_ns).max().unwrap_or(0)
130 }
131 #[allow(dead_code)]
133 pub fn summary(&self) -> String {
134 format!(
135 "GC: {} collections, {} total collected, avg_pause={:.0}ns, max_pause={}ns",
136 self.collection_count(),
137 self.total_collected(),
138 self.avg_pause_ns(),
139 self.max_pause_ns(),
140 )
141 }
142}
143#[allow(dead_code)]
145pub struct TacticProfileLog {
146 events: Vec<TacticProfilingEvent>,
147}
148impl TacticProfileLog {
149 #[allow(dead_code)]
151 pub fn new() -> Self {
152 Self { events: Vec::new() }
153 }
154 #[allow(dead_code)]
156 pub fn record(&mut self, event: TacticProfilingEvent) {
157 self.events.push(event);
158 }
159 #[allow(dead_code)]
161 pub fn total_duration_ns(&self) -> u64 {
162 self.events.iter().map(|e| e.duration_ns).sum()
163 }
164 #[allow(dead_code)]
166 pub fn success_count(&self) -> usize {
167 self.events.iter().filter(|e| e.success).count()
168 }
169 #[allow(dead_code)]
171 pub fn top_slow(&self, n: usize) -> Vec<&TacticProfilingEvent> {
172 let mut sorted: Vec<&TacticProfilingEvent> = self.events.iter().collect();
173 sorted.sort_by(|a, b| b.duration_ns.cmp(&a.duration_ns));
174 sorted.truncate(n);
175 sorted
176 }
177 #[allow(dead_code)]
179 pub fn avg_duration_ns(&self) -> f64 {
180 if self.events.is_empty() {
181 0.0
182 } else {
183 self.total_duration_ns() as f64 / self.events.len() as f64
184 }
185 }
186}
/// One GC cycle observation: what was reclaimed, what survived, how long it paused.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct GcCollectionRecord {
    /// When the collection happened, in nanoseconds.
    pub timestamp_ns: u64,
    /// Objects reclaimed by this cycle.
    pub collected: usize,
    /// Objects still alive after this cycle.
    pub live: usize,
    /// Stop-the-world pause length in nanoseconds.
    pub pause_ns: u64,
}
impl GcCollectionRecord {
    /// Plain field-for-field constructor.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, collected: usize, live: usize, pause_ns: u64) -> Self {
        Self {
            pause_ns,
            live,
            collected,
            timestamp_ns,
        }
    }
    /// Fraction of examined objects that were reclaimed (0.0 when the
    /// cycle saw no objects at all).
    #[allow(dead_code)]
    pub fn efficiency(&self) -> f64 {
        match self.collected + self.live {
            0 => 0.0,
            total => self.collected as f64 / total as f64,
        }
    }
}
/// A single event captured by the event-based [`Profiler`].
#[derive(Clone, Debug)]
pub enum ProfilingEvent {
    /// Entry into a function; `depth` is the call-stack depth at entry time.
    FunctionCall {
        name: String,
        depth: u32,
    },
    /// Exit from a function, carrying the measured duration.
    FunctionReturn {
        name: String,
        duration_ns: u64,
    },
    /// An allocation of `size` bytes attributed to `tag`.
    Allocation {
        size: usize,
        tag: String,
    },
    /// A deallocation of `size` bytes attributed to `tag`.
    Deallocation {
        size: usize,
        tag: String,
    },
    /// A completed GC cycle: objects collected vs. still live.
    GcCycle {
        collected: usize,
        live: usize,
    },
    /// One tactic step together with the goal count at that point.
    TacticStep {
        tactic_name: String,
        goal_count: u32,
    },
}
/// Aggregated summary produced by [`Profiler::generate_report`].
#[derive(Clone, Debug)]
pub struct ProfileReport {
    /// Number of `FunctionCall` events observed.
    pub total_calls: usize,
    /// Sum of all `Allocation` event sizes, in bytes.
    pub total_alloc_bytes: usize,
    /// Top functions by summed return duration, descending (at most 10).
    pub hot_functions: Vec<(String, u64)>,
    /// Number of `GcCycle` events observed.
    pub gc_cycles: usize,
}
impl ProfileReport {
    /// Fixed-format multi-line text rendering of the report.
    pub fn to_text(&self) -> String {
        let mut out = String::new();
        out.push_str("=== Profile Report ===\n");
        out.push_str(&format!("Total function calls : {}\n", self.total_calls));
        out.push_str(&format!(
            "Total allocations : {} bytes\n",
            self.total_alloc_bytes
        ));
        out.push_str(&format!("GC cycles : {}\n", self.gc_cycles));
        if !self.hot_functions.is_empty() {
            out.push_str("\nHot functions (top 10):\n");
            for (i, (name, ns)) in self.hot_functions.iter().enumerate() {
                out.push_str(&format!("  {:2}. {:40} {:>12} ns\n", i + 1, name, ns));
            }
        }
        out
    }
    /// JSON rendering of the report.
    ///
    /// Fix: function names are now escaped per JSON string rules; previously
    /// a name containing `"`, `\` or a control character produced invalid JSON.
    pub fn to_json(&self) -> String {
        let hot_json: Vec<String> = self
            .hot_functions
            .iter()
            .map(|(name, ns)| {
                format!(
                    "{{\"name\":\"{}\",\"duration_ns\":{}}}",
                    Self::escape_json(name),
                    ns
                )
            })
            .collect();
        format!(
            "{{\"total_calls\":{},\"total_alloc_bytes\":{},\"gc_cycles\":{},\"hot_functions\":[{}]}}",
            self.total_calls, self.total_alloc_bytes, self.gc_cycles, hot_json.join(",")
        )
    }
    /// Escapes the characters RFC 8259 requires escaping in JSON strings.
    fn escape_json(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '"' => out.push_str("\\\""),
                '\\' => out.push_str("\\\\"),
                '\n' => out.push_str("\\n"),
                '\r' => out.push_str("\\r"),
                '\t' => out.push_str("\\t"),
                c if (c as u32) < 0x20 => out.push_str(&format!("\\u{:04x}", c as u32)),
                c => out.push(c),
            }
        }
        out
    }
}
/// Record of a single tactic application.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TacticProfilingEvent {
    /// Name of the tactic that ran.
    pub tactic: String,
    /// Wall time the tactic took, in nanoseconds.
    pub duration_ns: u64,
    /// Whether the tactic succeeded.
    pub success: bool,
    /// Open goals before the tactic ran.
    pub goals_before: u32,
    /// Open goals after the tactic ran.
    pub goals_after: u32,
}
impl TacticProfilingEvent {
    /// Plain constructor; copies the tactic name.
    #[allow(dead_code)]
    pub fn new(
        tactic: &str,
        duration_ns: u64,
        success: bool,
        goals_before: u32,
        goals_after: u32,
    ) -> Self {
        Self {
            goals_after,
            goals_before,
            success,
            duration_ns,
            tactic: tactic.to_owned(),
        }
    }
    /// Net goals closed by the tactic; negative when it opened more goals.
    #[allow(dead_code)]
    pub fn goals_eliminated(&self) -> i32 {
        let before = self.goals_before as i32;
        let after = self.goals_after as i32;
        before - after
    }
}
/// Time-bucketed event-density map over a fixed span.
#[allow(dead_code)]
pub struct HeatMap {
    /// Number of time buckets.
    pub buckets: usize,
    /// Total time span covered, in nanoseconds.
    pub span_ns: u64,
    /// Event count per bucket.
    pub counts: Vec<u64>,
}
impl HeatMap {
    /// Map of `buckets` zeroed cells covering `span_ns`.
    #[allow(dead_code)]
    pub fn new(buckets: usize, span_ns: u64) -> Self {
        Self {
            counts: vec![0; buckets],
            buckets,
            span_ns,
        }
    }
    /// Buckets one event by its offset from `start_ns`. Offsets past the
    /// span clamp into the final bucket; zero buckets or zero span is a no-op.
    #[allow(dead_code)]
    pub fn record(&mut self, timestamp_ns: u64, start_ns: u64) {
        if self.buckets == 0 || self.span_ns == 0 {
            return;
        }
        let elapsed = timestamp_ns.saturating_sub(start_ns) as u128;
        let raw = (elapsed * self.buckets as u128 / self.span_ns as u128) as usize;
        self.counts[raw.min(self.buckets - 1)] += 1;
    }
    /// Index of the busiest bucket (0 when the map is empty).
    #[allow(dead_code)]
    pub fn peak_bucket(&self) -> usize {
        self.counts
            .iter()
            .enumerate()
            .max_by_key(|(_, &v)| v)
            .map(|(i, _)| i)
            .unwrap_or(0)
    }
    /// 8-row ASCII chart: '#' where a bucket's count exceeds the row threshold,
    /// closed off with a `+---` axis line.
    #[allow(dead_code)]
    pub fn render_ascii(&self) -> String {
        const HEIGHT: usize = 8;
        let max_count = self.counts.iter().copied().max().unwrap_or(1).max(1);
        let mut rows: Vec<String> = Vec::with_capacity(HEIGHT + 1);
        for row in (0..HEIGHT).rev() {
            let threshold = (row as f64 / HEIGHT as f64 * max_count as f64) as u64;
            let mut line = String::with_capacity(self.buckets + 1);
            line.push('|');
            for &c in &self.counts {
                line.push(if c > threshold { '#' } else { ' ' });
            }
            rows.push(line);
        }
        rows.push(format!("+{}", "-".repeat(self.buckets)));
        rows.join("\n")
    }
}
/// Free-form note attached to a point on a timeline.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TimelineAnnotation {
    /// Where on the timeline the note belongs, in nanoseconds.
    pub timestamp_ns: u64,
    /// The note itself.
    pub text: String,
    /// Grouping key, matching timeline-entry categories.
    pub category: String,
}
impl TimelineAnnotation {
    /// Builds an annotation, copying the text and category.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, text: &str, category: &str) -> Self {
        Self {
            category: category.to_owned(),
            text: text.to_owned(),
            timestamp_ns,
        }
    }
}
/// Simulated hardware performance counters (no real PMU access).
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct PerfCounter {
    /// Instructions modelled as retired.
    pub instructions_retired: u64,
    /// Simulated cache misses.
    pub cache_misses: u64,
    /// Simulated branch mispredictions.
    pub branch_mispredictions: u64,
    /// Simulated context switches (never incremented here).
    pub context_switches: u64,
    /// Simulated cycle count.
    pub cycles: u64,
}
impl PerfCounter {
    /// All counters start at zero.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Models `n` retired instructions at one cycle each.
    #[allow(dead_code)]
    pub fn simulate_instructions(&mut self, n: u64) {
        self.instructions_retired += n;
        self.cycles += n;
    }
    /// Models one cache miss with a 200-cycle penalty.
    #[allow(dead_code)]
    pub fn simulate_cache_miss(&mut self) {
        self.cache_misses += 1;
        self.cycles += 200;
    }
    /// Models one branch mispredict with a 15-cycle penalty.
    #[allow(dead_code)]
    pub fn simulate_branch_misprediction(&mut self) {
        self.branch_mispredictions += 1;
        self.cycles += 15;
    }
    /// Instructions per cycle; 0.0 before any cycles elapse.
    #[allow(dead_code)]
    pub fn ipc(&self) -> f64 {
        match self.cycles {
            0 => 0.0,
            cycles => self.instructions_retired as f64 / cycles as f64,
        }
    }
    /// Cache misses per 1000 retired instructions; 0.0 with no instructions.
    #[allow(dead_code)]
    pub fn cache_miss_rate_per_1k(&self) -> f64 {
        match self.instructions_retired {
            0 => 0.0,
            instr => (self.cache_misses as f64 / instr as f64) * 1000.0,
        }
    }
    /// One-line textual dump of every counter plus derived IPC.
    #[allow(dead_code)]
    pub fn summary(&self) -> String {
        format!(
            "PerfCounters: instr={}, cycles={}, IPC={:.2}, cache_misses={}, branch_mispredict={}",
            self.instructions_retired,
            self.cycles,
            self.ipc(),
            self.cache_misses,
            self.branch_mispredictions
        )
    }
}
/// Feature toggles and limits used when wiring up profilers.
///
/// Fix: `new()` delegates to `Self::default()`, but the struct did not
/// derive (or otherwise implement) `Default`, so it could not compile.
/// Deriving `Default` yields all-off defaults: false flags, zero limits.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ProfilerConfig {
    /// Record discrete events (calls, allocations, GC)?
    pub event_profiling: bool,
    /// Take periodic stack samples?
    pub sampling_profiling: bool,
    /// Interval between samples, in nanoseconds.
    pub sampling_interval_ns: u64,
    /// Upper bound on stored events.
    pub max_events: usize,
    /// Track GC cycles?
    pub track_gc: bool,
    /// Track allocations/deallocations?
    pub track_allocs: bool,
}
impl ProfilerConfig {
    /// Default configuration: everything disabled, limits zero.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Builder-style: switch both profiling modes on.
    #[allow(dead_code)]
    pub fn enable_all(mut self) -> Self {
        self.event_profiling = true;
        self.sampling_profiling = true;
        self
    }
    /// Builder-style: switch both profiling modes off.
    #[allow(dead_code)]
    pub fn disable_all(mut self) -> Self {
        self.event_profiling = false;
        self.sampling_profiling = false;
        self
    }
}
/// A single stack sample taken by the sampling profiler.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct ProfileSample {
    /// When the sample was taken, in nanoseconds.
    pub timestamp_ns: u64,
    /// Captured frames; the sampler stores the innermost call first.
    pub call_stack: Vec<String>,
    /// Identifier of the sampled thread.
    pub thread_id: u64,
}
impl ProfileSample {
    /// Wraps a captured stack with its timestamp and thread id.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, call_stack: Vec<String>, thread_id: u64) -> Self {
        Self {
            thread_id,
            timestamp_ns,
            call_stack,
        }
    }
    /// First frame of the stack, if any (innermost call by convention).
    #[allow(dead_code)]
    pub fn top_function(&self) -> Option<&str> {
        self.call_stack.first().map(String::as_str)
    }
    /// Number of frames captured.
    #[allow(dead_code)]
    pub fn depth(&self) -> usize {
        self.call_stack.len()
    }
}
/// Per-step tally of profiling events, keyed by event-variant name.
#[allow(dead_code)]
pub struct CountingStep {
    /// Human-readable label for this counting step.
    pub step_name: String,
    /// Event-variant name -> number of occurrences.
    pub counts: HashMap<String, u64>,
}
impl CountingStep {
    /// Creates an empty counter labelled `name`.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        Self {
            step_name: name.to_string(),
            counts: HashMap::new(),
        }
    }
    /// Maps an event to the static name of its enum variant, suitable
    /// as a key into `counts`.
    pub(super) fn variant_name(event: &ProfilingEvent) -> &'static str {
        match event {
            ProfilingEvent::FunctionCall { .. } => "FunctionCall",
            ProfilingEvent::FunctionReturn { .. } => "FunctionReturn",
            ProfilingEvent::Allocation { .. } => "Allocation",
            ProfilingEvent::Deallocation { .. } => "Deallocation",
            ProfilingEvent::GcCycle { .. } => "GcCycle",
            ProfilingEvent::TacticStep { .. } => "TacticStep",
        }
    }
}
600#[allow(dead_code)]
602pub struct RealTimeMonitor {
603 pub name: String,
605 pub snapshots: Vec<(u64, String, f64)>,
607 pub capacity: usize,
609}
610impl RealTimeMonitor {
611 #[allow(dead_code)]
613 pub fn new(name: &str, capacity: usize) -> Self {
614 Self {
615 name: name.to_string(),
616 snapshots: Vec::new(),
617 capacity,
618 }
619 }
620 #[allow(dead_code)]
622 pub fn record(&mut self, metric: &str, value: f64) {
623 let ts = profiler_now_ns();
624 if self.snapshots.len() >= self.capacity {
625 self.snapshots.remove(0);
626 }
627 self.snapshots.push((ts, metric.to_string(), value));
628 }
629 #[allow(dead_code)]
631 pub fn latest(&self, metric: &str) -> Option<f64> {
632 self.snapshots
633 .iter()
634 .rev()
635 .find(|(_, m, _)| m == metric)
636 .map(|(_, _, v)| *v)
637 }
638 #[allow(dead_code)]
640 pub fn avg(&self, metric: &str) -> f64 {
641 let values: Vec<f64> = self
642 .snapshots
643 .iter()
644 .filter(|(_, m, _)| m == metric)
645 .map(|(_, _, v)| *v)
646 .collect();
647 if values.is_empty() {
648 0.0
649 } else {
650 values.iter().sum::<f64>() / values.len() as f64
651 }
652 }
653 #[allow(dead_code)]
655 pub fn count(&self, metric: &str) -> usize {
656 self.snapshots
657 .iter()
658 .filter(|(_, m, _)| m == metric)
659 .count()
660 }
661}
/// Fixed-width histogram over `[min_val, max_val)` split into `n` equal buckets.
#[allow(dead_code)]
pub struct Histogram {
    buckets: Vec<HistogramBucket>,
    /// Total number of recorded values, including out-of-range ones.
    pub total: u64,
    /// Sum of all recorded values, including out-of-range ones.
    pub sum: f64,
}
impl Histogram {
    /// Builds `n` equal-width buckets spanning `[min_val, max_val)`.
    /// With `n == 0` there are no buckets and every value is only tallied
    /// into `total`/`sum`.
    #[allow(dead_code)]
    pub fn new(n: usize, min_val: f64, max_val: f64) -> Self {
        let width = (max_val - min_val) / n as f64;
        let buckets = (0..n)
            .map(|i| HistogramBucket {
                lower: min_val + i as f64 * width,
                upper: min_val + (i + 1) as f64 * width,
                count: 0,
            })
            .collect();
        Self {
            buckets,
            total: 0,
            sum: 0.0,
        }
    }
    /// Records a value. Values at or above the top of the range fall into
    /// the last bucket; values below the bottom are counted in `total`/`sum`
    /// but land in no bucket (so `mean()` includes them while the bars don't).
    #[allow(dead_code)]
    pub fn record(&mut self, value: f64) {
        self.total += 1;
        self.sum += value;
        if let Some(bucket) = self
            .buckets
            .iter_mut()
            .find(|b| value >= b.lower && value < b.upper)
        {
            bucket.count += 1;
        } else if let Some(last) = self.buckets.last_mut() {
            // Overflow path: clamp values >= max into the final bucket.
            if value >= last.lower {
                last.count += 1;
            }
        }
    }
    /// Arithmetic mean of every recorded value; 0.0 when empty.
    #[allow(dead_code)]
    pub fn mean(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            self.sum / self.total as f64
        }
    }
    /// The fullest bucket (last one wins ties); `None` with zero buckets.
    #[allow(dead_code)]
    pub fn mode_bucket(&self) -> Option<&HistogramBucket> {
        self.buckets.iter().max_by_key(|b| b.count)
    }
    /// One `[lower, upper): count | ###` bar per bucket, scaled to 40 columns.
    #[allow(dead_code)]
    pub fn render_ascii(&self) -> String {
        let max_count = self
            .buckets
            .iter()
            .map(|b| b.count)
            .max()
            .unwrap_or(1)
            .max(1);
        let bar_width = 40usize;
        let mut out = String::new();
        for bucket in &self.buckets {
            let bar_len = (bucket.count as usize * bar_width) / max_count as usize;
            let bar = "#".repeat(bar_len);
            out.push_str(&format!(
                "[{:.2}, {:.2}): {:6} | {}\n",
                bucket.lower, bucket.upper, bucket.count, bar
            ));
        }
        out
    }
}
743#[allow(dead_code)]
745pub struct AllocationTracker {
746 stats: HashMap<String, AllocationStat>,
747}
748impl AllocationTracker {
749 #[allow(dead_code)]
751 pub fn new() -> Self {
752 Self {
753 stats: HashMap::new(),
754 }
755 }
756 #[allow(dead_code)]
758 pub fn record_alloc(&mut self, tag: &str, bytes: u64) {
759 let s = self.stats.entry(tag.to_string()).or_default();
760 s.total_bytes += bytes;
761 s.alloc_count += 1;
762 s.live_bytes += bytes;
763 }
764 #[allow(dead_code)]
766 pub fn record_dealloc(&mut self, tag: &str, bytes: u64) {
767 let s = self.stats.entry(tag.to_string()).or_default();
768 s.dealloc_count += 1;
769 s.live_bytes = s.live_bytes.saturating_sub(bytes);
770 }
771 #[allow(dead_code)]
773 pub fn stats_for(&self, tag: &str) -> Option<&AllocationStat> {
774 self.stats.get(tag)
775 }
776 #[allow(dead_code)]
778 pub fn total_live_bytes(&self) -> u64 {
779 self.stats.values().map(|s| s.live_bytes).sum()
780 }
781 #[allow(dead_code)]
783 pub fn total_allocated_bytes(&self) -> u64 {
784 self.stats.values().map(|s| s.total_bytes).sum()
785 }
786 #[allow(dead_code)]
788 pub fn top_allocators(&self, n: usize) -> Vec<(&str, u64)> {
789 let mut v: Vec<(&str, u64)> = self
790 .stats
791 .iter()
792 .map(|(k, v)| (k.as_str(), v.total_bytes))
793 .collect();
794 v.sort_by(|a, b| b.1.cmp(&a.1));
795 v.truncate(n);
796 v
797 }
798}
/// One frame in a flame graph: a name, a hit count, and child frames.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct FlameNode {
    /// Function name for this frame.
    pub name: String,
    /// Samples attributed to this node.
    pub count: u64,
    /// Callee frames, in first-seen order.
    pub children: Vec<FlameNode>,
}
impl FlameNode {
    /// Leaf node with a zero count.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        Self {
            children: Vec::new(),
            count: 0,
            name: name.to_owned(),
        }
    }
    /// Returns the child named `name`, creating it at the end of the
    /// child list if it does not exist yet.
    #[allow(dead_code)]
    pub fn get_or_create_child(&mut self, name: &str) -> &mut FlameNode {
        match self.children.iter().position(|c| c.name == name) {
            Some(i) => &mut self.children[i],
            None => {
                self.children.push(FlameNode::new(name));
                let last = self.children.len() - 1;
                &mut self.children[last]
            }
        }
    }
    /// This node's count plus the recursive totals of all descendants.
    #[allow(dead_code)]
    pub fn total(&self) -> u64 {
        self.children
            .iter()
            .fold(self.count, |acc, child| acc + child.total())
    }
    /// Indented `name (count)` rendering of this subtree.
    #[allow(dead_code)]
    pub fn format(&self, depth: usize) -> String {
        let indent = "  ".repeat(depth);
        let mut out = format!("{}{} ({})\n", indent, self.name, self.count);
        for child in &self.children {
            out.push_str(&child.format(depth + 1));
        }
        out
    }
}
/// Node in a call tree carrying inclusive/exclusive timing totals.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CallTreeNode {
    /// Function name for this node.
    pub name: String,
    /// Time including callees, in nanoseconds.
    pub inclusive_ns: u64,
    /// Self time excluding callees, in nanoseconds.
    pub exclusive_ns: u64,
    /// Times this function was invoked.
    pub call_count: u64,
    /// Direct callees.
    pub children: Vec<CallTreeNode>,
}
impl CallTreeNode {
    /// Fresh node with zeroed timings and no children.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_owned(),
            children: Vec::new(),
            call_count: 0,
            inclusive_ns: 0,
            exclusive_ns: 0,
        }
    }
    /// Mean self time per call; 0.0 when never called.
    #[allow(dead_code)]
    pub fn avg_exclusive_ns(&self) -> f64 {
        match self.call_count {
            0 => 0.0,
            n => self.exclusive_ns as f64 / n as f64,
        }
    }
    /// Mean inclusive time per call; 0.0 when never called.
    #[allow(dead_code)]
    pub fn avg_inclusive_ns(&self) -> f64 {
        match self.call_count {
            0 => 0.0,
            n => self.inclusive_ns as f64 / n as f64,
        }
    }
    /// Direct child with the given name, if any.
    #[allow(dead_code)]
    pub fn find_child(&self, name: &str) -> Option<&CallTreeNode> {
        self.children.iter().find(|c| c.name == name)
    }
}
/// Event-based profiler. While enabled it appends timestamped
/// [`ProfilingEvent`]s and maintains an explicit call stack so function
/// durations can be computed when the function exits.
pub struct Profiler {
    /// Master switch; when false every recording method is a no-op.
    pub enabled: bool,
    /// All recorded events in arrival order, paired with their timestamps.
    pub events: Vec<(u64, ProfilingEvent)>,
    /// Currently open frames: (function name, entry timestamp).
    pub call_stack: Vec<(String, u64)>,
}
impl Profiler {
    /// Starts disabled; call [`Profiler::enable`] to begin recording.
    pub fn new() -> Self {
        Self {
            enabled: false,
            events: Vec::new(),
            call_stack: Vec::new(),
        }
    }
    /// Turns recording on.
    pub fn enable(&mut self) {
        self.enabled = true;
    }
    /// Turns recording off (already-recorded events are kept).
    pub fn disable(&mut self) {
        self.enabled = false;
    }
    /// Records an arbitrary event with the current timestamp.
    pub fn record(&mut self, event: ProfilingEvent) {
        if self.enabled {
            let ts = Self::now_ns();
            self.events.push((ts, event));
        }
    }
    /// Pushes a call frame and logs a `FunctionCall` event.
    pub fn enter_function(&mut self, name: &str) {
        if self.enabled {
            let ts = Self::now_ns();
            // Depth is taken before pushing, so the outermost call is depth 0.
            let depth = self.call_stack.len() as u32;
            self.call_stack.push((name.to_string(), ts));
            self.events.push((
                ts,
                ProfilingEvent::FunctionCall {
                    name: name.to_string(),
                    depth,
                },
            ));
        }
    }
    /// Pops the most recent matching frame and logs a `FunctionReturn`
    /// with its elapsed time. An exit with no matching entry records 0ns.
    pub fn exit_function(&mut self, name: &str) {
        if self.enabled {
            let ts = Self::now_ns();
            let duration_ns =
                if let Some(idx) = self.call_stack.iter().rposition(|(n, _)| n == name) {
                    let entry_ts = self.call_stack[idx].1;
                    self.call_stack.remove(idx);
                    // saturating_sub guards against the wall clock moving backwards.
                    ts.saturating_sub(entry_ts)
                } else {
                    0
                };
            self.events.push((
                ts,
                ProfilingEvent::FunctionReturn {
                    name: name.to_string(),
                    duration_ns,
                },
            ));
        }
    }
    /// Logs an `Allocation` of `size` bytes attributed to `tag`.
    pub fn alloc(&mut self, size: usize, tag: &str) {
        if self.enabled {
            let ts = Self::now_ns();
            self.events.push((
                ts,
                ProfilingEvent::Allocation {
                    size,
                    tag: tag.to_string(),
                },
            ));
        }
    }
    /// Logs a `Deallocation` of `size` bytes attributed to `tag`.
    pub fn dealloc(&mut self, size: usize, tag: &str) {
        if self.enabled {
            let ts = Self::now_ns();
            self.events.push((
                ts,
                ProfilingEvent::Deallocation {
                    size,
                    tag: tag.to_string(),
                },
            ));
        }
    }
    /// Logs a `GcCycle` event with collected/live object counts.
    pub fn gc_cycle(&mut self, collected: usize, live: usize) {
        if self.enabled {
            let ts = Self::now_ns();
            self.events
                .push((ts, ProfilingEvent::GcCycle { collected, live }));
        }
    }
    /// Aggregates recorded events into call/alloc/GC totals plus a
    /// top-10 hot-function list ranked by summed return durations.
    pub fn generate_report(&self) -> ProfileReport {
        let mut total_calls: usize = 0;
        let mut total_alloc_bytes: usize = 0;
        let mut gc_cycles: usize = 0;
        let mut fn_durations: HashMap<String, u64> = HashMap::new();
        for (_, event) in &self.events {
            match event {
                ProfilingEvent::FunctionCall { .. } => {
                    total_calls += 1;
                }
                ProfilingEvent::FunctionReturn { name, duration_ns } => {
                    *fn_durations.entry(name.clone()).or_insert(0) += duration_ns;
                }
                ProfilingEvent::Allocation { size, .. } => {
                    total_alloc_bytes += size;
                }
                ProfilingEvent::GcCycle { .. } => {
                    gc_cycles += 1;
                }
                _ => {}
            }
        }
        let mut hot_functions: Vec<(String, u64)> = fn_durations.into_iter().collect();
        hot_functions.sort_by(|a, b| b.1.cmp(&a.1));
        hot_functions.truncate(10);
        ProfileReport {
            total_calls,
            total_alloc_bytes,
            hot_functions,
            gc_cycles,
        }
    }
    /// Replays allocation/deallocation events in order to derive peak,
    /// current, and total allocation figures.
    pub fn memory_profile(&self) -> MemoryProfile {
        let mut current_bytes: usize = 0;
        let mut peak_bytes: usize = 0;
        let mut total_allocs: usize = 0;
        for (_, event) in &self.events {
            match event {
                ProfilingEvent::Allocation { size, .. } => {
                    current_bytes += size;
                    total_allocs += 1;
                    if current_bytes > peak_bytes {
                        peak_bytes = current_bytes;
                    }
                }
                ProfilingEvent::Deallocation { size, .. } => {
                    current_bytes = current_bytes.saturating_sub(*size);
                }
                _ => {}
            }
        }
        MemoryProfile {
            peak_bytes,
            current_bytes,
            total_allocs,
        }
    }
    /// Wall-clock nanoseconds since the Unix epoch; 0 if the clock is
    /// before the epoch. NOTE(review): SystemTime is not monotonic, so
    /// durations can be skewed by clock adjustments — Instant would be
    /// the monotonic choice if absolute timestamps were not needed.
    fn now_ns() -> u64 {
        use std::time::{SystemTime, UNIX_EPOCH};
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_nanos() as u64)
            .unwrap_or(0)
    }
}
1069#[allow(dead_code)]
1071pub struct ProfilingMiddleware {
1072 pub profiler: Profiler,
1074 pub active: bool,
1076}
1077impl ProfilingMiddleware {
1078 #[allow(dead_code)]
1080 pub fn new() -> Self {
1081 let mut profiler = Profiler::new();
1082 profiler.enable();
1083 Self {
1084 profiler,
1085 active: true,
1086 }
1087 }
1088 #[allow(dead_code)]
1090 pub fn instrument<F, T>(&mut self, name: &str, f: F) -> T
1091 where
1092 F: FnOnce() -> T,
1093 {
1094 if self.active {
1095 self.profiler.enter_function(name);
1096 }
1097 let result = f();
1098 if self.active {
1099 self.profiler.exit_function(name);
1100 }
1101 result
1102 }
1103 #[allow(dead_code)]
1105 pub fn report(&self) -> ProfileReport {
1106 self.profiler.generate_report()
1107 }
1108}
/// Flat chronological view of profiler events rendered as [`TimelineEntry`]s.
#[allow(dead_code)]
pub struct TimelineView {
    /// Entries in the same order the profiler recorded its events.
    pub entries: Vec<TimelineEntry>,
}
impl TimelineView {
    /// Empty view.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            entries: Vec::new(),
        }
    }
    /// Converts every recorded event into a human-readable entry. Only
    /// `FunctionReturn` carries a non-zero duration; everything else is
    /// rendered as an instantaneous (0ns) entry.
    #[allow(dead_code)]
    pub fn build(profiler: &Profiler) -> Self {
        let mut view = TimelineView::new();
        for (ts, event) in &profiler.events {
            let entry = match event {
                ProfilingEvent::FunctionCall { name, depth } => {
                    TimelineEntry::new(*ts, &format!("CALL {}[d={}]", name, depth), 0, "function")
                }
                ProfilingEvent::FunctionReturn { name, duration_ns } => TimelineEntry::new(
                    *ts,
                    &format!("RET {} ({}ns)", name, duration_ns),
                    *duration_ns,
                    "function",
                ),
                ProfilingEvent::Allocation { size, tag } => {
                    TimelineEntry::new(*ts, &format!("ALLOC {} ({} bytes)", tag, size), 0, "memory")
                }
                ProfilingEvent::Deallocation { size, tag } => {
                    TimelineEntry::new(*ts, &format!("FREE {} ({} bytes)", tag, size), 0, "memory")
                }
                ProfilingEvent::GcCycle { collected, live } => TimelineEntry::new(
                    *ts,
                    &format!("GC: collected={} live={}", collected, live),
                    0,
                    "gc",
                ),
                ProfilingEvent::TacticStep {
                    tactic_name,
                    goal_count,
                } => TimelineEntry::new(
                    *ts,
                    &format!("TACTIC {} goals={}", tactic_name, goal_count),
                    0,
                    "tactic",
                ),
            };
            view.entries.push(entry);
        }
        view
    }
    /// Entries whose category matches exactly ("function", "memory", "gc", "tactic").
    #[allow(dead_code)]
    pub fn by_category(&self, category: &str) -> Vec<&TimelineEntry> {
        self.entries
            .iter()
            .filter(|e| e.category == category)
            .collect()
    }
    /// Width of the timeline: latest entry end minus earliest start;
    /// 0 for an empty view.
    #[allow(dead_code)]
    pub fn span_ns(&self) -> u64 {
        let min = self
            .entries
            .iter()
            .map(|e| e.timestamp_ns)
            .min()
            .unwrap_or(0);
        let max = self
            .entries
            .iter()
            .map(|e| e.timestamp_ns + e.duration_ns)
            .max()
            .unwrap_or(0);
        max.saturating_sub(min)
    }
}
1190#[allow(dead_code)]
1192pub struct AnnotatedTimeline {
1193 pub entries: Vec<TimelineEntry>,
1195 pub annotations: Vec<TimelineAnnotation>,
1197}
1198impl AnnotatedTimeline {
1199 #[allow(dead_code)]
1201 pub fn new() -> Self {
1202 Self {
1203 entries: Vec::new(),
1204 annotations: Vec::new(),
1205 }
1206 }
1207 #[allow(dead_code)]
1209 pub fn annotate(&mut self, annotation: TimelineAnnotation) {
1210 self.annotations.push(annotation);
1211 }
1212 #[allow(dead_code)]
1214 pub fn annotations_in_range(&self, start_ns: u64, end_ns: u64) -> Vec<&TimelineAnnotation> {
1215 self.annotations
1216 .iter()
1217 .filter(|a| a.timestamp_ns >= start_ns && a.timestamp_ns <= end_ns)
1218 .collect()
1219 }
1220}
1221#[allow(dead_code)]
1223#[derive(Clone, Debug)]
1224pub struct EventFilter {
1225 pub function_names: Vec<String>,
1227 pub min_timestamp_ns: u64,
1229 pub max_timestamp_ns: u64,
1231 pub min_alloc_bytes: usize,
1233}
1234impl EventFilter {
1235 #[allow(dead_code)]
1237 pub fn new() -> Self {
1238 Self {
1239 function_names: Vec::new(),
1240 min_timestamp_ns: 0,
1241 max_timestamp_ns: u64::MAX,
1242 min_alloc_bytes: 0,
1243 }
1244 }
1245 #[allow(dead_code)]
1247 pub fn matches(&self, ts: u64, event: &ProfilingEvent) -> bool {
1248 if ts < self.min_timestamp_ns || ts > self.max_timestamp_ns {
1249 return false;
1250 }
1251 if !self.function_names.is_empty() {
1252 let name = match event {
1253 ProfilingEvent::FunctionCall { name, .. } => Some(name.as_str()),
1254 ProfilingEvent::FunctionReturn { name, .. } => Some(name.as_str()),
1255 _ => None,
1256 };
1257 if let Some(n) = name {
1258 if !self.function_names.iter().any(|f| f == n) {
1259 return false;
1260 }
1261 }
1262 }
1263 if let ProfilingEvent::Allocation { size, .. } = event {
1264 if *size < self.min_alloc_bytes {
1265 return false;
1266 }
1267 }
1268 true
1269 }
1270 #[allow(dead_code)]
1272 pub fn apply<'a>(&self, events: &'a [(u64, ProfilingEvent)]) -> Vec<&'a (u64, ProfilingEvent)> {
1273 events
1274 .iter()
1275 .filter(|(ts, ev)| self.matches(*ts, ev))
1276 .collect()
1277 }
1278}
/// Snapshot of allocation statistics derived from profiler events.
#[derive(Clone, Debug)]
pub struct MemoryProfile {
    /// Highest simultaneous allocation level seen, in bytes.
    pub peak_bytes: usize,
    /// Bytes still allocated at the end of the event stream.
    pub current_bytes: usize,
    /// Total number of allocation events.
    pub total_allocs: usize,
}
impl MemoryProfile {
    /// Renders the profile as a small fixed-format text block.
    pub fn to_text(&self) -> String {
        let mut text = String::from("=== Memory Profile ===\n");
        text.push_str(&format!("Peak usage : {} bytes\n", self.peak_bytes));
        text.push_str(&format!("Current usage : {} bytes\n", self.current_bytes));
        text.push_str(&format!("Total allocs : {}\n", self.total_allocs));
        text
    }
}
1298#[allow(dead_code)]
1300pub struct FlameGraph {
1301 pub root: FlameNode,
1303 pub total_samples: u64,
1305}
1306impl FlameGraph {
1307 #[allow(dead_code)]
1309 pub fn new() -> Self {
1310 Self {
1311 root: FlameNode::new("(all)"),
1312 total_samples: 0,
1313 }
1314 }
1315 #[allow(dead_code)]
1317 pub fn add_stack(&mut self, stack: &[String]) {
1318 self.total_samples += 1;
1319 self.root.count += 1;
1320 let mut node = &mut self.root;
1321 for frame in stack.iter().rev() {
1322 node = node.get_or_create_child(frame);
1323 node.count += 1;
1324 }
1325 }
1326 #[allow(dead_code)]
1328 pub fn from_profiler(profiler: &SamplingProfiler) -> Self {
1329 let mut fg = FlameGraph::new();
1330 for sample in &profiler.samples {
1331 fg.add_stack(&sample.call_stack);
1332 }
1333 fg
1334 }
1335 #[allow(dead_code)]
1337 pub fn render_text(&self) -> String {
1338 self.root.format(0)
1339 }
1340}
/// One histogram cell covering the half-open value range `[lower, upper)`.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct HistogramBucket {
    /// Inclusive lower bound of the bucket.
    pub lower: f64,
    /// Exclusive upper bound of the bucket.
    pub upper: f64,
    /// Number of values that fell into this bucket.
    pub count: u64,
}
/// Facade tying together the event profiler, sampling profiler,
/// allocation tracker, and tactic log under one start/stop switch.
#[allow(dead_code)]
pub struct ProfilingSession {
    /// Event-based profiler.
    pub profiler: Profiler,
    /// Stack-sampling profiler (1ms default interval — see `new`).
    pub sampler: SamplingProfiler,
    /// Per-tag allocation statistics.
    pub alloc_tracker: AllocationTracker,
    /// Log of tactic executions.
    pub tactic_log: TacticProfileLog,
    /// Session label used in reports.
    pub name: String,
    /// Whether `start` has been called without a matching `stop`.
    pub running: bool,
}
impl ProfilingSession {
    /// Builds a stopped session; both profilers start disabled.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        Self {
            profiler: Profiler::new(),
            // 1_000_000 ns = 1 ms sampling interval.
            sampler: SamplingProfiler::new(1_000_000),
            alloc_tracker: AllocationTracker::new(),
            tactic_log: TacticProfileLog::new(),
            name: name.to_string(),
            running: false,
        }
    }
    /// Enables both profilers and marks the session running.
    #[allow(dead_code)]
    pub fn start(&mut self) {
        self.profiler.enable();
        self.sampler.enable();
        self.running = true;
    }
    /// Disables both profilers; collected data is retained.
    #[allow(dead_code)]
    pub fn stop(&mut self) {
        self.profiler.disable();
        self.sampler.disable();
        self.running = false;
    }
    /// Mirrors a function entry into both profilers.
    #[allow(dead_code)]
    pub fn enter_function(&mut self, name: &str) {
        self.profiler.enter_function(name);
        self.sampler.enter(name);
    }
    /// Mirrors a function exit into both profilers.
    #[allow(dead_code)]
    pub fn exit_function(&mut self, name: &str) {
        self.profiler.exit_function(name);
        self.sampler.leave(name);
    }
    /// Records an allocation in both the event profiler and the tracker.
    #[allow(dead_code)]
    pub fn alloc(&mut self, bytes: usize, tag: &str) {
        self.profiler.alloc(bytes, tag);
        self.alloc_tracker.record_alloc(tag, bytes as u64);
    }
    /// Records a deallocation in both the event profiler and the tracker.
    #[allow(dead_code)]
    pub fn dealloc(&mut self, bytes: usize, tag: &str) {
        self.profiler.dealloc(bytes, tag);
        self.alloc_tracker.record_dealloc(tag, bytes as u64);
    }
    /// One combined text report across all subsystems.
    /// NOTE(review): the "Tactic steps" line prints `success_count()`,
    /// i.e. only *successful* tactic events — confirm the label is intended.
    #[allow(dead_code)]
    pub fn combined_report(&self) -> String {
        let profile_report = self.profiler.generate_report();
        let mem_profile = self.profiler.memory_profile();
        format!(
            "=== ProfilingSession: {} ===\n{}\n{}\nTactic steps: {}\nSamples: {}\nLive bytes: {}",
            self.name,
            profile_report.to_text(),
            mem_profile.to_text(),
            self.tactic_log.success_count(),
            self.sampler.sample_count(),
            self.alloc_tracker.total_live_bytes(),
        )
    }
}
/// Cooperative sampling profiler: callers report enter/leave so it can
/// snapshot `current_stack` on demand via `take_sample`.
#[allow(dead_code)]
pub struct SamplingProfiler {
    /// All captured samples, in capture order.
    pub samples: Vec<ProfileSample>,
    /// When false, enter/leave/take_sample are no-ops.
    pub enabled: bool,
    /// Intended sampling interval in nanoseconds (stored only; no timer here).
    pub interval_ns: u64,
    /// Live stack, innermost frame at index 0 (maintained by enter/leave).
    pub current_stack: Vec<String>,
}
impl SamplingProfiler {
    /// Disabled profiler with the given intended sampling interval.
    #[allow(dead_code)]
    pub fn new(interval_ns: u64) -> Self {
        Self {
            samples: Vec::new(),
            enabled: false,
            interval_ns,
            current_stack: Vec::new(),
        }
    }
    /// Turns sampling on.
    #[allow(dead_code)]
    pub fn enable(&mut self) {
        self.enabled = true;
    }
    /// Turns sampling off (samples are kept).
    #[allow(dead_code)]
    pub fn disable(&mut self) {
        self.enabled = false;
    }
    /// Pushes a frame at the front of the live stack.
    /// Front insertion is O(n) per call; fine for shallow stacks.
    #[allow(dead_code)]
    pub fn enter(&mut self, function: &str) {
        if self.enabled {
            self.current_stack.insert(0, function.to_string());
        }
    }
    /// Removes the innermost matching frame; unmatched leaves are ignored.
    #[allow(dead_code)]
    pub fn leave(&mut self, function: &str) {
        if self.enabled {
            if let Some(pos) = self.current_stack.iter().position(|s| s == function) {
                self.current_stack.remove(pos);
            }
        }
    }
    /// Snapshots the live stack into a timestamped sample.
    #[allow(dead_code)]
    pub fn take_sample(&mut self, thread_id: u64) {
        if self.enabled {
            let ts = profiler_now_ns();
            self.samples.push(ProfileSample::new(
                ts,
                self.current_stack.clone(),
                thread_id,
            ));
        }
    }
    /// Self-time profile: samples per *top* (innermost) frame, descending.
    #[allow(dead_code)]
    pub fn flat_profile(&self) -> Vec<(String, usize)> {
        let mut counts: HashMap<String, usize> = HashMap::new();
        for sample in &self.samples {
            if let Some(top) = sample.top_function() {
                *counts.entry(top.to_string()).or_insert(0) += 1;
            }
        }
        let mut result: Vec<(String, usize)> = counts.into_iter().collect();
        result.sort_by(|a, b| b.1.cmp(&a.1));
        result
    }
    /// Inclusive profile: samples per function appearing *anywhere* in the
    /// stack, descending.
    #[allow(dead_code)]
    pub fn cumulative_profile(&self) -> Vec<(String, usize)> {
        let mut counts: HashMap<String, usize> = HashMap::new();
        for sample in &self.samples {
            for func in &sample.call_stack {
                *counts.entry(func.clone()).or_insert(0) += 1;
            }
        }
        let mut result: Vec<(String, usize)> = counts.into_iter().collect();
        result.sort_by(|a, b| b.1.cmp(&a.1));
        result
    }
    /// Number of samples captured so far.
    #[allow(dead_code)]
    pub fn sample_count(&self) -> usize {
        self.samples.len()
    }
    /// Mean stack depth over all samples; 0.0 with none.
    #[allow(dead_code)]
    pub fn avg_stack_depth(&self) -> f64 {
        if self.samples.is_empty() {
            return 0.0;
        }
        let total: usize = self.samples.iter().map(|s| s.depth()).sum();
        total as f64 / self.samples.len() as f64
    }
}
/// Per-tag allocation counters maintained by [`AllocationTracker`].
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct AllocationStat {
    /// Lifetime bytes allocated under this tag.
    pub total_bytes: u64,
    /// Number of allocation events.
    pub alloc_count: u64,
    /// Number of deallocation events.
    pub dealloc_count: u64,
    /// Bytes currently allocated (allocs minus deallocs, floored at zero).
    pub live_bytes: u64,
}
/// A (possibly labelled) capture of a call stack at a point in time.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct StackSnapshot {
    /// When the snapshot was taken, in nanoseconds.
    pub timestamp_ns: u64,
    /// Captured stack frames.
    pub frames: Vec<String>,
    /// Optional label attached via [`StackSnapshot::with_label`].
    pub label: Option<String>,
}
impl StackSnapshot {
    /// Unlabelled snapshot of `frames` taken at `timestamp_ns`.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, frames: Vec<String>) -> Self {
        Self {
            label: None,
            timestamp_ns,
            frames,
        }
    }
    /// Builder-style setter for the optional label.
    #[allow(dead_code)]
    pub fn with_label(mut self, label: &str) -> Self {
        self.label = Some(label.to_owned());
        self
    }
    /// Number of captured frames.
    #[allow(dead_code)]
    pub fn depth(&self) -> usize {
        self.frames.len()
    }
    /// Multi-line rendering: a header line, then one indexed line per frame.
    #[allow(dead_code)]
    pub fn format(&self) -> String {
        let shown_label = self.label.as_deref().unwrap_or("(no label)");
        let mut text = format!("Stack at {} ns [{}]:\n", self.timestamp_ns, shown_label);
        for (i, frame) in self.frames.iter().enumerate() {
            text.push_str(&format!("  {:3}: {}\n", i, frame));
        }
        text
    }
}