Skip to main content

oxilean_runtime/profiler/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use std::collections::HashMap;
6
7use super::functions::profiler_now_ns;
8
/// One event on the profiling timeline.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TimelineEntry {
    /// Nanosecond timestamp at which the event occurred.
    pub timestamp_ns: u64,
    /// Short human-readable description of the event.
    pub label: String,
    /// How long the event lasted, in nanoseconds (0 = instantaneous).
    pub duration_ns: u64,
    /// Free-form category tag used for grouping.
    pub category: String,
}
impl TimelineEntry {
    /// Build a timeline entry from its raw parts, taking owned copies
    /// of the borrowed string arguments.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, label: &str, duration_ns: u64, category: &str) -> Self {
        TimelineEntry {
            timestamp_ns,
            duration_ns,
            label: String::from(label),
            category: String::from(category),
        }
    }
}
34/// A comprehensive profiling report combining all profiler outputs.
35#[allow(dead_code)]
36pub struct ComprehensiveProfilingReport {
37    /// Event-based profile report.
38    pub event_report: ProfileReport,
39    /// Memory profile.
40    pub memory_profile: MemoryProfile,
41    /// Flat sampling profile.
42    pub flat_profile: Vec<(String, usize)>,
43    /// Cumulative sampling profile.
44    pub cumulative_profile: Vec<(String, usize)>,
45    /// GC summary.
46    pub gc_summary: String,
47}
48impl ComprehensiveProfilingReport {
49    /// Build a comprehensive report from a session.
50    #[allow(dead_code)]
51    pub fn build(session: &ProfilingSession) -> Self {
52        let event_report = session.profiler.generate_report();
53        let memory_profile = session.profiler.memory_profile();
54        let flat_profile = session.sampler.flat_profile();
55        let cumulative_profile = session.sampler.cumulative_profile();
56        let gc_summary = format!(
57            "GC cycles: {}, total alloc: {} bytes",
58            event_report.gc_cycles, memory_profile.total_allocs,
59        );
60        Self {
61            event_report,
62            memory_profile,
63            flat_profile,
64            cumulative_profile,
65            gc_summary,
66        }
67    }
68    /// Format as a text report.
69    #[allow(dead_code)]
70    pub fn to_text(&self) -> String {
71        let mut out = self.event_report.to_text();
72        out.push('\n');
73        out.push_str(&self.memory_profile.to_text());
74        out.push('\n');
75        out.push_str(&self.gc_summary);
76        out.push('\n');
77        if !self.flat_profile.is_empty() {
78            out.push_str("\nFlat profile:\n");
79            for (name, count) in &self.flat_profile {
80                out.push_str(&format!("  {:40} {}\n", name, count));
81            }
82        }
83        out
84    }
85}
86/// Tracks GC activity across a run.
87#[allow(dead_code)]
88pub struct GcProfiler {
89    records: Vec<GcCollectionRecord>,
90}
91impl GcProfiler {
92    /// Create a new GC profiler.
93    #[allow(dead_code)]
94    pub fn new() -> Self {
95        Self {
96            records: Vec::new(),
97        }
98    }
99    /// Record a GC collection.
100    #[allow(dead_code)]
101    pub fn record(&mut self, collected: usize, live: usize, pause_ns: u64) {
102        let ts = profiler_now_ns();
103        self.records
104            .push(GcCollectionRecord::new(ts, collected, live, pause_ns));
105    }
106    /// Number of GC collections recorded.
107    #[allow(dead_code)]
108    pub fn collection_count(&self) -> usize {
109        self.records.len()
110    }
111    /// Total objects collected across all GC cycles.
112    #[allow(dead_code)]
113    pub fn total_collected(&self) -> usize {
114        self.records.iter().map(|r| r.collected).sum()
115    }
116    /// Average pause time in nanoseconds.
117    #[allow(dead_code)]
118    pub fn avg_pause_ns(&self) -> f64 {
119        if self.records.is_empty() {
120            0.0
121        } else {
122            let total: u64 = self.records.iter().map(|r| r.pause_ns).sum();
123            total as f64 / self.records.len() as f64
124        }
125    }
126    /// Maximum pause time seen.
127    #[allow(dead_code)]
128    pub fn max_pause_ns(&self) -> u64 {
129        self.records.iter().map(|r| r.pause_ns).max().unwrap_or(0)
130    }
131    /// Human-readable summary.
132    #[allow(dead_code)]
133    pub fn summary(&self) -> String {
134        format!(
135            "GC: {} collections, {} total collected, avg_pause={:.0}ns, max_pause={}ns",
136            self.collection_count(),
137            self.total_collected(),
138            self.avg_pause_ns(),
139            self.max_pause_ns(),
140        )
141    }
142}
143/// A log of tactic profiling events.
144#[allow(dead_code)]
145pub struct TacticProfileLog {
146    events: Vec<TacticProfilingEvent>,
147}
148impl TacticProfileLog {
149    /// Create a new log.
150    #[allow(dead_code)]
151    pub fn new() -> Self {
152        Self { events: Vec::new() }
153    }
154    /// Record an event.
155    #[allow(dead_code)]
156    pub fn record(&mut self, event: TacticProfilingEvent) {
157        self.events.push(event);
158    }
159    /// Total duration of all tactic steps.
160    #[allow(dead_code)]
161    pub fn total_duration_ns(&self) -> u64 {
162        self.events.iter().map(|e| e.duration_ns).sum()
163    }
164    /// Number of successful tactic applications.
165    #[allow(dead_code)]
166    pub fn success_count(&self) -> usize {
167        self.events.iter().filter(|e| e.success).count()
168    }
169    /// Top N slowest tactics by duration.
170    #[allow(dead_code)]
171    pub fn top_slow(&self, n: usize) -> Vec<&TacticProfilingEvent> {
172        let mut sorted: Vec<&TacticProfilingEvent> = self.events.iter().collect();
173        sorted.sort_by(|a, b| b.duration_ns.cmp(&a.duration_ns));
174        sorted.truncate(n);
175        sorted
176    }
177    /// Average duration per tactic step.
178    #[allow(dead_code)]
179    pub fn avg_duration_ns(&self) -> f64 {
180        if self.events.is_empty() {
181            0.0
182        } else {
183            self.total_duration_ns() as f64 / self.events.len() as f64
184        }
185    }
186}
/// Statistics for a single GC collection.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct GcCollectionRecord {
    /// When this collection happened.
    pub timestamp_ns: u64,
    /// Objects collected.
    pub collected: usize,
    /// Objects remaining live.
    pub live: usize,
    /// Duration of the pause in nanoseconds.
    pub pause_ns: u64,
}
impl GcCollectionRecord {
    /// Bundle the raw measurements of one collection into a record.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, collected: usize, live: usize, pause_ns: u64) -> Self {
        GcCollectionRecord {
            pause_ns,
            live,
            collected,
            timestamp_ns,
        }
    }
    /// Collection efficiency: fraction of objects collected out of all
    /// objects seen (collected + live); 0.0 when no objects were seen.
    #[allow(dead_code)]
    pub fn efficiency(&self) -> f64 {
        match self.collected + self.live {
            0 => 0.0,
            seen => self.collected as f64 / seen as f64,
        }
    }
}
/// A single profiling event captured by the runtime.
///
/// The profiler stores each event alongside a nanosecond timestamp;
/// each variant carries only the data specific to that kind of event.
#[derive(Clone, Debug)]
pub enum ProfilingEvent {
    /// A function was called.
    FunctionCall {
        /// Name of the function.
        name: String,
        /// Current call depth at the time of the call.
        depth: u32,
    },
    /// A function returned.
    FunctionReturn {
        /// Name of the function.
        name: String,
        /// Elapsed time in nanoseconds.
        duration_ns: u64,
    },
    /// Memory was allocated.
    Allocation {
        /// Number of bytes allocated.
        size: usize,
        /// Descriptive tag for the allocation.
        tag: String,
    },
    /// Memory was freed.
    Deallocation {
        /// Number of bytes freed.
        size: usize,
        /// Descriptive tag identifying the allocation being freed.
        tag: String,
    },
    /// A garbage collection cycle completed.
    GcCycle {
        /// Number of objects collected.
        collected: usize,
        /// Number of live objects remaining.
        live: usize,
    },
    /// A tactic step was executed.
    TacticStep {
        /// Name of the tactic.
        tactic_name: String,
        /// Number of open goals after this step.
        goal_count: u32,
    },
}
/// Summary report generated from profiling data.
#[derive(Clone, Debug)]
pub struct ProfileReport {
    /// Total number of function calls recorded.
    pub total_calls: usize,
    /// Total bytes allocated.
    pub total_alloc_bytes: usize,
    /// Top 10 hottest functions sorted by total duration (name, ns).
    pub hot_functions: Vec<(String, u64)>,
    /// Number of GC cycles recorded.
    pub gc_cycles: usize,
}
impl ProfileReport {
    /// Format the report as human-readable text.
    pub fn to_text(&self) -> String {
        let mut out = String::new();
        out.push_str("=== Profile Report ===\n");
        out.push_str(&format!("Total function calls : {}\n", self.total_calls));
        out.push_str(&format!(
            "Total allocations   : {} bytes\n",
            self.total_alloc_bytes
        ));
        out.push_str(&format!("GC cycles           : {}\n", self.gc_cycles));
        if !self.hot_functions.is_empty() {
            out.push_str("\nHot functions (top 10):\n");
            for (i, (name, ns)) in self.hot_functions.iter().enumerate() {
                out.push_str(&format!("  {:2}. {:40} {:>12} ns\n", i + 1, name, ns));
            }
        }
        out
    }
    /// Escape `s` for embedding inside a JSON string literal
    /// (RFC 8259: `"` and `\` must be escaped; control characters use
    /// the short forms or `\uXXXX`).
    fn json_escape(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '"' => out.push_str("\\\""),
                '\\' => out.push_str("\\\\"),
                '\n' => out.push_str("\\n"),
                '\r' => out.push_str("\\r"),
                '\t' => out.push_str("\\t"),
                c if (c as u32) < 0x20 => out.push_str(&format!("\\u{:04x}", c as u32)),
                c => out.push(c),
            }
        }
        out
    }
    /// Format the report as a JSON string.
    ///
    /// Fix: function names are now escaped, so names containing `"`,
    /// `\`, or control characters no longer yield invalid JSON (the
    /// previous version interpolated them verbatim).
    pub fn to_json(&self) -> String {
        let hot_json: Vec<String> = self
            .hot_functions
            .iter()
            .map(|(name, ns)| {
                format!(
                    "{{\"name\":\"{}\",\"duration_ns\":{}}}",
                    Self::json_escape(name),
                    ns
                )
            })
            .collect();
        format!(
            "{{\"total_calls\":{},\"total_alloc_bytes\":{},\"gc_cycles\":{},\"hot_functions\":[{}]}}",
            self.total_calls, self.total_alloc_bytes, self.gc_cycles, hot_json.join(",")
        )
    }
}
/// A profiling event specific to tactic execution.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TacticProfilingEvent {
    /// Name of the tactic.
    pub tactic: String,
    /// Duration in nanoseconds.
    pub duration_ns: u64,
    /// Whether the tactic succeeded.
    pub success: bool,
    /// Number of goals before the tactic.
    pub goals_before: u32,
    /// Number of goals after the tactic.
    pub goals_after: u32,
}
impl TacticProfilingEvent {
    /// Construct an event from raw measurements, copying the tactic name.
    #[allow(dead_code)]
    pub fn new(
        tactic: &str,
        duration_ns: u64,
        success: bool,
        goals_before: u32,
        goals_after: u32,
    ) -> Self {
        TacticProfilingEvent {
            tactic: String::from(tactic),
            duration_ns,
            success,
            goals_before,
            goals_after,
        }
    }
    /// Net goals closed by this step; negative when the tactic
    /// introduced more goals than it discharged.
    #[allow(dead_code)]
    pub fn goals_eliminated(&self) -> i32 {
        let before = self.goals_before as i32;
        let after = self.goals_after as i32;
        before - after
    }
}
/// A heat map showing call density over time.
#[allow(dead_code)]
pub struct HeatMap {
    /// Number of time buckets.
    pub buckets: usize,
    /// Total time span covered in nanoseconds.
    pub span_ns: u64,
    /// Counts per bucket.
    pub counts: Vec<u64>,
}
impl HeatMap {
    /// Create a heat map with `buckets` time slots covering `span_ns`.
    #[allow(dead_code)]
    pub fn new(buckets: usize, span_ns: u64) -> Self {
        HeatMap {
            counts: vec![0; buckets],
            span_ns,
            buckets,
        }
    }
    /// Record an event at `timestamp_ns`, measured from `start_ns`.
    /// Events past the span clamp into the last bucket; a zero-bucket
    /// or zero-span map ignores the event.
    #[allow(dead_code)]
    pub fn record(&mut self, timestamp_ns: u64, start_ns: u64) {
        if self.buckets == 0 || self.span_ns == 0 {
            return;
        }
        // 128-bit intermediate keeps `offset * buckets` from overflowing.
        let offset = timestamp_ns.saturating_sub(start_ns) as u128;
        let raw = (offset * self.buckets as u128 / self.span_ns as u128) as usize;
        let slot = raw.min(self.buckets - 1);
        self.counts[slot] += 1;
    }
    /// Index of the bucket with the most events (last one on ties,
    /// 0 for an empty map).
    #[allow(dead_code)]
    pub fn peak_bucket(&self) -> usize {
        self.counts
            .iter()
            .enumerate()
            .max_by_key(|&(_, &count)| count)
            .map_or(0, |(idx, _)| idx)
    }
    /// Format as ASCII art: 8 rows of `#` columns above a baseline.
    #[allow(dead_code)]
    pub fn render_ascii(&self) -> String {
        let max_count = self.counts.iter().copied().max().unwrap_or(1).max(1);
        let height = 8usize;
        let mut rows = Vec::with_capacity(height + 1);
        for row in (0..height).rev() {
            let threshold = (row as f64 / height as f64 * max_count as f64) as u64;
            let mut line = String::with_capacity(self.counts.len() + 1);
            line.push('|');
            for &count in &self.counts {
                line.push(if count > threshold { '#' } else { ' ' });
            }
            rows.push(line);
        }
        rows.push(format!("+{}", "-".repeat(self.buckets)));
        rows.join("\n")
    }
}
/// An annotation on a timeline.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct TimelineAnnotation {
    /// Timestamp of the annotation.
    pub timestamp_ns: u64,
    /// Text of the annotation.
    pub text: String,
    /// Category (e.g., "checkpoint", "error").
    pub category: String,
}
impl TimelineAnnotation {
    /// Build an annotation, taking owned copies of the borrowed strings.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, text: &str, category: &str) -> Self {
        TimelineAnnotation {
            category: category.to_owned(),
            text: text.to_owned(),
            timestamp_ns,
        }
    }
}
/// Simulated hardware performance counter.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct PerfCounter {
    /// Instructions retired (simulated).
    pub instructions_retired: u64,
    /// Cache misses (simulated).
    pub cache_misses: u64,
    /// Branch mispredictions (simulated).
    pub branch_mispredictions: u64,
    /// Context switches (simulated).
    pub context_switches: u64,
    /// Cycles elapsed (simulated).
    pub cycles: u64,
}
impl PerfCounter {
    /// Create zeroed performance counters.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Simulate retiring `n` instructions (model: one cycle each).
    #[allow(dead_code)]
    pub fn simulate_instructions(&mut self, n: u64) {
        self.instructions_retired += n;
        self.cycles += n;
    }
    /// Simulate one cache miss (model: flat 200-cycle stall).
    #[allow(dead_code)]
    pub fn simulate_cache_miss(&mut self) {
        self.cache_misses += 1;
        self.cycles += 200;
    }
    /// Simulate one branch misprediction (model: flat 15-cycle stall).
    #[allow(dead_code)]
    pub fn simulate_branch_misprediction(&mut self) {
        self.branch_mispredictions += 1;
        self.cycles += 15;
    }
    /// Instructions per cycle (0.0 before any cycles elapse).
    #[allow(dead_code)]
    pub fn ipc(&self) -> f64 {
        match self.cycles {
            0 => 0.0,
            c => self.instructions_retired as f64 / c as f64,
        }
    }
    /// Cache misses per 1000 retired instructions (0.0 when idle).
    #[allow(dead_code)]
    pub fn cache_miss_rate_per_1k(&self) -> f64 {
        match self.instructions_retired {
            0 => 0.0,
            instr => (self.cache_misses as f64 / instr as f64) * 1000.0,
        }
    }
    /// One-line human-readable summary of all counters.
    #[allow(dead_code)]
    pub fn summary(&self) -> String {
        format!(
            "PerfCounters: instr={}, cycles={}, IPC={:.2}, cache_misses={}, branch_mispredict={}",
            self.instructions_retired,
            self.cycles,
            self.ipc(),
            self.cache_misses,
            self.branch_mispredictions
        )
    }
}
503/// Configuration for the runtime profiler.
504#[allow(dead_code)]
505#[derive(Clone, Debug)]
506pub struct ProfilerConfig {
507    /// Whether event-based profiling is enabled.
508    pub event_profiling: bool,
509    /// Whether sampling-based profiling is enabled.
510    pub sampling_profiling: bool,
511    /// Sampling interval in nanoseconds.
512    pub sampling_interval_ns: u64,
513    /// Maximum number of events to store before overwriting old ones.
514    pub max_events: usize,
515    /// Whether to include GC events.
516    pub track_gc: bool,
517    /// Whether to include allocation events.
518    pub track_allocs: bool,
519}
520impl ProfilerConfig {
521    /// Create default configuration.
522    #[allow(dead_code)]
523    pub fn new() -> Self {
524        Self::default()
525    }
526    /// Enable all profiling.
527    #[allow(dead_code)]
528    pub fn enable_all(mut self) -> Self {
529        self.event_profiling = true;
530        self.sampling_profiling = true;
531        self
532    }
533    /// Disable all profiling.
534    #[allow(dead_code)]
535    pub fn disable_all(mut self) -> Self {
536        self.event_profiling = false;
537        self.sampling_profiling = false;
538        self
539    }
540}
/// A single sample captured by the sampling profiler.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct ProfileSample {
    /// Timestamp in nanoseconds.
    pub timestamp_ns: u64,
    /// Call stack at the time of sampling (most recent first).
    pub call_stack: Vec<String>,
    /// Thread identifier (0 for main thread).
    pub thread_id: u64,
}
impl ProfileSample {
    /// Wrap a captured stack and its metadata into a sample.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, call_stack: Vec<String>, thread_id: u64) -> Self {
        ProfileSample {
            thread_id,
            call_stack,
            timestamp_ns,
        }
    }
    /// Innermost (most recently entered) function, if the stack is
    /// non-empty.
    #[allow(dead_code)]
    pub fn top_function(&self) -> Option<&str> {
        match self.call_stack.first() {
            Some(frame) => Some(frame.as_str()),
            None => None,
        }
    }
    /// Number of frames captured in this sample.
    #[allow(dead_code)]
    pub fn depth(&self) -> usize {
        self.call_stack.len()
    }
}
/// A counter step: counts events by variant name.
#[allow(dead_code)]
pub struct CountingStep {
    /// Human-readable name identifying this counting step.
    pub step_name: String,
    /// Running tally of observed events, keyed by the
    /// `ProfilingEvent` variant name (see [`CountingStep::variant_name`]).
    pub counts: HashMap<String, u64>,
}
impl CountingStep {
    /// Create a new counting step with the given name and an empty tally.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        Self {
            step_name: name.to_string(),
            counts: HashMap::new(),
        }
    }
    /// Map an event to the static name of its variant, suitable as a
    /// key for `counts`. The match is exhaustive on purpose: adding a
    /// new `ProfilingEvent` variant forces a compile error here.
    pub(super) fn variant_name(event: &ProfilingEvent) -> &'static str {
        match event {
            ProfilingEvent::FunctionCall { .. } => "FunctionCall",
            ProfilingEvent::FunctionReturn { .. } => "FunctionReturn",
            ProfilingEvent::Allocation { .. } => "Allocation",
            ProfilingEvent::Deallocation { .. } => "Deallocation",
            ProfilingEvent::GcCycle { .. } => "GcCycle",
            ProfilingEvent::TacticStep { .. } => "TacticStep",
        }
    }
}
600/// A simple real-time monitor that collects snapshots of key metrics.
601#[allow(dead_code)]
602pub struct RealTimeMonitor {
603    /// Name of the monitor.
604    pub name: String,
605    /// Collected metric snapshots: (timestamp_ns, metric_name, value).
606    pub snapshots: Vec<(u64, String, f64)>,
607    /// Maximum snapshots to keep.
608    pub capacity: usize,
609}
610impl RealTimeMonitor {
611    /// Create a new monitor.
612    #[allow(dead_code)]
613    pub fn new(name: &str, capacity: usize) -> Self {
614        Self {
615            name: name.to_string(),
616            snapshots: Vec::new(),
617            capacity,
618        }
619    }
620    /// Record a metric value.
621    #[allow(dead_code)]
622    pub fn record(&mut self, metric: &str, value: f64) {
623        let ts = profiler_now_ns();
624        if self.snapshots.len() >= self.capacity {
625            self.snapshots.remove(0);
626        }
627        self.snapshots.push((ts, metric.to_string(), value));
628    }
629    /// Get the most recent value for a metric.
630    #[allow(dead_code)]
631    pub fn latest(&self, metric: &str) -> Option<f64> {
632        self.snapshots
633            .iter()
634            .rev()
635            .find(|(_, m, _)| m == metric)
636            .map(|(_, _, v)| *v)
637    }
638    /// Average value for a metric.
639    #[allow(dead_code)]
640    pub fn avg(&self, metric: &str) -> f64 {
641        let values: Vec<f64> = self
642            .snapshots
643            .iter()
644            .filter(|(_, m, _)| m == metric)
645            .map(|(_, _, v)| *v)
646            .collect();
647        if values.is_empty() {
648            0.0
649        } else {
650            values.iter().sum::<f64>() / values.len() as f64
651        }
652    }
653    /// Count of snapshots for a metric.
654    #[allow(dead_code)]
655    pub fn count(&self, metric: &str) -> usize {
656        self.snapshots
657            .iter()
658            .filter(|(_, m, _)| m == metric)
659            .count()
660    }
661}
662/// A histogram for profiling measurements.
663#[allow(dead_code)]
664pub struct Histogram {
665    buckets: Vec<HistogramBucket>,
666    /// Total observations.
667    pub total: u64,
668    /// Sum of all observations (for mean).
669    pub sum: f64,
670}
671impl Histogram {
672    /// Create a histogram with `n` equal-width buckets in `[min_val, max_val]`.
673    #[allow(dead_code)]
674    pub fn new(n: usize, min_val: f64, max_val: f64) -> Self {
675        let width = (max_val - min_val) / n as f64;
676        let buckets = (0..n)
677            .map(|i| HistogramBucket {
678                lower: min_val + i as f64 * width,
679                upper: min_val + (i + 1) as f64 * width,
680                count: 0,
681            })
682            .collect();
683        Self {
684            buckets,
685            total: 0,
686            sum: 0.0,
687        }
688    }
689    /// Record a value.
690    #[allow(dead_code)]
691    pub fn record(&mut self, value: f64) {
692        self.total += 1;
693        self.sum += value;
694        if let Some(bucket) = self
695            .buckets
696            .iter_mut()
697            .find(|b| value >= b.lower && value < b.upper)
698        {
699            bucket.count += 1;
700        } else if let Some(last) = self.buckets.last_mut() {
701            if value >= last.lower {
702                last.count += 1;
703            }
704        }
705    }
706    /// Mean of all recorded values.
707    #[allow(dead_code)]
708    pub fn mean(&self) -> f64 {
709        if self.total == 0 {
710            0.0
711        } else {
712            self.sum / self.total as f64
713        }
714    }
715    /// Bucket with the most observations (mode bucket).
716    #[allow(dead_code)]
717    pub fn mode_bucket(&self) -> Option<&HistogramBucket> {
718        self.buckets.iter().max_by_key(|b| b.count)
719    }
720    /// Render a simple ASCII histogram.
721    #[allow(dead_code)]
722    pub fn render_ascii(&self) -> String {
723        let max_count = self
724            .buckets
725            .iter()
726            .map(|b| b.count)
727            .max()
728            .unwrap_or(1)
729            .max(1);
730        let bar_width = 40usize;
731        let mut out = String::new();
732        for bucket in &self.buckets {
733            let bar_len = (bucket.count as usize * bar_width) / max_count as usize;
734            let bar = "#".repeat(bar_len);
735            out.push_str(&format!(
736                "[{:.2}, {:.2}): {:6} | {}\n",
737                bucket.lower, bucket.upper, bucket.count, bar
738            ));
739        }
740        out
741    }
742}
743/// Tracks allocations grouped by tag.
744#[allow(dead_code)]
745pub struct AllocationTracker {
746    stats: HashMap<String, AllocationStat>,
747}
748impl AllocationTracker {
749    /// Create a new tracker.
750    #[allow(dead_code)]
751    pub fn new() -> Self {
752        Self {
753            stats: HashMap::new(),
754        }
755    }
756    /// Record an allocation.
757    #[allow(dead_code)]
758    pub fn record_alloc(&mut self, tag: &str, bytes: u64) {
759        let s = self.stats.entry(tag.to_string()).or_default();
760        s.total_bytes += bytes;
761        s.alloc_count += 1;
762        s.live_bytes += bytes;
763    }
764    /// Record a deallocation.
765    #[allow(dead_code)]
766    pub fn record_dealloc(&mut self, tag: &str, bytes: u64) {
767        let s = self.stats.entry(tag.to_string()).or_default();
768        s.dealloc_count += 1;
769        s.live_bytes = s.live_bytes.saturating_sub(bytes);
770    }
771    /// Get stats for a tag.
772    #[allow(dead_code)]
773    pub fn stats_for(&self, tag: &str) -> Option<&AllocationStat> {
774        self.stats.get(tag)
775    }
776    /// Total live bytes across all tags.
777    #[allow(dead_code)]
778    pub fn total_live_bytes(&self) -> u64 {
779        self.stats.values().map(|s| s.live_bytes).sum()
780    }
781    /// Total allocated bytes across all tags.
782    #[allow(dead_code)]
783    pub fn total_allocated_bytes(&self) -> u64 {
784        self.stats.values().map(|s| s.total_bytes).sum()
785    }
786    /// Top N tags by total allocated bytes.
787    #[allow(dead_code)]
788    pub fn top_allocators(&self, n: usize) -> Vec<(&str, u64)> {
789        let mut v: Vec<(&str, u64)> = self
790            .stats
791            .iter()
792            .map(|(k, v)| (k.as_str(), v.total_bytes))
793            .collect();
794        v.sort_by(|a, b| b.1.cmp(&a.1));
795        v.truncate(n);
796        v
797    }
798}
/// A node in a flame graph tree.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct FlameNode {
    /// Function name.
    pub name: String,
    /// Number of samples at or below this node.
    pub count: u64,
    /// Children nodes.
    pub children: Vec<FlameNode>,
}
impl FlameNode {
    /// Create a zero-count node for the named function.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        FlameNode {
            children: Vec::new(),
            count: 0,
            name: String::from(name),
        }
    }
    /// Return the child named `name`, creating it on first use.
    #[allow(dead_code)]
    pub fn get_or_create_child(&mut self, name: &str) -> &mut FlameNode {
        match self.children.iter().position(|child| child.name == name) {
            Some(idx) => &mut self.children[idx],
            None => {
                self.children.push(FlameNode::new(name));
                // Indexing with len - 1 is safe: we just pushed.
                let newest = self.children.len() - 1;
                &mut self.children[newest]
            }
        }
    }
    /// Total samples in the subtree rooted here (this node plus all
    /// descendants).
    #[allow(dead_code)]
    pub fn total(&self) -> u64 {
        self.children
            .iter()
            .fold(self.count, |acc, child| acc + child.total())
    }
    /// Render the subtree as an indented tree, two spaces per level.
    #[allow(dead_code)]
    pub fn format(&self, depth: usize) -> String {
        let pad = "  ".repeat(depth);
        let mut rendered = format!("{}{} ({})\n", pad, self.name, self.count);
        for child in &self.children {
            rendered.push_str(&child.format(depth + 1));
        }
        rendered
    }
}
/// A node in a call tree (for inclusive/exclusive timing analysis).
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CallTreeNode {
    /// Function name.
    pub name: String,
    /// Total (inclusive) time in ns.
    pub inclusive_ns: u64,
    /// Self (exclusive) time in ns.
    pub exclusive_ns: u64,
    /// Number of calls.
    pub call_count: u64,
    /// Child nodes.
    pub children: Vec<CallTreeNode>,
}
impl CallTreeNode {
    /// Create an empty node for the named function.
    #[allow(dead_code)]
    pub fn new(name: &str) -> Self {
        CallTreeNode {
            name: String::from(name),
            inclusive_ns: 0,
            exclusive_ns: 0,
            call_count: 0,
            children: Vec::new(),
        }
    }
    /// Mean self time per call (0.0 when never called).
    #[allow(dead_code)]
    pub fn avg_exclusive_ns(&self) -> f64 {
        match self.call_count {
            0 => 0.0,
            calls => self.exclusive_ns as f64 / calls as f64,
        }
    }
    /// Mean inclusive time per call (0.0 when never called).
    #[allow(dead_code)]
    pub fn avg_inclusive_ns(&self) -> f64 {
        match self.call_count {
            0 => 0.0,
            calls => self.inclusive_ns as f64 / calls as f64,
        }
    }
    /// Direct child with the given function name, if present.
    #[allow(dead_code)]
    pub fn find_child(&self, name: &str) -> Option<&CallTreeNode> {
        self.children.iter().find(|child| child.name == name)
    }
}
/// Lightweight profiler that records events for later analysis.
pub struct Profiler {
    /// Whether profiling is currently active.
    ///
    /// When `false`, the recording methods on this type are no-ops.
    pub enabled: bool,
    /// Recorded events as `(timestamp_ns, event)` pairs, in the order
    /// they were recorded.
    pub events: Vec<(u64, ProfilingEvent)>,
    /// Stack of `(function_name, entry_timestamp_ns)` entries for
    /// functions that have been entered but not yet exited.
    pub call_stack: Vec<(String, u64)>,
}
908impl Profiler {
909    /// Create a new, disabled profiler.
910    pub fn new() -> Self {
911        Self {
912            enabled: false,
913            events: Vec::new(),
914            call_stack: Vec::new(),
915        }
916    }
917    /// Enable profiling.
918    pub fn enable(&mut self) {
919        self.enabled = true;
920    }
921    /// Disable profiling.
922    pub fn disable(&mut self) {
923        self.enabled = false;
924    }
925    /// Record an arbitrary profiling event (no-op when disabled).
926    pub fn record(&mut self, event: ProfilingEvent) {
927        if self.enabled {
928            let ts = Self::now_ns();
929            self.events.push((ts, event));
930        }
931    }
932    /// Record a function entry and push it onto the call stack.
933    pub fn enter_function(&mut self, name: &str) {
934        if self.enabled {
935            let ts = Self::now_ns();
936            let depth = self.call_stack.len() as u32;
937            self.call_stack.push((name.to_string(), ts));
938            self.events.push((
939                ts,
940                ProfilingEvent::FunctionCall {
941                    name: name.to_string(),
942                    depth,
943                },
944            ));
945        }
946    }
947    /// Record a function exit and pop it from the call stack.
948    pub fn exit_function(&mut self, name: &str) {
949        if self.enabled {
950            let ts = Self::now_ns();
951            let duration_ns =
952                if let Some(idx) = self.call_stack.iter().rposition(|(n, _)| n == name) {
953                    let entry_ts = self.call_stack[idx].1;
954                    self.call_stack.remove(idx);
955                    ts.saturating_sub(entry_ts)
956                } else {
957                    0
958                };
959            self.events.push((
960                ts,
961                ProfilingEvent::FunctionReturn {
962                    name: name.to_string(),
963                    duration_ns,
964                },
965            ));
966        }
967    }
968    /// Record a memory allocation.
969    pub fn alloc(&mut self, size: usize, tag: &str) {
970        if self.enabled {
971            let ts = Self::now_ns();
972            self.events.push((
973                ts,
974                ProfilingEvent::Allocation {
975                    size,
976                    tag: tag.to_string(),
977                },
978            ));
979        }
980    }
981    /// Record a memory deallocation.
982    pub fn dealloc(&mut self, size: usize, tag: &str) {
983        if self.enabled {
984            let ts = Self::now_ns();
985            self.events.push((
986                ts,
987                ProfilingEvent::Deallocation {
988                    size,
989                    tag: tag.to_string(),
990                },
991            ));
992        }
993    }
994    /// Record a GC cycle.
995    pub fn gc_cycle(&mut self, collected: usize, live: usize) {
996        if self.enabled {
997            let ts = Self::now_ns();
998            self.events
999                .push((ts, ProfilingEvent::GcCycle { collected, live }));
1000        }
1001    }
1002    /// Generate a report from the recorded events.
1003    pub fn generate_report(&self) -> ProfileReport {
1004        let mut total_calls: usize = 0;
1005        let mut total_alloc_bytes: usize = 0;
1006        let mut gc_cycles: usize = 0;
1007        let mut fn_durations: HashMap<String, u64> = HashMap::new();
1008        for (_, event) in &self.events {
1009            match event {
1010                ProfilingEvent::FunctionCall { .. } => {
1011                    total_calls += 1;
1012                }
1013                ProfilingEvent::FunctionReturn { name, duration_ns } => {
1014                    *fn_durations.entry(name.clone()).or_insert(0) += duration_ns;
1015                }
1016                ProfilingEvent::Allocation { size, .. } => {
1017                    total_alloc_bytes += size;
1018                }
1019                ProfilingEvent::GcCycle { .. } => {
1020                    gc_cycles += 1;
1021                }
1022                _ => {}
1023            }
1024        }
1025        let mut hot_functions: Vec<(String, u64)> = fn_durations.into_iter().collect();
1026        hot_functions.sort_by(|a, b| b.1.cmp(&a.1));
1027        hot_functions.truncate(10);
1028        ProfileReport {
1029            total_calls,
1030            total_alloc_bytes,
1031            hot_functions,
1032            gc_cycles,
1033        }
1034    }
1035    /// Generate a memory profile from the recorded events.
1036    pub fn memory_profile(&self) -> MemoryProfile {
1037        let mut current_bytes: usize = 0;
1038        let mut peak_bytes: usize = 0;
1039        let mut total_allocs: usize = 0;
1040        for (_, event) in &self.events {
1041            match event {
1042                ProfilingEvent::Allocation { size, .. } => {
1043                    current_bytes += size;
1044                    total_allocs += 1;
1045                    if current_bytes > peak_bytes {
1046                        peak_bytes = current_bytes;
1047                    }
1048                }
1049                ProfilingEvent::Deallocation { size, .. } => {
1050                    current_bytes = current_bytes.saturating_sub(*size);
1051                }
1052                _ => {}
1053            }
1054        }
1055        MemoryProfile {
1056            peak_bytes,
1057            current_bytes,
1058            total_allocs,
1059        }
1060    }
1061    fn now_ns() -> u64 {
1062        use std::time::{SystemTime, UNIX_EPOCH};
1063        SystemTime::now()
1064            .duration_since(UNIX_EPOCH)
1065            .map(|d| d.as_nanos() as u64)
1066            .unwrap_or(0)
1067    }
1068}
/// A simple middleware layer that automatically profiles function calls.
#[allow(dead_code)]
pub struct ProfilingMiddleware {
    /// Inner profiler instance (enabled on construction by `new`).
    pub profiler: Profiler,
    /// Whether this middleware is active; when `false`, `instrument` runs
    /// the closure without recording enter/exit events.
    pub active: bool,
}
1077impl ProfilingMiddleware {
1078    /// Create a new active middleware.
1079    #[allow(dead_code)]
1080    pub fn new() -> Self {
1081        let mut profiler = Profiler::new();
1082        profiler.enable();
1083        Self {
1084            profiler,
1085            active: true,
1086        }
1087    }
1088    /// Invoke a closure with profiling.
1089    #[allow(dead_code)]
1090    pub fn instrument<F, T>(&mut self, name: &str, f: F) -> T
1091    where
1092        F: FnOnce() -> T,
1093    {
1094        if self.active {
1095            self.profiler.enter_function(name);
1096        }
1097        let result = f();
1098        if self.active {
1099            self.profiler.exit_function(name);
1100        }
1101        result
1102    }
1103    /// Get a report.
1104    #[allow(dead_code)]
1105    pub fn report(&self) -> ProfileReport {
1106        self.profiler.generate_report()
1107    }
1108}
/// A timeline view built from profiling events.
#[allow(dead_code)]
pub struct TimelineView {
    /// Entries in the timeline, in the same order as the profiler's
    /// recorded events (see `build`).
    pub entries: Vec<TimelineEntry>,
}
1115impl TimelineView {
1116    /// Create an empty timeline.
1117    #[allow(dead_code)]
1118    pub fn new() -> Self {
1119        Self {
1120            entries: Vec::new(),
1121        }
1122    }
1123    /// Build a timeline view from a profiler's events.
1124    #[allow(dead_code)]
1125    pub fn build(profiler: &Profiler) -> Self {
1126        let mut view = TimelineView::new();
1127        for (ts, event) in &profiler.events {
1128            let entry = match event {
1129                ProfilingEvent::FunctionCall { name, depth } => {
1130                    TimelineEntry::new(*ts, &format!("CALL {}[d={}]", name, depth), 0, "function")
1131                }
1132                ProfilingEvent::FunctionReturn { name, duration_ns } => TimelineEntry::new(
1133                    *ts,
1134                    &format!("RET {} ({}ns)", name, duration_ns),
1135                    *duration_ns,
1136                    "function",
1137                ),
1138                ProfilingEvent::Allocation { size, tag } => {
1139                    TimelineEntry::new(*ts, &format!("ALLOC {} ({} bytes)", tag, size), 0, "memory")
1140                }
1141                ProfilingEvent::Deallocation { size, tag } => {
1142                    TimelineEntry::new(*ts, &format!("FREE {} ({} bytes)", tag, size), 0, "memory")
1143                }
1144                ProfilingEvent::GcCycle { collected, live } => TimelineEntry::new(
1145                    *ts,
1146                    &format!("GC: collected={} live={}", collected, live),
1147                    0,
1148                    "gc",
1149                ),
1150                ProfilingEvent::TacticStep {
1151                    tactic_name,
1152                    goal_count,
1153                } => TimelineEntry::new(
1154                    *ts,
1155                    &format!("TACTIC {} goals={}", tactic_name, goal_count),
1156                    0,
1157                    "tactic",
1158                ),
1159            };
1160            view.entries.push(entry);
1161        }
1162        view
1163    }
1164    /// Filter entries by category.
1165    #[allow(dead_code)]
1166    pub fn by_category(&self, category: &str) -> Vec<&TimelineEntry> {
1167        self.entries
1168            .iter()
1169            .filter(|e| e.category == category)
1170            .collect()
1171    }
1172    /// Total duration covered by the timeline.
1173    #[allow(dead_code)]
1174    pub fn span_ns(&self) -> u64 {
1175        let min = self
1176            .entries
1177            .iter()
1178            .map(|e| e.timestamp_ns)
1179            .min()
1180            .unwrap_or(0);
1181        let max = self
1182            .entries
1183            .iter()
1184            .map(|e| e.timestamp_ns + e.duration_ns)
1185            .max()
1186            .unwrap_or(0);
1187        max.saturating_sub(min)
1188    }
1189}
/// An annotated timeline.
#[allow(dead_code)]
pub struct AnnotatedTimeline {
    /// Profiler-derived timeline entries.
    pub entries: Vec<TimelineEntry>,
    /// Annotations overlaid on the timeline, in insertion order.
    pub annotations: Vec<TimelineAnnotation>,
}
1198impl AnnotatedTimeline {
1199    /// Create an empty annotated timeline.
1200    #[allow(dead_code)]
1201    pub fn new() -> Self {
1202        Self {
1203            entries: Vec::new(),
1204            annotations: Vec::new(),
1205        }
1206    }
1207    /// Add an annotation.
1208    #[allow(dead_code)]
1209    pub fn annotate(&mut self, annotation: TimelineAnnotation) {
1210        self.annotations.push(annotation);
1211    }
1212    /// Annotations in the given time range.
1213    #[allow(dead_code)]
1214    pub fn annotations_in_range(&self, start_ns: u64, end_ns: u64) -> Vec<&TimelineAnnotation> {
1215        self.annotations
1216            .iter()
1217            .filter(|a| a.timestamp_ns >= start_ns && a.timestamp_ns <= end_ns)
1218            .collect()
1219    }
1220}
/// A filter for profiling events.
///
/// `new` builds a filter that every event passes; tighten individual
/// fields to narrow the selection.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct EventFilter {
    /// Only include call/return events involving these function names
    /// (empty = all names pass; events without a name are unaffected).
    pub function_names: Vec<String>,
    /// Only include events with timestamp >= this value.
    pub min_timestamp_ns: u64,
    /// Only include events with timestamp <= this value.
    pub max_timestamp_ns: u64,
    /// Only include allocation events of at least this size.
    pub min_alloc_bytes: usize,
}
1234impl EventFilter {
1235    /// Create an unfiltered event filter (everything passes).
1236    #[allow(dead_code)]
1237    pub fn new() -> Self {
1238        Self {
1239            function_names: Vec::new(),
1240            min_timestamp_ns: 0,
1241            max_timestamp_ns: u64::MAX,
1242            min_alloc_bytes: 0,
1243        }
1244    }
1245    /// Return `true` if the event passes this filter.
1246    #[allow(dead_code)]
1247    pub fn matches(&self, ts: u64, event: &ProfilingEvent) -> bool {
1248        if ts < self.min_timestamp_ns || ts > self.max_timestamp_ns {
1249            return false;
1250        }
1251        if !self.function_names.is_empty() {
1252            let name = match event {
1253                ProfilingEvent::FunctionCall { name, .. } => Some(name.as_str()),
1254                ProfilingEvent::FunctionReturn { name, .. } => Some(name.as_str()),
1255                _ => None,
1256            };
1257            if let Some(n) = name {
1258                if !self.function_names.iter().any(|f| f == n) {
1259                    return false;
1260                }
1261            }
1262        }
1263        if let ProfilingEvent::Allocation { size, .. } = event {
1264            if *size < self.min_alloc_bytes {
1265                return false;
1266            }
1267        }
1268        true
1269    }
1270    /// Filter a list of `(ts, event)` pairs.
1271    #[allow(dead_code)]
1272    pub fn apply<'a>(&self, events: &'a [(u64, ProfilingEvent)]) -> Vec<&'a (u64, ProfilingEvent)> {
1273        events
1274            .iter()
1275            .filter(|(ts, ev)| self.matches(*ts, ev))
1276            .collect()
1277    }
1278}
/// Memory usage profile.
#[derive(Clone, Debug)]
pub struct MemoryProfile {
    /// Peak memory usage in bytes.
    pub peak_bytes: usize,
    /// Current (live) memory usage in bytes.
    pub current_bytes: usize,
    /// Total number of allocation events.
    pub total_allocs: usize,
}

impl std::fmt::Display for MemoryProfile {
    /// Multi-line, human-readable summary; identical text to
    /// [`MemoryProfile::to_text`].
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "=== Memory Profile ===\nPeak usage    : {} bytes\nCurrent usage : {} bytes\nTotal allocs  : {}\n",
            self.peak_bytes, self.current_bytes, self.total_allocs
        )
    }
}

impl MemoryProfile {
    /// Format as human-readable text.
    ///
    /// Kept for backward compatibility; delegates to the [`std::fmt::Display`]
    /// implementation, which is the idiomatic home for user-facing text.
    pub fn to_text(&self) -> String {
        self.to_string()
    }
}
/// A flame graph built from sampling profiler data.
#[allow(dead_code)]
pub struct FlameGraph {
    /// Root node (synthetic `"(all)"` node created by `new`).
    pub root: FlameNode,
    /// Total sample count (incremented once per `add_stack`).
    pub total_samples: u64,
}
impl FlameGraph {
    /// Create an empty flame graph with a synthetic `"(all)"` root.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            root: FlameNode::new("(all)"),
            total_samples: 0,
        }
    }
    /// Add one sampled call stack to the flame graph.
    ///
    /// The stack is walked in *reverse*, so the last element becomes the
    /// child of the root. With stacks produced by `SamplingProfiler` (which
    /// inserts the newest frame at index 0, i.e. most-recent-first), this
    /// builds the tree outward from the outermost caller.
    /// NOTE(review): an earlier comment here said the stack is
    /// "bottom-to-top order", which contradicts the `.rev()` below given
    /// `SamplingProfiler`'s ordering — confirm against any other callers.
    #[allow(dead_code)]
    pub fn add_stack(&mut self, stack: &[String]) {
        self.total_samples += 1;
        self.root.count += 1;
        // Walk down from the root, creating/visiting one node per frame and
        // crediting each node on the path with this sample.
        let mut node = &mut self.root;
        for frame in stack.iter().rev() {
            node = node.get_or_create_child(frame);
            node.count += 1;
        }
    }
    /// Build a flame graph from every sample in a sampling profiler.
    #[allow(dead_code)]
    pub fn from_profiler(profiler: &SamplingProfiler) -> Self {
        let mut fg = FlameGraph::new();
        for sample in &profiler.samples {
            fg.add_stack(&sample.call_stack);
        }
        fg
    }
    /// Render the flame graph as indented text, starting at the root.
    #[allow(dead_code)]
    pub fn render_text(&self) -> String {
        self.root.format(0)
    }
}
/// A single bucket in a histogram.
///
/// NOTE(review): whether `lower`/`upper` are inclusive or exclusive is not
/// enforced here — confirm at the site that fills buckets.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct HistogramBucket {
    /// Lower bound of the bucket.
    pub lower: f64,
    /// Upper bound of the bucket.
    pub upper: f64,
    /// Number of observations that fell into this bucket.
    pub count: u64,
}
/// Manages the lifecycle of a profiling session.
///
/// A session starts stopped (see `new`); `start`/`stop` toggle the event
/// profiler and sampler together.
#[allow(dead_code)]
pub struct ProfilingSession {
    /// Main event profiler.
    pub profiler: Profiler,
    /// Sampling profiler.
    pub sampler: SamplingProfiler,
    /// Allocation tracker (fed alongside the profiler by `alloc`/`dealloc`).
    pub alloc_tracker: AllocationTracker,
    /// Tactic event log.
    pub tactic_log: TacticProfileLog,
    /// Session name (used in the combined report header).
    pub name: String,
    /// Whether the session is running.
    pub running: bool,
}
1368impl ProfilingSession {
1369    /// Create a new session with the given name.
1370    #[allow(dead_code)]
1371    pub fn new(name: &str) -> Self {
1372        Self {
1373            profiler: Profiler::new(),
1374            sampler: SamplingProfiler::new(1_000_000),
1375            alloc_tracker: AllocationTracker::new(),
1376            tactic_log: TacticProfileLog::new(),
1377            name: name.to_string(),
1378            running: false,
1379        }
1380    }
1381    /// Start the session.
1382    #[allow(dead_code)]
1383    pub fn start(&mut self) {
1384        self.profiler.enable();
1385        self.sampler.enable();
1386        self.running = true;
1387    }
1388    /// Stop the session.
1389    #[allow(dead_code)]
1390    pub fn stop(&mut self) {
1391        self.profiler.disable();
1392        self.sampler.disable();
1393        self.running = false;
1394    }
1395    /// Record a function call (in both profilers).
1396    #[allow(dead_code)]
1397    pub fn enter_function(&mut self, name: &str) {
1398        self.profiler.enter_function(name);
1399        self.sampler.enter(name);
1400    }
1401    /// Record a function return.
1402    #[allow(dead_code)]
1403    pub fn exit_function(&mut self, name: &str) {
1404        self.profiler.exit_function(name);
1405        self.sampler.leave(name);
1406    }
1407    /// Record an allocation.
1408    #[allow(dead_code)]
1409    pub fn alloc(&mut self, bytes: usize, tag: &str) {
1410        self.profiler.alloc(bytes, tag);
1411        self.alloc_tracker.record_alloc(tag, bytes as u64);
1412    }
1413    /// Record a deallocation.
1414    #[allow(dead_code)]
1415    pub fn dealloc(&mut self, bytes: usize, tag: &str) {
1416        self.profiler.dealloc(bytes, tag);
1417        self.alloc_tracker.record_dealloc(tag, bytes as u64);
1418    }
1419    /// Generate a combined report.
1420    #[allow(dead_code)]
1421    pub fn combined_report(&self) -> String {
1422        let profile_report = self.profiler.generate_report();
1423        let mem_profile = self.profiler.memory_profile();
1424        format!(
1425            "=== ProfilingSession: {} ===\n{}\n{}\nTactic steps: {}\nSamples: {}\nLive bytes: {}",
1426            self.name,
1427            profile_report.to_text(),
1428            mem_profile.to_text(),
1429            self.tactic_log.success_count(),
1430            self.sampler.sample_count(),
1431            self.alloc_tracker.total_live_bytes(),
1432        )
1433    }
1434}
/// A sampling-based profiler.
#[allow(dead_code)]
pub struct SamplingProfiler {
    /// Collected samples (one per `take_sample` call while enabled).
    pub samples: Vec<ProfileSample>,
    /// Whether sampling is enabled.
    pub enabled: bool,
    /// Configured sampling interval in nanoseconds.
    /// NOTE(review): stored but not read by any method in this impl —
    /// presumably an external driver uses it to schedule `take_sample`.
    pub interval_ns: u64,
    /// Current simulated call stack, most-recent frame first (see `enter`).
    pub current_stack: Vec<String>,
}
1447impl SamplingProfiler {
1448    /// Create a new sampling profiler with the given interval.
1449    #[allow(dead_code)]
1450    pub fn new(interval_ns: u64) -> Self {
1451        Self {
1452            samples: Vec::new(),
1453            enabled: false,
1454            interval_ns,
1455            current_stack: Vec::new(),
1456        }
1457    }
1458    /// Enable the profiler.
1459    #[allow(dead_code)]
1460    pub fn enable(&mut self) {
1461        self.enabled = true;
1462    }
1463    /// Disable the profiler.
1464    #[allow(dead_code)]
1465    pub fn disable(&mut self) {
1466        self.enabled = false;
1467    }
1468    /// Simulate entering a function.
1469    #[allow(dead_code)]
1470    pub fn enter(&mut self, function: &str) {
1471        if self.enabled {
1472            self.current_stack.insert(0, function.to_string());
1473        }
1474    }
1475    /// Simulate leaving a function.
1476    #[allow(dead_code)]
1477    pub fn leave(&mut self, function: &str) {
1478        if self.enabled {
1479            if let Some(pos) = self.current_stack.iter().position(|s| s == function) {
1480                self.current_stack.remove(pos);
1481            }
1482        }
1483    }
1484    /// Take a sample of the current call stack.
1485    #[allow(dead_code)]
1486    pub fn take_sample(&mut self, thread_id: u64) {
1487        if self.enabled {
1488            let ts = profiler_now_ns();
1489            self.samples.push(ProfileSample::new(
1490                ts,
1491                self.current_stack.clone(),
1492                thread_id,
1493            ));
1494        }
1495    }
1496    /// Compute the flat profile: (function_name, hit_count) sorted by count.
1497    #[allow(dead_code)]
1498    pub fn flat_profile(&self) -> Vec<(String, usize)> {
1499        let mut counts: HashMap<String, usize> = HashMap::new();
1500        for sample in &self.samples {
1501            if let Some(top) = sample.top_function() {
1502                *counts.entry(top.to_string()).or_insert(0) += 1;
1503            }
1504        }
1505        let mut result: Vec<(String, usize)> = counts.into_iter().collect();
1506        result.sort_by(|a, b| b.1.cmp(&a.1));
1507        result
1508    }
1509    /// Compute the cumulative profile: each function gets credit for every sample
1510    /// it appears in (at any depth).
1511    #[allow(dead_code)]
1512    pub fn cumulative_profile(&self) -> Vec<(String, usize)> {
1513        let mut counts: HashMap<String, usize> = HashMap::new();
1514        for sample in &self.samples {
1515            for func in &sample.call_stack {
1516                *counts.entry(func.clone()).or_insert(0) += 1;
1517            }
1518        }
1519        let mut result: Vec<(String, usize)> = counts.into_iter().collect();
1520        result.sort_by(|a, b| b.1.cmp(&a.1));
1521        result
1522    }
1523    /// Total number of samples collected.
1524    #[allow(dead_code)]
1525    pub fn sample_count(&self) -> usize {
1526        self.samples.len()
1527    }
1528    /// Average call stack depth across all samples.
1529    #[allow(dead_code)]
1530    pub fn avg_stack_depth(&self) -> f64 {
1531        if self.samples.is_empty() {
1532            return 0.0;
1533        }
1534        let total: usize = self.samples.iter().map(|s| s.depth()).sum();
1535        total as f64 / self.samples.len() as f64
1536    }
1537}
/// Statistics for allocations associated with a single tag.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct AllocationStat {
    /// Total bytes allocated with this tag.
    pub total_bytes: u64,
    /// Number of allocation events.
    pub alloc_count: u64,
    /// Number of deallocation events.
    pub dealloc_count: u64,
    /// Bytes currently live.
    /// NOTE(review): maintained by whichever tracker owns these stats
    /// (presumably `AllocationTracker`) — not computed in this file's view.
    pub live_bytes: u64,
}
/// A snapshot of the call stack at a specific point in time.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct StackSnapshot {
    /// Timestamp (nanoseconds) when the snapshot was taken.
    pub timestamp_ns: u64,
    /// The call stack frames (most recent first).
    pub frames: Vec<String>,
    /// An optional label for this snapshot.
    pub label: Option<String>,
}
impl StackSnapshot {
    /// Create a new, unlabelled snapshot.
    #[allow(dead_code)]
    pub fn new(timestamp_ns: u64, frames: Vec<String>) -> Self {
        Self {
            label: None,
            timestamp_ns,
            frames,
        }
    }
    /// Attach a label, consuming and returning the snapshot (builder style).
    #[allow(dead_code)]
    pub fn with_label(mut self, label: &str) -> Self {
        self.label = Some(label.to_string());
        self
    }
    /// Depth of the captured stack.
    #[allow(dead_code)]
    pub fn depth(&self) -> usize {
        self.frames.len()
    }
    /// Format as a multi-line string: a header with timestamp and label,
    /// then one indexed line per frame.
    #[allow(dead_code)]
    pub fn format(&self) -> String {
        let header = format!(
            "Stack at {} ns [{}]:\n",
            self.timestamp_ns,
            self.label.as_deref().unwrap_or("(no label)")
        );
        self.frames
            .iter()
            .enumerate()
            .fold(header, |mut out, (i, frame)| {
                out.push_str(&format!("  {:3}: {}\n", i, frame));
                out
            })
    }
}