Skip to main content

oxilean_kernel/arena/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use std::marker::PhantomData;
6
7use std::collections::{HashMap, VecDeque};
8
9/// A chained-block arena that grows by adding new blocks on overflow.
10#[allow(dead_code)]
11pub struct ChainedArena {
12    blocks: Vec<ArenaBlock>,
13    block_size: usize,
14    total_alloc: usize,
15}
16#[allow(dead_code)]
17impl ChainedArena {
18    /// Creates a chained arena with the given initial block size.
19    pub fn new(block_size: usize) -> Self {
20        let first = ArenaBlock::new(block_size);
21        Self {
22            blocks: vec![first],
23            block_size,
24            total_alloc: 0,
25        }
26    }
27    /// Allocates `bytes` bytes with `align` alignment.
28    pub fn alloc(&mut self, bytes: usize, align: usize) -> usize {
29        let last = self.blocks.len() - 1;
30        if let Some(offset) = self.blocks[last].try_alloc(bytes, align) {
31            self.total_alloc += bytes;
32            return offset + last * self.block_size;
33        }
34        let new_block_size = self.block_size.max(bytes + align);
35        let mut block = ArenaBlock::new(new_block_size);
36        let offset = block
37            .try_alloc(bytes, align)
38            .expect("arena block allocation must succeed");
39        let block_idx = self.blocks.len();
40        self.blocks.push(block);
41        self.total_alloc += bytes;
42        offset + block_idx * self.block_size
43    }
44    /// Returns the total number of blocks.
45    pub fn num_blocks(&self) -> usize {
46        self.blocks.len()
47    }
48    /// Returns total bytes allocated (excluding padding).
49    pub fn total_allocated(&self) -> usize {
50        self.total_alloc
51    }
52}
53/// A dense map from arena indices to values.
54///
55/// Backed by a `Vec<Option<V>>` aligned with an arena, allowing O(1) lookup
56/// by index.
57#[derive(Debug, Clone)]
58pub struct ArenaMap<T, V> {
59    data: Vec<Option<V>>,
60    _marker: PhantomData<T>,
61}
62impl<T, V> ArenaMap<T, V> {
63    /// Create an empty `ArenaMap`.
64    pub fn new() -> Self {
65        Self {
66            data: Vec::new(),
67            _marker: PhantomData,
68        }
69    }
70    /// Insert a value for `idx`, growing the map if necessary.
71    pub fn insert(&mut self, idx: Idx<T>, value: V) {
72        let i = idx.to_usize();
73        if i >= self.data.len() {
74            self.data.resize_with(i + 1, || None);
75        }
76        self.data[i] = Some(value);
77    }
78    /// Get a reference to the value for `idx`.
79    pub fn get(&self, idx: Idx<T>) -> Option<&V> {
80        self.data.get(idx.to_usize())?.as_ref()
81    }
82    /// Get a mutable reference to the value for `idx`.
83    pub fn get_mut(&mut self, idx: Idx<T>) -> Option<&mut V> {
84        self.data.get_mut(idx.to_usize())?.as_mut()
85    }
86    /// Remove the value at `idx`.
87    pub fn remove(&mut self, idx: Idx<T>) -> Option<V> {
88        self.data.get_mut(idx.to_usize())?.take()
89    }
90    /// Check whether `idx` has a value.
91    pub fn contains(&self, idx: Idx<T>) -> bool {
92        self.get(idx).is_some()
93    }
94    /// Number of non-empty entries.
95    pub fn count(&self) -> usize {
96        self.data.iter().filter(|e| e.is_some()).count()
97    }
98    /// Iterate over all (index, value) pairs.
99    pub fn iter(&self) -> impl Iterator<Item = (Idx<T>, &V)> {
100        self.data
101            .iter()
102            .enumerate()
103            .filter_map(|(i, v)| v.as_ref().map(|val| (Idx::new(i as u32), val)))
104    }
105}
/// A simple stack-based scope tracker.
#[allow(dead_code)]
pub struct ScopeStack {
    names: Vec<String>,
}
#[allow(dead_code)]
impl ScopeStack {
    /// Creates a tracker with no open scopes.
    pub fn new() -> Self {
        Self { names: Vec::new() }
    }
    /// Enters a new scope named `name`.
    pub fn push(&mut self, name: impl Into<String>) {
        let owned = name.into();
        self.names.push(owned);
    }
    /// Leaves the innermost scope, returning its name if one was open.
    pub fn pop(&mut self) -> Option<String> {
        self.names.pop()
    }
    /// Name of the innermost open scope, if any.
    pub fn current(&self) -> Option<&str> {
        self.names.last().map(|name| &name[..])
    }
    /// Number of scopes currently open.
    pub fn depth(&self) -> usize {
        self.names.len()
    }
    /// Whether no scope is currently open.
    pub fn is_empty(&self) -> bool {
        self.depth() == 0
    }
    /// Dot-separated concatenation of all open scope names, outermost first.
    pub fn path(&self) -> String {
        self.names.join(".")
    }
}
/// Interns strings to save memory (each unique string stored once).
#[allow(dead_code)]
pub struct StringInterner {
    strings: Vec<String>,
    map: std::collections::HashMap<String, u32>,
}
#[allow(dead_code)]
impl StringInterner {
    /// Creates a new string interner.
    pub fn new() -> Self {
        Self {
            strings: Vec::new(),
            map: std::collections::HashMap::new(),
        }
    }
    /// Interns `s`, returning its stable ID.
    ///
    /// Re-interning an already-known string returns the original ID.
    pub fn intern(&mut self, s: &str) -> u32 {
        if let Some(existing) = self.map.get(s).copied() {
            existing
        } else {
            let id = self.strings.len() as u32;
            self.strings.push(s.to_owned());
            self.map.insert(s.to_owned(), id);
            id
        }
    }
    /// Resolves `id` back to its string, if such an ID was issued.
    pub fn get(&self, id: u32) -> Option<&str> {
        self.strings.get(id as usize).map(String::as_str)
    }
    /// Returns the total number of interned strings.
    pub fn len(&self) -> usize {
        self.strings.len()
    }
    /// Returns `true` if empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
180/// A key-value store for diagnostic metadata.
181#[allow(dead_code)]
182pub struct DiagMeta {
183    pub(super) entries: Vec<(String, String)>,
184}
185#[allow(dead_code)]
186impl DiagMeta {
187    /// Creates an empty metadata store.
188    pub fn new() -> Self {
189        Self {
190            entries: Vec::new(),
191        }
192    }
193    /// Adds a key-value pair.
194    pub fn add(&mut self, key: impl Into<String>, val: impl Into<String>) {
195        self.entries.push((key.into(), val.into()));
196    }
197    /// Returns the value for `key`, or `None`.
198    pub fn get(&self, key: &str) -> Option<&str> {
199        self.entries
200            .iter()
201            .find(|(k, _)| k == key)
202            .map(|(_, v)| v.as_str())
203    }
204    /// Returns the number of entries.
205    pub fn len(&self) -> usize {
206        self.entries.len()
207    }
208    /// Returns `true` if empty.
209    pub fn is_empty(&self) -> bool {
210        self.entries.is_empty()
211    }
212}
213/// A counted-access cache that tracks hit and miss statistics.
214#[allow(dead_code)]
215#[allow(missing_docs)]
216pub struct StatCache<K: std::hash::Hash + Eq + Clone, V: Clone> {
217    /// The inner LRU cache.
218    pub inner: SimpleLruCache<K, V>,
219    /// Number of cache hits.
220    pub hits: u64,
221    /// Number of cache misses.
222    pub misses: u64,
223}
224#[allow(dead_code)]
225impl<K: std::hash::Hash + Eq + Clone, V: Clone> StatCache<K, V> {
226    /// Creates a new stat cache with the given capacity.
227    pub fn new(capacity: usize) -> Self {
228        Self {
229            inner: SimpleLruCache::new(capacity),
230            hits: 0,
231            misses: 0,
232        }
233    }
234    /// Performs a lookup, tracking hit/miss.
235    pub fn get(&mut self, key: &K) -> Option<&V> {
236        let result = self.inner.get(key);
237        if result.is_some() {
238            self.hits += 1;
239        } else {
240            self.misses += 1;
241        }
242        None
243    }
244    /// Inserts a key-value pair.
245    pub fn put(&mut self, key: K, val: V) {
246        self.inner.put(key, val);
247    }
248    /// Returns the hit rate.
249    pub fn hit_rate(&self) -> f64 {
250        let total = self.hits + self.misses;
251        if total == 0 {
252            return 0.0;
253        }
254        self.hits as f64 / total as f64
255    }
256}
257/// A two-generation arena (nursery + stable) for generational allocation.
258#[allow(dead_code)]
259pub struct TwoGenerationArena {
260    pub(crate) nursery: GrowableArena,
261    pub(crate) stable: GrowableArena,
262    promotions: usize,
263}
264#[allow(dead_code)]
265impl TwoGenerationArena {
266    /// Creates a new two-generation arena.
267    pub fn new(nursery_cap: usize, stable_cap: usize) -> Self {
268        Self {
269            nursery: GrowableArena::new(nursery_cap),
270            stable: GrowableArena::new(stable_cap),
271            promotions: 0,
272        }
273    }
274    /// Allocates in the nursery generation.
275    pub fn alloc_nursery(&mut self, size: usize, align: usize) -> usize {
276        self.nursery.alloc(size, align)
277    }
278    /// Promotes an allocation from nursery to stable generation.
279    pub fn promote(&mut self, size: usize, align: usize) -> usize {
280        self.promotions += 1;
281        self.stable.alloc(size, align)
282    }
283    /// Clears the nursery (minor GC).
284    pub fn minor_gc(&mut self) {
285        self.nursery.reset();
286    }
287    /// Returns the number of promotions performed.
288    pub fn num_promotions(&self) -> usize {
289        self.promotions
290    }
291}
292/// A savepoint that can be used to roll back an arena to a prior state.
293#[allow(dead_code)]
294#[allow(missing_docs)]
295pub struct ArenaCheckpoint {
296    /// The watermark position at checkpoint time.
297    pub watermark: usize,
298}
299#[allow(dead_code)]
300impl ArenaCheckpoint {
301    /// Creates a checkpoint at the current position in `arena`.
302    pub fn create(arena: &LinearArena) -> Self {
303        Self {
304            watermark: arena.used(),
305        }
306    }
307    /// Rolls back `arena` to this checkpoint.
308    pub fn restore(self, arena: &mut LinearArena) {
309        arena.top = self.watermark;
310    }
311}
/// A monotonic timestamp in microseconds.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Timestamp(u64);
#[allow(dead_code)]
impl Timestamp {
    /// Creates a timestamp from microseconds.
    pub const fn from_us(us: u64) -> Self {
        Self(us)
    }
    /// Returns the timestamp in microseconds.
    pub fn as_us(self) -> u64 {
        let Self(us) = self;
        us
    }
    /// Microseconds between `self` and `earlier`, clamped to zero when
    /// `earlier` is actually the later of the two.
    pub fn elapsed_since(self, earlier: Timestamp) -> u64 {
        self.0.checked_sub(earlier.0).unwrap_or(0)
    }
}
331/// An arena that supports checkpointing and rollback.
332///
333/// By saving a checkpoint (the current length), the arena can be rolled back
334/// to discard all allocations made after that point.
335#[derive(Debug)]
336pub struct ScopedArena<T> {
337    inner: Arena<T>,
338}
339impl<T> ScopedArena<T> {
340    /// Create a new empty scoped arena.
341    pub fn new() -> Self {
342        Self {
343            inner: Arena::new(),
344        }
345    }
346    /// Allocate a value.
347    pub fn alloc(&mut self, value: T) -> Idx<T> {
348        self.inner.alloc(value)
349    }
350    /// Get a reference to the value at `idx`.
351    pub fn get(&self, idx: Idx<T>) -> &T {
352        self.inner.get(idx)
353    }
354    /// Create a checkpoint (the current length).
355    pub fn checkpoint(&self) -> u32 {
356        self.inner.len() as u32
357    }
358    /// Rollback to a previously saved checkpoint.
359    ///
360    /// All allocations after `checkpoint` are dropped.
361    pub fn rollback(&mut self, checkpoint: u32) {
362        self.inner.data.truncate(checkpoint as usize);
363    }
364    /// Current number of allocated values.
365    pub fn len(&self) -> usize {
366        self.inner.len()
367    }
368    /// Returns `true` if empty.
369    pub fn is_empty(&self) -> bool {
370        self.inner.is_empty()
371    }
372}
373/// A simple linear (bump) arena allocator over a fixed backing buffer.
374#[allow(dead_code)]
375pub struct LinearArena {
376    pub(crate) buf: Vec<u8>,
377    top: usize,
378    stats: ArenaStatsExt,
379}
380#[allow(dead_code)]
381impl LinearArena {
382    /// Creates a new arena with `capacity` bytes.
383    pub fn new(capacity: usize) -> Self {
384        Self {
385            buf: vec![0u8; capacity],
386            top: 0,
387            stats: ArenaStatsExt::new(),
388        }
389    }
390    /// Allocates `size` bytes with `align` alignment.
391    /// Returns the offset into the backing buffer, or `None` if full.
392    pub fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
393        let aligned = (self.top + align - 1) & !(align - 1);
394        let end = aligned.checked_add(size)?;
395        if end > self.buf.len() {
396            return None;
397        }
398        let waste = aligned - self.top;
399        self.top = end;
400        self.stats.bytes_allocated += size;
401        self.stats.alloc_count += 1;
402        self.stats.wasted_bytes += waste;
403        Some(aligned)
404    }
405    /// Resets the arena without releasing the backing buffer.
406    pub fn reset(&mut self) {
407        self.top = 0;
408        self.stats = ArenaStatsExt::new();
409    }
410    /// Returns the number of used bytes.
411    pub fn used(&self) -> usize {
412        self.top
413    }
414    /// Returns the total capacity.
415    pub fn capacity(&self) -> usize {
416        self.buf.len()
417    }
418    /// Returns a reference to the collected statistics.
419    pub fn stats(&self) -> &ArenaStatsExt {
420        &self.stats
421    }
422}
/// Statistics about an arena's memory usage.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ArenaStats {
    /// Number of allocated values.
    pub len: usize,
    /// Current capacity (number of values that fit without reallocation).
    pub capacity: usize,
}
impl ArenaStats {
    /// Fraction of capacity used (0.0 to 1.0); 0.0 for a zero-capacity arena.
    pub fn utilisation(&self) -> f64 {
        match self.capacity {
            0 => 0.0,
            cap => self.len as f64 / cap as f64,
        }
    }
    /// Number of free slots remaining before reallocation.
    pub fn free(&self) -> usize {
        self.capacity.saturating_sub(self.len)
    }
}
/// A key-value annotation table for arbitrary metadata.
#[allow(dead_code)]
pub struct AnnotationTable {
    map: std::collections::HashMap<String, Vec<String>>,
}
#[allow(dead_code)]
impl AnnotationTable {
    /// Creates an empty annotation table.
    pub fn new() -> Self {
        Self {
            map: std::collections::HashMap::new(),
        }
    }
    /// Records `val` under `key`, keeping any values recorded earlier.
    pub fn annotate(&mut self, key: impl Into<String>, val: impl Into<String>) {
        let bucket = self.map.entry(key.into()).or_default();
        bucket.push(val.into());
    }
    /// Returns every value recorded under `key`, oldest first.
    pub fn get_all(&self, key: &str) -> &[String] {
        match self.map.get(key) {
            Some(values) => values,
            None => &[],
        }
    }
    /// Returns the number of distinct annotation keys.
    pub fn num_keys(&self) -> usize {
        self.map.len()
    }
    /// Returns `true` if the table has any annotations for `key`.
    pub fn has(&self, key: &str) -> bool {
        self.map.contains_key(key)
    }
}
/// A simple bump allocator for byte slices.
///
/// Memory is allocated sequentially; deallocation of individual items is not
/// supported. The entire arena can be reset at once.
#[derive(Debug)]
pub struct BumpArena {
    buf: Vec<u8>,
    // Bump pointer: offset of the first free byte.
    pos: usize,
}
impl BumpArena {
    /// Create a bump arena with the given capacity in bytes.
    pub fn with_capacity(cap: usize) -> Self {
        Self {
            buf: vec![0u8; cap],
            pos: 0,
        }
    }
    /// Allocate `size` bytes, returning the start offset.
    ///
    /// Returns `None` if there is not enough space. Uses `checked_add` so a
    /// huge `size` yields `None` instead of overflowing `pos + size` (which
    /// previously panicked in debug builds and wrapped in release builds).
    pub fn alloc_bytes(&mut self, size: usize) -> Option<usize> {
        let end = self.pos.checked_add(size)?;
        if end > self.buf.len() {
            return None;
        }
        let offset = self.pos;
        self.pos = end;
        Some(offset)
    }
    /// Write bytes at the given offset.
    ///
    /// # Panics
    /// Panics if `offset + data.len()` exceeds the arena capacity.
    pub fn write_at(&mut self, offset: usize, data: &[u8]) {
        self.buf[offset..offset + data.len()].copy_from_slice(data);
    }
    /// Read bytes from the given offset.
    ///
    /// # Panics
    /// Panics if `offset + len` exceeds the arena capacity.
    pub fn read_at(&self, offset: usize, len: usize) -> &[u8] {
        &self.buf[offset..offset + len]
    }
    /// Reset the bump pointer without zeroing memory.
    pub fn reset(&mut self) {
        self.pos = 0;
    }
    /// Bytes used.
    pub fn used(&self) -> usize {
        self.pos
    }
    /// Total capacity in bytes.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    /// Remaining bytes.
    pub fn remaining(&self) -> usize {
        self.buf.len().saturating_sub(self.pos)
    }
}
529/// A pool of recycled arenas.
530///
531/// Allows arena instances to be reused across phases, reducing allocations.
532#[derive(Debug)]
533pub struct ArenaPool<T> {
534    pub(super) pool: Vec<Arena<T>>,
535}
536impl<T> ArenaPool<T> {
537    /// Create an empty pool.
538    pub fn new() -> Self {
539        Self { pool: Vec::new() }
540    }
541    /// Retrieve an arena from the pool (creating one if empty).
542    pub fn acquire(&mut self) -> Arena<T> {
543        self.pool.pop().unwrap_or_default()
544    }
545    /// Return an arena to the pool after clearing it.
546    pub fn release(&mut self, mut arena: Arena<T>) {
547        arena.clear();
548        self.pool.push(arena);
549    }
550    /// Number of arenas currently in the pool.
551    pub fn pool_size(&self) -> usize {
552        self.pool.len()
553    }
554}
/// A simple LRU cache backed by a linked list + hash map.
#[allow(dead_code)]
pub struct SimpleLruCache<K: std::hash::Hash + Eq + Clone, V: Clone> {
    capacity: usize,
    map: std::collections::HashMap<K, usize>,
    keys: Vec<K>,
    vals: Vec<V>,
    order: Vec<usize>,
}
#[allow(dead_code)]
impl<K: std::hash::Hash + Eq + Clone, V: Clone> SimpleLruCache<K, V> {
    /// Creates a new LRU cache with the given capacity.
    ///
    /// # Panics
    /// Panics if `capacity` is zero.
    pub fn new(capacity: usize) -> Self {
        assert!(capacity > 0);
        Self {
            capacity,
            map: std::collections::HashMap::new(),
            keys: Vec::new(),
            vals: Vec::new(),
            order: Vec::new(),
        }
    }
    /// Moves slot `idx` to the front of the recency list (most recent).
    fn promote(&mut self, idx: usize) {
        self.order.retain(|&slot| slot != idx);
        self.order.insert(0, idx);
    }
    /// Inserts or updates a key-value pair, evicting the least-recently-used
    /// entry when the cache is full.
    pub fn put(&mut self, key: K, val: V) {
        if let Some(idx) = self.map.get(&key).copied() {
            // Key already present: overwrite in place and mark most recent.
            self.vals[idx] = val;
            self.promote(idx);
        } else if self.keys.len() >= self.capacity {
            // Full: recycle the least-recently-used slot for the new entry.
            let victim = *self
                .order
                .last()
                .expect("order list must be non-empty before eviction");
            self.map.remove(&self.keys[victim]);
            self.order.pop();
            self.keys[victim] = key.clone();
            self.vals[victim] = val;
            self.map.insert(key, victim);
            self.order.insert(0, victim);
        } else {
            // Room left: append a brand-new slot.
            let idx = self.keys.len();
            self.keys.push(key.clone());
            self.vals.push(val);
            self.map.insert(key, idx);
            self.order.insert(0, idx);
        }
    }
    /// Returns a reference to the value for `key`, promoting it to most recent.
    pub fn get(&mut self, key: &K) -> Option<&V> {
        let idx = self.map.get(key).copied()?;
        self.promote(idx);
        self.vals.get(idx)
    }
    /// Returns the number of entries.
    pub fn len(&self) -> usize {
        self.keys.len()
    }
    /// Returns `true` if empty.
    pub fn is_empty(&self) -> bool {
        self.keys.is_empty()
    }
}
620/// A counter that dispenses monotonically increasing `TypedId` values.
621#[allow(dead_code)]
622pub struct IdDispenser<T> {
623    next: u32,
624    _phantom: std::marker::PhantomData<T>,
625}
626#[allow(dead_code)]
627impl<T> IdDispenser<T> {
628    /// Creates a new dispenser starting from zero.
629    pub fn new() -> Self {
630        Self {
631            next: 0,
632            _phantom: std::marker::PhantomData,
633        }
634    }
635    /// Dispenses the next ID.
636    #[allow(clippy::should_implement_trait)]
637    pub fn next(&mut self) -> TypedId<T> {
638        let id = TypedId::new(self.next);
639        self.next += 1;
640        id
641    }
642    /// Returns the number of IDs dispensed.
643    pub fn count(&self) -> u32 {
644        self.next
645    }
646}
/// A growable arena that reallocates its backing buffer when full.
#[allow(dead_code)]
pub struct GrowableArena {
    pub(crate) data: Vec<u8>,
    // Bump pointer: offset of the first free byte.
    top: usize,
    // Number of allocations performed since the last reset.
    count: usize,
}
#[allow(dead_code)]
impl GrowableArena {
    /// Creates a new growable arena with `initial` bytes (minimum 16).
    pub fn new(initial: usize) -> Self {
        Self {
            data: vec![0u8; initial.max(16)],
            top: 0,
            count: 0,
        }
    }
    /// Allocates `size` bytes, growing the backing buffer if needed.
    ///
    /// `align` must be a non-zero power of two — the mask arithmetic is
    /// only valid under that assumption (now debug-asserted; `align == 0`
    /// previously produced garbage offsets silently). The buffer at least
    /// doubles on growth, keeping repeated allocation amortized O(1).
    pub fn alloc(&mut self, size: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two(), "align must be a power of two");
        // Round `top` up to the next multiple of `align`.
        let aligned = (self.top + align - 1) & !(align - 1);
        let end = aligned + size;
        if end > self.data.len() {
            // Double, or jump straight to `end` if doubling is not enough.
            let new_cap = (self.data.len() * 2).max(end);
            self.data.resize(new_cap, 0);
        }
        self.top = end;
        self.count += 1;
        aligned
    }
    /// Returns total bytes used.
    pub fn used(&self) -> usize {
        self.top
    }
    /// Returns total allocation count.
    pub fn count(&self) -> usize {
        self.count
    }
    /// Resets the arena.
    pub fn reset(&mut self) {
        self.top = 0;
        self.count = 0;
    }
}
/// A type-parameterised arena allocator that stores `T` values.
#[allow(dead_code)]
pub struct TypedArena<T> {
    items: Vec<T>,
}
#[allow(dead_code)]
impl<T> TypedArena<T> {
    /// Creates a new typed arena.
    pub fn new() -> Self {
        Self { items: Vec::new() }
    }
    /// Stores `val`, returning a shared reference to the stored copy
    /// (the borrow is tied to this `&mut self` borrow).
    pub fn alloc(&mut self, val: T) -> &T {
        self.items.push(val);
        // Just pushed, so the vec cannot be empty here.
        self.items.last().expect("items list must be non-empty")
    }
    /// Returns the number of allocated items.
    pub fn len(&self) -> usize {
        self.items.len()
    }
    /// Returns `true` if no items have been allocated.
    pub fn is_empty(&self) -> bool {
        self.items.is_empty()
    }
    /// Clears all items.
    pub fn clear(&mut self) {
        self.items.clear();
    }
}
/// A slot that can hold a value, with lazy initialization.
#[allow(dead_code)]
pub struct Slot<T> {
    inner: Option<T>,
}
#[allow(dead_code)]
impl<T> Slot<T> {
    /// Creates an empty slot.
    pub fn empty() -> Self {
        Self { inner: None }
    }
    /// Stores `val` in the slot.
    ///
    /// # Panics
    /// Panics if the slot already holds a value.
    pub fn fill(&mut self, val: T) {
        assert!(self.inner.is_none(), "Slot: already filled");
        self.inner = Some(val);
    }
    /// Borrows the stored value, if any.
    pub fn get(&self) -> Option<&T> {
        self.inner.as_ref()
    }
    /// Whether the slot currently holds a value.
    pub fn is_filled(&self) -> bool {
        self.inner.is_some()
    }
    /// Removes and returns the stored value, leaving the slot empty.
    pub fn take(&mut self) -> Option<T> {
        self.inner.take()
    }
    /// Returns the stored value, running `f` to produce one only when the
    /// slot is empty.
    pub fn get_or_fill_with(&mut self, f: impl FnOnce() -> T) -> &T {
        if self.inner.is_none() {
            let produced = f();
            self.inner = Some(produced);
        }
        self.inner
            .as_ref()
            .expect("inner value must be initialized before access")
    }
}
757/// A contiguous range of typed indices `[start, end)`.
758///
759/// Used to represent a batch of values allocated together in an arena.
760#[derive(Clone, Copy)]
761pub struct IdxRange<T> {
762    /// Inclusive start index.
763    pub start: Idx<T>,
764    /// Exclusive end index.
765    pub end: Idx<T>,
766}
767impl<T> IdxRange<T> {
768    /// Create a new range.
769    pub fn new(start: Idx<T>, end: Idx<T>) -> Self {
770        Self { start, end }
771    }
772    /// Create an empty range starting at `idx`.
773    pub fn empty_at(idx: Idx<T>) -> Self {
774        let r = idx.raw;
775        Self {
776            start: Idx::new(r),
777            end: Idx::new(r),
778        }
779    }
780    /// Length of the range.
781    pub fn len(self) -> usize {
782        (self.end.raw as usize).saturating_sub(self.start.raw as usize)
783    }
784    /// Returns `true` if the range is empty.
785    pub fn is_empty(self) -> bool {
786        self.start.raw >= self.end.raw
787    }
788    /// Check whether `idx` is in this range.
789    pub fn contains(self, idx: Idx<T>) -> bool {
790        idx.raw >= self.start.raw && idx.raw < self.end.raw
791    }
792    /// Iterate over all indices in this range.
793    pub fn iter(self) -> impl Iterator<Item = Idx<T>> {
794        (self.start.raw..self.end.raw).map(Idx::new)
795    }
796}
/// A fixed-size block in a chained-block arena.
#[allow(dead_code)]
pub struct ArenaBlock {
    data: Vec<u8>,
    // Offset of the first free byte.
    used: usize,
}
#[allow(dead_code)]
impl ArenaBlock {
    /// Creates a block with `size` bytes capacity.
    pub fn new(size: usize) -> Self {
        Self {
            data: vec![0u8; size],
            used: 0,
        }
    }
    /// Tries to allocate `bytes` with alignment `align`.
    ///
    /// Returns the in-block offset, or `None` if the block cannot fit the
    /// request. `align` must be a non-zero power of two — the mask below is
    /// only valid under that assumption (now debug-asserted; `align == 0`
    /// previously produced garbage offsets silently).
    pub fn try_alloc(&mut self, bytes: usize, align: usize) -> Option<usize> {
        debug_assert!(align.is_power_of_two(), "align must be a power of two");
        // Round the bump pointer up to the requested alignment.
        let base = (self.used + align - 1) & !(align - 1);
        // `checked_add` guards against overflow for huge `bytes`.
        let end = base.checked_add(bytes)?;
        if end > self.data.len() {
            return None;
        }
        self.used = end;
        Some(base)
    }
    /// Returns remaining capacity.
    pub fn remaining(&self) -> usize {
        self.data.len() - self.used
    }
    /// Returns the size of the block.
    pub fn block_size(&self) -> usize {
        self.data.len()
    }
}
831/// An arena that interns values, returning the same index for equal values.
832///
833/// If a value was previously allocated, its existing index is returned
834/// instead of allocating a new copy.
835#[derive(Debug)]
836pub struct InterningArena<T: PartialEq + Clone> {
837    inner: Arena<T>,
838}
839impl<T: PartialEq + Clone> InterningArena<T> {
840    /// Create a new empty interning arena.
841    pub fn new() -> Self {
842        Self {
843            inner: Arena::new(),
844        }
845    }
846    /// Allocate or retrieve an existing value.
847    ///
848    /// If `value` is already present, returns its existing index.
849    /// Otherwise allocates a new copy.
850    pub fn intern(&mut self, value: T) -> Idx<T> {
851        for (i, v) in self.inner.data.iter().enumerate() {
852            if *v == value {
853                return Idx::new(i as u32);
854            }
855        }
856        self.inner.alloc(value)
857    }
858    /// Get a reference to the interned value at `idx`.
859    pub fn get(&self, idx: Idx<T>) -> &T {
860        self.inner.get(idx)
861    }
862    /// Number of distinct values stored.
863    pub fn len(&self) -> usize {
864        self.inner.len()
865    }
866    /// Returns `true` if the arena is empty.
867    pub fn is_empty(&self) -> bool {
868        self.inner.is_empty()
869    }
870    /// Check whether `value` is already interned.
871    pub fn contains(&self, value: &T) -> bool {
872        self.inner.data.iter().any(|v| v == value)
873    }
874    /// Collect statistics.
875    pub fn stats(&self) -> ArenaStats {
876        self.inner.stats()
877    }
878}
/// A clock that measures elapsed time in a loop.
#[allow(dead_code)]
pub struct LoopClock {
    start: std::time::Instant,
    iters: u64,
}
#[allow(dead_code)]
impl LoopClock {
    /// Starts the clock at the current instant with zero iterations.
    pub fn start() -> Self {
        let now = std::time::Instant::now();
        Self {
            start: now,
            iters: 0,
        }
    }
    /// Records one iteration.
    pub fn tick(&mut self) {
        self.iters += 1;
    }
    /// Microseconds elapsed since the clock was started.
    pub fn elapsed_us(&self) -> f64 {
        self.start.elapsed().as_secs_f64() * 1e6
    }
    /// Average microseconds per recorded iteration (0.0 with no iterations).
    pub fn avg_us_per_iter(&self) -> f64 {
        match self.iters {
            0 => 0.0,
            n => self.elapsed_us() / n as f64,
        }
    }
    /// Number of iterations recorded so far.
    pub fn iters(&self) -> u64 {
        self.iters
    }
}
/// A memoized computation slot that stores a cached value.
#[allow(dead_code)]
pub struct MemoSlot<T: Clone> {
    cached: Option<T>,
}
#[allow(dead_code)]
impl<T: Clone> MemoSlot<T> {
    /// Creates an uncomputed memo slot.
    pub fn new() -> Self {
        Self { cached: None }
    }
    /// Returns the cached value, running `f` only on the first call
    /// (or the first call after `invalidate`).
    pub fn get_or_compute(&mut self, f: impl FnOnce() -> T) -> &T {
        if self.cached.is_none() {
            let fresh = f();
            self.cached = Some(fresh);
        }
        self.cached
            .as_ref()
            .expect("cached value must be initialized before access")
    }
    /// Drops the cached value so the next access recomputes it.
    pub fn invalidate(&mut self) {
        self.cached = None;
    }
    /// Whether a value is currently cached.
    pub fn is_cached(&self) -> bool {
        self.cached.is_some()
    }
}
/// A typed arena for storing values contiguously.
///
/// Values are never moved once allocated, so references remain valid
/// for the lifetime of the arena.
#[derive(Debug)]
pub struct Arena<T> {
    // Backing storage; `Idx<T>` values index directly into this Vec.
    // NOTE(review): the doc above says values are never moved, but a `Vec`
    // does relocate its elements on reallocation — indices stay valid,
    // raw references across allocations do not; confirm intended claim.
    data: Vec<T>,
}
951impl<T> Arena<T> {
952    /// Create a new empty arena.
953    #[inline]
954    pub fn new() -> Self {
955        Self { data: Vec::new() }
956    }
957    /// Create a new arena with the specified capacity.
958    #[inline]
959    pub fn with_capacity(capacity: usize) -> Self {
960        Self {
961            data: Vec::with_capacity(capacity),
962        }
963    }
964    /// Allocate a value in the arena and return its index.
965    #[inline]
966    pub fn alloc(&mut self, value: T) -> Idx<T> {
967        let idx = self.data.len();
968        assert!(idx < u32::MAX as usize, "arena overflow");
969        self.data.push(value);
970        Idx::new(idx as u32)
971    }
972    /// Allocate multiple values at once, returning the index range.
973    pub fn alloc_many(&mut self, values: impl IntoIterator<Item = T>) -> IdxRange<T> {
974        let start = Idx::new(self.data.len() as u32);
975        self.data.extend(values);
976        let end = Idx::new(self.data.len() as u32);
977        IdxRange::new(start, end)
978    }
979    /// Get a reference to the value at the given index.
980    ///
981    /// # Panics
982    /// Panics if the index is out of bounds (in debug mode).
983    #[inline]
984    pub fn get(&self, idx: Idx<T>) -> &T {
985        &self.data[idx.raw as usize]
986    }
987    /// Get a mutable reference to the value at the given index.
988    #[inline]
989    pub fn get_mut(&mut self, idx: Idx<T>) -> &mut T {
990        &mut self.data[idx.raw as usize]
991    }
992    /// Get a slice of values for the given range.
993    pub fn get_range(&self, range: IdxRange<T>) -> &[T] {
994        &self.data[range.start.to_usize()..range.end.to_usize()]
995    }
996    /// Get the number of values in the arena.
997    #[inline]
998    pub fn len(&self) -> usize {
999        self.data.len()
1000    }
1001    /// Check if the arena is empty.
1002    #[inline]
1003    pub fn is_empty(&self) -> bool {
1004        self.data.is_empty()
1005    }
1006    /// Get the index that would be assigned to the next allocation.
1007    pub fn next_idx(&self) -> Idx<T> {
1008        Idx::new(self.data.len() as u32)
1009    }
1010    /// Iterate over all (index, value) pairs.
1011    pub fn iter_indexed(&self) -> impl Iterator<Item = (Idx<T>, &T)> {
1012        self.data
1013            .iter()
1014            .enumerate()
1015            .map(|(i, v)| (Idx::new(i as u32), v))
1016    }
1017    /// Iterate over all values.
1018    pub fn iter(&self) -> impl Iterator<Item = &T> {
1019        self.data.iter()
1020    }
1021    /// Collect the current size statistics.
1022    pub fn stats(&self) -> ArenaStats {
1023        ArenaStats {
1024            len: self.data.len(),
1025            capacity: self.data.capacity(),
1026        }
1027    }
1028    /// Shrink the arena's backing storage to fit.
1029    pub fn shrink_to_fit(&mut self) {
1030        self.data.shrink_to_fit();
1031    }
1032    /// Reset the arena, dropping all values.
1033    pub fn clear(&mut self) {
1034        self.data.clear();
1035    }
1036}
/// A bidirectional map between two types.
///
/// Invariant: `forward` and `backward` are exact mirrors — every `(a, b)`
/// in `forward` has `(b, a)` in `backward` and vice versa.
#[allow(dead_code)]
pub struct BiMap<A: std::hash::Hash + Eq + Clone, B: std::hash::Hash + Eq + Clone> {
    forward: std::collections::HashMap<A, B>,
    backward: std::collections::HashMap<B, A>,
}
#[allow(dead_code)]
impl<A: std::hash::Hash + Eq + Clone, B: std::hash::Hash + Eq + Clone> BiMap<A, B> {
    /// Creates an empty bidirectional map.
    pub fn new() -> Self {
        Self {
            forward: std::collections::HashMap::new(),
            backward: std::collections::HashMap::new(),
        }
    }
    /// Inserts a pair `(a, b)`, replacing any pair that shares `a` or `b`.
    pub fn insert(&mut self, a: A, b: B) {
        // Evict any existing pairs touching `a` or `b` first; otherwise
        // re-inserting a key leaves a stale entry in the opposite map and
        // the two maps fall out of sync (ghost lookups, wrong `len`).
        if let Some(old_b) = self.forward.remove(&a) {
            self.backward.remove(&old_b);
        }
        if let Some(old_a) = self.backward.remove(&b) {
            self.forward.remove(&old_a);
        }
        self.forward.insert(a.clone(), b.clone());
        self.backward.insert(b, a);
    }
    /// Looks up `b` for a given `a`.
    pub fn get_b(&self, a: &A) -> Option<&B> {
        self.forward.get(a)
    }
    /// Looks up `a` for a given `b`.
    pub fn get_a(&self, b: &B) -> Option<&A> {
        self.backward.get(b)
    }
    /// Returns the number of pairs.
    pub fn len(&self) -> usize {
        self.forward.len()
    }
    /// Returns `true` if empty.
    pub fn is_empty(&self) -> bool {
        self.forward.is_empty()
    }
}
1074/// A string slice stored in an arena (UTF-8, null-terminated).
1075#[allow(dead_code)]
1076pub struct ArenaString {
1077    pub(crate) offset: usize,
1078    len: usize,
1079}
1080#[allow(dead_code)]
1081impl ArenaString {
1082    /// Stores `s` in `arena` and returns a handle.
1083    pub fn store(arena: &mut LinearArena, s: &str) -> Option<Self> {
1084        let bytes = s.as_bytes();
1085        let offset = arena.alloc(bytes.len() + 1, 1)?;
1086        let start = offset;
1087        for (i, &b) in bytes.iter().enumerate() {
1088            arena.buf[start + i] = b;
1089        }
1090        arena.buf[start + bytes.len()] = 0;
1091        Some(Self {
1092            offset,
1093            len: bytes.len(),
1094        })
1095    }
1096    /// Returns the length of the string in bytes.
1097    pub fn len(&self) -> usize {
1098        self.len
1099    }
1100    /// Returns `true` if the string is empty.
1101    pub fn is_empty(&self) -> bool {
1102        self.len == 0
1103    }
1104}
/// A set of non-overlapping integer intervals, kept sorted and coalesced.
#[allow(dead_code)]
pub struct IntervalSet {
    // Closed intervals `(lo, hi)`, sorted by `lo`, pairwise disjoint and
    // non-adjacent (adjacent intervals are merged on insertion).
    intervals: Vec<(i64, i64)>,
}
#[allow(dead_code)]
impl IntervalSet {
    /// Creates an empty interval set.
    pub fn new() -> Self {
        Self {
            intervals: Vec::new(),
        }
    }
    /// Adds the closed interval `[lo, hi]`, merging any intervals it
    /// overlaps or touches. An inverted interval (`lo > hi`) is ignored.
    pub fn add(&mut self, lo: i64, hi: i64) {
        if lo > hi {
            return;
        }
        let mut new_lo = lo;
        let mut new_hi = hi;
        let mut i = 0;
        while i < self.intervals.len() {
            let (il, ih) = self.intervals[i];
            // Saturating arithmetic: the plain `new_lo - 1` / `new_hi + 1`
            // overflow (panicking in debug builds) when an interval
            // touches i64::MIN or i64::MAX. Saturation keeps the
            // adjacency comparisons correct at the extremes.
            if ih < new_lo.saturating_sub(1) {
                // Entirely to the left of the new interval; keep scanning.
                i += 1;
                continue;
            }
            if il > new_hi.saturating_add(1) {
                // Entirely to the right; sorted order means we can stop.
                break;
            }
            // Overlapping or adjacent: absorb it and drop the old entry.
            new_lo = new_lo.min(il);
            new_hi = new_hi.max(ih);
            self.intervals.remove(i);
        }
        self.intervals.insert(i, (new_lo, new_hi));
    }
    /// Returns `true` if `x` is in the set.
    pub fn contains(&self, x: i64) -> bool {
        self.intervals.iter().any(|&(lo, hi)| x >= lo && x <= hi)
    }
    /// Returns the number of intervals.
    pub fn num_intervals(&self) -> usize {
        self.intervals.len()
    }
    /// Returns the total count of integers covered.
    pub fn cardinality(&self) -> i64 {
        self.intervals.iter().map(|&(lo, hi)| hi - lo + 1).sum()
    }
}
1154/// A registry of memory regions.
1155#[allow(dead_code)]
1156pub struct MemoryRegionRegistry {
1157    regions: Vec<MemoryRegion>,
1158}
1159#[allow(dead_code)]
1160impl MemoryRegionRegistry {
1161    /// Creates an empty registry.
1162    pub fn new() -> Self {
1163        Self {
1164            regions: Vec::new(),
1165        }
1166    }
1167    /// Adds a region.
1168    pub fn add(&mut self, region: MemoryRegion) {
1169        self.regions.push(region);
1170    }
1171    /// Returns the region containing `addr`, or `None`.
1172    pub fn find(&self, addr: usize) -> Option<&MemoryRegion> {
1173        self.regions.iter().find(|r| r.contains(addr))
1174    }
1175    /// Returns the total number of regions.
1176    pub fn len(&self) -> usize {
1177        self.regions.len()
1178    }
1179    /// Returns `true` if empty.
1180    pub fn is_empty(&self) -> bool {
1181        self.regions.is_empty()
1182    }
1183}
1184/// Two arenas paired together, useful for storing interrelated values.
1185///
1186/// Allocations in each arena are independent, but the two are kept together
1187/// for lifetime management.
1188#[derive(Debug, Default)]
1189pub struct DoubleArena<A, B> {
1190    /// The first arena.
1191    pub first: Arena<A>,
1192    /// The second arena.
1193    pub second: Arena<B>,
1194}
1195impl<A, B> DoubleArena<A, B> {
1196    /// Create an empty `DoubleArena`.
1197    pub fn new() -> Self {
1198        Self {
1199            first: Arena::new(),
1200            second: Arena::new(),
1201        }
1202    }
1203    /// Allocate a pair of related values atomically.
1204    ///
1205    /// Returns `(idx_a, idx_b)` where `idx_a.raw() == idx_b.raw()` is NOT
1206    /// guaranteed — the arenas may have different lengths.
1207    pub fn alloc_pair(&mut self, a: A, b: B) -> (Idx<A>, Idx<B>) {
1208        let ia = self.first.alloc(a);
1209        let ib = self.second.alloc(b);
1210        (ia, ib)
1211    }
1212    /// Total allocations across both arenas.
1213    pub fn total_len(&self) -> usize {
1214        self.first.len() + self.second.len()
1215    }
1216}
/// Represents a named memory region used by the arena system.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct MemoryRegion {
    /// Human-readable label.
    pub label: String,
    /// Base offset.
    pub base: usize,
    /// Size in bytes.
    pub size: usize,
    /// Whether the region is currently in use.
    pub active: bool,
}
#[allow(dead_code)]
impl MemoryRegion {
    /// Creates a new, inactive memory region.
    pub fn new(label: impl Into<String>, base: usize, size: usize) -> Self {
        MemoryRegion {
            label: label.into(),
            base,
            size,
            active: false,
        }
    }
    /// Marks the region as in use.
    pub fn activate(&mut self) {
        self.active = true;
    }
    /// Marks the region as no longer in use.
    pub fn deactivate(&mut self) {
        self.active = false;
    }
    /// Returns the exclusive end offset of the region.
    pub fn end(&self) -> usize {
        self.base + self.size
    }
    /// Returns `true` if `addr` falls within `[base, end)`.
    pub fn contains(&self, addr: usize) -> bool {
        (self.base..self.end()).contains(&addr)
    }
}
/// A simple sparse bit set with a fixed word capacity.
#[allow(dead_code)]
pub struct SparseBitSet {
    words: Vec<u64>,
}
#[allow(dead_code)]
impl SparseBitSet {
    /// Creates a new bit set that can hold at least `capacity` bits.
    pub fn new(capacity: usize) -> Self {
        // One 64-bit word per 64 bits of capacity, rounded up.
        let word_count = (capacity + 63) / 64;
        Self {
            words: vec![0u64; word_count],
        }
    }
    /// Splits a bit index into its word index and in-word mask.
    fn locate(i: usize) -> (usize, u64) {
        (i / 64, 1u64 << (i % 64))
    }
    /// Sets bit `i`; indices beyond capacity are silently ignored.
    pub fn set(&mut self, i: usize) {
        let (w, mask) = Self::locate(i);
        if let Some(word) = self.words.get_mut(w) {
            *word |= mask;
        }
    }
    /// Clears bit `i`; indices beyond capacity are silently ignored.
    pub fn clear(&mut self, i: usize) {
        let (w, mask) = Self::locate(i);
        if let Some(word) = self.words.get_mut(w) {
            *word &= !mask;
        }
    }
    /// Returns `true` if bit `i` is set (out-of-range bits read as unset).
    pub fn get(&self, i: usize) -> bool {
        let (w, mask) = Self::locate(i);
        self.words.get(w).is_some_and(|word| word & mask != 0)
    }
    /// Returns the number of set bits.
    pub fn count_ones(&self) -> u32 {
        self.words.iter().map(|w| w.count_ones()).sum()
    }
    /// Returns a new set containing every bit set in `self` or `other`.
    pub fn union(&self, other: &SparseBitSet) -> SparseBitSet {
        let len = self.words.len().max(other.words.len());
        // Missing words in the shorter set contribute zero bits.
        let word_at = |set: &SparseBitSet, i: usize| set.words.get(i).copied().unwrap_or(0);
        SparseBitSet {
            words: (0..len)
                .map(|i| word_at(self, i) | word_at(other, i))
                .collect(),
        }
    }
}
/// A FIFO work queue.
#[allow(dead_code)]
pub struct WorkQueue<T> {
    items: std::collections::VecDeque<T>,
}
#[allow(dead_code)]
impl<T> WorkQueue<T> {
    /// Creates a new empty queue.
    pub fn new() -> Self {
        let items = std::collections::VecDeque::new();
        Self { items }
    }
    /// Appends a work item at the back of the queue.
    pub fn enqueue(&mut self, item: T) {
        self.items.push_back(item);
    }
    /// Removes and returns the item at the front, if any.
    pub fn dequeue(&mut self) -> Option<T> {
        self.items.pop_front()
    }
    /// Returns `true` if no items are pending.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns the number of pending items.
    pub fn len(&self) -> usize {
        self.items.len()
    }
}
/// Tracks the frequency of items.
#[allow(dead_code)]
pub struct FrequencyTable<T: std::hash::Hash + Eq + Clone> {
    counts: std::collections::HashMap<T, u64>,
}
#[allow(dead_code)]
impl<T: std::hash::Hash + Eq + Clone> FrequencyTable<T> {
    /// Creates a new empty frequency table.
    pub fn new() -> Self {
        Self {
            counts: std::collections::HashMap::new(),
        }
    }
    /// Records one occurrence of `item`.
    pub fn record(&mut self, item: T) {
        // u64's default is 0, so absent items start counting from zero.
        *self.counts.entry(item).or_default() += 1;
    }
    /// Returns how many times `item` has been recorded (0 if never).
    pub fn freq(&self, item: &T) -> u64 {
        self.counts.get(item).copied().unwrap_or(0)
    }
    /// Returns the item with the highest frequency, if any were recorded.
    pub fn most_frequent(&self) -> Option<(&T, u64)> {
        self.counts
            .iter()
            .map(|(k, &v)| (k, v))
            .max_by_key(|&(_, v)| v)
    }
    /// Returns the total number of recordings across all items.
    pub fn total(&self) -> u64 {
        self.counts.values().sum()
    }
    /// Returns the number of distinct items seen.
    pub fn distinct(&self) -> usize {
        self.counts.len()
    }
}
1380/// A type-safe wrapper around a `u32` identifier.
1381#[allow(dead_code)]
1382#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
1383pub struct TypedId<T> {
1384    pub(super) id: u32,
1385    _phantom: std::marker::PhantomData<T>,
1386}
1387#[allow(dead_code)]
1388impl<T> TypedId<T> {
1389    /// Creates a new typed ID.
1390    pub const fn new(id: u32) -> Self {
1391        Self {
1392            id,
1393            _phantom: std::marker::PhantomData,
1394        }
1395    }
1396    /// Returns the raw `u32` ID.
1397    pub fn raw(&self) -> u32 {
1398        self.id
1399    }
1400}
1401/// A scoped arena that tracks a watermark for bulk deallocation.
1402#[allow(dead_code)]
1403pub struct ScopedArenaExt {
1404    pub(crate) inner: LinearArena,
1405    watermarks: Vec<usize>,
1406}
1407#[allow(dead_code)]
1408impl ScopedArenaExt {
1409    /// Creates a scoped arena with `capacity` bytes.
1410    pub fn new(capacity: usize) -> Self {
1411        Self {
1412            inner: LinearArena::new(capacity),
1413            watermarks: Vec::new(),
1414        }
1415    }
1416    /// Pushes a scope: records the current allocation watermark.
1417    pub fn push_scope(&mut self) {
1418        self.watermarks.push(self.inner.used());
1419    }
1420    /// Pops a scope: resets allocation to the saved watermark.
1421    /// Panics if no scope is active.
1422    pub fn pop_scope(&mut self) {
1423        let wm = self.watermarks.pop().expect("ScopedArena: no active scope");
1424        self.inner.top = wm;
1425    }
1426    /// Allocates `size` bytes.
1427    pub fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
1428        self.inner.alloc(size, align)
1429    }
1430    /// Returns the current depth of nested scopes.
1431    pub fn scope_depth(&self) -> usize {
1432        self.watermarks.len()
1433    }
1434}
/// A simple LIFO work queue.
#[allow(dead_code)]
pub struct WorkStack<T> {
    items: Vec<T>,
}
#[allow(dead_code)]
impl<T> WorkStack<T> {
    /// Creates a new empty stack.
    pub fn new() -> Self {
        WorkStack { items: Vec::new() }
    }
    /// Pushes a work item onto the top of the stack.
    pub fn push(&mut self, item: T) {
        self.items.push(item);
    }
    /// Pops and returns the most recently pushed item, if any.
    pub fn pop(&mut self) -> Option<T> {
        self.items.pop()
    }
    /// Returns `true` if no work items are pending.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns the number of pending work items.
    pub fn len(&self) -> usize {
        self.items.len()
    }
}
1463/// A growable array stored entirely within a `LinearArena`.
1464///
1465/// Does NOT support drop (elements must be `Copy`).
1466#[allow(dead_code)]
1467pub struct ArenaVec<T: Copy> {
1468    base: usize,
1469    length: usize,
1470    _t: std::marker::PhantomData<T>,
1471}
1472#[allow(dead_code)]
1473impl<T: Copy> ArenaVec<T> {
1474    /// Creates a new `ArenaVec` with `capacity` elements in `arena`.
1475    pub fn new(arena: &mut LinearArena, capacity: usize) -> Option<Self> {
1476        let base = arena.alloc(
1477            capacity * std::mem::size_of::<T>(),
1478            std::mem::align_of::<T>(),
1479        )?;
1480        Some(Self {
1481            base,
1482            length: 0,
1483            _t: std::marker::PhantomData,
1484        })
1485    }
1486    /// Returns the number of elements.
1487    pub fn len(&self) -> usize {
1488        self.length
1489    }
1490    /// Returns `true` if the vec is empty.
1491    pub fn is_empty(&self) -> bool {
1492        self.length == 0
1493    }
1494}
/// Statistics collected from an arena allocator.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct ArenaStatsExt {
    /// Total bytes allocated.
    pub bytes_allocated: usize,
    /// Total number of allocations performed.
    pub alloc_count: usize,
    /// Number of chunks allocated.
    pub chunk_count: usize,
    /// Wasted bytes due to alignment padding.
    pub wasted_bytes: usize,
}
#[allow(dead_code)]
impl ArenaStatsExt {
    /// Creates a zeroed stats record.
    pub fn new() -> Self {
        ArenaStatsExt {
            bytes_allocated: 0,
            alloc_count: 0,
            chunk_count: 0,
            wasted_bytes: 0,
        }
    }
    /// Returns the average allocation size in bytes (0.0 with no allocations).
    pub fn avg_alloc_size(&self) -> f64 {
        match self.alloc_count {
            0 => 0.0,
            n => self.bytes_allocated as f64 / n as f64,
        }
    }
    /// Returns the fragmentation ratio, wasted / (allocated + wasted).
    pub fn fragmentation(&self) -> f64 {
        let total = (self.bytes_allocated + self.wasted_bytes) as f64;
        if total == 0.0 {
            0.0
        } else {
            self.wasted_bytes as f64 / total
        }
    }
}
/// A fixed-size object pool backed by a free-list.
#[allow(dead_code)]
pub struct PoolArena {
    slot_size: usize,
    capacity: usize,
    // Indices of currently-free slots; each index appears at most once.
    free_list: Vec<usize>,
    data: Vec<u8>,
}
#[allow(dead_code)]
impl PoolArena {
    /// Creates a pool that holds `capacity` objects of `slot_size` bytes each.
    pub fn new(slot_size: usize, capacity: usize) -> Self {
        let data = vec![0u8; slot_size * capacity];
        let free_list: Vec<usize> = (0..capacity).collect();
        Self {
            slot_size,
            capacity,
            free_list,
            data,
        }
    }
    /// Allocates one slot.  Returns the slot index or `None` if exhausted.
    pub fn alloc_slot(&mut self) -> Option<usize> {
        self.free_list.pop()
    }
    /// Frees a slot by returning it to the free list.
    ///
    /// Out-of-range indices and double frees are ignored: pushing a
    /// duplicate index would let `alloc_slot` hand the same slot out
    /// twice. The membership scan is O(free slots).
    pub fn free_slot(&mut self, idx: usize) {
        if idx < self.capacity && !self.free_list.contains(&idx) {
            self.free_list.push(idx);
        }
    }
    /// Returns the number of available (free) slots.
    pub fn available(&self) -> usize {
        self.free_list.len()
    }
    /// Returns the total pool capacity in slots.
    pub fn capacity(&self) -> usize {
        self.capacity
    }
    /// Returns the slot size in bytes.
    pub fn slot_size(&self) -> usize {
        self.slot_size
    }
}
1579/// An arena with O(1) deallocation via a free list.
1580///
1581/// Freed slots are tracked and reused on the next allocation.
1582#[derive(Debug)]
1583pub struct SlabArena<T> {
1584    data: Vec<Option<T>>,
1585    free_list: Vec<u32>,
1586}
1587impl<T> SlabArena<T> {
1588    /// Create a new empty slab arena.
1589    pub fn new() -> Self {
1590        Self {
1591            data: Vec::new(),
1592            free_list: Vec::new(),
1593        }
1594    }
1595    /// Create a slab arena with the given initial capacity.
1596    pub fn with_capacity(cap: usize) -> Self {
1597        Self {
1598            data: Vec::with_capacity(cap),
1599            free_list: Vec::new(),
1600        }
1601    }
1602    /// Allocate a value, reusing a freed slot if available.
1603    pub fn alloc(&mut self, value: T) -> Idx<T> {
1604        if let Some(idx) = self.free_list.pop() {
1605            self.data[idx as usize] = Some(value);
1606            Idx::new(idx)
1607        } else {
1608            let idx = self.data.len() as u32;
1609            assert!(idx < u32::MAX, "slab arena overflow");
1610            self.data.push(Some(value));
1611            Idx::new(idx)
1612        }
1613    }
1614    /// Free the value at `idx`, returning it.
1615    ///
1616    /// Returns `None` if the slot was already free.
1617    pub fn free(&mut self, idx: Idx<T>) -> Option<T> {
1618        let slot = self.data.get_mut(idx.raw as usize)?;
1619        let value = slot.take()?;
1620        self.free_list.push(idx.raw);
1621        Some(value)
1622    }
1623    /// Get a reference to the value at `idx`.
1624    ///
1625    /// Returns `None` if the slot is free.
1626    pub fn get(&self, idx: Idx<T>) -> Option<&T> {
1627        self.data.get(idx.raw as usize)?.as_ref()
1628    }
1629    /// Get a mutable reference to the value at `idx`.
1630    pub fn get_mut(&mut self, idx: Idx<T>) -> Option<&mut T> {
1631        self.data.get_mut(idx.raw as usize)?.as_mut()
1632    }
1633    /// Number of live (non-free) slots.
1634    pub fn len(&self) -> usize {
1635        self.data.iter().filter(|s| s.is_some()).count()
1636    }
1637    /// Total number of slots (including free ones).
1638    pub fn capacity_slots(&self) -> usize {
1639        self.data.len()
1640    }
1641    /// Returns `true` if there are no live values.
1642    pub fn is_empty(&self) -> bool {
1643        self.len() == 0
1644    }
1645    /// Number of slots on the free list.
1646    pub fn free_count(&self) -> usize {
1647        self.free_list.len()
1648    }
1649    /// Iterate over all live (non-free) (index, value) pairs.
1650    pub fn iter_live(&self) -> impl Iterator<Item = (Idx<T>, &T)> {
1651        self.data
1652            .iter()
1653            .enumerate()
1654            .filter_map(|(i, slot)| slot.as_ref().map(|v| (Idx::new(i as u32), v)))
1655    }
1656}
1657/// A typed index into an arena.
1658///
1659/// This is a zero-cost abstraction over `u32` that provides type safety.
1660/// Equality checks are O(1) pointer comparisons.
1661#[derive(Clone, Copy)]
1662pub struct Idx<T> {
1663    pub(super) raw: u32,
1664    _marker: PhantomData<T>,
1665}
1666impl<T> Idx<T> {
1667    /// Create a new index from a raw u32 value.
1668    ///
1669    /// # Safety
1670    /// The caller must ensure the index is valid for the target arena.
1671    #[inline]
1672    pub(crate) fn new(raw: u32) -> Self {
1673        Self {
1674            raw,
1675            _marker: PhantomData,
1676        }
1677    }
1678    /// Get the raw index value.
1679    #[inline]
1680    pub fn raw(&self) -> u32 {
1681        self.raw
1682    }
1683    /// Cast to a different type without changing the raw index.
1684    ///
1685    /// # Safety
1686    /// The caller must ensure the reinterpreted index is valid.
1687    pub fn cast<U>(self) -> Idx<U> {
1688        Idx::new(self.raw)
1689    }
1690    /// Check whether this index is the first (index 0).
1691    pub fn is_first(self) -> bool {
1692        self.raw == 0
1693    }
1694    /// Increment the index by 1 (without bounds checking).
1695    pub fn next(self) -> Self {
1696        Self::new(self.raw + 1)
1697    }
1698    /// Convert to `usize` for slice indexing.
1699    pub fn to_usize(self) -> usize {
1700        self.raw as usize
1701    }
1702}
/// A simple event counter with named events.
#[allow(dead_code)]
pub struct EventCounter {
    counts: std::collections::HashMap<String, u64>,
}
#[allow(dead_code)]
impl EventCounter {
    /// Creates a new empty counter.
    pub fn new() -> Self {
        EventCounter {
            counts: std::collections::HashMap::new(),
        }
    }
    /// Increments the counter for `event` by one.
    pub fn inc(&mut self, event: &str) {
        self.add(event, 1);
    }
    /// Adds `n` to the counter for `event`, creating it at zero if new.
    pub fn add(&mut self, event: &str, n: u64) {
        *self.counts.entry(event.to_string()).or_default() += n;
    }
    /// Returns the count for `event` (0 if never recorded).
    pub fn get(&self, event: &str) -> u64 {
        self.counts.get(event).map_or(0, |&c| c)
    }
    /// Returns the total count across all events.
    pub fn total(&self) -> u64 {
        self.counts.values().sum()
    }
    /// Resets all counters.
    pub fn reset(&mut self) {
        self.counts.clear();
    }
    /// Returns all event names, in no particular order.
    pub fn event_names(&self) -> Vec<&str> {
        self.counts.keys().map(String::as_str).collect()
    }
}