Skip to main content

oxilean_runtime/arena/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use std::cell::Cell;
6use std::collections::HashMap;
7
8use super::functions::{
9    ARENA_ALIGN, DEFAULT_CHUNK_SIZE, MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, PAGE_SIZE,
10};
11
/// A location within a bump arena.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ArenaOffset {
    /// Which chunk the allocation lives in.
    pub chunk: usize,
    /// Byte position inside that chunk.
    pub offset: usize,
}
impl ArenaOffset {
    /// Build an offset pointing at byte `offset` of chunk `chunk`.
    pub fn new(chunk: usize, offset: usize) -> Self {
        Self { chunk, offset }
    }
}
/// A linear (bump-pointer) allocator backed by a fixed-size buffer.
#[allow(dead_code)]
pub struct LinearAllocator {
    /// Backing storage; never grows after construction.
    buf: Vec<u8>,
    /// Bump pointer: index of the next free byte.
    top: usize,
    /// Number of successful allocations.
    alloc_count: u64,
    /// Number of failed (overflow) allocations.
    overflow_count: u64,
}
#[allow(dead_code)]
impl LinearAllocator {
    /// Create a linear allocator with the given buffer size (minimum 16 bytes).
    pub fn new(size: usize) -> Self {
        Self {
            buf: vec![0u8; size.max(16)],
            top: 0,
            alloc_count: 0,
            overflow_count: 0,
        }
    }
    /// Allocate `size` bytes (aligned to `align`). Returns an offset into the buffer.
    ///
    /// `align` is rounded up to a power of two (an `align` of 0 becomes 1).
    /// All arithmetic is overflow-checked: previously `aligned + size`
    /// could wrap for a huge `size` in release builds, making the bounds
    /// check pass and "successfully" allocating past the buffer's end.
    pub fn alloc_offset(&mut self, size: usize, align: usize) -> Option<usize> {
        let align = align.next_power_of_two().max(1);
        // Round `top` up to the requested alignment, rejecting overflow.
        let aligned = match self.top.checked_add(align - 1) {
            Some(v) => v & !(align - 1),
            None => {
                self.overflow_count += 1;
                return None;
            }
        };
        // The allocation must end inside the buffer (overflow-safe).
        let end = match aligned.checked_add(size) {
            Some(e) if e <= self.buf.len() => e,
            _ => {
                self.overflow_count += 1;
                return None;
            }
        };
        self.top = end;
        self.alloc_count += 1;
        Some(aligned)
    }
    /// Get a reference to bytes at the given offset, or `None` when the
    /// range is out of bounds (overflow-safe).
    pub fn get_bytes(&self, offset: usize, size: usize) -> Option<&[u8]> {
        self.buf.get(offset..offset.checked_add(size)?)
    }
    /// Get a mutable reference to bytes at the given offset.
    pub fn get_bytes_mut(&mut self, offset: usize, size: usize) -> Option<&mut [u8]> {
        let end = offset.checked_add(size)?;
        self.buf.get_mut(offset..end)
    }
    /// Reset the allocator (reuse the buffer). Counters are preserved.
    pub fn reset(&mut self) {
        self.top = 0;
    }
    /// Current top-of-stack offset.
    pub fn top(&self) -> usize {
        self.top
    }
    /// Total buffer capacity.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    /// Free bytes remaining.
    pub fn remaining(&self) -> usize {
        self.buf.len().saturating_sub(self.top)
    }
    /// Fraction of the buffer in use (0.0 for an empty buffer).
    pub fn utilization(&self) -> f64 {
        if self.buf.is_empty() {
            0.0
        } else {
            self.top as f64 / self.buf.len() as f64
        }
    }
    /// Number of successful allocations.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }
    /// Number of failed (overflow) allocations.
    pub fn overflow_count(&self) -> u64 {
        self.overflow_count
    }
}
/// Results from an arena benchmark run.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ArenaBenchResult {
    pub iterations: u64,
    pub total_bytes: u64,
    pub allocs_per_iter: usize,
    pub description: String,
}
#[allow(dead_code)]
impl ArenaBenchResult {
    /// Bundle the raw numbers from one benchmark run.
    pub fn new(iterations: u64, total_bytes: u64, allocs_per_iter: usize, desc: &str) -> Self {
        Self {
            iterations,
            total_bytes,
            allocs_per_iter,
            description: desc.to_owned(),
        }
    }
    /// Average bytes allocated per iteration (0.0 when no iterations ran).
    pub fn bytes_per_iter(&self) -> f64 {
        match self.iterations {
            0 => 0.0,
            n => self.total_bytes as f64 / n as f64,
        }
    }
}
/// Statistics for a region.
///
/// Plain data carrier; all counters are cumulative. NOTE(review): the
/// code that updates these fields is not visible in this module chunk.
#[derive(Clone, Debug, Default)]
pub struct RegionStats {
    /// Number of allocations in this region.
    pub allocations: u64,
    /// Bytes allocated in this region.
    pub bytes_allocated: u64,
    /// Number of times this region was reset.
    pub resets: u64,
}
135/// An arena that tracks generations for safe index validation.
136///
137/// When an element is removed, the slot's generation is incremented.
138/// Indices store the generation they were created with, so stale
139/// references can be detected.
140pub struct GenerationalArena<T> {
141    /// Values with generation counters.
142    pub(super) entries: Vec<GenerationalEntry<T>>,
143    /// Free list.
144    free_list: Vec<usize>,
145    /// Current generation for new allocations.
146    pub(super) generation: u32,
147}
148impl<T> GenerationalArena<T> {
149    /// Create a new generational arena.
150    pub fn new() -> Self {
151        GenerationalArena {
152            entries: Vec::new(),
153            free_list: Vec::new(),
154            generation: 0,
155        }
156    }
157    /// Create with pre-allocated capacity.
158    pub fn with_capacity(cap: usize) -> Self {
159        GenerationalArena {
160            entries: Vec::with_capacity(cap),
161            free_list: Vec::new(),
162            generation: 0,
163        }
164    }
165    /// Insert a value and get its index.
166    pub fn insert(&mut self, value: T) -> GenIdx {
167        self.generation = self.generation.wrapping_add(1);
168        if let Some(slot) = self.free_list.pop() {
169            self.entries[slot] = GenerationalEntry {
170                value: Some(value),
171                generation: self.generation,
172            };
173            GenIdx {
174                index: slot as u32,
175                generation: self.generation,
176            }
177        } else {
178            let index = self.entries.len() as u32;
179            self.entries.push(GenerationalEntry {
180                value: Some(value),
181                generation: self.generation,
182            });
183            GenIdx {
184                index,
185                generation: self.generation,
186            }
187        }
188    }
189    /// Get a reference to a value by generational index.
190    pub fn get(&self, idx: GenIdx) -> Option<&T> {
191        let entry = self.entries.get(idx.index as usize)?;
192        if entry.generation == idx.generation {
193            entry.value.as_ref()
194        } else {
195            None
196        }
197    }
198    /// Get a mutable reference to a value by generational index.
199    pub fn get_mut(&mut self, idx: GenIdx) -> Option<&mut T> {
200        let entry = self.entries.get_mut(idx.index as usize)?;
201        if entry.generation == idx.generation {
202            entry.value.as_mut()
203        } else {
204            None
205        }
206    }
207    /// Remove a value by generational index.
208    pub fn remove(&mut self, idx: GenIdx) -> Option<T> {
209        let entry = self.entries.get_mut(idx.index as usize)?;
210        if entry.generation == idx.generation {
211            let value = entry.value.take();
212            self.free_list.push(idx.index as usize);
213            value
214        } else {
215            None
216        }
217    }
218    /// Check if an index is valid.
219    pub fn contains(&self, idx: GenIdx) -> bool {
220        self.entries
221            .get(idx.index as usize)
222            .map(|e| e.generation == idx.generation && e.value.is_some())
223            .unwrap_or(false)
224    }
225    /// Number of live entries.
226    pub fn len(&self) -> usize {
227        self.entries.len() - self.free_list.len()
228    }
229    /// Check if empty.
230    pub fn is_empty(&self) -> bool {
231        self.len() == 0
232    }
233    /// Clear all entries.
234    pub fn clear(&mut self) {
235        self.entries.clear();
236        self.free_list.clear();
237    }
238    /// Iterate over live entries.
239    pub fn iter(&self) -> impl Iterator<Item = (GenIdx, &T)> {
240        self.entries.iter().enumerate().filter_map(|(i, e)| {
241            e.value.as_ref().map(|v| {
242                (
243                    GenIdx {
244                        index: i as u32,
245                        generation: e.generation,
246                    },
247                    v,
248                )
249            })
250        })
251    }
252}
/// An index into a generational arena that includes a generation counter.
///
/// Lookups (`get`/`get_mut`/`remove`/`contains`) compare this stored
/// generation against the slot's current one, so an index that outlived
/// its value is rejected rather than aliasing a reused slot.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct GenIdx {
    /// The slot index.
    pub index: u32,
    /// The generation when this index was created.
    pub generation: u32,
}
261/// A pool of reusable bump arenas.
262///
263/// When a temporary arena is needed (e.g., for evaluating a single expression),
264/// it can be acquired from the pool and returned after use rather than creating
265/// a new one each time.
266pub struct ArenaPool {
267    /// Available (idle) arenas.
268    pub(super) available: Vec<BumpArena>,
269    /// Maximum number of arenas to keep in the pool.
270    pub(super) max_pool_size: usize,
271    /// Default chunk size for new arenas.
272    pub(super) chunk_size: usize,
273    /// Statistics.
274    stats: ArenaPoolStats,
275}
276impl ArenaPool {
277    /// Create a new arena pool.
278    pub fn new() -> Self {
279        ArenaPool {
280            available: Vec::new(),
281            max_pool_size: 8,
282            chunk_size: DEFAULT_CHUNK_SIZE,
283            stats: ArenaPoolStats::default(),
284        }
285    }
286    /// Create a new arena pool with custom parameters.
287    pub fn with_config(max_pool_size: usize, chunk_size: usize) -> Self {
288        ArenaPool {
289            available: Vec::new(),
290            max_pool_size,
291            chunk_size,
292            stats: ArenaPoolStats::default(),
293        }
294    }
295    /// Acquire an arena from the pool (or create a new one).
296    pub fn acquire(&mut self) -> BumpArena {
297        self.stats.acquired += 1;
298        if let Some(mut arena) = self.available.pop() {
299            arena.reset();
300            arena
301        } else {
302            self.stats.created += 1;
303            BumpArena::with_chunk_size(self.chunk_size)
304        }
305    }
306    /// Return an arena to the pool for reuse.
307    pub fn release(&mut self, arena: BumpArena) {
308        self.stats.returned += 1;
309        if self.available.len() < self.max_pool_size {
310            self.available.push(arena);
311        } else {
312            self.stats.discarded += 1;
313        }
314    }
315    /// Number of available arenas in the pool.
316    pub fn available_count(&self) -> usize {
317        self.available.len()
318    }
319    /// Get the pool statistics.
320    pub fn stats(&self) -> &ArenaPoolStats {
321        &self.stats
322    }
323    /// Set the maximum pool size.
324    pub fn set_max_pool_size(&mut self, size: usize) {
325        self.max_pool_size = size;
326        while self.available.len() > self.max_pool_size {
327            self.available.pop();
328        }
329    }
330    /// Clear the pool.
331    pub fn clear(&mut self) {
332        self.available.clear();
333    }
334}
335/// A page manager that allocates fixed-size 4096-byte pages.
336#[allow(dead_code)]
337pub struct ArenaPageManager {
338    pages: Vec<Box<[u8; PAGE_SIZE]>>,
339    free_list: Vec<usize>,
340    alloc_count: u64,
341    free_count: u64,
342}
343#[allow(dead_code)]
344impl ArenaPageManager {
345    /// Create an empty page manager.
346    pub fn new() -> Self {
347        Self {
348            pages: Vec::new(),
349            free_list: Vec::new(),
350            alloc_count: 0,
351            free_count: 0,
352        }
353    }
354    /// Allocate a page. Returns the page index.
355    pub fn alloc_page(&mut self) -> usize {
356        self.alloc_count += 1;
357        if let Some(idx) = self.free_list.pop() {
358            idx
359        } else {
360            let idx = self.pages.len();
361            self.pages.push(Box::new([0u8; PAGE_SIZE]));
362            idx
363        }
364    }
365    /// Free a page by index.
366    pub fn free_page(&mut self, idx: usize) {
367        if idx < self.pages.len() {
368            for b in self.pages[idx].iter_mut() {
369                *b = 0;
370            }
371            self.free_list.push(idx);
372            self.free_count += 1;
373        }
374    }
375    /// Get a reference to a page.
376    pub fn page(&self, idx: usize) -> Option<&[u8; PAGE_SIZE]> {
377        self.pages.get(idx).map(|p| p.as_ref())
378    }
379    /// Get a mutable reference to a page.
380    pub fn page_mut(&mut self, idx: usize) -> Option<&mut [u8; PAGE_SIZE]> {
381        self.pages.get_mut(idx).map(|p| p.as_mut())
382    }
383    /// Total pages allocated from the system.
384    pub fn total_pages(&self) -> usize {
385        self.pages.len()
386    }
387    /// Free pages in the free list.
388    pub fn free_pages(&self) -> usize {
389        self.free_list.len()
390    }
391    /// Live pages (total - free).
392    pub fn live_pages(&self) -> usize {
393        self.pages.len().saturating_sub(self.free_list.len())
394    }
395    /// Total bytes managed.
396    pub fn total_bytes(&self) -> usize {
397        self.pages.len() * PAGE_SIZE
398    }
399    /// Total allocs.
400    pub fn alloc_count(&self) -> u64 {
401        self.alloc_count
402    }
403    /// Total frees.
404    pub fn free_count(&self) -> u64 {
405        self.free_count
406    }
407}
/// A bump allocator for fast, thread-local allocation.
///
/// Objects are allocated by advancing a pointer. Deallocation happens
/// all at once when `reset()` is called. This is ideal for temporary
/// allocations during evaluation.
///
/// Backing memory is a list of `Chunk`s (declared elsewhere in this
/// module); allocation first tries the current chunk, then every later
/// chunk, and finally grows the list with a new chunk.
pub struct BumpArena {
    /// The chunks of memory.
    pub(super) chunks: Vec<Chunk>,
    /// Index of the current chunk (first one tried on allocation).
    current_chunk: usize,
    /// Default chunk size for new allocations.
    chunk_size: usize,
    /// Statistics.
    stats: ArenaStats,
}
impl BumpArena {
    /// Create a new bump arena with default chunk size (64 KB).
    pub fn new() -> Self {
        BumpArena {
            chunks: vec![Chunk::new(DEFAULT_CHUNK_SIZE)],
            current_chunk: 0,
            chunk_size: DEFAULT_CHUNK_SIZE,
            stats: ArenaStats::new(),
        }
    }
    /// Create a new bump arena with the specified chunk size.
    ///
    /// The size is clamped to `[MIN_CHUNK_SIZE, MAX_CHUNK_SIZE]`.
    pub fn with_chunk_size(size: usize) -> Self {
        let size = size.clamp(MIN_CHUNK_SIZE, MAX_CHUNK_SIZE);
        BumpArena {
            chunks: vec![Chunk::new(size)],
            current_chunk: 0,
            chunk_size: size,
            stats: ArenaStats::new(),
        }
    }
    /// Allocate `size` bytes with default alignment.
    ///
    /// Returns the offset within the arena (chunk_index, byte_offset).
    pub fn alloc(&mut self, size: usize) -> ArenaOffset {
        self.alloc_aligned(size, ARENA_ALIGN)
    }
    /// Allocate `size` bytes with the specified alignment.
    ///
    /// Never fails: if no existing chunk can satisfy the request, a new
    /// chunk sized to fit is appended.
    pub fn alloc_aligned(&mut self, size: usize, align: usize) -> ArenaOffset {
        self.stats.total_allocations += 1;
        self.stats.total_bytes_allocated += size as u64;
        // Fast path: the current chunk has room.
        if let Some(offset) = self.chunks[self.current_chunk].try_alloc(size, align) {
            return ArenaOffset {
                chunk: self.current_chunk,
                offset,
            };
        }
        // Try chunks after the current one. Chunks before it are never
        // revisited, so their tail space stays unused until `reset`.
        for i in (self.current_chunk + 1)..self.chunks.len() {
            if let Some(offset) = self.chunks[i].try_alloc(size, align) {
                self.current_chunk = i;
                return ArenaOffset {
                    chunk: self.current_chunk,
                    offset,
                };
            }
        }
        // Grow: oversized requests get a dedicated chunk big enough for
        // the payload plus worst-case alignment padding; normal requests
        // get a standard-size chunk.
        let new_chunk_size = if size > self.chunk_size {
            (size + align).max(self.chunk_size)
        } else {
            self.chunk_size.min(MAX_CHUNK_SIZE)
        };
        let mut chunk = Chunk::new(new_chunk_size);
        let offset = chunk
            .try_alloc(size, align)
            .expect("freshly allocated chunk must have enough space for the requested allocation");
        self.chunks.push(chunk);
        self.current_chunk = self.chunks.len() - 1;
        self.stats.total_chunks_allocated += 1;
        ArenaOffset {
            chunk: self.current_chunk,
            offset,
        }
    }
    /// Get a byte slice for a previously allocated region.
    ///
    /// Returns `None` when the chunk index or byte range is out of
    /// bounds.
    pub fn get_bytes(&self, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
        let chunk = self.chunks.get(loc.chunk)?;
        if loc.offset + size > chunk.data.len() {
            return None;
        }
        Some(&chunk.data[loc.offset..loc.offset + size])
    }
    /// Get a mutable byte slice for a previously allocated region.
    pub fn get_bytes_mut(&mut self, loc: &ArenaOffset, size: usize) -> Option<&mut [u8]> {
        let chunk = self.chunks.get_mut(loc.chunk)?;
        if loc.offset + size > chunk.data.len() {
            return None;
        }
        Some(&mut chunk.data[loc.offset..loc.offset + size])
    }
    /// Reset the arena, freeing all allocations.
    ///
    /// This does not deallocate the underlying memory — chunks are reused.
    pub fn reset(&mut self) {
        for chunk in &mut self.chunks {
            chunk.reset();
        }
        self.current_chunk = 0;
        self.stats.total_resets += 1;
    }
    /// Total number of bytes currently allocated.
    pub fn bytes_used(&self) -> usize {
        self.chunks.iter().map(|c| c.used).sum()
    }
    /// Total capacity (including unused space).
    pub fn total_capacity(&self) -> usize {
        self.chunks.iter().map(|c| c.capacity()).sum()
    }
    /// Number of chunks.
    pub fn num_chunks(&self) -> usize {
        self.chunks.len()
    }
    /// Get the arena statistics.
    pub fn stats(&self) -> &ArenaStats {
        &self.stats
    }
    /// Shrink the arena, releasing unused chunks.
    ///
    /// Truncates at the first completely-unused chunk (allocation only
    /// moves forward, so chunks after it are also unused), always keeps
    /// at least one chunk, and clamps `current_chunk` into range.
    pub fn shrink(&mut self) {
        let keep = self
            .chunks
            .iter()
            .position(|c| c.used == 0)
            .unwrap_or(self.chunks.len());
        self.chunks.truncate(keep.max(1));
        self.current_chunk = self.current_chunk.min(self.chunks.len() - 1);
    }
}
/// A bump arena with mark/release support for scoped allocation.
#[allow(dead_code)]
pub struct MarkArena {
    /// Backing storage; fixed size after construction.
    buf: Vec<u8>,
    /// Bump pointer (bytes used).
    top: usize,
    /// Stack of saved `top` positions.
    marks: Vec<usize>,
}
#[allow(dead_code)]
impl MarkArena {
    /// Create a mark arena with the given capacity (minimum 64 bytes).
    pub fn new(capacity: usize) -> Self {
        Self {
            buf: vec![0u8; capacity.max(64)],
            top: 0,
            marks: Vec::new(),
        }
    }
    /// Allocate `size` bytes. Returns an offset or None if full.
    ///
    /// Uses `checked_add` so a huge `size` fails cleanly: previously
    /// `top + size` could wrap in release builds, falsely passing the
    /// capacity check and corrupting `top`.
    pub fn alloc(&mut self, size: usize) -> Option<usize> {
        let end = self.top.checked_add(size)?;
        if end > self.buf.len() {
            return None;
        }
        let offset = self.top;
        self.top = end;
        Some(offset)
    }
    /// Save the current top as a mark and return it.
    pub fn mark(&mut self) -> usize {
        let mark = self.top;
        self.marks.push(mark);
        mark
    }
    /// Release back to the most recent mark (no-op when none exist).
    pub fn release(&mut self) {
        if let Some(mark) = self.marks.pop() {
            self.top = mark;
        }
    }
    /// Release back to a specific mark, discarding that mark and any
    /// newer ones. `top` never moves forward.
    pub fn release_to(&mut self, mark: usize) {
        self.marks.retain(|&m| m < mark);
        self.top = mark.min(self.top);
    }
    /// Reset to empty.
    pub fn reset(&mut self) {
        self.top = 0;
        self.marks.clear();
    }
    /// Current top.
    pub fn top(&self) -> usize {
        self.top
    }
    /// Current mark stack depth.
    pub fn mark_depth(&self) -> usize {
        self.marks.len()
    }
    /// Capacity.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
}
/// An entry in a generational arena.
///
/// A slot is "free" exactly when `value` is `None`; `generation` records
/// the arena generation at the last write and is what `GenIdx` lookups
/// are validated against.
#[derive(Debug)]
pub(super) struct GenerationalEntry<T> {
    /// The stored value (None if free).
    value: Option<T>,
    /// The generation when this slot was last written.
    generation: u32,
}
607/// Thread-local arena for temporary allocations.
608///
609/// Each thread gets its own bump arena that can be used for short-lived
610/// allocations without synchronization.
611pub struct ThreadLocalArena {
612    /// The arena.
613    arena: BumpArena,
614    /// High-water mark for automatic resets.
615    _high_water_mark: Cell<usize>,
616    /// Number of allocations since last reset.
617    _allocs_since_reset: Cell<u64>,
618}
619impl ThreadLocalArena {
620    /// Create a new thread-local arena.
621    pub fn new() -> Self {
622        ThreadLocalArena {
623            arena: BumpArena::new(),
624            _high_water_mark: Cell::new(DEFAULT_CHUNK_SIZE),
625            _allocs_since_reset: Cell::new(0),
626        }
627    }
628    /// Allocate bytes.
629    pub fn alloc(&mut self, size: usize) -> ArenaOffset {
630        self._allocs_since_reset
631            .set(self._allocs_since_reset.get() + 1);
632        self.arena.alloc(size)
633    }
634    /// Reset the thread-local arena.
635    pub fn reset(&mut self) {
636        self.arena.reset();
637        self._allocs_since_reset.set(0);
638    }
639    /// Get the underlying arena.
640    pub fn arena(&self) -> &BumpArena {
641        &self.arena
642    }
643    /// Bytes used.
644    pub fn bytes_used(&self) -> usize {
645        self.arena.bytes_used()
646    }
647}
648/// An arena that tracks its allocation pressure and adapts chunk size.
649#[allow(dead_code)]
650pub struct AdaptiveArena {
651    inner: BumpArena,
652    pressure_samples: Vec<f64>,
653    target_utilization: f64,
654    sample_window: usize,
655}
656#[allow(dead_code)]
657impl AdaptiveArena {
658    /// Create an adaptive arena.
659    pub fn new(target_utilization: f64, sample_window: usize) -> Self {
660        Self {
661            inner: BumpArena::new(),
662            pressure_samples: Vec::new(),
663            target_utilization: target_utilization.clamp(0.1, 0.99),
664            sample_window: sample_window.max(3),
665        }
666    }
667    /// Allocate bytes and record pressure.
668    pub fn alloc(&mut self, size: usize) -> ArenaOffset {
669        let result = self.inner.alloc(size);
670        let pressure = self.inner.bytes_used() as f64
671            / (self.inner.num_chunks() as f64 * DEFAULT_CHUNK_SIZE as f64 + 1.0);
672        self.pressure_samples.push(pressure);
673        if self.pressure_samples.len() > self.sample_window {
674            self.pressure_samples.remove(0);
675        }
676        result
677    }
678    /// Average pressure over recent samples.
679    pub fn avg_pressure(&self) -> f64 {
680        if self.pressure_samples.is_empty() {
681            return 0.0;
682        }
683        self.pressure_samples.iter().sum::<f64>() / self.pressure_samples.len() as f64
684    }
685    /// Whether the arena is over-utilized.
686    pub fn is_over_utilized(&self) -> bool {
687        self.avg_pressure() > self.target_utilization
688    }
689    /// Reset the arena.
690    pub fn reset(&mut self) {
691        self.inner.reset();
692        self.pressure_samples.clear();
693    }
694    /// Allocated bytes.
695    pub fn allocated_bytes(&self) -> usize {
696        self.inner.bytes_used()
697    }
698}
/// Index into a typed arena.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ArenaIdx(pub u32);
impl ArenaIdx {
    /// Wrap a raw slot number.
    pub fn new(index: u32) -> Self {
        Self(index)
    }
    /// Unwrap back to the raw slot number.
    pub fn raw(self) -> u32 {
        self.0
    }
}
/// Statistics for arena allocators.
#[derive(Clone, Debug, Default)]
pub struct ArenaStats {
    /// Total number of allocations.
    pub total_allocations: u64,
    /// Total bytes allocated.
    pub total_bytes_allocated: u64,
    /// Total number of arena resets.
    pub total_resets: u64,
    /// Total number of chunks allocated.
    pub total_chunks_allocated: u64,
}
impl ArenaStats {
    /// Fresh, zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    /// Mean bytes per allocation; 0.0 when nothing has been allocated.
    pub fn avg_alloc_size(&self) -> f64 {
        match self.total_allocations {
            0 => 0.0,
            n => self.total_bytes_allocated as f64 / n as f64,
        }
    }
    /// Zero every counter.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Statistics for a typed arena.
///
/// Plain data carrier. NOTE(review): the typed arena that maintains
/// these counters is not visible in this module chunk.
#[derive(Clone, Debug, Default)]
pub struct TypedArenaStats {
    /// Total allocations.
    pub total_allocations: u64,
    /// Total deallocations.
    pub total_deallocations: u64,
    /// Current live count.
    pub live_count: u64,
    /// Peak live count.
    pub peak_count: u64,
}
/// A pool of pre-allocated byte chunks for arena reuse.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct ArenaChunkPool {
    /// Idle chunks available for reuse.
    chunks: Vec<Vec<u8>>,
    /// Size every pooled chunk must have.
    chunk_size: usize,
    /// Maximum number of idle chunks retained.
    max_pooled: usize,
    /// Acquires served from the pool.
    reused: u64,
    /// Acquires that allocated a fresh chunk.
    created: u64,
}
#[allow(dead_code)]
impl ArenaChunkPool {
    /// Create a pool producing `chunk_size`-byte chunks, keeping at most
    /// `max_pooled` idle chunks.
    pub fn new(chunk_size: usize, max_pooled: usize) -> Self {
        Self {
            chunks: Vec::new(),
            chunk_size,
            max_pooled,
            reused: 0,
            created: 0,
        }
    }
    /// Acquire a zeroed chunk (from the pool or newly allocated).
    pub fn acquire(&mut self) -> Vec<u8> {
        if let Some(mut chunk) = self.chunks.pop() {
            // Zero the recycled buffer so callers always see a clean
            // chunk (slice::fill compiles to memset, unlike the previous
            // per-byte loop).
            chunk.fill(0);
            self.reused += 1;
            chunk
        } else {
            self.created += 1;
            vec![0u8; self.chunk_size]
        }
    }
    /// Return a chunk to the pool.
    ///
    /// Chunks of the wrong size, or arriving when the pool is full, are
    /// silently dropped — deliberate best-effort recycling.
    pub fn release(&mut self, chunk: Vec<u8>) {
        if chunk.len() == self.chunk_size && self.chunks.len() < self.max_pooled {
            self.chunks.push(chunk);
        }
    }
    /// Number of idle chunks currently pooled.
    pub fn pooled_count(&self) -> usize {
        self.chunks.len()
    }
    /// Total acquires served from the pool.
    pub fn reused_count(&self) -> u64 {
        self.reused
    }
    /// Total acquires that allocated a fresh chunk.
    pub fn created_count(&self) -> u64 {
        self.created
    }
    /// Fraction of acquires served from the pool (0.0 when unused).
    pub fn hit_rate(&self) -> f64 {
        let total = self.reused + self.created;
        if total == 0 {
            0.0
        } else {
            self.reused as f64 / total as f64
        }
    }
}
/// A slab allocator for a specific slot size.
#[allow(dead_code)]
pub struct SlabArena {
    /// Size of each slot in bytes (>= 8).
    slot_size: usize,
    /// Backing storage; grows one slot at a time when exhausted.
    buf: Vec<u8>,
    /// Indices of free slots (LIFO).
    free_slots: Vec<usize>,
    /// Total successful allocations.
    alloc_count: u64,
}
#[allow(dead_code)]
impl SlabArena {
    /// Create a slab arena with the given slot size (minimum 8 bytes)
    /// and initial slot capacity.
    pub fn new(slot_size: usize, initial_slots: usize) -> Self {
        let slot_size = slot_size.max(8);
        Self {
            slot_size,
            buf: vec![0u8; slot_size * initial_slots],
            // Reversed so slot 0 is handed out first.
            free_slots: (0..initial_slots).rev().collect(),
            alloc_count: 0,
        }
    }
    /// Allocate a slot. Returns the slot's byte offset in the buffer.
    ///
    /// When no free slot remains, the buffer grows by one slot, so this
    /// never returns `None` in practice.
    pub fn alloc(&mut self) -> Option<usize> {
        self.alloc_count += 1;
        if let Some(slot) = self.free_slots.pop() {
            Some(slot * self.slot_size)
        } else {
            let new_slot = self.buf.len() / self.slot_size;
            // resize avoids the temporary Vec that extend(vec![...]) built.
            self.buf.resize(self.buf.len() + self.slot_size, 0);
            Some(new_slot * self.slot_size)
        }
    }
    /// Free a slot by its byte offset.
    ///
    /// Out-of-range offsets and double frees are ignored: previously an
    /// offset past the end of the buffer was pushed onto the free list,
    /// after which `alloc` could hand out an offset beyond the buffer
    /// and `live_count` would under-report.
    pub fn free(&mut self, offset: usize) {
        let slot = offset / self.slot_size;
        if slot < self.total_slots() && !self.free_slots.contains(&slot) {
            self.free_slots.push(slot);
        }
    }
    /// Number of live (allocated) slots.
    pub fn live_count(&self) -> usize {
        self.total_slots().saturating_sub(self.free_slots.len())
    }
    /// Total slots.
    pub fn total_slots(&self) -> usize {
        self.buf.len() / self.slot_size
    }
    /// Slot size.
    pub fn slot_size(&self) -> usize {
        self.slot_size
    }
    /// Total allocation count.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }
}
/// Detailed statistics for an arena session.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ArenaExtStats {
    pub alloc_calls: u64,
    pub total_bytes_allocated: u64,
    pub peak_bytes: u64,
    pub reset_count: u64,
    pub overflow_count: u64,
    pub chunk_alloc_count: u64,
}
#[allow(dead_code)]
impl ArenaExtStats {
    /// Fresh, zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    /// Fold `other`'s counters into this one; the peak becomes the
    /// larger of the two.
    pub fn merge(&mut self, other: &ArenaExtStats) {
        let ArenaExtStats {
            alloc_calls,
            total_bytes_allocated,
            peak_bytes,
            reset_count,
            overflow_count,
            chunk_alloc_count,
        } = *other;
        self.alloc_calls += alloc_calls;
        self.total_bytes_allocated += total_bytes_allocated;
        self.peak_bytes = self.peak_bytes.max(peak_bytes);
        self.reset_count += reset_count;
        self.overflow_count += overflow_count;
        self.chunk_alloc_count += chunk_alloc_count;
    }
    /// Count one allocation of `bytes` bytes.
    pub fn record_alloc(&mut self, bytes: u64) {
        self.alloc_calls += 1;
        self.total_bytes_allocated += bytes;
    }
    /// Count one reset.
    pub fn record_reset(&mut self) {
        self.reset_count += 1;
    }
    /// Count one failed (overflow) allocation.
    pub fn record_overflow(&mut self) {
        self.overflow_count += 1;
    }
    /// Count one chunk allocation.
    pub fn record_chunk_alloc(&mut self) {
        self.chunk_alloc_count += 1;
    }
    /// Raise the high-water mark if `current_bytes` exceeds it.
    pub fn update_peak(&mut self, current_bytes: u64) {
        self.peak_bytes = self.peak_bytes.max(current_bytes);
    }
    /// Mean bytes per allocation call; 0.0 when none were recorded.
    pub fn avg_alloc_size(&self) -> f64 {
        match self.alloc_calls {
            0 => 0.0,
            n => self.total_bytes_allocated as f64 / n as f64,
        }
    }
}
919/// An arena that automatically returns to a pool when dropped.
920pub struct ScopedArena<'pool> {
921    /// The underlying arena.
922    pub(super) arena: Option<BumpArena>,
923    /// The pool to return the arena to.
924    pub(super) pool: &'pool mut ArenaPool,
925}
926impl<'pool> ScopedArena<'pool> {
927    /// Create a new scoped arena from a pool.
928    pub fn new(pool: &'pool mut ArenaPool) -> Self {
929        let arena = pool.acquire();
930        ScopedArena {
931            arena: Some(arena),
932            pool,
933        }
934    }
935    /// Allocate bytes in this arena.
936    pub fn alloc(&mut self, size: usize) -> ArenaOffset {
937        self.arena
938            .as_mut()
939            .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
940            .alloc(size)
941    }
942    /// Get the underlying arena.
943    pub fn arena(&self) -> &BumpArena {
944        self.arena
945            .as_ref()
946            .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
947    }
948    /// Get the underlying arena mutably.
949    pub fn arena_mut(&mut self) -> &mut BumpArena {
950        self.arena
951            .as_mut()
952            .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
953    }
954}
/// An arena checkpoint (offset into a BumpArena).
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ArenaCheckpoint {
    /// Total bytes the arena had in use when the checkpoint was captured.
    bytes_used: usize,
    /// Number of chunks the arena owned when the checkpoint was captured.
    chunk_count: usize,
}
962#[allow(dead_code)]
963impl ArenaCheckpoint {
964    pub fn capture(arena: &BumpArena) -> Self {
965        Self {
966            bytes_used: arena.bytes_used(),
967            chunk_count: arena.num_chunks(),
968        }
969    }
970    pub fn bytes_used(&self) -> usize {
971        self.bytes_used
972    }
973    pub fn chunk_count(&self) -> usize {
974        self.chunk_count
975    }
976    pub fn bytes_since(&self, later_bytes_used: usize) -> usize {
977        later_bytes_used.saturating_sub(self.bytes_used)
978    }
979}
/// A snapshot of arena state (offset/byte count only, not actual data).
///
/// Three plain `usize` fields, so the type is `Copy` — consistent with the
/// sibling `ArenaCheckpoint`, which already derives it.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ArenaSnapshot {
    /// Offset at snapshot time.
    pub offset: usize,
    /// Number of chunks at snapshot time.
    pub chunk_count: usize,
    /// Total allocated bytes at snapshot time.
    pub allocated_bytes: usize,
}
#[allow(dead_code)]
impl ArenaSnapshot {
    /// Create a snapshot from an arena.
    ///
    /// `offset` and `allocated_bytes` are both read from
    /// `arena.bytes_used()`, so they are identical at capture time.
    pub fn capture(arena: &BumpArena) -> Self {
        Self {
            offset: arena.bytes_used(),
            chunk_count: arena.num_chunks(),
            allocated_bytes: arena.bytes_used(),
        }
    }
    /// Number of bytes allocated between `self` and the `later` snapshot
    /// (saturates at 0 if `later` shows less allocation).
    pub fn bytes_since(&self, later: &ArenaSnapshot) -> usize {
        later.allocated_bytes.saturating_sub(self.allocated_bytes)
    }
    /// Number of chunks added between `self` and the `later` snapshot
    /// (saturates at 0 if `later` has fewer chunks).
    pub fn new_chunks_since(&self, later: &ArenaSnapshot) -> usize {
        later.chunk_count.saturating_sub(self.chunk_count)
    }
}
/// A record of arena allocations for diagnostics.
///
/// Derives `PartialEq`/`Eq` so records can be compared directly in tests and
/// assertions, consistent with the other plain-data types in this module.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AllocRecord {
    /// Size of the allocation in bytes.
    pub size: usize,
    /// Alignment that was requested.
    pub align: usize,
    /// Offset of the allocation within its buffer.
    pub offset: usize,
    /// Caller-supplied label identifying the allocation.
    pub label: String,
}
/// A chunk of memory used by the arena.
#[derive(Debug)]
pub(super) struct Chunk {
    /// The actual storage (zero-initialized when the chunk is created).
    data: Vec<u8>,
    /// How many bytes have been allocated in this chunk.
    used: usize,
}
1027impl Chunk {
1028    /// Create a new chunk with the given capacity.
1029    fn new(capacity: usize) -> Self {
1030        Chunk {
1031            data: vec![0u8; capacity],
1032            used: 0,
1033        }
1034    }
1035    /// Capacity of this chunk.
1036    fn capacity(&self) -> usize {
1037        self.data.len()
1038    }
1039    /// Remaining space in this chunk.
1040    fn remaining(&self) -> usize {
1041        self.data.len() - self.used
1042    }
1043    /// Try to allocate `size` bytes with the given alignment.
1044    fn try_alloc(&mut self, size: usize, align: usize) -> Option<usize> {
1045        let aligned_used = (self.used + align - 1) & !(align - 1);
1046        let new_used = aligned_used + size;
1047        if new_used > self.data.len() {
1048            return None;
1049        }
1050        let offset = aligned_used;
1051        self.used = new_used;
1052        Some(offset)
1053    }
1054    /// Reset this chunk (mark all space as free).
1055    fn reset(&mut self) {
1056        self.used = 0;
1057    }
1058}
/// Manages a hierarchy of memory regions.
pub struct RegionManager {
    /// All regions, indexed by ID. `new` creates the root region under ID 0.
    pub(super) regions: HashMap<u64, Region>,
    /// The next region ID to assign.
    next_id: u64,
    /// Stack of active region IDs (current scope); `new` seeds it with the
    /// root ID 0, and `pop_region` never pops the last entry.
    pub(super) scope_stack: Vec<u64>,
}
1068impl RegionManager {
1069    /// Create a new region manager.
1070    pub fn new() -> Self {
1071        let root = Region::new(0);
1072        let mut regions = HashMap::new();
1073        regions.insert(0, root);
1074        RegionManager {
1075            regions,
1076            next_id: 1,
1077            scope_stack: vec![0],
1078        }
1079    }
1080    /// Get the current active region ID.
1081    pub fn current_region_id(&self) -> u64 {
1082        *self.scope_stack.last().unwrap_or(&0)
1083    }
1084    /// Push a new region scope.
1085    pub fn push_region(&mut self) -> u64 {
1086        let id = self.next_id;
1087        self.next_id += 1;
1088        let parent_id = self.current_region_id();
1089        let region = Region::child(id, parent_id);
1090        self.regions.insert(id, region);
1091        if let Some(parent) = self.regions.get_mut(&parent_id) {
1092            parent.add_child(id);
1093        }
1094        self.scope_stack.push(id);
1095        id
1096    }
1097    /// Push a region with a custom size.
1098    pub fn push_region_with_size(&mut self, chunk_size: usize) -> u64 {
1099        let id = self.next_id;
1100        self.next_id += 1;
1101        let parent_id = self.current_region_id();
1102        let mut region = Region::with_size(id, chunk_size);
1103        region.parent_id = Some(parent_id);
1104        self.regions.insert(id, region);
1105        if let Some(parent) = self.regions.get_mut(&parent_id) {
1106            parent.add_child(id);
1107        }
1108        self.scope_stack.push(id);
1109        id
1110    }
1111    /// Pop the current region scope (deactivating it).
1112    pub fn pop_region(&mut self) -> Option<u64> {
1113        if self.scope_stack.len() <= 1 {
1114            return None;
1115        }
1116        let id = self.scope_stack.pop()?;
1117        if let Some(region) = self.regions.get_mut(&id) {
1118            region.deactivate();
1119        }
1120        Some(id)
1121    }
1122    /// Allocate in the current region.
1123    pub fn alloc(&mut self, size: usize) -> Option<(u64, ArenaOffset)> {
1124        let id = self.current_region_id();
1125        let offset = self.regions.get_mut(&id)?.alloc(size)?;
1126        Some((id, offset))
1127    }
1128    /// Get bytes from a specific region.
1129    pub fn get_bytes(&self, region_id: u64, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
1130        self.regions.get(&region_id)?.get_bytes(loc, size)
1131    }
1132    /// Reset a region and all its children.
1133    pub fn reset_region(&mut self, region_id: u64) {
1134        let children: Vec<u64> = self
1135            .regions
1136            .get(&region_id)
1137            .map(|r| r.children().to_vec())
1138            .unwrap_or_default();
1139        for child_id in children {
1140            self.reset_region(child_id);
1141        }
1142        if let Some(region) = self.regions.get_mut(&region_id) {
1143            region.reset();
1144        }
1145    }
1146    /// Get a reference to a region.
1147    pub fn get_region(&self, id: u64) -> Option<&Region> {
1148        self.regions.get(&id)
1149    }
1150    /// Get a mutable reference to a region.
1151    pub fn get_region_mut(&mut self, id: u64) -> Option<&mut Region> {
1152        self.regions.get_mut(&id)
1153    }
1154    /// Number of regions.
1155    pub fn num_regions(&self) -> usize {
1156        self.regions.len()
1157    }
1158    /// Total bytes used across all regions.
1159    pub fn total_bytes_used(&self) -> usize {
1160        self.regions.values().map(|r| r.bytes_used()).sum()
1161    }
1162    /// Total capacity across all regions.
1163    pub fn total_capacity(&self) -> usize {
1164        self.regions.values().map(|r| r.total_capacity()).sum()
1165    }
1166    /// Depth of the current scope stack.
1167    pub fn scope_depth(&self) -> usize {
1168        self.scope_stack.len()
1169    }
1170    /// Remove a region and all its children.
1171    pub fn remove_region(&mut self, region_id: u64) {
1172        let children: Vec<u64> = self
1173            .regions
1174            .get(&region_id)
1175            .map(|r| r.children().to_vec())
1176            .unwrap_or_default();
1177        for child_id in children {
1178            self.remove_region(child_id);
1179        }
1180        self.regions.remove(&region_id);
1181    }
1182}
/// A region for bulk allocation and deallocation.
///
/// Regions provide scoped memory management: all allocations in a region
/// are freed when the region is dropped or reset. Regions can be nested.
pub struct Region {
    /// Region identifier.
    pub(super) id: u64,
    /// The underlying bump arena for this region.
    pub(super) arena: BumpArena,
    /// Parent region ID (for nesting); `None` for a root region.
    pub(super) parent_id: Option<u64>,
    /// Child region IDs.
    pub(super) children: Vec<u64>,
    /// Whether this region is active (can allocate).
    pub(super) active: bool,
    /// Statistics for this region (allocation count, bytes, reset count).
    stats: RegionStats,
}
1201impl Region {
1202    /// Create a new root region.
1203    pub fn new(id: u64) -> Self {
1204        Region {
1205            id,
1206            arena: BumpArena::new(),
1207            parent_id: None,
1208            children: Vec::new(),
1209            active: true,
1210            stats: RegionStats::default(),
1211        }
1212    }
1213    /// Create a new region with a custom arena size.
1214    pub fn with_size(id: u64, chunk_size: usize) -> Self {
1215        Region {
1216            id,
1217            arena: BumpArena::with_chunk_size(chunk_size),
1218            parent_id: None,
1219            children: Vec::new(),
1220            active: true,
1221            stats: RegionStats::default(),
1222        }
1223    }
1224    /// Create a child region.
1225    pub fn child(id: u64, parent_id: u64) -> Self {
1226        Region {
1227            id,
1228            arena: BumpArena::new(),
1229            parent_id: Some(parent_id),
1230            children: Vec::new(),
1231            active: true,
1232            stats: RegionStats::default(),
1233        }
1234    }
1235    /// Get the region ID.
1236    pub fn id(&self) -> u64 {
1237        self.id
1238    }
1239    /// Get the parent region ID.
1240    pub fn parent_id(&self) -> Option<u64> {
1241        self.parent_id
1242    }
1243    /// Check if this region is active.
1244    pub fn is_active(&self) -> bool {
1245        self.active
1246    }
1247    /// Allocate bytes in this region.
1248    pub fn alloc(&mut self, size: usize) -> Option<ArenaOffset> {
1249        if !self.active {
1250            return None;
1251        }
1252        self.stats.allocations += 1;
1253        self.stats.bytes_allocated += size as u64;
1254        Some(self.arena.alloc(size))
1255    }
1256    /// Get bytes from this region.
1257    pub fn get_bytes(&self, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
1258        self.arena.get_bytes(loc, size)
1259    }
1260    /// Get mutable bytes from this region.
1261    pub fn get_bytes_mut(&mut self, loc: &ArenaOffset, size: usize) -> Option<&mut [u8]> {
1262        self.arena.get_bytes_mut(loc, size)
1263    }
1264    /// Reset this region (free all allocations).
1265    pub fn reset(&mut self) {
1266        self.arena.reset();
1267        self.stats.resets += 1;
1268    }
1269    /// Deactivate this region (prevent further allocations).
1270    pub fn deactivate(&mut self) {
1271        self.active = false;
1272    }
1273    /// Reactivate this region.
1274    pub fn reactivate(&mut self) {
1275        self.active = true;
1276    }
1277    /// Add a child region ID.
1278    pub fn add_child(&mut self, child_id: u64) {
1279        self.children.push(child_id);
1280    }
1281    /// Get child region IDs.
1282    pub fn children(&self) -> &[u64] {
1283        &self.children
1284    }
1285    /// Get the region statistics.
1286    pub fn stats(&self) -> &RegionStats {
1287        &self.stats
1288    }
1289    /// Get the underlying arena.
1290    pub fn arena(&self) -> &BumpArena {
1291        &self.arena
1292    }
1293    /// Bytes used in this region.
1294    pub fn bytes_used(&self) -> usize {
1295        self.arena.bytes_used()
1296    }
1297    /// Total capacity of this region.
1298    pub fn total_capacity(&self) -> usize {
1299        self.arena.total_capacity()
1300    }
1301}
/// A typed arena for homogeneous allocation.
///
/// All values stored in a `TypedArena<T>` have the same type `T`.
/// Values can be referenced by index (analogous to kernel `Idx<T>`).
pub struct TypedArena<T> {
    /// The stored values.
    pub(super) values: Vec<T>,
    /// Free list for reuse (indices of removed values).
    /// NOTE(review): in this impl only `clear` touches this list — `alloc`
    /// never pops it. Confirm whether slot reuse is implemented elsewhere.
    free_list: Vec<usize>,
    /// Allocation statistics (totals, live count, peak).
    stats: TypedArenaStats,
}
1314impl<T> TypedArena<T> {
1315    /// Create a new empty typed arena.
1316    pub fn new() -> Self {
1317        TypedArena {
1318            values: Vec::new(),
1319            free_list: Vec::new(),
1320            stats: TypedArenaStats::default(),
1321        }
1322    }
1323    /// Create a new typed arena with pre-allocated capacity.
1324    pub fn with_capacity(cap: usize) -> Self {
1325        TypedArena {
1326            values: Vec::with_capacity(cap),
1327            free_list: Vec::new(),
1328            stats: TypedArenaStats::default(),
1329        }
1330    }
1331    /// Allocate a new value and return its index.
1332    pub fn alloc(&mut self, value: T) -> ArenaIdx {
1333        self.stats.total_allocations += 1;
1334        self.stats.live_count += 1;
1335        if self.stats.live_count > self.stats.peak_count {
1336            self.stats.peak_count = self.stats.live_count;
1337        }
1338        let idx = self.values.len();
1339        self.values.push(value);
1340        ArenaIdx(idx as u32)
1341    }
1342    /// Get a reference to a value by index.
1343    pub fn get(&self, idx: ArenaIdx) -> Option<&T> {
1344        self.values.get(idx.0 as usize)
1345    }
1346    /// Get a mutable reference to a value by index.
1347    pub fn get_mut(&mut self, idx: ArenaIdx) -> Option<&mut T> {
1348        self.values.get_mut(idx.0 as usize)
1349    }
1350    /// Number of values in the arena.
1351    pub fn len(&self) -> usize {
1352        self.values.len()
1353    }
1354    /// Check if the arena is empty.
1355    pub fn is_empty(&self) -> bool {
1356        self.values.is_empty()
1357    }
1358    /// Iterate over all values.
1359    pub fn iter(&self) -> impl Iterator<Item = (ArenaIdx, &T)> {
1360        self.values
1361            .iter()
1362            .enumerate()
1363            .map(|(i, v)| (ArenaIdx(i as u32), v))
1364    }
1365    /// Iterate over all values mutably.
1366    pub fn iter_mut(&mut self) -> impl Iterator<Item = (ArenaIdx, &mut T)> {
1367        self.values
1368            .iter_mut()
1369            .enumerate()
1370            .map(|(i, v)| (ArenaIdx(i as u32), v))
1371    }
1372    /// Get the arena statistics.
1373    pub fn stats(&self) -> &TypedArenaStats {
1374        &self.stats
1375    }
1376    /// Clear the arena.
1377    pub fn clear(&mut self) {
1378        self.values.clear();
1379        self.free_list.clear();
1380        self.stats.live_count = 0;
1381    }
1382    /// Capacity of the arena.
1383    pub fn capacity(&self) -> usize {
1384        self.values.capacity()
1385    }
1386}
/// Statistics for the arena pool.
///
/// Four plain `u64` counters, so the type additionally derives
/// `Copy`/`PartialEq`/`Eq` for cheap snapshotting and direct comparison in
/// tests, consistent with the other stats types in this module.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct ArenaPoolStats {
    /// Number of arenas acquired.
    pub acquired: u64,
    /// Number of arenas returned.
    pub returned: u64,
    /// Number of arenas created (not found in pool).
    pub created: u64,
    /// Number of arenas discarded (pool was full).
    pub discarded: u64,
}
/// An arena that records allocation history for debugging.
#[allow(dead_code)]
pub struct ArenaAllocHistory {
    /// The backing linear (bump) allocator.
    inner: LinearAllocator,
    /// Records of past allocations, capped at `max_history` entries.
    history: Vec<AllocRecord>,
    /// Maximum number of records kept; allocations beyond this limit are
    /// still served by `inner` but are not recorded.
    max_history: usize,
}
1406#[allow(dead_code)]
1407impl ArenaAllocHistory {
1408    /// Create an arena with history tracking.
1409    pub fn new(capacity: usize, max_history: usize) -> Self {
1410        Self {
1411            inner: LinearAllocator::new(capacity),
1412            history: Vec::new(),
1413            max_history,
1414        }
1415    }
1416    /// Allocate and record.
1417    pub fn alloc_labeled(&mut self, size: usize, align: usize, label: &str) -> Option<usize> {
1418        let offset = self.inner.alloc_offset(size, align)?;
1419        if self.history.len() < self.max_history {
1420            self.history.push(AllocRecord {
1421                size,
1422                align,
1423                offset,
1424                label: label.to_string(),
1425            });
1426        }
1427        Some(offset)
1428    }
1429    /// Get allocation history.
1430    pub fn history(&self) -> &[AllocRecord] {
1431        &self.history
1432    }
1433    /// Total bytes allocated.
1434    pub fn top(&self) -> usize {
1435        self.inner.top()
1436    }
1437    /// Reset and clear history.
1438    pub fn reset(&mut self) {
1439        self.inner.reset();
1440        self.history.clear();
1441    }
1442    /// Allocation count.
1443    pub fn alloc_count(&self) -> u64 {
1444        self.inner.alloc_count()
1445    }
1446    /// Largest single allocation.
1447    pub fn largest_alloc(&self) -> Option<&AllocRecord> {
1448        self.history.iter().max_by_key(|r| r.size)
1449    }
1450}
/// A circular/ring arena that overwrites oldest data when full.
#[allow(dead_code)]
#[derive(Debug)]
pub struct RingArena {
    /// Backing storage.
    buf: Vec<u8>,
    /// Next write position.
    head: usize,
    /// How many times the head has wrapped back to the start.
    wrap_count: u64,
}
#[allow(dead_code)]
impl RingArena {
    /// Create a ring arena over `capacity` zeroed bytes.
    pub fn new(capacity: usize) -> Self {
        Self {
            buf: vec![0u8; capacity],
            head: 0,
            wrap_count: 0,
        }
    }
    /// Allocate `size` bytes, wrapping to offset 0 (and overwriting old
    /// data) when the request does not fit before the end of the buffer.
    ///
    /// Returns the start offset of the allocation; a zero-capacity arena
    /// always returns 0.
    pub fn alloc(&mut self, size: usize) -> usize {
        let cap = self.buf.len();
        if cap == 0 {
            return 0;
        }
        if self.head + size > cap {
            // Doesn't fit at the current head: wrap and hand out offset 0.
            self.wrap_count += 1;
            self.head = size % cap;
            return 0;
        }
        let start = self.head;
        self.head = (start + size) % cap;
        if size > 0 && self.head == 0 {
            // Landed exactly on the end of the buffer: that is a wrap too.
            self.wrap_count += 1;
        }
        start
    }
    /// Capacity of the ring buffer in bytes.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    /// Current write position.
    pub fn head(&self) -> usize {
        self.head
    }
    /// Number of times the allocator has wrapped around.
    pub fn wrap_count(&self) -> u64 {
        self.wrap_count
    }
    /// Borrow `len` bytes starting at `offset`, if fully in bounds.
    pub fn get(&self, offset: usize, len: usize) -> Option<&[u8]> {
        self.buf.get(offset..offset + len)
    }
}
/// Tracks arena allocation watermarks (current and peak usage).
#[derive(Clone, Debug, Default)]
pub struct ArenaWatermark {
    /// Current allocated bytes.
    current: u64,
    /// Peak allocated bytes.
    peak: u64,
}
#[allow(dead_code)]
impl ArenaWatermark {
    /// Create a new watermark tracker with zero usage.
    pub fn new() -> Self {
        Self::default()
    }
    /// Record an allocation of `bytes`, raising the peak if needed.
    pub fn record_alloc(&mut self, bytes: u64) {
        self.current += bytes;
        self.peak = self.peak.max(self.current);
    }
    /// Record a deallocation of `bytes` (saturates at zero).
    pub fn record_free(&mut self, bytes: u64) {
        self.current = self.current.saturating_sub(bytes);
    }
    /// Current allocated bytes.
    pub fn current(&self) -> u64 {
        self.current
    }
    /// Peak allocated bytes observed.
    pub fn peak(&self) -> u64 {
        self.peak
    }
    /// Reset both current and peak to zero.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}