1use std::cell::Cell;
6use std::collections::HashMap;
7
8use super::functions::{
9 ARENA_ALIGN, DEFAULT_CHUNK_SIZE, MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, PAGE_SIZE,
10};
11
/// Location of an allocation inside a `BumpArena`: which chunk it lives in
/// plus the byte offset within that chunk's buffer.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ArenaOffset {
    /// Index of the chunk within the arena's chunk list.
    pub chunk: usize,
    /// Byte offset from the start of that chunk's data buffer.
    pub offset: usize,
}

impl ArenaOffset {
    /// Builds an offset from its two components.
    pub fn new(chunk: usize, offset: usize) -> Self {
        Self { chunk, offset }
    }
}
/// Fixed-capacity bump allocator over a single heap buffer.
///
/// Offsets are handed out monotonically; `reset` recycles the whole buffer
/// at once. Requests that do not fit are counted in `overflow_count` rather
/// than growing the buffer.
#[allow(dead_code)]
pub struct LinearAllocator {
    buf: Vec<u8>,
    /// First unused byte; everything below this is allocated.
    top: usize,
    alloc_count: u64,
    overflow_count: u64,
}

#[allow(dead_code)]
impl LinearAllocator {
    /// Creates an allocator with at least 16 bytes of capacity.
    pub fn new(size: usize) -> Self {
        Self {
            buf: vec![0u8; size.max(16)],
            top: 0,
            alloc_count: 0,
            overflow_count: 0,
        }
    }
    /// Reserves `size` bytes aligned to `align` (rounded up to a power of
    /// two, minimum 1). Returns the offset of the allocation, or `None` if
    /// the request does not fit; failures bump `overflow_count`.
    pub fn alloc_offset(&mut self, size: usize, align: usize) -> Option<usize> {
        let align = align.next_power_of_two().max(1);
        // Overflow-safe alignment round-up: `top + align - 1` previously
        // wrapped in release builds for pathological requests.
        let aligned = match self.top.checked_add(align - 1) {
            Some(v) => v & !(align - 1),
            None => {
                self.overflow_count += 1;
                return None;
            }
        };
        match aligned.checked_add(size) {
            Some(end) if end <= self.buf.len() => {
                self.top = end;
                self.alloc_count += 1;
                Some(aligned)
            }
            _ => {
                self.overflow_count += 1;
                None
            }
        }
    }
    /// Borrows `size` bytes at `offset`, or `None` if out of bounds.
    pub fn get_bytes(&self, offset: usize, size: usize) -> Option<&[u8]> {
        let end = offset.checked_add(size)?;
        self.buf.get(offset..end)
    }
    /// Mutably borrows `size` bytes at `offset`, or `None` if out of bounds.
    pub fn get_bytes_mut(&mut self, offset: usize, size: usize) -> Option<&mut [u8]> {
        let end = offset.checked_add(size)?;
        self.buf.get_mut(offset..end)
    }
    /// Forgets all allocations (memory is retained, not zeroed).
    pub fn reset(&mut self) {
        self.top = 0;
    }
    /// Current bump position.
    pub fn top(&self) -> usize {
        self.top
    }
    /// Total buffer size in bytes.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    /// Bytes still available.
    pub fn remaining(&self) -> usize {
        self.buf.len().saturating_sub(self.top)
    }
    /// Fraction of the buffer in use (0.0 for an empty buffer).
    pub fn utilization(&self) -> f64 {
        if self.buf.is_empty() {
            0.0
        } else {
            self.top as f64 / self.buf.len() as f64
        }
    }
    /// Number of successful allocations.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }
    /// Number of failed (did-not-fit) allocations.
    pub fn overflow_count(&self) -> u64 {
        self.overflow_count
    }
}
/// Summary of one arena benchmark run.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ArenaBenchResult {
    pub iterations: u64,
    pub total_bytes: u64,
    pub allocs_per_iter: usize,
    pub description: String,
}

#[allow(dead_code)]
impl ArenaBenchResult {
    /// Records a benchmark outcome.
    pub fn new(iterations: u64, total_bytes: u64, allocs_per_iter: usize, desc: &str) -> Self {
        let description = desc.to_string();
        Self {
            iterations,
            total_bytes,
            allocs_per_iter,
            description,
        }
    }

    /// Mean number of bytes allocated per iteration (0.0 for an empty run).
    pub fn bytes_per_iter(&self) -> f64 {
        match self.iterations {
            0 => 0.0,
            n => self.total_bytes as f64 / n as f64,
        }
    }
}
/// Per-region allocation counters, updated by `Region` methods.
#[derive(Clone, Debug, Default)]
pub struct RegionStats {
    /// Number of successful `alloc` calls on the region.
    pub allocations: u64,
    /// Total bytes requested across those allocations.
    pub bytes_allocated: u64,
    /// Number of times the region's arena was reset.
    pub resets: u64,
}
135pub struct GenerationalArena<T> {
141 pub(super) entries: Vec<GenerationalEntry<T>>,
143 free_list: Vec<usize>,
145 pub(super) generation: u32,
147}
148impl<T> GenerationalArena<T> {
149 pub fn new() -> Self {
151 GenerationalArena {
152 entries: Vec::new(),
153 free_list: Vec::new(),
154 generation: 0,
155 }
156 }
157 pub fn with_capacity(cap: usize) -> Self {
159 GenerationalArena {
160 entries: Vec::with_capacity(cap),
161 free_list: Vec::new(),
162 generation: 0,
163 }
164 }
165 pub fn insert(&mut self, value: T) -> GenIdx {
167 self.generation = self.generation.wrapping_add(1);
168 if let Some(slot) = self.free_list.pop() {
169 self.entries[slot] = GenerationalEntry {
170 value: Some(value),
171 generation: self.generation,
172 };
173 GenIdx {
174 index: slot as u32,
175 generation: self.generation,
176 }
177 } else {
178 let index = self.entries.len() as u32;
179 self.entries.push(GenerationalEntry {
180 value: Some(value),
181 generation: self.generation,
182 });
183 GenIdx {
184 index,
185 generation: self.generation,
186 }
187 }
188 }
189 pub fn get(&self, idx: GenIdx) -> Option<&T> {
191 let entry = self.entries.get(idx.index as usize)?;
192 if entry.generation == idx.generation {
193 entry.value.as_ref()
194 } else {
195 None
196 }
197 }
198 pub fn get_mut(&mut self, idx: GenIdx) -> Option<&mut T> {
200 let entry = self.entries.get_mut(idx.index as usize)?;
201 if entry.generation == idx.generation {
202 entry.value.as_mut()
203 } else {
204 None
205 }
206 }
207 pub fn remove(&mut self, idx: GenIdx) -> Option<T> {
209 let entry = self.entries.get_mut(idx.index as usize)?;
210 if entry.generation == idx.generation {
211 let value = entry.value.take();
212 self.free_list.push(idx.index as usize);
213 value
214 } else {
215 None
216 }
217 }
218 pub fn contains(&self, idx: GenIdx) -> bool {
220 self.entries
221 .get(idx.index as usize)
222 .map(|e| e.generation == idx.generation && e.value.is_some())
223 .unwrap_or(false)
224 }
225 pub fn len(&self) -> usize {
227 self.entries.len() - self.free_list.len()
228 }
229 pub fn is_empty(&self) -> bool {
231 self.len() == 0
232 }
233 pub fn clear(&mut self) {
235 self.entries.clear();
236 self.free_list.clear();
237 }
238 pub fn iter(&self) -> impl Iterator<Item = (GenIdx, &T)> {
240 self.entries.iter().enumerate().filter_map(|(i, e)| {
241 e.value.as_ref().map(|v| {
242 (
243 GenIdx {
244 index: i as u32,
245 generation: e.generation,
246 },
247 v,
248 )
249 })
250 })
251 }
252}
/// Handle into a `GenerationalArena`: slot index plus the generation that
/// was current when the value was inserted.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct GenIdx {
    /// Slot position in the arena's entry vector.
    pub index: u32,
    /// Generation tag; must match the entry's tag for the handle to be live.
    pub generation: u32,
}
261pub struct ArenaPool {
267 pub(super) available: Vec<BumpArena>,
269 pub(super) max_pool_size: usize,
271 pub(super) chunk_size: usize,
273 stats: ArenaPoolStats,
275}
276impl ArenaPool {
277 pub fn new() -> Self {
279 ArenaPool {
280 available: Vec::new(),
281 max_pool_size: 8,
282 chunk_size: DEFAULT_CHUNK_SIZE,
283 stats: ArenaPoolStats::default(),
284 }
285 }
286 pub fn with_config(max_pool_size: usize, chunk_size: usize) -> Self {
288 ArenaPool {
289 available: Vec::new(),
290 max_pool_size,
291 chunk_size,
292 stats: ArenaPoolStats::default(),
293 }
294 }
295 pub fn acquire(&mut self) -> BumpArena {
297 self.stats.acquired += 1;
298 if let Some(mut arena) = self.available.pop() {
299 arena.reset();
300 arena
301 } else {
302 self.stats.created += 1;
303 BumpArena::with_chunk_size(self.chunk_size)
304 }
305 }
306 pub fn release(&mut self, arena: BumpArena) {
308 self.stats.returned += 1;
309 if self.available.len() < self.max_pool_size {
310 self.available.push(arena);
311 } else {
312 self.stats.discarded += 1;
313 }
314 }
315 pub fn available_count(&self) -> usize {
317 self.available.len()
318 }
319 pub fn stats(&self) -> &ArenaPoolStats {
321 &self.stats
322 }
323 pub fn set_max_pool_size(&mut self, size: usize) {
325 self.max_pool_size = size;
326 while self.available.len() > self.max_pool_size {
327 self.available.pop();
328 }
329 }
330 pub fn clear(&mut self) {
332 self.available.clear();
333 }
334}
335#[allow(dead_code)]
337pub struct ArenaPageManager {
338 pages: Vec<Box<[u8; PAGE_SIZE]>>,
339 free_list: Vec<usize>,
340 alloc_count: u64,
341 free_count: u64,
342}
343#[allow(dead_code)]
344impl ArenaPageManager {
345 pub fn new() -> Self {
347 Self {
348 pages: Vec::new(),
349 free_list: Vec::new(),
350 alloc_count: 0,
351 free_count: 0,
352 }
353 }
354 pub fn alloc_page(&mut self) -> usize {
356 self.alloc_count += 1;
357 if let Some(idx) = self.free_list.pop() {
358 idx
359 } else {
360 let idx = self.pages.len();
361 self.pages.push(Box::new([0u8; PAGE_SIZE]));
362 idx
363 }
364 }
365 pub fn free_page(&mut self, idx: usize) {
367 if idx < self.pages.len() {
368 for b in self.pages[idx].iter_mut() {
369 *b = 0;
370 }
371 self.free_list.push(idx);
372 self.free_count += 1;
373 }
374 }
375 pub fn page(&self, idx: usize) -> Option<&[u8; PAGE_SIZE]> {
377 self.pages.get(idx).map(|p| p.as_ref())
378 }
379 pub fn page_mut(&mut self, idx: usize) -> Option<&mut [u8; PAGE_SIZE]> {
381 self.pages.get_mut(idx).map(|p| p.as_mut())
382 }
383 pub fn total_pages(&self) -> usize {
385 self.pages.len()
386 }
387 pub fn free_pages(&self) -> usize {
389 self.free_list.len()
390 }
391 pub fn live_pages(&self) -> usize {
393 self.pages.len().saturating_sub(self.free_list.len())
394 }
395 pub fn total_bytes(&self) -> usize {
397 self.pages.len() * PAGE_SIZE
398 }
399 pub fn alloc_count(&self) -> u64 {
401 self.alloc_count
402 }
403 pub fn free_count(&self) -> u64 {
405 self.free_count
406 }
407}
/// Growable bump allocator: a list of chunks, each consumed front-to-back.
pub struct BumpArena {
    /// Backing chunks; never shrinks below one chunk.
    pub(super) chunks: Vec<Chunk>,
    /// Chunk the last allocation landed in; earlier chunks are not
    /// revisited until `reset`.
    current_chunk: usize,
    /// Preferred size for newly created chunks.
    chunk_size: usize,
    stats: ArenaStats,
}
impl BumpArena {
    /// Creates an arena with a single chunk of `DEFAULT_CHUNK_SIZE` bytes.
    pub fn new() -> Self {
        BumpArena {
            chunks: vec![Chunk::new(DEFAULT_CHUNK_SIZE)],
            current_chunk: 0,
            chunk_size: DEFAULT_CHUNK_SIZE,
            stats: ArenaStats::new(),
        }
    }
    /// Creates an arena whose chunks are `size` bytes each, clamped to
    /// `[MIN_CHUNK_SIZE, MAX_CHUNK_SIZE]`.
    pub fn with_chunk_size(size: usize) -> Self {
        let size = size.clamp(MIN_CHUNK_SIZE, MAX_CHUNK_SIZE);
        BumpArena {
            chunks: vec![Chunk::new(size)],
            current_chunk: 0,
            chunk_size: size,
            stats: ArenaStats::new(),
        }
    }
    /// Allocates `size` bytes at the arena's default alignment.
    pub fn alloc(&mut self, size: usize) -> ArenaOffset {
        self.alloc_aligned(size, ARENA_ALIGN)
    }
    /// Allocates `size` bytes aligned to `align`, growing the arena with a
    /// new chunk if no existing chunk from `current_chunk` onward has room.
    /// Never fails: an oversized request gets a dedicated chunk.
    pub fn alloc_aligned(&mut self, size: usize, align: usize) -> ArenaOffset {
        self.stats.total_allocations += 1;
        self.stats.total_bytes_allocated += size as u64;
        // Fast path: bump within the chunk we last allocated from.
        if let Some(offset) = self.chunks[self.current_chunk].try_alloc(size, align) {
            return ArenaOffset {
                chunk: self.current_chunk,
                offset,
            };
        }
        // Scan chunks after the current one. Chunks before `current_chunk`
        // are never revisited — deliberate bump-allocator behavior.
        for i in (self.current_chunk + 1)..self.chunks.len() {
            if let Some(offset) = self.chunks[i].try_alloc(size, align) {
                self.current_chunk = i;
                return ArenaOffset {
                    chunk: self.current_chunk,
                    offset,
                };
            }
        }
        // Nothing fits: create a fresh chunk. An oversized request gets a
        // chunk of at least `size + align` bytes (may exceed MAX_CHUNK_SIZE
        // by design); a normal request uses the configured chunk size.
        let new_chunk_size = if size > self.chunk_size {
            (size + align).max(self.chunk_size)
        } else {
            self.chunk_size.min(MAX_CHUNK_SIZE)
        };
        let mut chunk = Chunk::new(new_chunk_size);
        let offset = chunk
            .try_alloc(size, align)
            .expect("freshly allocated chunk must have enough space for the requested allocation");
        self.chunks.push(chunk);
        self.current_chunk = self.chunks.len() - 1;
        self.stats.total_chunks_allocated += 1;
        ArenaOffset {
            chunk: self.current_chunk,
            offset,
        }
    }
    /// Borrows `size` bytes at `loc`, or `None` if out of bounds.
    pub fn get_bytes(&self, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
        let chunk = self.chunks.get(loc.chunk)?;
        if loc.offset + size > chunk.data.len() {
            return None;
        }
        Some(&chunk.data[loc.offset..loc.offset + size])
    }
    /// Mutably borrows `size` bytes at `loc`, or `None` if out of bounds.
    pub fn get_bytes_mut(&mut self, loc: &ArenaOffset, size: usize) -> Option<&mut [u8]> {
        let chunk = self.chunks.get_mut(loc.chunk)?;
        if loc.offset + size > chunk.data.len() {
            return None;
        }
        Some(&mut chunk.data[loc.offset..loc.offset + size])
    }
    /// Marks every chunk empty (memory is retained, not zeroed) and rewinds
    /// to the first chunk. Outstanding `ArenaOffset`s become stale.
    pub fn reset(&mut self) {
        for chunk in &mut self.chunks {
            chunk.reset();
        }
        self.current_chunk = 0;
        self.stats.total_resets += 1;
    }
    /// Total bytes currently allocated across all chunks.
    pub fn bytes_used(&self) -> usize {
        self.chunks.iter().map(|c| c.used).sum()
    }
    /// Total capacity of all chunks in bytes.
    pub fn total_capacity(&self) -> usize {
        self.chunks.iter().map(|c| c.capacity()).sum()
    }
    /// Number of chunks currently owned by the arena.
    pub fn num_chunks(&self) -> usize {
        self.chunks.len()
    }
    /// Cumulative allocation statistics.
    pub fn stats(&self) -> &ArenaStats {
        &self.stats
    }
    /// Drops chunks from the first completely unused one onward, always
    /// keeping at least one chunk alive, and clamps `current_chunk`.
    pub fn shrink(&mut self) {
        let keep = self
            .chunks
            .iter()
            .position(|c| c.used == 0)
            .unwrap_or(self.chunks.len());
        self.chunks.truncate(keep.max(1));
        self.current_chunk = self.current_chunk.min(self.chunks.len() - 1);
    }
}
/// Bump allocator with a stack of rollback marks (no per-allocation free).
#[allow(dead_code)]
pub struct MarkArena {
    buf: Vec<u8>,
    /// Bump pointer; bytes below this are allocated.
    top: usize,
    /// LIFO stack of saved bump positions.
    marks: Vec<usize>,
}

#[allow(dead_code)]
impl MarkArena {
    /// Creates an arena of at least 64 bytes.
    pub fn new(capacity: usize) -> Self {
        Self {
            buf: vec![0u8; capacity.max(64)],
            top: 0,
            marks: Vec::new(),
        }
    }
    /// Bump-allocates `size` bytes; `None` when the buffer is exhausted.
    pub fn alloc(&mut self, size: usize) -> Option<usize> {
        let start = self.top;
        if start + size > self.buf.len() {
            return None;
        }
        self.top = start + size;
        Some(start)
    }
    /// Saves the current position on the mark stack and returns it.
    pub fn mark(&mut self) -> usize {
        self.marks.push(self.top);
        self.top
    }
    /// Rolls back to the most recent mark, if any.
    pub fn release(&mut self) {
        if let Some(saved) = self.marks.pop() {
            self.top = saved;
        }
    }
    /// Rolls back to an explicit position, discarding all marks at or
    /// above it. Never moves the bump pointer forward.
    pub fn release_to(&mut self, mark: usize) {
        self.marks.retain(|&m| m < mark);
        self.top = self.top.min(mark);
    }
    /// Forgets all allocations and marks.
    pub fn reset(&mut self) {
        self.top = 0;
        self.marks.clear();
    }
    /// Current bump position.
    pub fn top(&self) -> usize {
        self.top
    }
    /// Number of outstanding marks.
    pub fn mark_depth(&self) -> usize {
        self.marks.len()
    }
    /// Total buffer size.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
}
/// One slot of a `GenerationalArena`: the stored value tagged with the
/// generation of the insert that produced it.
#[derive(Debug)]
pub(super) struct GenerationalEntry<T> {
    /// `None` while the slot is vacant (removed, awaiting reuse).
    value: Option<T>,
    /// Generation stamped at insert time; a handle must match it.
    generation: u32,
}
607pub struct ThreadLocalArena {
612 arena: BumpArena,
614 _high_water_mark: Cell<usize>,
616 _allocs_since_reset: Cell<u64>,
618}
619impl ThreadLocalArena {
620 pub fn new() -> Self {
622 ThreadLocalArena {
623 arena: BumpArena::new(),
624 _high_water_mark: Cell::new(DEFAULT_CHUNK_SIZE),
625 _allocs_since_reset: Cell::new(0),
626 }
627 }
628 pub fn alloc(&mut self, size: usize) -> ArenaOffset {
630 self._allocs_since_reset
631 .set(self._allocs_since_reset.get() + 1);
632 self.arena.alloc(size)
633 }
634 pub fn reset(&mut self) {
636 self.arena.reset();
637 self._allocs_since_reset.set(0);
638 }
639 pub fn arena(&self) -> &BumpArena {
641 &self.arena
642 }
643 pub fn bytes_used(&self) -> usize {
645 self.arena.bytes_used()
646 }
647}
648#[allow(dead_code)]
650pub struct AdaptiveArena {
651 inner: BumpArena,
652 pressure_samples: Vec<f64>,
653 target_utilization: f64,
654 sample_window: usize,
655}
656#[allow(dead_code)]
657impl AdaptiveArena {
658 pub fn new(target_utilization: f64, sample_window: usize) -> Self {
660 Self {
661 inner: BumpArena::new(),
662 pressure_samples: Vec::new(),
663 target_utilization: target_utilization.clamp(0.1, 0.99),
664 sample_window: sample_window.max(3),
665 }
666 }
667 pub fn alloc(&mut self, size: usize) -> ArenaOffset {
669 let result = self.inner.alloc(size);
670 let pressure = self.inner.bytes_used() as f64
671 / (self.inner.num_chunks() as f64 * DEFAULT_CHUNK_SIZE as f64 + 1.0);
672 self.pressure_samples.push(pressure);
673 if self.pressure_samples.len() > self.sample_window {
674 self.pressure_samples.remove(0);
675 }
676 result
677 }
678 pub fn avg_pressure(&self) -> f64 {
680 if self.pressure_samples.is_empty() {
681 return 0.0;
682 }
683 self.pressure_samples.iter().sum::<f64>() / self.pressure_samples.len() as f64
684 }
685 pub fn is_over_utilized(&self) -> bool {
687 self.avg_pressure() > self.target_utilization
688 }
689 pub fn reset(&mut self) {
691 self.inner.reset();
692 self.pressure_samples.clear();
693 }
694 pub fn allocated_bytes(&self) -> usize {
696 self.inner.bytes_used()
697 }
698}
/// Plain index handle into a `TypedArena` (no generation tag).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ArenaIdx(pub u32);

impl ArenaIdx {
    /// Wraps a raw slot index.
    pub fn new(index: u32) -> Self {
        Self(index)
    }
    /// Unwraps back to the raw slot index.
    pub fn raw(self) -> u32 {
        let Self(value) = self;
        value
    }
}
/// Cumulative counters for a `BumpArena`.
#[derive(Clone, Debug, Default)]
pub struct ArenaStats {
    /// Number of `alloc`/`alloc_aligned` calls.
    pub total_allocations: u64,
    /// Sum of requested sizes in bytes.
    pub total_bytes_allocated: u64,
    /// Number of arena resets.
    pub total_resets: u64,
    /// Number of extra chunks created after the initial one.
    pub total_chunks_allocated: u64,
}

impl ArenaStats {
    /// Zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    /// Mean requested allocation size, or 0.0 before any allocation.
    pub fn avg_alloc_size(&self) -> f64 {
        match self.total_allocations {
            0 => 0.0,
            n => self.total_bytes_allocated as f64 / n as f64,
        }
    }
    /// Zeroes every counter.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Counters for a `TypedArena`.
#[derive(Clone, Debug, Default)]
pub struct TypedArenaStats {
    /// Total `alloc` calls.
    pub total_allocations: u64,
    /// NOTE(review): nothing in this file increments this — confirm a
    /// deallocation API exists elsewhere.
    pub total_deallocations: u64,
    /// Values currently considered live (zeroed by `clear`).
    pub live_count: u64,
    /// Maximum `live_count` ever observed.
    pub peak_count: u64,
}
/// Recycles fixed-size chunk buffers to avoid repeated heap allocation.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct ArenaChunkPool {
    /// Idle chunks ready for reuse.
    chunks: Vec<Vec<u8>>,
    /// Size every pooled chunk must have.
    chunk_size: usize,
    /// Maximum number of idle chunks retained by `release`.
    max_pooled: usize,
    reused: u64,
    created: u64,
}

#[allow(dead_code)]
impl ArenaChunkPool {
    /// Empty pool for `chunk_size`-byte chunks, keeping at most
    /// `max_pooled` idle chunks.
    pub fn new(chunk_size: usize, max_pooled: usize) -> Self {
        Self {
            chunks: Vec::new(),
            chunk_size,
            max_pooled,
            reused: 0,
            created: 0,
        }
    }
    /// Hands out a zeroed chunk, reusing a pooled one when available.
    pub fn acquire(&mut self) -> Vec<u8> {
        if let Some(mut chunk) = self.chunks.pop() {
            // `slice::fill` compiles to memset — clearer and faster than
            // the byte-by-byte loop it replaces.
            chunk.fill(0);
            self.reused += 1;
            chunk
        } else {
            self.created += 1;
            vec![0u8; self.chunk_size]
        }
    }
    /// Returns a chunk to the pool; wrong-sized chunks and overflow beyond
    /// `max_pooled` are simply dropped.
    pub fn release(&mut self, chunk: Vec<u8>) {
        if chunk.len() == self.chunk_size && self.chunks.len() < self.max_pooled {
            self.chunks.push(chunk);
        }
    }
    /// Number of idle chunks currently pooled.
    pub fn pooled_count(&self) -> usize {
        self.chunks.len()
    }
    /// Acquisitions served from the pool.
    pub fn reused_count(&self) -> u64 {
        self.reused
    }
    /// Acquisitions that required a fresh allocation.
    pub fn created_count(&self) -> u64 {
        self.created
    }
    /// Fraction of acquisitions served from the pool (0.0 when none yet).
    pub fn hit_rate(&self) -> f64 {
        let total = self.reused + self.created;
        if total == 0 {
            0.0
        } else {
            self.reused as f64 / total as f64
        }
    }
}
/// Fixed-slot allocator: every allocation is `slot_size` bytes, freed slots
/// are recycled LIFO, and the backing buffer grows by one slot on demand.
#[allow(dead_code)]
pub struct SlabArena {
    /// Bytes per slot (minimum 8).
    slot_size: usize,
    buf: Vec<u8>,
    /// Recyclable slot indices, popped LIFO.
    free_slots: Vec<usize>,
    alloc_count: u64,
}

#[allow(dead_code)]
impl SlabArena {
    /// Arena with `initial_slots` preallocated slots of `slot_size` bytes
    /// (clamped to at least 8).
    pub fn new(slot_size: usize, initial_slots: usize) -> Self {
        let slot_size = slot_size.max(8);
        Self {
            slot_size,
            buf: vec![0u8; slot_size * initial_slots],
            // Reverse so slot 0 is handed out first.
            free_slots: (0..initial_slots).rev().collect(),
            alloc_count: 0,
        }
    }
    /// Returns the byte offset of a free slot, growing the buffer by one
    /// slot when none are free. Never fails.
    pub fn alloc(&mut self) -> Option<usize> {
        self.alloc_count += 1;
        if let Some(slot) = self.free_slots.pop() {
            Some(slot * self.slot_size)
        } else {
            let new_slot = self.buf.len() / self.slot_size;
            // Grow in place; avoids the temporary Vec the old `extend`
            // built per growth.
            self.buf.resize(self.buf.len() + self.slot_size, 0);
            Some(new_slot * self.slot_size)
        }
    }
    /// Frees the slot containing `offset`. Offsets outside the buffer and
    /// double frees are ignored. (Previously an out-of-range offset
    /// poisoned the free list, letting `alloc` return offsets past the end
    /// of the buffer.)
    pub fn free(&mut self, offset: usize) {
        let slot = offset / self.slot_size;
        if slot < self.total_slots() && !self.free_slots.contains(&slot) {
            self.free_slots.push(slot);
        }
    }
    /// Slots currently handed out.
    pub fn live_count(&self) -> usize {
        self.total_slots().saturating_sub(self.free_slots.len())
    }
    /// Total slots backed by the buffer.
    pub fn total_slots(&self) -> usize {
        self.buf.len() / self.slot_size
    }
    /// Size of each slot in bytes.
    pub fn slot_size(&self) -> usize {
        self.slot_size
    }
    /// Total `alloc` calls.
    pub fn alloc_count(&self) -> u64 {
        self.alloc_count
    }
}
/// Extended allocator statistics designed to be merged across arenas.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ArenaExtStats {
    pub alloc_calls: u64,
    pub total_bytes_allocated: u64,
    pub peak_bytes: u64,
    pub reset_count: u64,
    pub overflow_count: u64,
    pub chunk_alloc_count: u64,
}

#[allow(dead_code)]
impl ArenaExtStats {
    /// Zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    /// Folds `other` into `self`: counters add, peaks take the maximum.
    pub fn merge(&mut self, other: &ArenaExtStats) {
        self.alloc_calls += other.alloc_calls;
        self.total_bytes_allocated += other.total_bytes_allocated;
        self.peak_bytes = self.peak_bytes.max(other.peak_bytes);
        self.reset_count += other.reset_count;
        self.overflow_count += other.overflow_count;
        self.chunk_alloc_count += other.chunk_alloc_count;
    }
    /// Notes one allocation of `bytes` bytes.
    pub fn record_alloc(&mut self, bytes: u64) {
        self.alloc_calls += 1;
        self.total_bytes_allocated += bytes;
    }
    /// Notes one arena reset.
    pub fn record_reset(&mut self) {
        self.reset_count += 1;
    }
    /// Notes one failed (overflowing) allocation.
    pub fn record_overflow(&mut self) {
        self.overflow_count += 1;
    }
    /// Notes one fresh chunk allocation.
    pub fn record_chunk_alloc(&mut self) {
        self.chunk_alloc_count += 1;
    }
    /// Raises the peak watermark when `current_bytes` exceeds it.
    pub fn update_peak(&mut self, current_bytes: u64) {
        self.peak_bytes = self.peak_bytes.max(current_bytes);
    }
    /// Mean allocation size, or 0.0 before any allocation.
    pub fn avg_alloc_size(&self) -> f64 {
        match self.alloc_calls {
            0 => 0.0,
            n => self.total_bytes_allocated as f64 / n as f64,
        }
    }
}
919pub struct ScopedArena<'pool> {
921 pub(super) arena: Option<BumpArena>,
923 pub(super) pool: &'pool mut ArenaPool,
925}
926impl<'pool> ScopedArena<'pool> {
927 pub fn new(pool: &'pool mut ArenaPool) -> Self {
929 let arena = pool.acquire();
930 ScopedArena {
931 arena: Some(arena),
932 pool,
933 }
934 }
935 pub fn alloc(&mut self, size: usize) -> ArenaOffset {
937 self.arena
938 .as_mut()
939 .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
940 .alloc(size)
941 }
942 pub fn arena(&self) -> &BumpArena {
944 self.arena
945 .as_ref()
946 .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
947 }
948 pub fn arena_mut(&mut self) -> &mut BumpArena {
950 self.arena
951 .as_mut()
952 .expect("ScopedArena is valid during its lifetime; arena is always Some before drop")
953 }
954}
955#[allow(dead_code)]
957#[derive(Debug, Clone, Copy, PartialEq, Eq)]
958pub struct ArenaCheckpoint {
959 bytes_used: usize,
960 chunk_count: usize,
961}
962#[allow(dead_code)]
963impl ArenaCheckpoint {
964 pub fn capture(arena: &BumpArena) -> Self {
965 Self {
966 bytes_used: arena.bytes_used(),
967 chunk_count: arena.num_chunks(),
968 }
969 }
970 pub fn bytes_used(&self) -> usize {
971 self.bytes_used
972 }
973 pub fn chunk_count(&self) -> usize {
974 self.chunk_count
975 }
976 pub fn bytes_since(&self, later_bytes_used: usize) -> usize {
977 later_bytes_used.saturating_sub(self.bytes_used)
978 }
979}
980#[allow(dead_code)]
982#[derive(Clone, Debug, PartialEq, Eq)]
983pub struct ArenaSnapshot {
984 pub offset: usize,
986 pub chunk_count: usize,
988 pub allocated_bytes: usize,
990}
991#[allow(dead_code)]
992impl ArenaSnapshot {
993 pub fn capture(arena: &BumpArena) -> Self {
995 Self {
996 offset: arena.bytes_used(),
997 chunk_count: arena.num_chunks(),
998 allocated_bytes: arena.bytes_used(),
999 }
1000 }
1001 pub fn bytes_since(&self, later: &ArenaSnapshot) -> usize {
1003 later.allocated_bytes.saturating_sub(self.allocated_bytes)
1004 }
1005 pub fn new_chunks_since(&self, later: &ArenaSnapshot) -> usize {
1007 later.chunk_count.saturating_sub(self.chunk_count)
1008 }
1009}
/// One recorded allocation in `ArenaAllocHistory`.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct AllocRecord {
    /// Requested size in bytes.
    pub size: usize,
    /// Requested alignment.
    pub align: usize,
    /// Offset handed out by the underlying allocator.
    pub offset: usize,
    /// Caller-supplied tag for diagnostics.
    pub label: String,
}
/// A single backing buffer of a `BumpArena` with a bump pointer.
#[derive(Debug)]
pub(super) struct Chunk {
    /// Fixed-size backing storage, zero-initialized at creation.
    data: Vec<u8>,
    /// Bump pointer: bytes below this are allocated.
    used: usize,
}

impl Chunk {
    /// Creates a zeroed chunk of exactly `capacity` bytes.
    fn new(capacity: usize) -> Self {
        Chunk {
            data: vec![0u8; capacity],
            used: 0,
        }
    }
    /// Total size of the chunk in bytes.
    fn capacity(&self) -> usize {
        self.data.len()
    }
    /// Bytes still available past the bump pointer.
    fn remaining(&self) -> usize {
        self.data.len() - self.used
    }
    /// Bump-allocates `size` bytes aligned to `align`, returning the offset
    /// or `None` when the chunk is full.
    ///
    /// NOTE(review): the mask arithmetic assumes `align` is a nonzero power
    /// of two and that `used + align + size` does not overflow — confirm
    /// all callers (BumpArena passes ARENA_ALIGN or caller-supplied aligns)
    /// uphold this.
    fn try_alloc(&mut self, size: usize, align: usize) -> Option<usize> {
        // Round the bump pointer up to the requested alignment.
        let aligned_used = (self.used + align - 1) & !(align - 1);
        let new_used = aligned_used + size;
        if new_used > self.data.len() {
            return None;
        }
        let offset = aligned_used;
        self.used = new_used;
        Some(offset)
    }
    /// Forgets all allocations without zeroing memory.
    fn reset(&mut self) {
        self.used = 0;
    }
}
/// Tree of scoped `Region`s with a stack marking the current scope.
pub struct RegionManager {
    /// All regions ever created (including deactivated ones), keyed by id.
    pub(super) regions: HashMap<u64, Region>,
    /// Next id to hand out; 0 is reserved for the root region.
    next_id: u64,
    /// Stack of open region ids; the last element is the current region.
    pub(super) scope_stack: Vec<u64>,
}
impl RegionManager {
    /// Creates a manager containing an active root region with id 0.
    pub fn new() -> Self {
        let root = Region::new(0);
        let mut regions = HashMap::new();
        regions.insert(0, root);
        RegionManager {
            regions,
            next_id: 1,
            scope_stack: vec![0],
        }
    }
    /// Id of the innermost open region; falls back to the root (0) if the
    /// scope stack is somehow empty.
    pub fn current_region_id(&self) -> u64 {
        *self.scope_stack.last().unwrap_or(&0)
    }
    /// Opens a new region as a child of the current one, links it into the
    /// tree, and makes it the current scope. Returns its id.
    pub fn push_region(&mut self) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        let parent_id = self.current_region_id();
        let region = Region::child(id, parent_id);
        self.regions.insert(id, region);
        if let Some(parent) = self.regions.get_mut(&parent_id) {
            parent.add_child(id);
        }
        self.scope_stack.push(id);
        id
    }
    /// Same as `push_region`, but the new region's arena uses
    /// `chunk_size`-byte chunks.
    pub fn push_region_with_size(&mut self, chunk_size: usize) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        let parent_id = self.current_region_id();
        let mut region = Region::with_size(id, chunk_size);
        region.parent_id = Some(parent_id);
        self.regions.insert(id, region);
        if let Some(parent) = self.regions.get_mut(&parent_id) {
            parent.add_child(id);
        }
        self.scope_stack.push(id);
        id
    }
    /// Closes the innermost region: it is deactivated (refusing further
    /// allocations) but kept in the map with its data intact. The root
    /// region (bottom of the stack) is never popped.
    pub fn pop_region(&mut self) -> Option<u64> {
        if self.scope_stack.len() <= 1 {
            return None;
        }
        let id = self.scope_stack.pop()?;
        if let Some(region) = self.regions.get_mut(&id) {
            region.deactivate();
        }
        Some(id)
    }
    /// Allocates `size` bytes from the current region, returning the region
    /// id with the offset (both are needed to read the bytes back).
    pub fn alloc(&mut self, size: usize) -> Option<(u64, ArenaOffset)> {
        let id = self.current_region_id();
        let offset = self.regions.get_mut(&id)?.alloc(size)?;
        Some((id, offset))
    }
    /// Reads `size` bytes previously allocated in `region_id` at `loc`.
    pub fn get_bytes(&self, region_id: u64, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
        self.regions.get(&region_id)?.get_bytes(loc, size)
    }
    /// Resets `region_id` and, depth-first, every descendant region.
    pub fn reset_region(&mut self, region_id: u64) {
        // Collect child ids first so no borrow is held across the recursion.
        let children: Vec<u64> = self
            .regions
            .get(&region_id)
            .map(|r| r.children().to_vec())
            .unwrap_or_default();
        for child_id in children {
            self.reset_region(child_id);
        }
        if let Some(region) = self.regions.get_mut(&region_id) {
            region.reset();
        }
    }
    pub fn get_region(&self, id: u64) -> Option<&Region> {
        self.regions.get(&id)
    }
    pub fn get_region_mut(&mut self, id: u64) -> Option<&mut Region> {
        self.regions.get_mut(&id)
    }
    /// Number of regions in the map, including deactivated ones.
    pub fn num_regions(&self) -> usize {
        self.regions.len()
    }
    /// Sum of bytes used across every region's arena.
    pub fn total_bytes_used(&self) -> usize {
        self.regions.values().map(|r| r.bytes_used()).sum()
    }
    /// Sum of arena capacities across every region.
    pub fn total_capacity(&self) -> usize {
        self.regions.values().map(|r| r.total_capacity()).sum()
    }
    /// Depth of the open-scope stack (1 = only the root is open).
    pub fn scope_depth(&self) -> usize {
        self.scope_stack.len()
    }
    /// Removes `region_id` and all descendants from the map entirely.
    /// NOTE(review): does not touch `scope_stack` — removing a still-open
    /// region leaves a dangling id on the stack; confirm callers only
    /// remove popped regions.
    pub fn remove_region(&mut self, region_id: u64) {
        let children: Vec<u64> = self
            .regions
            .get(&region_id)
            .map(|r| r.children().to_vec())
            .unwrap_or_default();
        for child_id in children {
            self.remove_region(child_id);
        }
        self.regions.remove(&region_id);
    }
}
/// A scoped allocation region: its own bump arena plus tree links.
pub struct Region {
    pub(super) id: u64,
    /// Backing allocator for this region.
    pub(super) arena: BumpArena,
    /// Parent region id; `None` for the root.
    pub(super) parent_id: Option<u64>,
    /// Ids of directly nested regions.
    pub(super) children: Vec<u64>,
    /// Deactivated regions refuse new allocations.
    pub(super) active: bool,
    stats: RegionStats,
}
1201impl Region {
1202 pub fn new(id: u64) -> Self {
1204 Region {
1205 id,
1206 arena: BumpArena::new(),
1207 parent_id: None,
1208 children: Vec::new(),
1209 active: true,
1210 stats: RegionStats::default(),
1211 }
1212 }
1213 pub fn with_size(id: u64, chunk_size: usize) -> Self {
1215 Region {
1216 id,
1217 arena: BumpArena::with_chunk_size(chunk_size),
1218 parent_id: None,
1219 children: Vec::new(),
1220 active: true,
1221 stats: RegionStats::default(),
1222 }
1223 }
1224 pub fn child(id: u64, parent_id: u64) -> Self {
1226 Region {
1227 id,
1228 arena: BumpArena::new(),
1229 parent_id: Some(parent_id),
1230 children: Vec::new(),
1231 active: true,
1232 stats: RegionStats::default(),
1233 }
1234 }
1235 pub fn id(&self) -> u64 {
1237 self.id
1238 }
1239 pub fn parent_id(&self) -> Option<u64> {
1241 self.parent_id
1242 }
1243 pub fn is_active(&self) -> bool {
1245 self.active
1246 }
1247 pub fn alloc(&mut self, size: usize) -> Option<ArenaOffset> {
1249 if !self.active {
1250 return None;
1251 }
1252 self.stats.allocations += 1;
1253 self.stats.bytes_allocated += size as u64;
1254 Some(self.arena.alloc(size))
1255 }
1256 pub fn get_bytes(&self, loc: &ArenaOffset, size: usize) -> Option<&[u8]> {
1258 self.arena.get_bytes(loc, size)
1259 }
1260 pub fn get_bytes_mut(&mut self, loc: &ArenaOffset, size: usize) -> Option<&mut [u8]> {
1262 self.arena.get_bytes_mut(loc, size)
1263 }
1264 pub fn reset(&mut self) {
1266 self.arena.reset();
1267 self.stats.resets += 1;
1268 }
1269 pub fn deactivate(&mut self) {
1271 self.active = false;
1272 }
1273 pub fn reactivate(&mut self) {
1275 self.active = true;
1276 }
1277 pub fn add_child(&mut self, child_id: u64) {
1279 self.children.push(child_id);
1280 }
1281 pub fn children(&self) -> &[u64] {
1283 &self.children
1284 }
1285 pub fn stats(&self) -> &RegionStats {
1287 &self.stats
1288 }
1289 pub fn arena(&self) -> &BumpArena {
1291 &self.arena
1292 }
1293 pub fn bytes_used(&self) -> usize {
1295 self.arena.bytes_used()
1296 }
1297 pub fn total_capacity(&self) -> usize {
1299 self.arena.total_capacity()
1300 }
1301}
/// Append-only arena of `T` values addressed by `ArenaIdx`.
pub struct TypedArena<T> {
    pub(super) values: Vec<T>,
    /// NOTE(review): never consumed by `alloc` in this file — slots are not
    /// reused; confirm whether a removal API elsewhere feeds this.
    free_list: Vec<usize>,
    stats: TypedArenaStats,
}
1314impl<T> TypedArena<T> {
1315 pub fn new() -> Self {
1317 TypedArena {
1318 values: Vec::new(),
1319 free_list: Vec::new(),
1320 stats: TypedArenaStats::default(),
1321 }
1322 }
1323 pub fn with_capacity(cap: usize) -> Self {
1325 TypedArena {
1326 values: Vec::with_capacity(cap),
1327 free_list: Vec::new(),
1328 stats: TypedArenaStats::default(),
1329 }
1330 }
1331 pub fn alloc(&mut self, value: T) -> ArenaIdx {
1333 self.stats.total_allocations += 1;
1334 self.stats.live_count += 1;
1335 if self.stats.live_count > self.stats.peak_count {
1336 self.stats.peak_count = self.stats.live_count;
1337 }
1338 let idx = self.values.len();
1339 self.values.push(value);
1340 ArenaIdx(idx as u32)
1341 }
1342 pub fn get(&self, idx: ArenaIdx) -> Option<&T> {
1344 self.values.get(idx.0 as usize)
1345 }
1346 pub fn get_mut(&mut self, idx: ArenaIdx) -> Option<&mut T> {
1348 self.values.get_mut(idx.0 as usize)
1349 }
1350 pub fn len(&self) -> usize {
1352 self.values.len()
1353 }
1354 pub fn is_empty(&self) -> bool {
1356 self.values.is_empty()
1357 }
1358 pub fn iter(&self) -> impl Iterator<Item = (ArenaIdx, &T)> {
1360 self.values
1361 .iter()
1362 .enumerate()
1363 .map(|(i, v)| (ArenaIdx(i as u32), v))
1364 }
1365 pub fn iter_mut(&mut self) -> impl Iterator<Item = (ArenaIdx, &mut T)> {
1367 self.values
1368 .iter_mut()
1369 .enumerate()
1370 .map(|(i, v)| (ArenaIdx(i as u32), v))
1371 }
1372 pub fn stats(&self) -> &TypedArenaStats {
1374 &self.stats
1375 }
1376 pub fn clear(&mut self) {
1378 self.values.clear();
1379 self.free_list.clear();
1380 self.stats.live_count = 0;
1381 }
1382 pub fn capacity(&self) -> usize {
1384 self.values.capacity()
1385 }
1386}
/// Counters for `ArenaPool` traffic.
#[derive(Clone, Debug, Default)]
pub struct ArenaPoolStats {
    /// Total `acquire` calls.
    pub acquired: u64,
    /// Total `release` calls.
    pub returned: u64,
    /// Acquisitions that had to build a fresh arena.
    pub created: u64,
    /// Released arenas dropped because the pool was full.
    pub discarded: u64,
}
1399#[allow(dead_code)]
1401pub struct ArenaAllocHistory {
1402 inner: LinearAllocator,
1403 history: Vec<AllocRecord>,
1404 max_history: usize,
1405}
1406#[allow(dead_code)]
1407impl ArenaAllocHistory {
1408 pub fn new(capacity: usize, max_history: usize) -> Self {
1410 Self {
1411 inner: LinearAllocator::new(capacity),
1412 history: Vec::new(),
1413 max_history,
1414 }
1415 }
1416 pub fn alloc_labeled(&mut self, size: usize, align: usize, label: &str) -> Option<usize> {
1418 let offset = self.inner.alloc_offset(size, align)?;
1419 if self.history.len() < self.max_history {
1420 self.history.push(AllocRecord {
1421 size,
1422 align,
1423 offset,
1424 label: label.to_string(),
1425 });
1426 }
1427 Some(offset)
1428 }
1429 pub fn history(&self) -> &[AllocRecord] {
1431 &self.history
1432 }
1433 pub fn top(&self) -> usize {
1435 self.inner.top()
1436 }
1437 pub fn reset(&mut self) {
1439 self.inner.reset();
1440 self.history.clear();
1441 }
1442 pub fn alloc_count(&self) -> u64 {
1444 self.inner.alloc_count()
1445 }
1446 pub fn largest_alloc(&self) -> Option<&AllocRecord> {
1448 self.history.iter().max_by_key(|r| r.size)
1449 }
1450}
/// Wrap-around bump allocator: when a request does not fit at the head, it
/// is placed at offset 0 and the buffer is reused from the start.
#[allow(dead_code)]
#[derive(Debug)]
pub struct RingArena {
    buf: Vec<u8>,
    /// Next offset to hand out.
    head: usize,
    /// Times the head has wrapped back to the start of the buffer.
    wrap_count: u64,
}

#[allow(dead_code)]
impl RingArena {
    /// Arena over a zeroed buffer of exactly `capacity` bytes.
    pub fn new(capacity: usize) -> Self {
        Self {
            buf: vec![0u8; capacity],
            head: 0,
            wrap_count: 0,
        }
    }
    /// Returns the offset for `size` bytes. Requests that do not fit at the
    /// current head wrap to offset 0, overwriting earlier data — callers
    /// must tolerate reuse.
    pub fn alloc(&mut self, size: usize) -> usize {
        let cap = self.buf.len();
        if cap == 0 {
            return 0;
        }
        let start = self.head;
        if start + size > cap {
            // Does not fit at the head: wrap and place at the buffer start.
            self.wrap_count += 1;
            self.head = size % cap;
            return 0;
        }
        let next = (start + size) % cap;
        if next == 0 && size > 0 {
            // Landed exactly on the end: counts as a wrap too.
            self.wrap_count += 1;
        }
        self.head = next;
        start
    }
    /// Total buffer size.
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    /// Next offset to be handed out.
    pub fn head(&self) -> usize {
        self.head
    }
    /// Number of wraps so far.
    pub fn wrap_count(&self) -> u64 {
        self.wrap_count
    }
    /// Borrows `len` bytes at `offset` if fully in bounds.
    pub fn get(&self, offset: usize, len: usize) -> Option<&[u8]> {
        let end = offset + len;
        if end <= self.buf.len() {
            Some(&self.buf[offset..end])
        } else {
            None
        }
    }
}
/// Tracks outstanding bytes and the highest value they ever reached.
#[derive(Clone, Debug, Default)]
pub struct ArenaWatermark {
    /// Outstanding bytes right now.
    current: u64,
    /// Highest value `current` has reached since the last reset.
    peak: u64,
}

#[allow(dead_code)]
impl ArenaWatermark {
    /// Zeroed watermark.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds `bytes` to the outstanding total, raising the peak if needed.
    pub fn record_alloc(&mut self, bytes: u64) {
        self.current += bytes;
        self.peak = self.peak.max(self.current);
    }
    /// Subtracts `bytes` from the outstanding total, saturating at zero.
    pub fn record_free(&mut self, bytes: u64) {
        self.current = self.current.saturating_sub(bytes);
    }
    /// Outstanding bytes.
    pub fn current(&self) -> u64 {
        self.current
    }
    /// Highest outstanding-byte count observed.
    pub fn peak(&self) -> u64 {
        self.peak
    }
    /// Zeroes both counters.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}