
grafeo_common/memory/arena.rs

//! Epoch-based arena allocator for MVCC.
//!
//! This is how Grafeo manages memory for versioned data. Each epoch gets its
//! own arena, and when all readers from an old epoch finish, we free the whole
//! thing at once. Much faster than tracking individual allocations.
//!
//! Use [`ArenaAllocator`] to manage multiple epochs, or [`Arena`] directly
//! if you're working with a single epoch.
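//!
//! A minimal usage sketch (module path assumed; not compiled as a doctest):
//!
//! ```ignore
//! use grafeo_common::memory::arena::ArenaAllocator;
//!
//! let allocator = ArenaAllocator::new()?;
//! let epoch = allocator.new_epoch()?; // open a fresh epoch; it becomes current
//! let ptr = allocator.alloc(64, 8)?;  // bump-allocate in the current epoch
//! allocator.drop_epoch(epoch);        // free everything from that epoch at once
//! ```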

// Arena allocators require unsafe code for memory management
#![allow(unsafe_code)]

use std::alloc::{Layout, alloc, dealloc};
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};

use parking_lot::RwLock;

use crate::types::EpochId;

/// Default chunk size for arena allocations (1 MB).
const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;

/// Errors from arena allocation operations.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum AllocError {
    /// The system allocator returned null (out of memory).
    OutOfMemory,
    /// The requested epoch does not exist.
    EpochNotFound(EpochId),
    /// Arena chunk has insufficient space for the allocation.
    InsufficientSpace,
    /// Alignment must be a non-zero power of two.
    InvalidAlignment(usize),
}

impl fmt::Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "arena allocation failed: out of memory"),
            Self::EpochNotFound(id) => write!(f, "epoch {id} not found in arena allocator"),
            Self::InsufficientSpace => {
                write!(f, "arena chunk has insufficient space for allocation")
            }
            Self::InvalidAlignment(align) => {
                write!(f, "alignment must be a non-zero power of two, got {align}")
            }
        }
    }
}

impl std::error::Error for AllocError {}

impl From<AllocError> for crate::Error {
    fn from(e: AllocError) -> Self {
        match e {
            AllocError::OutOfMemory | AllocError::InsufficientSpace => {
                crate::Error::Storage(crate::utils::error::StorageError::Full)
            }
            AllocError::EpochNotFound(id) => {
                crate::Error::Internal(format!("epoch {id} not found in arena allocator"))
            }
            AllocError::InvalidAlignment(align) => crate::Error::Internal(format!(
                "alignment must be a non-zero power of two, got {align}"
            )),
        }
    }
}

/// A memory chunk in the arena.
struct Chunk {
    /// Pointer to the start of the chunk.
    ptr: NonNull<u8>,
    /// Total capacity of the chunk.
    capacity: usize,
    /// Current allocation offset.
    offset: AtomicUsize,
}

impl Chunk {
    /// Creates a new chunk with the given capacity.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the system allocator fails, or if
    /// `capacity` is zero or exceeds `u32::MAX` (offsets must fit in a `u32`).
    fn new(capacity: usize) -> Result<Self, AllocError> {
        // Offsets are handed out as u32, so cap the capacity accordingly. A
        // zero-size chunk is also rejected: calling `alloc` with a zero-sized
        // layout is undefined behavior.
        if capacity == 0 || capacity > u32::MAX as usize {
            return Err(AllocError::OutOfMemory);
        }
        let layout = Layout::from_size_align(capacity, 16).map_err(|_| AllocError::OutOfMemory)?;
        // SAFETY: The layout is valid and has non-zero size
        let ptr = unsafe { alloc(layout) };
        let ptr = NonNull::new(ptr).ok_or(AllocError::OutOfMemory)?;

        Ok(Self {
            ptr,
            capacity,
            offset: AtomicUsize::new(0),
        })
    }

    /// Tries to allocate `size` bytes with the given alignment.
    /// Returns None if there's not enough space.
    fn try_alloc(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
        self.try_alloc_with_offset(size, align).map(|(_, ptr)| ptr)
    }

    /// Tries to allocate `size` bytes with the given alignment.
    /// Returns (offset, ptr) where offset is the aligned offset within this chunk.
    /// Returns None if there's not enough space.
    fn try_alloc_with_offset(&self, size: usize, align: usize) -> Option<(u32, NonNull<u8>)> {
        // Alignment must be a power of two; checked_sub handles align == 0 below,
        // but non-power-of-two values produce invalid bitmasks silently.
        debug_assert!(
            align.is_power_of_two(),
            "alignment must be a power of two, got {align}"
        );
        let base_addr = self.ptr.as_ptr() as usize;
        loop {
            let current = self.offset.load(Ordering::Relaxed);

            // Align the absolute address (base + offset), then convert back to an
            // offset. This is correct for any requested alignment, even when it
            // exceeds the chunk's own 16-byte alignment.
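            // Worked example (illustrative values): with base_addr = 0x1000,
            // current = 5, and align = 8: align_mask = 7, current_addr = 0x1005,
            // aligned_addr = (0x1005 + 7) & !7 = 0x1008, so aligned = 8.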
            let align_mask = align.checked_sub(1)?;
            let current_addr = base_addr.checked_add(current)?;
            let aligned_addr = current_addr.checked_add(align_mask)? & !align_mask;
            let aligned = aligned_addr - base_addr;
            let new_offset = aligned.checked_add(size)?;

            if new_offset > self.capacity {
                return None;
            }

            // Try to reserve the space
            match self.offset.compare_exchange_weak(
                current,
                new_offset,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // SAFETY: We've reserved this range exclusively
                    let ptr = unsafe { self.ptr.as_ptr().add(aligned) };
                    // reason: aligned <= capacity, and Chunk::new caps capacity at u32::MAX
                    #[allow(clippy::cast_possible_truncation)]
                    return Some((aligned as u32, NonNull::new(ptr)?));
                }
                Err(_) => continue, // Retry
            }
        }
    }

    /// Returns the amount of memory used in this chunk.
    fn used(&self) -> usize {
        self.offset.load(Ordering::Relaxed)
    }
}

impl Drop for Chunk {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(self.capacity, 16).expect("Invalid layout");
        // SAFETY: We allocated this memory with the same layout
        unsafe { dealloc(self.ptr.as_ptr(), layout) };
    }
}

// SAFETY: Chunk uses atomic operations for thread-safe allocation
unsafe impl Send for Chunk {}
unsafe impl Sync for Chunk {}

/// A single epoch's memory arena.
///
/// Allocates by bumping a pointer forward, so allocation is extremely fast.
/// You can't free individual allocations; instead, drop the whole arena when
/// the epoch is no longer needed.
///
/// Thread-safe: multiple threads can allocate concurrently using atomics.
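///
/// A minimal sketch of single-arena use (not compiled as a doctest):
///
/// ```ignore
/// let arena = Arena::new(EpochId::INITIAL)?;
/// let n = arena.alloc_value(42u64)?;          // typed bump allocation
/// let xs = arena.alloc_slice(&[1u32, 2, 3])?; // slice copied into the arena
/// assert_eq!((*n, xs.len()), (42, 3));
/// ```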
pub struct Arena {
    /// The epoch this arena belongs to.
    epoch: EpochId,
    /// List of memory chunks.
    chunks: RwLock<Vec<Chunk>>,
    /// Default chunk size for new allocations.
    chunk_size: usize,
    /// Total bytes allocated.
    total_allocated: AtomicUsize,
}

impl Arena {
    /// Creates a new arena for the given epoch.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the initial chunk allocation fails.
    pub fn new(epoch: EpochId) -> Result<Self, AllocError> {
        Self::with_chunk_size(epoch, DEFAULT_CHUNK_SIZE)
    }

    /// Creates a new arena with a custom chunk size.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the initial chunk allocation fails.
    pub fn with_chunk_size(epoch: EpochId, chunk_size: usize) -> Result<Self, AllocError> {
        let initial_chunk = Chunk::new(chunk_size)?;
        Ok(Self {
            epoch,
            chunks: RwLock::new(vec![initial_chunk]),
            chunk_size,
            total_allocated: AtomicUsize::new(chunk_size),
        })
    }

    /// Returns the epoch this arena belongs to.
    #[must_use]
    pub fn epoch(&self) -> EpochId {
        self.epoch
    }

    /// Allocates `size` bytes with the given alignment.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::InvalidAlignment` if `align` is not a non-zero
    /// power of two, or `AllocError::OutOfMemory` if a new chunk is needed
    /// and the system allocator fails.
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        // is_power_of_two() is false for zero, so this also rejects align == 0.
        if !align.is_power_of_two() {
            return Err(AllocError::InvalidAlignment(align));
        }
        // First try to allocate from existing chunks
        {
            let chunks = self.chunks.read();
            for chunk in chunks.iter().rev() {
                if let Some(ptr) = chunk.try_alloc(size, align) {
                    return Ok(ptr);
                }
            }
        }

        // Need a new chunk
        self.alloc_new_chunk(size, align)
    }

    /// Allocates a value of type T.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if allocation fails.
    pub fn alloc_value<T>(&self, value: T) -> Result<&mut T, AllocError> {
        let ptr = self.alloc(std::mem::size_of::<T>(), std::mem::align_of::<T>())?;
        // SAFETY: We've allocated the correct size and alignment
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr() as *mut T;
            typed_ptr.write(value);
            &mut *typed_ptr
        })
    }

    /// Allocates a slice of values.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if allocation fails.
    pub fn alloc_slice<T: Copy>(&self, values: &[T]) -> Result<&mut [T], AllocError> {
        if values.is_empty() {
            return Ok(&mut []);
        }

        let size = std::mem::size_of::<T>()
            .checked_mul(values.len())
            .ok_or(AllocError::OutOfMemory)?;
        let align = std::mem::align_of::<T>();
        let ptr = self.alloc(size, align)?;

        // SAFETY: We've allocated the correct size and alignment
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr() as *mut T;
            std::ptr::copy_nonoverlapping(values.as_ptr(), typed_ptr, values.len());
            std::slice::from_raw_parts_mut(typed_ptr, values.len())
        })
    }

    /// Allocates a value and returns its offset within the primary chunk.
    ///
    /// This is used by tiered storage to store values in the arena and track
    /// their locations via compact u32 offsets in `HotVersionRef`.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::InsufficientSpace` if the primary chunk does not
    /// have enough room. Increase the chunk size for your use case.
    ///
    /// # Panics
    ///
    /// Panics if the arena has no chunks (should never happen in normal use).
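    ///
    /// A sketch of the offset round trip (not compiled as a doctest):
    ///
    /// ```ignore
    /// let (offset, val) = arena.alloc_value_with_offset(42u64)?;
    /// // SAFETY: same arena, same type, offset from alloc_value_with_offset.
    /// let read: &u64 = unsafe { arena.read_at(offset) };
    /// assert_eq!(*read, *val);
    /// ```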
    #[cfg(feature = "tiered-storage")]
    pub fn alloc_value_with_offset<T>(&self, value: T) -> Result<(u32, &mut T), AllocError> {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();

        // Allocate in the first chunk only, so the offset stays stable even if
        // later allocations add more chunks.
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        let (offset, ptr) = chunk
            .try_alloc_with_offset(size, align)
            .ok_or(AllocError::InsufficientSpace)?;

        // SAFETY: We've allocated the correct size and alignment
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            typed_ptr.write(value);
            (offset, &mut *typed_ptr)
        })
    }

    /// Reads a value at the given offset in the primary chunk.
    ///
    /// # Safety
    ///
    /// - The offset must have been returned by a previous `alloc_value_with_offset` call
    /// - The type T must match what was stored at that offset
    /// - The arena must not have been dropped
    ///
    /// # Panics
    ///
    /// Panics if the arena has no chunks (should never happen in normal use).
    #[cfg(feature = "tiered-storage")]
    pub unsafe fn read_at<T>(&self, offset: u32) -> &T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
            "read_at: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.used()
        );
        assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        // SAFETY: Caller guarantees offset is valid and T matches stored type
        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &*ptr
        }
    }

    /// Reads a value mutably at the given offset in the primary chunk.
    ///
    /// # Safety
    ///
    /// - The offset must have been returned by a previous `alloc_value_with_offset` call
    /// - The type T must match what was stored at that offset
    /// - The arena must not have been dropped
    /// - No other references to this value may exist
    ///
    /// # Panics
    ///
    /// Panics if the arena has no chunks (should never happen in normal use).
    #[cfg(feature = "tiered-storage")]
    pub unsafe fn read_at_mut<T>(&self, offset: u32) -> &mut T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        // Bound the read by the used region, matching `read_at`; checking only
        // against capacity would let offsets into unallocated space slip through.
        assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
            "read_at_mut: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.used()
        );
        assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at_mut: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        // SAFETY: Caller guarantees offset is valid, T matches, and no aliasing
        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &mut *ptr
        }
    }

    /// Allocates a new chunk and performs the allocation.
    fn alloc_new_chunk(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        let required = size.checked_add(align).ok_or(AllocError::OutOfMemory)?;
        let chunk_size = self.chunk_size.max(required);
        let chunk = Chunk::new(chunk_size)?;

        self.total_allocated
            .fetch_add(chunk_size, Ordering::Relaxed);

        // The chunk was sized to fit this allocation, so this cannot fail.
        let ptr = chunk
            .try_alloc(size, align)
            .expect("fresh chunk sized to fit");

        let mut chunks = self.chunks.write();
        chunks.push(chunk);

        Ok(ptr)
    }

    /// Returns the total memory allocated by this arena.
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.total_allocated.load(Ordering::Relaxed)
    }

    /// Returns the total memory used (not just allocated capacity).
    #[must_use]
    pub fn total_used(&self) -> usize {
        let chunks = self.chunks.read();
        chunks.iter().map(Chunk::used).sum()
    }

    /// Returns statistics about this arena.
    #[must_use]
    pub fn stats(&self) -> ArenaStats {
        let chunks = self.chunks.read();
        ArenaStats {
            epoch: self.epoch,
            chunk_count: chunks.len(),
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: chunks.iter().map(Chunk::used).sum(),
        }
    }
}

/// Statistics about an arena.
#[derive(Debug, Clone)]
pub struct ArenaStats {
    /// The epoch this arena belongs to.
    pub epoch: EpochId,
    /// Number of chunks allocated.
    pub chunk_count: usize,
    /// Total bytes allocated.
    pub total_allocated: usize,
    /// Total bytes used.
    pub total_used: usize,
}

/// Manages arenas across multiple epochs.
///
/// Use this to create new epochs, allocate in the current epoch, and
/// clean up old epochs when they're no longer needed.
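///
/// A sketch of the epoch lifecycle (not compiled as a doctest):
///
/// ```ignore
/// let allocator = ArenaAllocator::new()?;
/// let old = allocator.current_epoch();
/// let new = allocator.new_epoch()?; // becomes the current epoch
/// allocator.alloc(128, 8)?;         // lands in `new`
/// allocator.drop_epoch(old);        // frees the old epoch wholesale
/// ```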
pub struct ArenaAllocator {
    /// Map of epochs to arenas.
    arenas: RwLock<hashbrown::HashMap<EpochId, Arena>>,
    /// Current epoch.
    current_epoch: AtomicUsize,
    /// Default chunk size.
    chunk_size: usize,
}

impl ArenaAllocator {
    /// Creates a new arena allocator.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the initial arena allocation fails.
    pub fn new() -> Result<Self, AllocError> {
        Self::with_chunk_size(DEFAULT_CHUNK_SIZE)
    }

    /// Creates a new arena allocator with a custom chunk size.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the initial arena allocation fails.
    pub fn with_chunk_size(chunk_size: usize) -> Result<Self, AllocError> {
        let allocator = Self {
            arenas: RwLock::new(hashbrown::HashMap::new()),
            current_epoch: AtomicUsize::new(0),
            chunk_size,
        };

        // Create the initial epoch
        let epoch = EpochId::INITIAL;
        allocator
            .arenas
            .write()
            .insert(epoch, Arena::with_chunk_size(epoch, chunk_size)?);

        Ok(allocator)
    }

    /// Returns the current epoch.
    #[must_use]
    pub fn current_epoch(&self) -> EpochId {
        EpochId::new(self.current_epoch.load(Ordering::Acquire) as u64)
    }

    /// Creates a new epoch and returns its ID.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if the arena allocation fails.
    pub fn new_epoch(&self) -> Result<EpochId, AllocError> {
        let new_id = self.current_epoch.fetch_add(1, Ordering::AcqRel) as u64 + 1;
        let epoch = EpochId::new(new_id);

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        self.arenas.write().insert(epoch, arena);

        Ok(epoch)
    }

    /// Gets the arena for a specific epoch.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::EpochNotFound` if the epoch doesn't exist.
    pub fn arena(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        let arenas = self.arenas.read();
        if !arenas.contains_key(&epoch) {
            return Err(AllocError::EpochNotFound(epoch));
        }
        Ok(parking_lot::RwLockReadGuard::map(arenas, |arenas| {
            &arenas[&epoch]
        }))
    }

    /// Ensures an arena exists for the given epoch, creating it if necessary.
    /// Returns whether a new arena was created.
    ///
    /// # Errors
    ///
    /// Returns `AllocError::OutOfMemory` if a new arena allocation fails.
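    ///
    /// A sketch of the intended call pattern (hypothetical epoch value):
    ///
    /// ```ignore
    /// let epoch = EpochId::new(7);
    /// assert!(allocator.ensure_epoch(epoch)?);   // first call creates the arena
    /// assert!(!allocator.ensure_epoch(epoch)?);  // second call is a no-op
    /// ```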
    #[cfg(feature = "tiered-storage")]
    pub fn ensure_epoch(&self, epoch: EpochId) -> Result<bool, AllocError> {
        // Fast path: check if epoch already exists
        {
            let arenas = self.arenas.read();
            if arenas.contains_key(&epoch) {
                return Ok(false);
            }
        }

        // Slow path: create the epoch
        let mut arenas = self.arenas.write();
        // Double-check after acquiring write lock
        if arenas.contains_key(&epoch) {
            return Ok(false);
        }

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        arenas.insert(epoch, arena);
        Ok(true)
    }

    /// Gets or creates an arena for a specific epoch.
    ///
    /// # Errors
    ///
    /// Returns `AllocError` if the arena allocation fails.
    #[cfg(feature = "tiered-storage")]
    pub fn arena_or_create(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        self.ensure_epoch(epoch)?;
        self.arena(epoch)
    }

    /// Allocates in the current epoch.
    ///
    /// # Errors
    ///
    /// Returns `AllocError` if allocation fails.
    ///
    /// # Panics
    ///
    /// Panics if the current epoch has no arena (should never happen in normal use).
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        let epoch = self.current_epoch();
        let arenas = self.arenas.read();
        arenas
            .get(&epoch)
            .expect("current epoch always exists")
            .alloc(size, align)
    }

    /// Drops an epoch, freeing all its memory.
    ///
    /// This should only be called when no readers are using this epoch.
    pub fn drop_epoch(&self, epoch: EpochId) {
        self.arenas.write().remove(&epoch);
    }

    /// Returns total memory allocated across all epochs.
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.arenas
            .read()
            .values()
            .map(Arena::total_allocated)
            .sum()
    }
}

impl Default for ArenaAllocator {
    /// Creates a default arena allocator.
    ///
    /// # Panics
    ///
    /// Panics if the initial arena allocation fails (out of memory).
    fn default() -> Self {
        Self::new().expect("failed to allocate default arena")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arena_basic_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        // Allocate some bytes
        let ptr1 = arena.alloc(100, 8).unwrap();
        let ptr2 = arena.alloc(200, 8).unwrap();

        // Pointers should be different
        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_value_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let value = arena.alloc_value(42u64).unwrap();
        assert_eq!(*value, 42);

        *value = 100;
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_arena_slice_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let slice = arena.alloc_slice(&[1u32, 2, 3, 4, 5]).unwrap();
        assert_eq!(slice, &[1, 2, 3, 4, 5]);

        slice[0] = 10;
        assert_eq!(slice[0], 10);
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 1024).unwrap();

        // Allocate something larger than the chunk size
        let _ptr = arena.alloc(2048, 8).unwrap();

        // Should have created a new chunk
        assert!(arena.stats().chunk_count >= 2);
    }

    #[test]
    fn test_arena_allocator_epochs() {
        let allocator = ArenaAllocator::new().unwrap();

        let epoch0 = allocator.current_epoch();
        assert_eq!(epoch0, EpochId::INITIAL);

        let epoch1 = allocator.new_epoch().unwrap();
        assert_eq!(epoch1, EpochId::new(1));

        let epoch2 = allocator.new_epoch().unwrap();
        assert_eq!(epoch2, EpochId::new(2));

        // Current epoch should be the latest
        assert_eq!(allocator.current_epoch(), epoch2);
    }

    #[test]
    fn test_arena_allocator_allocation() {
        let allocator = ArenaAllocator::new().unwrap();

        let ptr1 = allocator.alloc(100, 8).unwrap();
        let ptr2 = allocator.alloc(100, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_drop_epoch() {
        let allocator = ArenaAllocator::new().unwrap();

        let initial_mem = allocator.total_allocated();

        let epoch1 = allocator.new_epoch().unwrap();
        // Allocate some memory in the new epoch
        {
            let arena = allocator.arena(epoch1).unwrap();
            arena.alloc(10000, 8).unwrap();
        }

        let after_alloc = allocator.total_allocated();
        assert!(after_alloc > initial_mem);

        // Drop the epoch
        allocator.drop_epoch(epoch1);

        // Memory should decrease
        let after_drop = allocator.total_allocated();
        assert!(after_drop < after_alloc);
    }

    #[test]
    fn test_arena_stats() {
        let arena = Arena::with_chunk_size(EpochId::new(5), 4096).unwrap();

        let stats = arena.stats();
        assert_eq!(stats.epoch, EpochId::new(5));
        assert_eq!(stats.chunk_count, 1);
        assert_eq!(stats.total_allocated, 4096);
        assert_eq!(stats.total_used, 0);

        arena.alloc(100, 8).unwrap();
        let stats = arena.stats();
        assert!(stats.total_used >= 100);
    }
}

#[cfg(all(test, feature = "tiered-storage"))]
mod tiered_storage_tests {
    use super::*;

    #[test]
    // reason: size_of::<u64>() is 8, fits u32
    #[allow(clippy::cast_possible_truncation)]
    fn test_alloc_value_with_offset_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, val1) = arena.alloc_value_with_offset(42u64).unwrap();
        let (offset2, val2) = arena.alloc_value_with_offset(100u64).unwrap();

        // First allocation should be at offset 0 (aligned)
        assert_eq!(offset1, 0);
        // Second allocation should be after the first
        assert!(offset2 > offset1);
        assert!(offset2 >= std::mem::size_of::<u64>() as u32);

        // Values should be correct
        assert_eq!(*val1, 42);
        assert_eq!(*val2, 100);

        // Mutation should work
        *val1 = 999;
        assert_eq!(*val1, 999);
    }

    #[test]
    fn test_read_at_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(12345u64).unwrap();

        // Read it back
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 12345);
    }

    #[test]
    fn test_read_at_mut_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Read and modify
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &mut u64 = unsafe { arena.read_at_mut(offset) };
        assert_eq!(*value, 42);
        *value = 100;

        // Verify modification persisted
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_alloc_value_with_offset_struct() {
        #[derive(Debug, Clone, PartialEq)]
        struct TestNode {
            id: u64,
            name: [u8; 32],
            value: i32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let node = TestNode {
            id: 12345,
            name: [b'A'; 32],
            value: -999,
        };

        let (offset, stored) = arena.alloc_value_with_offset(node.clone()).unwrap();
        assert_eq!(stored.id, 12345);
        assert_eq!(stored.value, -999);

        // Read it back
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let read: &TestNode = unsafe { arena.read_at(offset) };
        assert_eq!(read.id, node.id);
        assert_eq!(read.name, node.name);
        assert_eq!(read.value, node.value);
    }

    #[test]
    fn test_alloc_value_with_offset_alignment() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        // Allocate a byte first to potentially misalign
        let (offset1, _) = arena.alloc_value_with_offset(1u8).unwrap();
        assert_eq!(offset1, 0);

        // Now allocate a u64 which requires 8-byte alignment
        let (offset2, val) = arena.alloc_value_with_offset(42u64).unwrap();

        // offset2 should be 8-byte aligned
        assert_eq!(offset2 % 8, 0);
        assert_eq!(*val, 42);
    }

    #[test]
    fn test_alloc_value_with_offset_multiple() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let mut offsets = Vec::new();
        for i in 0..100u64 {
            let (offset, val) = arena.alloc_value_with_offset(i).unwrap();
            offsets.push(offset);
            assert_eq!(*val, i);
        }

        // All offsets should be unique and in ascending order
        for window in offsets.windows(2) {
            assert!(window[0] < window[1]);
        }

        // Read all values back
        for (i, offset) in offsets.iter().enumerate() {
            // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
            let val: &u64 = unsafe { arena.read_at(*offset) };
            assert_eq!(*val, i as u64);
        }
    }

    #[test]
    fn test_arena_allocator_with_offset() {
        let allocator = ArenaAllocator::with_chunk_size(4096).unwrap();

        let epoch = allocator.current_epoch();
        let arena = allocator.arena(epoch).unwrap();

        let (offset, val) = arena.alloc_value_with_offset(42u64).unwrap();
        assert_eq!(*val, 42);

        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let read: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*read, 42);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "exceeds chunk used bytes")]
    fn test_read_at_out_of_bounds() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Read way past the allocated region: the bounds assert should panic
        // SAFETY: intentionally invalid offset to test the assertion
        unsafe {
            let _: &u64 = arena.read_at(4000);
        }
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "is not aligned")]
    fn test_read_at_misaligned() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        // Allocate a u8 at offset 0
        let (_offset, _) = arena.alloc_value_with_offset(0xFFu8).unwrap();
        // Also allocate some bytes so offset 1 is within used range
        let _ = arena.alloc_value_with_offset(0u64).unwrap();

        // Try to read a u64 at offset 1 (misaligned for u64)
        // SAFETY: intentionally misaligned offset to test the assertion
        unsafe {
            let _: &u64 = arena.read_at(1);
        }
    }

    #[test]
    #[cfg(not(miri))] // parking_lot uses integer-to-pointer casts incompatible with Miri strict provenance
    fn test_concurrent_read_stress() {
        use std::sync::Arc;

        let arena = Arc::new(Arena::with_chunk_size(EpochId::INITIAL, 1024 * 1024).unwrap());
        let num_threads = 8;
        let values_per_thread = 1000;

        // Each thread allocates values and records offsets
        let mut all_offsets = Vec::new();
        for t in 0..num_threads {
            let base = (t * values_per_thread) as u64;
            let mut offsets = Vec::with_capacity(values_per_thread);
            for i in 0..values_per_thread as u64 {
                let (offset, _) = arena.alloc_value_with_offset(base + i).unwrap();
                offsets.push(offset);
            }
            all_offsets.push(offsets);
        }

        // Now read all values back concurrently from multiple threads
        let mut handles = Vec::new();
        for (t, offsets) in all_offsets.into_iter().enumerate() {
            let arena = Arc::clone(&arena);
            let base = (t * values_per_thread) as u64;
            handles.push(std::thread::spawn(move || {
                for (i, offset) in offsets.iter().enumerate() {
                    // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
                    let val: &u64 = unsafe { arena.read_at(*offset) };
                    assert_eq!(*val, base + i as u64);
                }
            }));
        }

        for handle in handles {
            handle.join().expect("Thread panicked");
        }
    }

    #[test]
    fn test_alloc_value_with_offset_insufficient_space() {
        // Create a tiny arena where a large allocation will fail
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 64).unwrap();

        // Fill up the chunk
        let _ = arena.alloc_value_with_offset([0u8; 48]).unwrap();

        // This should return InsufficientSpace, not panic
        let result = arena.alloc_value_with_offset([0u8; 32]);
        assert!(result.is_err());
    }

    #[test]
    fn test_multi_type_interleaved() {
        #[derive(Debug, Clone, PartialEq)]
        #[repr(C)]
        struct Record {
            id: u64,
            flags: u32,
            weight: f32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        // Interleave different types
        let (off_u8, _) = arena.alloc_value_with_offset(0xAAu8).unwrap();
        let (off_u32, _) = arena.alloc_value_with_offset(0xBBBBu32).unwrap();
        let (off_u64, _) = arena.alloc_value_with_offset(0xCCCCCCCCu64).unwrap();
        let (off_rec, _) = arena
            .alloc_value_with_offset(Record {
                id: 42,
                flags: 0xFF,
                weight: std::f32::consts::PI,
            })
            .unwrap();

        // Read them all back
        // SAFETY: all offsets were returned by alloc_value_with_offset for matching types and arena
        unsafe {
            assert_eq!(*arena.read_at::<u8>(off_u8), 0xAA);
            assert_eq!(*arena.read_at::<u32>(off_u32), 0xBBBB);
            assert_eq!(*arena.read_at::<u64>(off_u64), 0xCCCCCCCC);

            let rec: &Record = arena.read_at(off_rec);
            assert_eq!(rec.id, 42);
            assert_eq!(rec.flags, 0xFF);
            assert!((rec.weight - std::f32::consts::PI).abs() < 0.001);
        }
    }
}