// grafeo_common/memory/arena.rs
1//! Epoch-based arena allocator for MVCC.
2//!
3//! This is how Grafeo manages memory for versioned data. Each epoch gets its
4//! own arena, and when all readers from an old epoch finish, we free the whole
5//! thing at once. Much faster than tracking individual allocations.
6//!
7//! Use [`ArenaAllocator`] to manage multiple epochs, or [`Arena`] directly
8//! if you're working with a single epoch.
9
10// Arena allocators require unsafe code for memory management
11#![allow(unsafe_code)]
12
13use std::alloc::{Layout, alloc, dealloc};
14use std::fmt;
15use std::ptr::NonNull;
16use std::sync::atomic::{AtomicUsize, Ordering};
17
18use parking_lot::RwLock;
19
20use crate::types::EpochId;
21
/// Default chunk size for arena allocations (1 MB).
///
/// New chunks are at least this large; a single allocation bigger than this
/// gets a dedicated chunk sized to fit (see `Arena::alloc_new_chunk`).
const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;
24
25/// Errors from arena allocation operations.
/// Errors from arena allocation operations.
///
/// Converted into `crate::Error` via the `From` impl below so callers can
/// use `?` at module boundaries.
#[derive(Debug, Clone)]
pub enum AllocError {
    /// The system allocator returned null (out of memory), or the requested
    /// size/alignment could not form a valid layout.
    OutOfMemory,
    /// The requested epoch does not exist.
    EpochNotFound(EpochId),
    /// Arena chunk has insufficient space for the allocation.
    /// Returned by the offset-based API, which is pinned to the first chunk.
    InsufficientSpace,
}
35
36impl fmt::Display for AllocError {
37    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
38        match self {
39            Self::OutOfMemory => write!(f, "arena allocation failed: out of memory"),
40            Self::EpochNotFound(id) => write!(f, "epoch {id} not found in arena allocator"),
41            Self::InsufficientSpace => {
42                write!(f, "arena chunk has insufficient space for allocation")
43            }
44        }
45    }
46}
47
// Marker impl: no underlying source error; Display/Debug carry the detail.
impl std::error::Error for AllocError {}
49
50impl From<AllocError> for crate::Error {
51    fn from(e: AllocError) -> Self {
52        match e {
53            AllocError::OutOfMemory | AllocError::InsufficientSpace => {
54                crate::Error::Storage(crate::utils::error::StorageError::Full)
55            }
56            AllocError::EpochNotFound(id) => {
57                crate::Error::Internal(format!("epoch {id} not found in arena allocator"))
58            }
59        }
60    }
61}
62
63/// A memory chunk in the arena.
/// A memory chunk in the arena.
///
/// A fixed-capacity block of raw memory, bump-allocated front to back;
/// `offset` is the high-water mark and only ever grows.
struct Chunk {
    /// Pointer to the start of the chunk (16-byte aligned; see `Chunk::new`).
    ptr: NonNull<u8>,
    /// Total capacity of the chunk in bytes.
    capacity: usize,
    /// Current allocation offset (bytes handed out so far).
    offset: AtomicUsize,
}
72
73impl Chunk {
74    /// Creates a new chunk with the given capacity.
75    ///
76    /// # Errors
77    ///
78    /// Returns `AllocError::OutOfMemory` if the system allocator fails.
79    fn new(capacity: usize) -> Result<Self, AllocError> {
80        let layout = Layout::from_size_align(capacity, 16).map_err(|_| AllocError::OutOfMemory)?;
81        // SAFETY: We're allocating a valid layout
82        let ptr = unsafe { alloc(layout) };
83        let ptr = NonNull::new(ptr).ok_or(AllocError::OutOfMemory)?;
84
85        Ok(Self {
86            ptr,
87            capacity,
88            offset: AtomicUsize::new(0),
89        })
90    }
91
92    /// Tries to allocate `size` bytes with the given alignment.
93    /// Returns None if there's not enough space.
94    fn try_alloc(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
95        self.try_alloc_with_offset(size, align).map(|(_, ptr)| ptr)
96    }
97
98    /// Tries to allocate `size` bytes with the given alignment.
99    /// Returns (offset, ptr) where offset is the aligned offset within this chunk.
100    /// Returns None if there's not enough space.
101    fn try_alloc_with_offset(&self, size: usize, align: usize) -> Option<(u32, NonNull<u8>)> {
102        loop {
103            let current = self.offset.load(Ordering::Relaxed);
104
105            // Calculate aligned offset
106            let aligned = (current + align - 1) & !(align - 1);
107            let new_offset = aligned + size;
108
109            if new_offset > self.capacity {
110                return None;
111            }
112
113            // Try to reserve the space
114            match self.offset.compare_exchange_weak(
115                current,
116                new_offset,
117                Ordering::AcqRel,
118                Ordering::Relaxed,
119            ) {
120                Ok(_) => {
121                    // SAFETY: We've reserved this range exclusively
122                    let ptr = unsafe { self.ptr.as_ptr().add(aligned) };
123                    return Some((aligned as u32, NonNull::new(ptr)?));
124                }
125                Err(_) => continue, // Retry
126            }
127        }
128    }
129
130    /// Returns the amount of memory used in this chunk.
131    fn used(&self) -> usize {
132        self.offset.load(Ordering::Relaxed)
133    }
134}
135
impl Drop for Chunk {
    fn drop(&mut self) {
        // Must mirror the Layout used in `Chunk::new` (same size, align 16),
        // or deallocation is undefined behavior.
        let layout = Layout::from_size_align(self.capacity, 16).expect("Invalid layout");
        // SAFETY: We allocated this memory with the same layout
        unsafe { dealloc(self.ptr.as_ptr(), layout) };
    }
}

// SAFETY: Chunk uses atomic operations for thread-safe allocation; each
// reserved byte range is handed to exactly one caller via the CAS in
// `try_alloc_with_offset`, so the raw pointer never aliases mutably.
unsafe impl Send for Chunk {}
unsafe impl Sync for Chunk {}
147
148/// A single epoch's memory arena.
149///
150/// Allocates by bumping a pointer forward - extremely fast. You can't free
151/// individual allocations; instead, drop the whole arena when the epoch
152/// is no longer needed.
153///
154/// Thread-safe: multiple threads can allocate concurrently using atomics.
pub struct Arena {
    /// The epoch this arena belongs to.
    epoch: EpochId,
    /// List of memory chunks, oldest first; new chunks are pushed at the end.
    chunks: RwLock<Vec<Chunk>>,
    /// Default chunk size for new allocations (minimum size of a new chunk).
    chunk_size: usize,
    /// Total bytes allocated (chunk capacities, not bytes handed out).
    total_allocated: AtomicUsize,
}
165
166impl Arena {
167    /// Creates a new arena for the given epoch.
168    ///
169    /// # Errors
170    ///
171    /// Returns `AllocError::OutOfMemory` if the initial chunk allocation fails.
172    pub fn new(epoch: EpochId) -> Result<Self, AllocError> {
173        Self::with_chunk_size(epoch, DEFAULT_CHUNK_SIZE)
174    }
175
176    /// Creates a new arena with a custom chunk size.
177    ///
178    /// # Errors
179    ///
180    /// Returns `AllocError::OutOfMemory` if the initial chunk allocation fails.
181    pub fn with_chunk_size(epoch: EpochId, chunk_size: usize) -> Result<Self, AllocError> {
182        let initial_chunk = Chunk::new(chunk_size)?;
183        Ok(Self {
184            epoch,
185            chunks: RwLock::new(vec![initial_chunk]),
186            chunk_size,
187            total_allocated: AtomicUsize::new(chunk_size),
188        })
189    }
190
191    /// Returns the epoch this arena belongs to.
192    #[must_use]
193    pub fn epoch(&self) -> EpochId {
194        self.epoch
195    }
196
197    /// Allocates `size` bytes with the given alignment.
198    ///
199    /// # Errors
200    ///
201    /// Returns `AllocError::OutOfMemory` if a new chunk is needed and
202    /// the system allocator fails.
203    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
204        // First try to allocate from existing chunks
205        {
206            let chunks = self.chunks.read();
207            for chunk in chunks.iter().rev() {
208                if let Some(ptr) = chunk.try_alloc(size, align) {
209                    return Ok(ptr);
210                }
211            }
212        }
213
214        // Need a new chunk
215        self.alloc_new_chunk(size, align)
216    }
217
218    /// Allocates a value of type T.
219    ///
220    /// # Errors
221    ///
222    /// Returns `AllocError::OutOfMemory` if allocation fails.
223    pub fn alloc_value<T>(&self, value: T) -> Result<&mut T, AllocError> {
224        let ptr = self.alloc(std::mem::size_of::<T>(), std::mem::align_of::<T>())?;
225        // SAFETY: We've allocated the correct size and alignment
226        Ok(unsafe {
227            let typed_ptr = ptr.as_ptr() as *mut T;
228            typed_ptr.write(value);
229            &mut *typed_ptr
230        })
231    }
232
233    /// Allocates a slice of values.
234    ///
235    /// # Errors
236    ///
237    /// Returns `AllocError::OutOfMemory` if allocation fails.
238    pub fn alloc_slice<T: Copy>(&self, values: &[T]) -> Result<&mut [T], AllocError> {
239        if values.is_empty() {
240            return Ok(&mut []);
241        }
242
243        let size = std::mem::size_of::<T>() * values.len();
244        let align = std::mem::align_of::<T>();
245        let ptr = self.alloc(size, align)?;
246
247        // SAFETY: We've allocated the correct size and alignment
248        Ok(unsafe {
249            let typed_ptr = ptr.as_ptr() as *mut T;
250            std::ptr::copy_nonoverlapping(values.as_ptr(), typed_ptr, values.len());
251            std::slice::from_raw_parts_mut(typed_ptr, values.len())
252        })
253    }
254
255    /// Allocates a value and returns its offset within the primary chunk.
256    ///
257    /// This is used by tiered storage to store values in the arena and track
258    /// their locations via compact u32 offsets in `HotVersionRef`.
259    ///
260    /// # Errors
261    ///
262    /// Returns `AllocError::InsufficientSpace` if the primary chunk does not
263    /// have enough room. Increase the chunk size for your use case.
264    #[cfg(feature = "tiered-storage")]
265    pub fn alloc_value_with_offset<T>(&self, value: T) -> Result<(u32, &mut T), AllocError> {
266        let size = std::mem::size_of::<T>();
267        let align = std::mem::align_of::<T>();
268
269        // Try to allocate in the first chunk to get a stable offset
270        let chunks = self.chunks.read();
271        let chunk = chunks
272            .first()
273            .expect("Arena should have at least one chunk");
274
275        let (offset, ptr) = chunk
276            .try_alloc_with_offset(size, align)
277            .ok_or(AllocError::InsufficientSpace)?;
278
279        // SAFETY: We've allocated the correct size and alignment
280        Ok(unsafe {
281            let typed_ptr = ptr.as_ptr().cast::<T>();
282            typed_ptr.write(value);
283            (offset, &mut *typed_ptr)
284        })
285    }
286
287    /// Reads a value at the given offset in the primary chunk.
288    ///
289    /// # Safety
290    ///
291    /// - The offset must have been returned by a previous `alloc_value_with_offset` call
292    /// - The type T must match what was stored at that offset
293    /// - The arena must not have been dropped
294    #[cfg(feature = "tiered-storage")]
295    pub unsafe fn read_at<T>(&self, offset: u32) -> &T {
296        let chunks = self.chunks.read();
297        let chunk = chunks
298            .first()
299            .expect("Arena should have at least one chunk");
300
301        debug_assert!(
302            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
303            "read_at: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
304            offset,
305            std::any::type_name::<T>(),
306            (offset as usize) + std::mem::size_of::<T>(),
307            chunk.used()
308        );
309        debug_assert!(
310            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
311            "read_at: offset {} is not aligned for {} (alignment {})",
312            offset,
313            std::any::type_name::<T>(),
314            std::mem::align_of::<T>()
315        );
316
317        // SAFETY: Caller guarantees offset is valid and T matches stored type
318        unsafe {
319            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
320            &*ptr
321        }
322    }
323
324    /// Reads a value mutably at the given offset in the primary chunk.
325    ///
326    /// # Safety
327    ///
328    /// - The offset must have been returned by a previous `alloc_value_with_offset` call
329    /// - The type T must match what was stored at that offset
330    /// - The arena must not have been dropped
331    /// - No other references to this value may exist
332    #[cfg(feature = "tiered-storage")]
333    pub unsafe fn read_at_mut<T>(&self, offset: u32) -> &mut T {
334        let chunks = self.chunks.read();
335        let chunk = chunks
336            .first()
337            .expect("Arena should have at least one chunk");
338
339        debug_assert!(
340            (offset as usize) + std::mem::size_of::<T>() <= chunk.capacity,
341            "read_at_mut: offset {} + size_of::<{}>() = {} exceeds chunk capacity {}",
342            offset,
343            std::any::type_name::<T>(),
344            (offset as usize) + std::mem::size_of::<T>(),
345            chunk.capacity
346        );
347        debug_assert!(
348            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
349            "read_at_mut: offset {} is not aligned for {} (alignment {})",
350            offset,
351            std::any::type_name::<T>(),
352            std::mem::align_of::<T>()
353        );
354
355        // SAFETY: Caller guarantees offset is valid, T matches, and no aliasing
356        unsafe {
357            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
358            &mut *ptr
359        }
360    }
361
362    /// Allocates a new chunk and performs the allocation.
363    fn alloc_new_chunk(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
364        let chunk_size = self.chunk_size.max(size + align);
365        let chunk = Chunk::new(chunk_size)?;
366
367        self.total_allocated
368            .fetch_add(chunk_size, Ordering::Relaxed);
369
370        // The chunk was sized to fit this allocation, so this cannot fail.
371        let ptr = chunk
372            .try_alloc(size, align)
373            .expect("fresh chunk sized to fit");
374
375        let mut chunks = self.chunks.write();
376        chunks.push(chunk);
377
378        Ok(ptr)
379    }
380
381    /// Returns the total memory allocated by this arena.
382    #[must_use]
383    pub fn total_allocated(&self) -> usize {
384        self.total_allocated.load(Ordering::Relaxed)
385    }
386
387    /// Returns the total memory used (not just allocated capacity).
388    #[must_use]
389    pub fn total_used(&self) -> usize {
390        let chunks = self.chunks.read();
391        chunks.iter().map(Chunk::used).sum()
392    }
393
394    /// Returns statistics about this arena.
395    #[must_use]
396    pub fn stats(&self) -> ArenaStats {
397        let chunks = self.chunks.read();
398        ArenaStats {
399            epoch: self.epoch,
400            chunk_count: chunks.len(),
401            total_allocated: self.total_allocated.load(Ordering::Relaxed),
402            total_used: chunks.iter().map(Chunk::used).sum(),
403        }
404    }
405}
406
407/// Statistics about an arena.
/// Statistics about an arena.
///
/// `total_used <= total_allocated`: allocated counts whole chunk capacities,
/// used counts only the bytes actually handed out.
#[derive(Debug, Clone)]
pub struct ArenaStats {
    /// The epoch this arena belongs to.
    pub epoch: EpochId,
    /// Number of chunks allocated.
    pub chunk_count: usize,
    /// Total bytes allocated (sum of chunk capacities).
    pub total_allocated: usize,
    /// Total bytes used (sum of chunk bump offsets).
    pub total_used: usize,
}
419
420/// Manages arenas across multiple epochs.
421///
422/// Use this to create new epochs, allocate in the current epoch, and
423/// clean up old epochs when they're no longer needed.
pub struct ArenaAllocator {
    /// Map of epochs to arenas. Invariant: the current epoch always has an
    /// entry (created eagerly in the constructor and in `new_epoch`).
    arenas: RwLock<hashbrown::HashMap<EpochId, Arena>>,
    /// Current epoch, stored as usize and converted to/from `EpochId` (u64).
    current_epoch: AtomicUsize,
    /// Default chunk size passed to every new `Arena`.
    chunk_size: usize,
}
432
433impl ArenaAllocator {
434    /// Creates a new arena allocator.
435    ///
436    /// # Errors
437    ///
438    /// Returns `AllocError::OutOfMemory` if the initial arena allocation fails.
439    pub fn new() -> Result<Self, AllocError> {
440        Self::with_chunk_size(DEFAULT_CHUNK_SIZE)
441    }
442
443    /// Creates a new arena allocator with a custom chunk size.
444    ///
445    /// # Errors
446    ///
447    /// Returns `AllocError::OutOfMemory` if the initial arena allocation fails.
448    pub fn with_chunk_size(chunk_size: usize) -> Result<Self, AllocError> {
449        let allocator = Self {
450            arenas: RwLock::new(hashbrown::HashMap::new()),
451            current_epoch: AtomicUsize::new(0),
452            chunk_size,
453        };
454
455        // Create the initial epoch
456        let epoch = EpochId::INITIAL;
457        allocator
458            .arenas
459            .write()
460            .insert(epoch, Arena::with_chunk_size(epoch, chunk_size)?);
461
462        Ok(allocator)
463    }
464
465    /// Returns the current epoch.
466    #[must_use]
467    pub fn current_epoch(&self) -> EpochId {
468        EpochId::new(self.current_epoch.load(Ordering::Acquire) as u64)
469    }
470
471    /// Creates a new epoch and returns its ID.
472    ///
473    /// # Errors
474    ///
475    /// Returns `AllocError::OutOfMemory` if the arena allocation fails.
476    pub fn new_epoch(&self) -> Result<EpochId, AllocError> {
477        let new_id = self.current_epoch.fetch_add(1, Ordering::AcqRel) as u64 + 1;
478        let epoch = EpochId::new(new_id);
479
480        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
481        self.arenas.write().insert(epoch, arena);
482
483        Ok(epoch)
484    }
485
486    /// Gets the arena for a specific epoch.
487    ///
488    /// # Errors
489    ///
490    /// Returns `AllocError::EpochNotFound` if the epoch doesn't exist.
491    pub fn arena(
492        &self,
493        epoch: EpochId,
494    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
495        let arenas = self.arenas.read();
496        if !arenas.contains_key(&epoch) {
497            return Err(AllocError::EpochNotFound(epoch));
498        }
499        Ok(parking_lot::RwLockReadGuard::map(arenas, |arenas| {
500            &arenas[&epoch]
501        }))
502    }
503
504    /// Ensures an arena exists for the given epoch, creating it if necessary.
505    /// Returns whether a new arena was created.
506    ///
507    /// # Errors
508    ///
509    /// Returns `AllocError::OutOfMemory` if a new arena allocation fails.
510    #[cfg(feature = "tiered-storage")]
511    pub fn ensure_epoch(&self, epoch: EpochId) -> Result<bool, AllocError> {
512        // Fast path: check if epoch already exists
513        {
514            let arenas = self.arenas.read();
515            if arenas.contains_key(&epoch) {
516                return Ok(false);
517            }
518        }
519
520        // Slow path: create the epoch
521        let mut arenas = self.arenas.write();
522        // Double-check after acquiring write lock
523        if arenas.contains_key(&epoch) {
524            return Ok(false);
525        }
526
527        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
528        arenas.insert(epoch, arena);
529        Ok(true)
530    }
531
532    /// Gets or creates an arena for a specific epoch.
533    ///
534    /// # Errors
535    ///
536    /// Returns `AllocError` if the arena allocation fails.
537    #[cfg(feature = "tiered-storage")]
538    pub fn arena_or_create(
539        &self,
540        epoch: EpochId,
541    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
542        self.ensure_epoch(epoch)?;
543        self.arena(epoch)
544    }
545
546    /// Allocates in the current epoch.
547    ///
548    /// # Errors
549    ///
550    /// Returns `AllocError` if allocation fails.
551    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
552        let epoch = self.current_epoch();
553        let arenas = self.arenas.read();
554        arenas
555            .get(&epoch)
556            .expect("current epoch always exists")
557            .alloc(size, align)
558    }
559
560    /// Drops an epoch, freeing all its memory.
561    ///
562    /// This should only be called when no readers are using this epoch.
563    pub fn drop_epoch(&self, epoch: EpochId) {
564        self.arenas.write().remove(&epoch);
565    }
566
567    /// Returns total memory allocated across all epochs.
568    #[must_use]
569    pub fn total_allocated(&self) -> usize {
570        self.arenas
571            .read()
572            .values()
573            .map(Arena::total_allocated)
574            .sum()
575    }
576}
577
578impl Default for ArenaAllocator {
579    /// Creates a default arena allocator.
580    ///
581    /// # Panics
582    ///
583    /// Panics if the initial arena allocation fails (out of memory).
584    fn default() -> Self {
585        Self::new().expect("failed to allocate default arena")
586    }
587}
588
#[cfg(test)]
mod tests {
    use super::*;

    /// Two separate allocations must return distinct, non-aliasing pointers.
    #[test]
    fn test_arena_basic_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        // Allocate some bytes
        let ptr1 = arena.alloc(100, 8).unwrap();
        let ptr2 = arena.alloc(200, 8).unwrap();

        // Pointers should be different
        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    /// Typed allocation stores the value and hands back a usable `&mut`.
    #[test]
    fn test_arena_value_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let value = arena.alloc_value(42u64).unwrap();
        assert_eq!(*value, 42);

        *value = 100;
        assert_eq!(*value, 100);
    }

    /// Slice allocation copies the input; the copy is independently mutable.
    #[test]
    fn test_arena_slice_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let slice = arena.alloc_slice(&[1u32, 2, 3, 4, 5]).unwrap();
        assert_eq!(slice, &[1, 2, 3, 4, 5]);

        slice[0] = 10;
        assert_eq!(slice[0], 10);
    }

    /// A request bigger than the chunk size must grow the arena rather
    /// than fail.
    #[test]
    fn test_arena_large_allocation() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 1024).unwrap();

        // Allocate something larger than the chunk size
        let _ptr = arena.alloc(2048, 8).unwrap();

        // Should have created a new chunk
        assert!(arena.stats().chunk_count >= 2);
    }

    /// Epoch IDs are sequential and `current_epoch` tracks the latest one.
    #[test]
    fn test_arena_allocator_epochs() {
        let allocator = ArenaAllocator::new().unwrap();

        let epoch0 = allocator.current_epoch();
        assert_eq!(epoch0, EpochId::INITIAL);

        let epoch1 = allocator.new_epoch().unwrap();
        assert_eq!(epoch1, EpochId::new(1));

        let epoch2 = allocator.new_epoch().unwrap();
        assert_eq!(epoch2, EpochId::new(2));

        // Current epoch should be the latest
        assert_eq!(allocator.current_epoch(), epoch2);
    }

    /// Allocating through the allocator routes into the current epoch's arena.
    #[test]
    fn test_arena_allocator_allocation() {
        let allocator = ArenaAllocator::new().unwrap();

        let ptr1 = allocator.alloc(100, 8).unwrap();
        let ptr2 = allocator.alloc(100, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    /// Dropping an epoch releases its arena memory from the global total.
    #[test]
    fn test_arena_drop_epoch() {
        let allocator = ArenaAllocator::new().unwrap();

        let initial_mem = allocator.total_allocated();

        let epoch1 = allocator.new_epoch().unwrap();
        // Allocate some memory in the new epoch
        {
            let arena = allocator.arena(epoch1).unwrap();
            arena.alloc(10000, 8).unwrap();
        }

        let after_alloc = allocator.total_allocated();
        assert!(after_alloc > initial_mem);

        // Drop the epoch
        allocator.drop_epoch(epoch1);

        // Memory should decrease
        let after_drop = allocator.total_allocated();
        assert!(after_drop < after_alloc);
    }

    /// Stats reflect the eagerly-allocated initial chunk and track usage.
    #[test]
    fn test_arena_stats() {
        let arena = Arena::with_chunk_size(EpochId::new(5), 4096).unwrap();

        let stats = arena.stats();
        assert_eq!(stats.epoch, EpochId::new(5));
        assert_eq!(stats.chunk_count, 1);
        assert_eq!(stats.total_allocated, 4096);
        assert_eq!(stats.total_used, 0);

        arena.alloc(100, 8).unwrap();
        let stats = arena.stats();
        assert!(stats.total_used >= 100);
    }
}
704
#[cfg(all(test, feature = "tiered-storage"))]
mod tiered_storage_tests {
    use super::*;

    /// Offsets from the primary chunk are monotonically increasing and the
    /// returned references read/write the stored values.
    #[test]
    fn test_alloc_value_with_offset_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, val1) = arena.alloc_value_with_offset(42u64).unwrap();
        let (offset2, val2) = arena.alloc_value_with_offset(100u64).unwrap();

        // First allocation should be at offset 0 (aligned)
        assert_eq!(offset1, 0);
        // Second allocation should be after the first
        assert!(offset2 > offset1);
        assert!(offset2 >= std::mem::size_of::<u64>() as u32);

        // Values should be correct
        assert_eq!(*val1, 42);
        assert_eq!(*val2, 100);

        // Mutation should work
        *val1 = 999;
        assert_eq!(*val1, 999);
    }

    /// `read_at` round-trips a value stored by `alloc_value_with_offset`.
    #[test]
    fn test_read_at_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(12345u64).unwrap();

        // Read it back
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 12345);
    }

    /// Mutations through `read_at_mut` are visible to later `read_at` calls.
    #[test]
    fn test_read_at_mut_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Read and modify
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &mut u64 = unsafe { arena.read_at_mut(offset) };
        assert_eq!(*value, 42);
        *value = 100;

        // Verify modification persisted
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 100);
    }

    /// Multi-field structs survive the store/read round trip intact.
    #[test]
    fn test_alloc_value_with_offset_struct() {
        #[derive(Debug, Clone, PartialEq)]
        struct TestNode {
            id: u64,
            name: [u8; 32],
            value: i32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let node = TestNode {
            id: 12345,
            name: [b'A'; 32],
            value: -999,
        };

        let (offset, stored) = arena.alloc_value_with_offset(node.clone()).unwrap();
        assert_eq!(stored.id, 12345);
        assert_eq!(stored.value, -999);

        // Read it back
        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let read: &TestNode = unsafe { arena.read_at(offset) };
        assert_eq!(read.id, node.id);
        assert_eq!(read.name, node.name);
        assert_eq!(read.value, node.value);
    }

    /// Offsets respect the alignment of the allocated type.
    #[test]
    fn test_alloc_value_with_offset_alignment() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        // Allocate a byte first to potentially misalign
        let (offset1, _) = arena.alloc_value_with_offset(1u8).unwrap();
        assert_eq!(offset1, 0);

        // Now allocate a u64 which requires 8-byte alignment
        let (offset2, val) = arena.alloc_value_with_offset(42u64).unwrap();

        // offset2 should be 8-byte aligned
        assert_eq!(offset2 % 8, 0);
        assert_eq!(*val, 42);
    }

    /// Many sequential allocations produce strictly increasing, readable
    /// offsets.
    #[test]
    fn test_alloc_value_with_offset_multiple() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let mut offsets = Vec::new();
        for i in 0..100u64 {
            let (offset, val) = arena.alloc_value_with_offset(i).unwrap();
            offsets.push(offset);
            assert_eq!(*val, i);
        }

        // All offsets should be unique and in ascending order
        for window in offsets.windows(2) {
            assert!(window[0] < window[1]);
        }

        // Read all values back
        for (i, offset) in offsets.iter().enumerate() {
            // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
            let val: &u64 = unsafe { arena.read_at(*offset) };
            assert_eq!(*val, i as u64);
        }
    }

    /// The offset API also works through the epoch-level allocator.
    #[test]
    fn test_arena_allocator_with_offset() {
        let allocator = ArenaAllocator::with_chunk_size(4096).unwrap();

        let epoch = allocator.current_epoch();
        let arena = allocator.arena(epoch).unwrap();

        let (offset, val) = arena.alloc_value_with_offset(42u64).unwrap();
        assert_eq!(*val, 42);

        // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
        let read: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*read, 42);
    }

    /// Out-of-bounds reads trip the debug bounds assertion.
    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "exceeds chunk used bytes")]
    fn test_read_at_out_of_bounds() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Read way past the allocated region: should panic in debug
        // SAFETY: intentionally invalid offset to test debug assertion
        unsafe {
            let _: &u64 = arena.read_at(4000);
        }
    }

    /// Misaligned reads trip the debug alignment assertion.
    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "is not aligned")]
    fn test_read_at_misaligned() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        // Allocate a u8 at offset 0
        let (_offset, _) = arena.alloc_value_with_offset(0xFFu8).unwrap();
        // Also allocate some bytes so offset 1 is within used range
        let _ = arena.alloc_value_with_offset(0u64).unwrap();

        // Try to read a u64 at offset 1 (misaligned for u64)
        // SAFETY: intentionally misaligned offset to test debug assertion
        unsafe {
            let _: &u64 = arena.read_at(1);
        }
    }

    /// Concurrent readers over a shared arena all observe the values they
    /// (sequentially) stored — exercises the Send/Sync claims on Chunk.
    #[test]
    #[cfg(not(miri))] // parking_lot uses integer-to-pointer casts incompatible with Miri strict provenance
    fn test_concurrent_read_stress() {
        use std::sync::Arc;

        let arena = Arc::new(Arena::with_chunk_size(EpochId::INITIAL, 1024 * 1024).unwrap());
        let num_threads = 8;
        let values_per_thread = 1000;

        // Each thread allocates values and records offsets
        let mut all_offsets = Vec::new();
        for t in 0..num_threads {
            let base = (t * values_per_thread) as u64;
            let mut offsets = Vec::with_capacity(values_per_thread);
            for i in 0..values_per_thread as u64 {
                let (offset, _) = arena.alloc_value_with_offset(base + i).unwrap();
                offsets.push(offset);
            }
            all_offsets.push(offsets);
        }

        // Now read all values back concurrently from multiple threads
        let mut handles = Vec::new();
        for (t, offsets) in all_offsets.into_iter().enumerate() {
            let arena = Arc::clone(&arena);
            let base = (t * values_per_thread) as u64;
            handles.push(std::thread::spawn(move || {
                for (i, offset) in offsets.iter().enumerate() {
                    // SAFETY: offset was returned by alloc_value_with_offset for the same type and arena
                    let val: &u64 = unsafe { arena.read_at(*offset) };
                    assert_eq!(*val, base + i as u64);
                }
            }));
        }

        for handle in handles {
            handle.join().expect("Thread panicked");
        }
    }

    /// Filling the primary chunk yields an error, not a panic, because the
    /// offset API never spills into secondary chunks.
    #[test]
    fn test_alloc_value_with_offset_insufficient_space() {
        // Create a tiny arena where a large allocation will fail
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 64).unwrap();

        // Fill up the chunk
        let _ = arena.alloc_value_with_offset([0u8; 48]).unwrap();

        // This should return InsufficientSpace, not panic
        let result = arena.alloc_value_with_offset([0u8; 32]);
        assert!(result.is_err());
    }

    /// Interleaving different types keeps each value readable at its own
    /// offset with its own type.
    #[test]
    fn test_multi_type_interleaved() {
        #[derive(Debug, Clone, PartialEq)]
        #[repr(C)]
        struct Record {
            id: u64,
            flags: u32,
            weight: f32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        // Interleave different types
        let (off_u8, _) = arena.alloc_value_with_offset(0xAAu8).unwrap();
        let (off_u32, _) = arena.alloc_value_with_offset(0xBBBBu32).unwrap();
        let (off_u64, _) = arena.alloc_value_with_offset(0xCCCCCCCCu64).unwrap();
        let (off_rec, _) = arena
            .alloc_value_with_offset(Record {
                id: 42,
                flags: 0xFF,
                weight: std::f32::consts::PI,
            })
            .unwrap();

        // Read them all back
        // SAFETY: all offsets were returned by alloc_value_with_offset for matching types and arena
        unsafe {
            assert_eq!(*arena.read_at::<u8>(off_u8), 0xAA);
            assert_eq!(*arena.read_at::<u32>(off_u32), 0xBBBB);
            assert_eq!(*arena.read_at::<u64>(off_u64), 0xCCCCCCCC);

            let rec: &Record = arena.read_at(off_rec);
            assert_eq!(rec.id, 42);
            assert_eq!(rec.flags, 0xFF);
            assert!((rec.weight - std::f32::consts::PI).abs() < 0.001);
        }
    }
}