#![allow(unsafe_code)]
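//! Epoch-based arena allocation.
//!
//! Memory is bump-allocated out of large chunks, grouped by [`EpochId`], and
//! reclaimed all at once when an epoch's [`Arena`] is dropped.
//!
//! A minimal usage sketch (illustrative only, not compiled as a doc-test):
//!
//! ```ignore
//! let allocator = ArenaAllocator::new()?;
//! let epoch = allocator.new_epoch()?;
//! {
//!     let arena = allocator.arena(epoch)?;
//!     let value = arena.alloc_value(42u64)?;
//!     assert_eq!(*value, 42);
//! } // read guard released here
//! // Reclaim every allocation from this epoch at once.
//! allocator.drop_epoch(epoch);
//! ```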

use std::alloc::{Layout, alloc, dealloc};
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};

use parking_lot::RwLock;

use crate::types::EpochId;

const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;

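/// Errors returned by [`Arena`] and [`ArenaAllocator`].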
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum AllocError {
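    /// Memory could not be obtained from the system allocator, or a size
    /// computation overflowed.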
    OutOfMemory,
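    /// No arena exists for the given epoch.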
    EpochNotFound(EpochId),
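    /// The fixed chunk could not fit the requested allocation.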
    InsufficientSpace,
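    /// The requested alignment was zero or not a power of two.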
    InvalidAlignment(usize),
}

impl fmt::Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "arena allocation failed: out of memory"),
            Self::EpochNotFound(id) => write!(f, "epoch {id} not found in arena allocator"),
            Self::InsufficientSpace => {
                write!(f, "arena chunk has insufficient space for allocation")
            }
            Self::InvalidAlignment(align) => {
                write!(f, "alignment must be a non-zero power of two, got {align}")
            }
        }
    }
}

impl std::error::Error for AllocError {}

impl From<AllocError> for crate::Error {
    fn from(e: AllocError) -> Self {
        match e {
            AllocError::OutOfMemory | AllocError::InsufficientSpace => {
                crate::Error::Storage(crate::utils::error::StorageError::Full)
            }
            AllocError::EpochNotFound(id) => {
                crate::Error::Internal(format!("epoch {id} not found in arena allocator"))
            }
            AllocError::InvalidAlignment(align) => crate::Error::Internal(format!(
                "alignment must be a non-zero power of two, got {align}"
            )),
        }
    }
}

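/// A fixed-size block of raw memory with an atomic bump cursor.
///
/// `ptr` points at `capacity` bytes allocated with 16-byte alignment;
/// `offset` counts the bytes handed out so far.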
struct Chunk {
    ptr: NonNull<u8>,
    capacity: usize,
    offset: AtomicUsize,
}

impl Chunk {
    fn new(capacity: usize) -> Result<Self, AllocError> {
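        // Offsets are handed out as `u32` (see `try_alloc_with_offset`), so
        // cap chunk capacity at `u32::MAX` to keep every offset representable.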
        if capacity > u32::MAX as usize {
            return Err(AllocError::OutOfMemory);
        }
        let layout = Layout::from_size_align(capacity, 16).map_err(|_| AllocError::OutOfMemory)?;
        let ptr = unsafe { alloc(layout) };
        let ptr = NonNull::new(ptr).ok_or(AllocError::OutOfMemory)?;

        Ok(Self {
            ptr,
            capacity,
            offset: AtomicUsize::new(0),
        })
    }

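    /// Bump-allocates `size` bytes at the given alignment, returning just the
    /// pointer and discarding the chunk-relative offset.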
    fn try_alloc(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
        self.try_alloc_with_offset(size, align).map(|(_, ptr)| ptr)
    }

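    /// Bump-allocates `size` bytes at the given alignment, returning the
    /// chunk-relative offset together with the pointer.
    ///
    /// Lock-free: contending threads race on a compare-exchange of the bump
    /// cursor and retry on failure. Returns `None` if the chunk is full or an
    /// address computation would overflow.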
    fn try_alloc_with_offset(&self, size: usize, align: usize) -> Option<(u32, NonNull<u8>)> {
        debug_assert!(
            align.is_power_of_two(),
            "alignment must be a power of two, got {align}"
        );
        let base_addr = self.ptr.as_ptr() as usize;
        loop {
            let current = self.offset.load(Ordering::Relaxed);

            let align_mask = align.checked_sub(1)?;
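            // Round the candidate address up to the next multiple of `align`
            // with the usual `(addr + align - 1) & !(align - 1)` trick, using
            // checked arithmetic so overflow yields `None` instead of wrapping.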
            let current_addr = base_addr.checked_add(current)?;
            let aligned_addr = current_addr.checked_add(align_mask)? & !align_mask;
            let aligned = aligned_addr - base_addr;
            let new_offset = aligned.checked_add(size)?;

            if new_offset > self.capacity {
                return None;
            }

            match self.offset.compare_exchange_weak(
                current,
                new_offset,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    let ptr = unsafe { self.ptr.as_ptr().add(aligned) };
                    #[allow(clippy::cast_possible_truncation)]
                    return Some((aligned as u32, NonNull::new(ptr)?));
                }
                Err(_) => continue,
            }
        }
    }

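    /// Number of bytes handed out from this chunk so far (including padding).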
    fn used(&self) -> usize {
        self.offset.load(Ordering::Relaxed)
    }
}

impl Drop for Chunk {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(self.capacity, 16).expect("Invalid layout");
        unsafe { dealloc(self.ptr.as_ptr(), layout) };
    }
}

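// SAFETY: `Chunk` exclusively owns its allocation; the raw pointer is never
// aliased mutably across threads, and the bump cursor is an atomic.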
unsafe impl Send for Chunk {}
unsafe impl Sync for Chunk {}

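/// A lock-free bump allocator tied to a single epoch.
///
/// Memory is carved out of fixed-size chunks; a new chunk is appended when
/// the existing ones run out of space. Allocations are never freed
/// individually: everything is released at once when the `Arena` is dropped,
/// and destructors of values placed in the arena are **not** run.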
pub struct Arena {
    epoch: EpochId,
    chunks: RwLock<Vec<Chunk>>,
    chunk_size: usize,
    total_allocated: AtomicUsize,
}

impl Arena {
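    /// Creates an arena for `epoch` with the default 1 MiB chunk size.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial chunk cannot be
    /// allocated.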
    pub fn new(epoch: EpochId) -> Result<Self, AllocError> {
        Self::with_chunk_size(epoch, DEFAULT_CHUNK_SIZE)
    }

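    /// Creates an arena for `epoch` whose chunks are `chunk_size` bytes.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial chunk cannot be
    /// allocated.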
    pub fn with_chunk_size(epoch: EpochId, chunk_size: usize) -> Result<Self, AllocError> {
        let initial_chunk = Chunk::new(chunk_size)?;
        Ok(Self {
            epoch,
            chunks: RwLock::new(vec![initial_chunk]),
            chunk_size,
            total_allocated: AtomicUsize::new(chunk_size),
        })
    }

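    /// The epoch this arena belongs to.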
    #[must_use]
    pub fn epoch(&self) -> EpochId {
        self.epoch
    }

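    /// Allocates `size` bytes with the given alignment.
    ///
    /// Existing chunks are tried newest-first before a fresh chunk is
    /// allocated.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::InvalidAlignment`] if `align` is zero or not a
    /// power of two, and [`AllocError::OutOfMemory`] if a new chunk cannot be
    /// allocated.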
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        if align == 0 || !align.is_power_of_two() {
            return Err(AllocError::InvalidAlignment(align));
        }
        {
            let chunks = self.chunks.read();
            for chunk in chunks.iter().rev() {
                if let Some(ptr) = chunk.try_alloc(size, align) {
                    return Ok(ptr);
                }
            }
        }

        self.alloc_new_chunk(size, align)
    }

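    /// Moves `value` into the arena and returns a mutable reference to it.
    ///
    /// The value's destructor will not run when the arena is dropped.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying allocation fails.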
    pub fn alloc_value<T>(&self, value: T) -> Result<&mut T, AllocError> {
        let ptr = self.alloc(std::mem::size_of::<T>(), std::mem::align_of::<T>())?;
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            typed_ptr.write(value);
            &mut *typed_ptr
        })
    }

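    /// Copies `values` into the arena and returns a mutable slice over the
    /// copy.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying allocation fails or the byte size
    /// overflows.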
    pub fn alloc_slice<T: Copy>(&self, values: &[T]) -> Result<&mut [T], AllocError> {
        if values.is_empty() {
            return Ok(&mut []);
        }

        let size = std::mem::size_of::<T>()
            .checked_mul(values.len())
            .ok_or(AllocError::OutOfMemory)?;
        let align = std::mem::align_of::<T>();
        let ptr = self.alloc(size, align)?;

        Ok(unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            std::ptr::copy_nonoverlapping(values.as_ptr(), typed_ptr, values.len());
            std::slice::from_raw_parts_mut(typed_ptr, values.len())
        })
    }

    #[cfg(feature = "tiered-storage")]
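    /// Like [`Arena::alloc_value`], but also returns the value's offset so it
    /// can be re-read later via [`Arena::read_at`].
    ///
    /// Only the arena's first chunk is used, so offsets stay stable and
    /// chunk-relative.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::InsufficientSpace`] if the first chunk cannot
    /// fit the value.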
    pub fn alloc_value_with_offset<T>(&self, value: T) -> Result<(u32, &mut T), AllocError> {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();

        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        let (offset, ptr) = chunk
            .try_alloc_with_offset(size, align)
            .ok_or(AllocError::InsufficientSpace)?;

        Ok(unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            typed_ptr.write(value);
            (offset, &mut *typed_ptr)
        })
    }

    #[cfg(feature = "tiered-storage")]
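    /// Reads a `T` at `offset` in the arena's first chunk.
    ///
    /// Bounds and alignment are checked with assertions, but per-type
    /// initialization is not tracked.
    ///
    /// # Safety
    ///
    /// The caller must ensure that a valid `T` was previously written at
    /// `offset` (e.g. by [`Arena::alloc_value_with_offset`]) and that no
    /// mutable reference to it is live.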
    pub unsafe fn read_at<T>(&self, offset: u32) -> &T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
            "read_at: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.used()
        );
        assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &*ptr
        }
    }

    #[cfg(feature = "tiered-storage")]
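    /// Mutable counterpart of [`Arena::read_at`].
    ///
    /// Note: bounds are checked against chunk *capacity* rather than used
    /// bytes, so this can also target reserved-but-unwritten regions.
    ///
    /// # Safety
    ///
    /// The caller must ensure a valid `T` exists at `offset` and that no
    /// other reference to it (shared or mutable) is live while the returned
    /// borrow is held.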
    pub unsafe fn read_at_mut<T>(&self, offset: u32) -> &mut T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.capacity,
            "read_at_mut: offset {} + size_of::<{}>() = {} exceeds chunk capacity {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.capacity
        );
        assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at_mut: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &mut *ptr
        }
    }

    fn alloc_new_chunk(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
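        // Reserve worst-case alignment padding up front. Concurrent callers
        // may each create a chunk here, which wastes space but stays correct.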
        let required = size.checked_add(align).ok_or(AllocError::OutOfMemory)?;
        let chunk_size = self.chunk_size.max(required);
        let chunk = Chunk::new(chunk_size)?;

        self.total_allocated
            .fetch_add(chunk_size, Ordering::Relaxed);

        let ptr = chunk
            .try_alloc(size, align)
            .expect("fresh chunk sized to fit");

        let mut chunks = self.chunks.write();
        chunks.push(chunk);

        Ok(ptr)
    }

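    /// Total bytes reserved from the system allocator across all chunks.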
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.total_allocated.load(Ordering::Relaxed)
    }

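    /// Total bytes actually handed out to allocations (including padding).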
    #[must_use]
    pub fn total_used(&self) -> usize {
        let chunks = self.chunks.read();
        chunks.iter().map(Chunk::used).sum()
    }

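    /// A point-in-time snapshot of this arena's usage counters.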
    #[must_use]
    pub fn stats(&self) -> ArenaStats {
        let chunks = self.chunks.read();
        ArenaStats {
            epoch: self.epoch,
            chunk_count: chunks.len(),
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: chunks.iter().map(Chunk::used).sum(),
        }
    }
}

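/// Usage statistics for a single [`Arena`].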
#[derive(Debug, Clone)]
pub struct ArenaStats {
    pub epoch: EpochId,
    pub chunk_count: usize,
    pub total_allocated: usize,
    pub total_used: usize,
}

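/// Manages one [`Arena`] per epoch so an epoch's memory can be reclaimed in
/// one step by dropping its arena.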
pub struct ArenaAllocator {
    arenas: RwLock<hashbrown::HashMap<EpochId, Arena>>,
    current_epoch: AtomicUsize,
    chunk_size: usize,
}

impl ArenaAllocator {
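    /// Creates an allocator with the default chunk size and an arena for
    /// [`EpochId::INITIAL`].
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial arena cannot be
    /// allocated.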
    pub fn new() -> Result<Self, AllocError> {
        Self::with_chunk_size(DEFAULT_CHUNK_SIZE)
    }

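    /// Creates an allocator whose arenas use `chunk_size`-byte chunks.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial arena cannot be
    /// allocated.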
    pub fn with_chunk_size(chunk_size: usize) -> Result<Self, AllocError> {
        let allocator = Self {
            arenas: RwLock::new(hashbrown::HashMap::new()),
            current_epoch: AtomicUsize::new(0),
            chunk_size,
        };

        let epoch = EpochId::INITIAL;
        allocator
            .arenas
            .write()
            .insert(epoch, Arena::with_chunk_size(epoch, chunk_size)?);

        Ok(allocator)
    }

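    /// The epoch new allocations are served from.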
    #[must_use]
    pub fn current_epoch(&self) -> EpochId {
        EpochId::new(self.current_epoch.load(Ordering::Acquire) as u64)
    }

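    /// Advances the current epoch and creates an arena for it.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the new arena's initial chunk
    /// cannot be allocated; note that the epoch counter has already advanced
    /// in that case.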
    pub fn new_epoch(&self) -> Result<EpochId, AllocError> {
        let new_id = self.current_epoch.fetch_add(1, Ordering::AcqRel) as u64 + 1;
        let epoch = EpochId::new(new_id);

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        self.arenas.write().insert(epoch, arena);

        Ok(epoch)
    }

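    /// Returns a read guard over the arena for `epoch`.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::EpochNotFound`] if no arena exists for `epoch`.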
    pub fn arena(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        let arenas = self.arenas.read();
        if !arenas.contains_key(&epoch) {
            return Err(AllocError::EpochNotFound(epoch));
        }
        Ok(parking_lot::RwLockReadGuard::map(arenas, |arenas| {
            &arenas[&epoch]
        }))
    }

    #[cfg(feature = "tiered-storage")]
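    /// Creates an arena for `epoch` if one does not already exist.
    ///
    /// Returns `true` if a new arena was created. Uses a double-checked
    /// read-then-write locking pattern so the common case takes only the
    /// read lock.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the new arena cannot be
    /// allocated.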
    pub fn ensure_epoch(&self, epoch: EpochId) -> Result<bool, AllocError> {
        {
            let arenas = self.arenas.read();
            if arenas.contains_key(&epoch) {
                return Ok(false);
            }
        }

        let mut arenas = self.arenas.write();
        if arenas.contains_key(&epoch) {
            return Ok(false);
        }

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        arenas.insert(epoch, arena);
        Ok(true)
    }

    #[cfg(feature = "tiered-storage")]
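    /// Returns the arena for `epoch`, creating it first if necessary.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if a new arena cannot be
    /// allocated.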
    pub fn arena_or_create(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        self.ensure_epoch(epoch)?;
        self.arena(epoch)
    }

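    /// Allocates from the current epoch's arena.
    ///
    /// # Errors
    ///
    /// Propagates allocation failures from the underlying [`Arena`].
    ///
    /// # Panics
    ///
    /// Panics if the current epoch's arena was removed via
    /// [`ArenaAllocator::drop_epoch`].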
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        let epoch = self.current_epoch();
        let arenas = self.arenas.read();
        arenas
            .get(&epoch)
            .expect("current epoch always exists")
            .alloc(size, align)
    }

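    /// Drops the arena for `epoch`, releasing all of its memory at once.
    /// Dropping an unknown epoch is a no-op.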
    pub fn drop_epoch(&self, epoch: EpochId) {
        self.arenas.write().remove(&epoch);
    }

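    /// Total bytes reserved across all live epochs' arenas.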
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.arenas
            .read()
            .values()
            .map(Arena::total_allocated)
            .sum()
    }
}

impl Default for ArenaAllocator {
    fn default() -> Self {
        Self::new().expect("failed to allocate default arena")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arena_basic_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let ptr1 = arena.alloc(100, 8).unwrap();
        let ptr2 = arena.alloc(200, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_value_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let value = arena.alloc_value(42u64).unwrap();
        assert_eq!(*value, 42);

        *value = 100;
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_arena_slice_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let slice = arena.alloc_slice(&[1u32, 2, 3, 4, 5]).unwrap();
        assert_eq!(slice, &[1, 2, 3, 4, 5]);

        slice[0] = 10;
        assert_eq!(slice[0], 10);
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 1024).unwrap();

        let _ptr = arena.alloc(2048, 8).unwrap();

        assert!(arena.stats().chunk_count >= 2);
    }

    #[test]
    fn test_arena_allocator_epochs() {
        let allocator = ArenaAllocator::new().unwrap();

        let epoch0 = allocator.current_epoch();
        assert_eq!(epoch0, EpochId::INITIAL);

        let epoch1 = allocator.new_epoch().unwrap();
        assert_eq!(epoch1, EpochId::new(1));

        let epoch2 = allocator.new_epoch().unwrap();
        assert_eq!(epoch2, EpochId::new(2));

        assert_eq!(allocator.current_epoch(), epoch2);
    }

    #[test]
    fn test_arena_allocator_allocation() {
        let allocator = ArenaAllocator::new().unwrap();

        let ptr1 = allocator.alloc(100, 8).unwrap();
        let ptr2 = allocator.alloc(100, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_drop_epoch() {
        let allocator = ArenaAllocator::new().unwrap();

        let initial_mem = allocator.total_allocated();

        let epoch1 = allocator.new_epoch().unwrap();
        {
            let arena = allocator.arena(epoch1).unwrap();
            arena.alloc(10000, 8).unwrap();
        }

        let after_alloc = allocator.total_allocated();
        assert!(after_alloc > initial_mem);

        allocator.drop_epoch(epoch1);

        let after_drop = allocator.total_allocated();
        assert!(after_drop < after_alloc);
    }

    #[test]
    fn test_arena_stats() {
        let arena = Arena::with_chunk_size(EpochId::new(5), 4096).unwrap();

        let stats = arena.stats();
        assert_eq!(stats.epoch, EpochId::new(5));
        assert_eq!(stats.chunk_count, 1);
        assert_eq!(stats.total_allocated, 4096);
        assert_eq!(stats.total_used, 0);

        arena.alloc(100, 8).unwrap();
        let stats = arena.stats();
        assert!(stats.total_used >= 100);
    }
}

#[cfg(all(test, feature = "tiered-storage"))]
mod tiered_storage_tests {
    use super::*;

    #[test]
    #[allow(clippy::cast_possible_truncation)]
    fn test_alloc_value_with_offset_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, val1) = arena.alloc_value_with_offset(42u64).unwrap();
        let (offset2, val2) = arena.alloc_value_with_offset(100u64).unwrap();

        assert_eq!(offset1, 0);
        assert!(offset2 > offset1);
        assert!(offset2 >= std::mem::size_of::<u64>() as u32);

        assert_eq!(*val1, 42);
        assert_eq!(*val2, 100);

        *val1 = 999;
        assert_eq!(*val1, 999);
    }

    #[test]
    fn test_read_at_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(12345u64).unwrap();

        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 12345);
    }

    #[test]
    fn test_read_at_mut_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        let value: &mut u64 = unsafe { arena.read_at_mut(offset) };
        assert_eq!(*value, 42);
        *value = 100;

        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_alloc_value_with_offset_struct() {
        #[derive(Debug, Clone, PartialEq)]
        struct TestNode {
            id: u64,
            name: [u8; 32],
            value: i32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let node = TestNode {
            id: 12345,
            name: [b'A'; 32],
            value: -999,
        };

        let (offset, stored) = arena.alloc_value_with_offset(node.clone()).unwrap();
        assert_eq!(stored.id, 12345);
        assert_eq!(stored.value, -999);

        let read: &TestNode = unsafe { arena.read_at(offset) };
        assert_eq!(read.id, node.id);
        assert_eq!(read.name, node.name);
        assert_eq!(read.value, node.value);
    }

    #[test]
    fn test_alloc_value_with_offset_alignment() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, _) = arena.alloc_value_with_offset(1u8).unwrap();
        assert_eq!(offset1, 0);

        let (offset2, val) = arena.alloc_value_with_offset(42u64).unwrap();

        assert_eq!(offset2 % 8, 0);
        assert_eq!(*val, 42);
    }

    #[test]
    fn test_alloc_value_with_offset_multiple() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let mut offsets = Vec::new();
        for i in 0..100u64 {
            let (offset, val) = arena.alloc_value_with_offset(i).unwrap();
            offsets.push(offset);
            assert_eq!(*val, i);
        }

        for window in offsets.windows(2) {
            assert!(window[0] < window[1]);
        }

        for (i, offset) in offsets.iter().enumerate() {
            let val: &u64 = unsafe { arena.read_at(*offset) };
            assert_eq!(*val, i as u64);
        }
    }

    #[test]
    fn test_arena_allocator_with_offset() {
        let allocator = ArenaAllocator::with_chunk_size(4096).unwrap();

        let epoch = allocator.current_epoch();
        let arena = allocator.arena(epoch).unwrap();

        let (offset, val) = arena.alloc_value_with_offset(42u64).unwrap();
        assert_eq!(*val, 42);

        let read: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*read, 42);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "exceeds chunk used bytes")]
    fn test_read_at_out_of_bounds() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        unsafe {
            let _: &u64 = arena.read_at(4000);
        }
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "is not aligned")]
    fn test_read_at_misaligned() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(0xFFu8).unwrap();
        let _ = arena.alloc_value_with_offset(0u64).unwrap();

        unsafe {
            let _: &u64 = arena.read_at(1);
        }
    }

    #[test]
    #[cfg(not(miri))]
    fn test_concurrent_read_stress() {
        use std::sync::Arc;

        let arena = Arc::new(Arena::with_chunk_size(EpochId::INITIAL, 1024 * 1024).unwrap());
        let num_threads = 8;
        let values_per_thread = 1000;

        let mut all_offsets = Vec::new();
        for t in 0..num_threads {
            let base = (t * values_per_thread) as u64;
            let mut offsets = Vec::with_capacity(values_per_thread);
            for i in 0..values_per_thread as u64 {
                let (offset, _) = arena.alloc_value_with_offset(base + i).unwrap();
                offsets.push(offset);
            }
            all_offsets.push(offsets);
        }

        let mut handles = Vec::new();
        for (t, offsets) in all_offsets.into_iter().enumerate() {
            let arena = Arc::clone(&arena);
            let base = (t * values_per_thread) as u64;
            handles.push(std::thread::spawn(move || {
                for (i, offset) in offsets.iter().enumerate() {
                    let val: &u64 = unsafe { arena.read_at(*offset) };
                    assert_eq!(*val, base + i as u64);
                }
            }));
        }

        for handle in handles {
            handle.join().expect("Thread panicked");
        }
    }

    #[test]
    fn test_alloc_value_with_offset_insufficient_space() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 64).unwrap();

        let _ = arena.alloc_value_with_offset([0u8; 48]).unwrap();

        let result = arena.alloc_value_with_offset([0u8; 32]);
        assert!(result.is_err());
    }

    #[test]
    fn test_multi_type_interleaved() {
        #[derive(Debug, Clone, PartialEq)]
        #[repr(C)]
        struct Record {
            id: u64,
            flags: u32,
            weight: f32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (off_u8, _) = arena.alloc_value_with_offset(0xAAu8).unwrap();
        let (off_u32, _) = arena.alloc_value_with_offset(0xBBBBu32).unwrap();
        let (off_u64, _) = arena.alloc_value_with_offset(0xCCCCCCCCu64).unwrap();
        let (off_rec, _) = arena
            .alloc_value_with_offset(Record {
                id: 42,
                flags: 0xFF,
                weight: std::f32::consts::PI,
            })
            .unwrap();

        unsafe {
            assert_eq!(*arena.read_at::<u8>(off_u8), 0xAA);
            assert_eq!(*arena.read_at::<u32>(off_u32), 0xBBBB);
            assert_eq!(*arena.read_at::<u64>(off_u64), 0xCCCCCCCC);

            let rec: &Record = arena.read_at(off_rec);
            assert_eq!(rec.id, 42);
            assert_eq!(rec.flags, 0xFF);
            assert!((rec.weight - std::f32::consts::PI).abs() < 0.001);
        }
    }
}