#![allow(unsafe_code)]

use std::alloc::{Layout, alloc, dealloc};
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};

use parking_lot::RwLock;

use crate::types::EpochId;

/// Default size of each arena chunk: 1 MiB.
const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;

/// Errors that can occur during arena allocation.
#[derive(Debug, Clone)]
pub enum AllocError {
    /// The underlying system allocator returned null, or the layout was invalid.
    OutOfMemory,
    /// No arena exists for the requested epoch.
    EpochNotFound(EpochId),
    /// The chunk does not have enough remaining space for the allocation.
    InsufficientSpace,
}

impl fmt::Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "arena allocation failed: out of memory"),
            Self::EpochNotFound(id) => write!(f, "epoch {id} not found in arena allocator"),
            Self::InsufficientSpace => {
                write!(f, "arena chunk has insufficient space for allocation")
            }
        }
    }
}

impl std::error::Error for AllocError {}

impl From<AllocError> for crate::Error {
    fn from(e: AllocError) -> Self {
        match e {
            AllocError::OutOfMemory | AllocError::InsufficientSpace => {
                crate::Error::Storage(crate::utils::error::StorageError::Full)
            }
            AllocError::EpochNotFound(id) => {
                crate::Error::Internal(format!("epoch {id} not found in arena allocator"))
            }
        }
    }
}

/// A fixed-capacity memory block with an atomic bump pointer.
struct Chunk {
    /// Pointer to the start of the chunk's backing allocation.
    ptr: NonNull<u8>,
    /// Total capacity of the chunk in bytes.
    capacity: usize,
    /// Current bump offset; the next allocation starts at or after this.
    offset: AtomicUsize,
}

impl Chunk {
    /// Allocates a new chunk of `capacity` bytes, aligned to 16.
    fn new(capacity: usize) -> Result<Self, AllocError> {
        let layout = Layout::from_size_align(capacity, 16).map_err(|_| AllocError::OutOfMemory)?;
        // SAFETY: `layout` has a valid (non-zero) size and alignment.
        let ptr = unsafe { alloc(layout) };
        let ptr = NonNull::new(ptr).ok_or(AllocError::OutOfMemory)?;

        Ok(Self {
            ptr,
            capacity,
            offset: AtomicUsize::new(0),
        })
    }

    /// Tries to bump-allocate `size` bytes at the given alignment.
    fn try_alloc(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
        self.try_alloc_with_offset(size, align).map(|(_, ptr)| ptr)
    }

    /// Tries to bump-allocate, also returning the allocation's byte offset
    /// within the chunk. Offsets are `u32`, so chunks are assumed to be
    /// smaller than 4 GiB.
    fn try_alloc_with_offset(&self, size: usize, align: usize) -> Option<(u32, NonNull<u8>)> {
        // The chunk base is only guaranteed to be 16-byte aligned, so
        // aligning the *offset* yields an aligned *pointer* only when
        // `align` divides 16.
        debug_assert!(
            align.is_power_of_two() && align <= 16,
            "alignment {align} exceeds the chunk's base alignment of 16"
        );
        loop {
            let current = self.offset.load(Ordering::Relaxed);

            // Round the current offset up to the requested alignment.
            let aligned = (current + align - 1) & !(align - 1);
            let new_offset = aligned + size;

            if new_offset > self.capacity {
                return None;
            }

            match self.offset.compare_exchange_weak(
                current,
                new_offset,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // SAFETY: `aligned + size <= capacity`, so the pointer
                    // stays within the chunk's allocation.
                    let ptr = unsafe { self.ptr.as_ptr().add(aligned) };
                    return Some((aligned as u32, NonNull::new(ptr)?));
                }
                Err(_) => continue, // lost the race to another thread; retry
            }
        }
    }

    /// Number of bytes currently handed out from this chunk.
    fn used(&self) -> usize {
        self.offset.load(Ordering::Relaxed)
    }
}

impl Drop for Chunk {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(self.capacity, 16).expect("Invalid layout");
        // SAFETY: `ptr` was allocated in `Chunk::new` with this exact layout.
        unsafe { dealloc(self.ptr.as_ptr(), layout) };
    }
}

// SAFETY: `Chunk` owns its allocation, and the bump offset is atomic, so the
// chunk can be sent to and shared across threads.
unsafe impl Send for Chunk {}
unsafe impl Sync for Chunk {}

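/// An epoch-scoped bump allocator.
///
/// Allocation bumps an atomic offset inside a chunk (lock-free on the fast
/// path); when every chunk is full, a new one is appended under a write
/// lock. Individual allocations are never freed: all memory is reclaimed at
/// once when the `Arena` is dropped.
///
/// # Example
///
/// A minimal usage sketch (marked `ignore` because the crate path for
/// `Arena` and `EpochId` is assumed here):
///
/// ```ignore
/// let arena = Arena::new(EpochId::INITIAL)?;
/// let n = arena.alloc_value(42u64)?;
/// assert_eq!(*n, 42);
/// let xs = arena.alloc_slice(&[1u32, 2, 3])?;
/// assert_eq!(xs, &[1, 2, 3]);
/// ```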
pub struct Arena {
    /// The epoch this arena belongs to.
    epoch: EpochId,
    /// Chunks, oldest first; new chunks are pushed at the end.
    chunks: RwLock<Vec<Chunk>>,
    /// Size used for each new chunk (oversized allocations get larger ones).
    chunk_size: usize,
    /// Total bytes reserved across all chunks.
    total_allocated: AtomicUsize,
}

impl Arena {
    /// Creates an arena for `epoch` with the default chunk size.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial chunk cannot be
    /// allocated.
    pub fn new(epoch: EpochId) -> Result<Self, AllocError> {
        Self::with_chunk_size(epoch, DEFAULT_CHUNK_SIZE)
    }

    /// Creates an arena for `epoch` with a custom chunk size.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial chunk cannot be
    /// allocated.
    pub fn with_chunk_size(epoch: EpochId, chunk_size: usize) -> Result<Self, AllocError> {
        let initial_chunk = Chunk::new(chunk_size)?;
        Ok(Self {
            epoch,
            chunks: RwLock::new(vec![initial_chunk]),
            chunk_size,
            total_allocated: AtomicUsize::new(chunk_size),
        })
    }

    /// The epoch this arena belongs to.
    #[must_use]
    pub fn epoch(&self) -> EpochId {
        self.epoch
    }

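    /// Allocates `size` bytes at the given alignment and returns a pointer
    /// into the arena.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if no existing chunk has room and
    /// a new chunk cannot be allocated.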
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        // Fast path: try the existing chunks, newest first.
        {
            let chunks = self.chunks.read();
            for chunk in chunks.iter().rev() {
                if let Some(ptr) = chunk.try_alloc(size, align) {
                    return Ok(ptr);
                }
            }
        }

        // Slow path: every chunk is full; append a new one.
        self.alloc_new_chunk(size, align)
    }

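    /// Moves `value` into the arena and returns a mutable reference bound to
    /// the arena's lifetime. The value's destructor is never run (the arena
    /// frees raw bytes only), so this is best suited to plain-data types.
    ///
    /// # Example
    ///
    /// A short sketch (marked `ignore`; crate path assumed):
    ///
    /// ```ignore
    /// let counter = arena.alloc_value(0u64)?;
    /// *counter += 1;
    /// assert_eq!(*counter, 1);
    /// ```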
    pub fn alloc_value<T>(&self, value: T) -> Result<&mut T, AllocError> {
        let ptr = self.alloc(std::mem::size_of::<T>(), std::mem::align_of::<T>())?;
        // SAFETY: `ptr` is valid for writes of `T` and correctly aligned.
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr() as *mut T;
            typed_ptr.write(value);
            &mut *typed_ptr
        })
    }

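    /// Copies `values` into the arena and returns a mutable slice over the
    /// copy. The `T: Copy` bound guarantees no destructors are skipped.
    ///
    /// # Example
    ///
    /// A short sketch (marked `ignore`; crate path assumed):
    ///
    /// ```ignore
    /// let xs = arena.alloc_slice(&[1u32, 2, 3])?;
    /// xs[0] = 10;
    /// ```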
    pub fn alloc_slice<T: Copy>(&self, values: &[T]) -> Result<&mut [T], AllocError> {
        if values.is_empty() {
            return Ok(&mut []);
        }

        let size = std::mem::size_of::<T>() * values.len();
        let align = std::mem::align_of::<T>();
        let ptr = self.alloc(size, align)?;

        // SAFETY: `ptr` is valid for `values.len()` writes of `T`, correctly
        // aligned, and cannot overlap `values`.
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr() as *mut T;
            std::ptr::copy_nonoverlapping(values.as_ptr(), typed_ptr, values.len());
            std::slice::from_raw_parts_mut(typed_ptr, values.len())
        })
    }

    #[cfg(feature = "tiered-storage")]
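    /// Like [`Self::alloc_value`], but also returns the value's byte offset
    /// within the arena's *initial* chunk, so it can be re-read later via
    /// [`Self::read_at`]. Only the first chunk is used so that offsets stay
    /// stable; once it is full, this returns
    /// [`AllocError::InsufficientSpace`] instead of growing.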
    pub fn alloc_value_with_offset<T>(&self, value: T) -> Result<(u32, &mut T), AllocError> {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();

        // Only the initial chunk is used, so returned offsets remain stable.
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        let (offset, ptr) = chunk
            .try_alloc_with_offset(size, align)
            .ok_or(AllocError::InsufficientSpace)?;

        // SAFETY: `ptr` is valid for writes of `T` and correctly aligned.
        Ok(unsafe {
            let typed_ptr = ptr.as_ptr().cast::<T>();
            typed_ptr.write(value);
            (offset, &mut *typed_ptr)
        })
    }

    #[cfg(feature = "tiered-storage")]
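    /// Reads a `T` stored at `offset` in the arena's initial chunk.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that a valid, initialized `T` was written at
    /// `offset` (e.g. by [`Self::alloc_value_with_offset`]), that `offset` is
    /// aligned for `T`, and that no mutable reference to those bytes is live.
    /// Debug builds additionally assert bounds and alignment.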
    pub unsafe fn read_at<T>(&self, offset: u32) -> &T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        debug_assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
            "read_at: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.used()
        );
        debug_assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        // SAFETY: the caller guarantees a valid `T` lives at `offset`.
        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &*ptr
        }
    }

    #[cfg(feature = "tiered-storage")]
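    /// Mutable counterpart of [`Self::read_at`].
    ///
    /// # Safety
    ///
    /// All requirements of [`Self::read_at`] apply, plus exclusivity: no
    /// other reference (shared or mutable) to the same bytes may exist while
    /// the returned `&mut T` is alive, even though this method takes `&self`.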
    pub unsafe fn read_at_mut<T>(&self, offset: u32) -> &mut T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        debug_assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.capacity,
            "read_at_mut: offset {} + size_of::<{}>() = {} exceeds chunk capacity {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.capacity
        );
        debug_assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at_mut: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        // SAFETY: the caller guarantees a valid `T` lives at `offset` and
        // that this is the only live reference to it.
        unsafe {
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &mut *ptr
        }
    }

    /// Appends a new chunk big enough for the requested allocation and
    /// allocates from it.
    fn alloc_new_chunk(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        let chunk_size = self.chunk_size.max(size + align);
        let chunk = Chunk::new(chunk_size)?;

        self.total_allocated
            .fetch_add(chunk_size, Ordering::Relaxed);

        // The chunk was sized to fit this allocation, so this cannot fail.
        let ptr = chunk
            .try_alloc(size, align)
            .expect("fresh chunk sized to fit");

        let mut chunks = self.chunks.write();
        chunks.push(chunk);

        Ok(ptr)
    }

    /// Total bytes reserved across all chunks.
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.total_allocated.load(Ordering::Relaxed)
    }

    /// Total bytes actually handed out across all chunks.
    #[must_use]
    pub fn total_used(&self) -> usize {
        let chunks = self.chunks.read();
        chunks.iter().map(Chunk::used).sum()
    }

    /// A point-in-time snapshot of this arena's statistics.
    #[must_use]
    pub fn stats(&self) -> ArenaStats {
        let chunks = self.chunks.read();
        ArenaStats {
            epoch: self.epoch,
            chunk_count: chunks.len(),
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: chunks.iter().map(Chunk::used).sum(),
        }
    }
}

/// Point-in-time statistics for an [`Arena`].
#[derive(Debug, Clone)]
pub struct ArenaStats {
    /// The arena's epoch.
    pub epoch: EpochId,
    /// Number of chunks currently allocated.
    pub chunk_count: usize,
    /// Total bytes reserved across all chunks.
    pub total_allocated: usize,
    /// Total bytes handed out across all chunks.
    pub total_used: usize,
}

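/// Manages one [`Arena`] per epoch, so an entire epoch's memory can be
/// reclaimed in a single operation.
///
/// # Example
///
/// A sketch of the epoch lifecycle (marked `ignore` because the crate path
/// is assumed here):
///
/// ```ignore
/// let allocator = ArenaAllocator::new()?;
/// let epoch = allocator.new_epoch()?;
/// allocator.arena(epoch)?.alloc(128, 8)?;
/// allocator.drop_epoch(epoch); // frees the whole epoch at once
/// ```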
pub struct ArenaAllocator {
    /// One arena per live epoch.
    arenas: RwLock<hashbrown::HashMap<EpochId, Arena>>,
    /// The id of the current epoch.
    current_epoch: AtomicUsize,
    /// Chunk size used for every new arena.
    chunk_size: usize,
}

impl ArenaAllocator {
    /// Creates an allocator with the default chunk size.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial arena cannot be
    /// allocated.
    pub fn new() -> Result<Self, AllocError> {
        Self::with_chunk_size(DEFAULT_CHUNK_SIZE)
    }

    /// Creates an allocator with a custom chunk size.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the initial arena cannot be
    /// allocated.
    pub fn with_chunk_size(chunk_size: usize) -> Result<Self, AllocError> {
        let allocator = Self {
            arenas: RwLock::new(hashbrown::HashMap::new()),
            current_epoch: AtomicUsize::new(0),
            chunk_size,
        };

        // Eagerly create the arena for the initial epoch.
        let epoch = EpochId::INITIAL;
        allocator
            .arenas
            .write()
            .insert(epoch, Arena::with_chunk_size(epoch, chunk_size)?);

        Ok(allocator)
    }

    /// The current epoch.
    #[must_use]
    pub fn current_epoch(&self) -> EpochId {
        EpochId::new(self.current_epoch.load(Ordering::Acquire) as u64)
    }

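    /// Starts a new epoch, creates its arena, and returns the new epoch's id.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the new arena's initial chunk
    /// cannot be allocated.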
    pub fn new_epoch(&self) -> Result<EpochId, AllocError> {
        let new_id = self.current_epoch.fetch_add(1, Ordering::AcqRel) as u64 + 1;
        let epoch = EpochId::new(new_id);

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        self.arenas.write().insert(epoch, arena);

        Ok(epoch)
    }

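    /// Returns a read-guarded handle to the arena for `epoch`.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::EpochNotFound`] if no arena exists for `epoch`.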
    pub fn arena(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        let arenas = self.arenas.read();
        if !arenas.contains_key(&epoch) {
            return Err(AllocError::EpochNotFound(epoch));
        }
        Ok(parking_lot::RwLockReadGuard::map(arenas, |arenas| {
            &arenas[&epoch]
        }))
    }

    #[cfg(feature = "tiered-storage")]
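    /// Creates the arena for `epoch` if it does not already exist, using a
    /// double-checked read-then-write locking pattern. Returns `true` if a
    /// new arena was created.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::OutOfMemory`] if the arena's initial chunk
    /// cannot be allocated.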
    pub fn ensure_epoch(&self, epoch: EpochId) -> Result<bool, AllocError> {
        // Fast path: the arena already exists.
        {
            let arenas = self.arenas.read();
            if arenas.contains_key(&epoch) {
                return Ok(false);
            }
        }

        // Slow path: re-check under the write lock, since another thread may
        // have created the arena in the meantime.
        let mut arenas = self.arenas.write();
        if arenas.contains_key(&epoch) {
            return Ok(false);
        }

        let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
        arenas.insert(epoch, arena);
        Ok(true)
    }

    /// Returns the arena for `epoch`, creating it first if necessary.
    #[cfg(feature = "tiered-storage")]
    pub fn arena_or_create(
        &self,
        epoch: EpochId,
    ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
        self.ensure_epoch(epoch)?;
        self.arena(epoch)
    }

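    /// Allocates `size` bytes from the current epoch's arena.
    ///
    /// # Panics
    ///
    /// Panics if the current epoch's arena has been removed, which can only
    /// happen via [`Self::drop_epoch`].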
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        let epoch = self.current_epoch();
        let arenas = self.arenas.read();
        arenas
            .get(&epoch)
            .expect("current epoch always exists")
            .alloc(size, align)
    }

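    /// Drops the arena for `epoch`, freeing all of its memory at once. The
    /// caller must ensure that no pointers or references into the epoch's
    /// arena are still in use.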
    pub fn drop_epoch(&self, epoch: EpochId) {
        self.arenas.write().remove(&epoch);
    }

    /// Total bytes reserved across all live epochs.
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.arenas
            .read()
            .values()
            .map(Arena::total_allocated)
            .sum()
    }
}

impl Default for ArenaAllocator {
    fn default() -> Self {
        Self::new().expect("failed to allocate default arena")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arena_basic_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let ptr1 = arena.alloc(100, 8).unwrap();
        let ptr2 = arena.alloc(200, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_value_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let value = arena.alloc_value(42u64).unwrap();
        assert_eq!(*value, 42);

        *value = 100;
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_arena_slice_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let slice = arena.alloc_slice(&[1u32, 2, 3, 4, 5]).unwrap();
        assert_eq!(slice, &[1, 2, 3, 4, 5]);

        slice[0] = 10;
        assert_eq!(slice[0], 10);
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 1024).unwrap();

        // Larger than the chunk size, so a second chunk must be appended.
        let _ptr = arena.alloc(2048, 8).unwrap();

        assert!(arena.stats().chunk_count >= 2);
    }

    #[test]
    fn test_arena_allocator_epochs() {
        let allocator = ArenaAllocator::new().unwrap();

        let epoch0 = allocator.current_epoch();
        assert_eq!(epoch0, EpochId::INITIAL);

        let epoch1 = allocator.new_epoch().unwrap();
        assert_eq!(epoch1, EpochId::new(1));

        let epoch2 = allocator.new_epoch().unwrap();
        assert_eq!(epoch2, EpochId::new(2));

        assert_eq!(allocator.current_epoch(), epoch2);
    }

    #[test]
    fn test_arena_allocator_allocation() {
        let allocator = ArenaAllocator::new().unwrap();

        let ptr1 = allocator.alloc(100, 8).unwrap();
        let ptr2 = allocator.alloc(100, 8).unwrap();

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_arena_drop_epoch() {
        let allocator = ArenaAllocator::new().unwrap();

        let initial_mem = allocator.total_allocated();

        let epoch1 = allocator.new_epoch().unwrap();
        {
            let arena = allocator.arena(epoch1).unwrap();
            arena.alloc(10000, 8).unwrap();
        }

        let after_alloc = allocator.total_allocated();
        assert!(after_alloc > initial_mem);

        allocator.drop_epoch(epoch1);

        let after_drop = allocator.total_allocated();
        assert!(after_drop < after_alloc);
    }

    #[test]
    fn test_arena_stats() {
        let arena = Arena::with_chunk_size(EpochId::new(5), 4096).unwrap();

        let stats = arena.stats();
        assert_eq!(stats.epoch, EpochId::new(5));
        assert_eq!(stats.chunk_count, 1);
        assert_eq!(stats.total_allocated, 4096);
        assert_eq!(stats.total_used, 0);

        arena.alloc(100, 8).unwrap();
        let stats = arena.stats();
        assert!(stats.total_used >= 100);
    }
}

#[cfg(all(test, feature = "tiered-storage"))]
mod tiered_storage_tests {
    use super::*;

    #[test]
    fn test_alloc_value_with_offset_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, val1) = arena.alloc_value_with_offset(42u64).unwrap();
        let (offset2, val2) = arena.alloc_value_with_offset(100u64).unwrap();

        assert_eq!(offset1, 0);
        assert!(offset2 > offset1);
        assert!(offset2 >= std::mem::size_of::<u64>() as u32);

        assert_eq!(*val1, 42);
        assert_eq!(*val2, 100);

        *val1 = 999;
        assert_eq!(*val1, 999);
    }

    #[test]
    fn test_read_at_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(12345u64).unwrap();

        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 12345);
    }

    #[test]
    fn test_read_at_mut_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        let value: &mut u64 = unsafe { arena.read_at_mut(offset) };
        assert_eq!(*value, 42);
        *value = 100;

        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_alloc_value_with_offset_struct() {
        #[derive(Debug, Clone, PartialEq)]
        struct TestNode {
            id: u64,
            name: [u8; 32],
            value: i32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let node = TestNode {
            id: 12345,
            name: [b'A'; 32],
            value: -999,
        };

        let (offset, stored) = arena.alloc_value_with_offset(node.clone()).unwrap();
        assert_eq!(stored.id, 12345);
        assert_eq!(stored.value, -999);

        let read: &TestNode = unsafe { arena.read_at(offset) };
        assert_eq!(read.id, node.id);
        assert_eq!(read.name, node.name);
        assert_eq!(read.value, node.value);
    }

    #[test]
    fn test_alloc_value_with_offset_alignment() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, _) = arena.alloc_value_with_offset(1u8).unwrap();
        assert_eq!(offset1, 0);

        let (offset2, val) = arena.alloc_value_with_offset(42u64).unwrap();

        // The u64 must land on an 8-byte boundary.
        assert_eq!(offset2 % 8, 0);
        assert_eq!(*val, 42);
    }

    #[test]
    fn test_alloc_value_with_offset_multiple() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let mut offsets = Vec::new();
        for i in 0..100u64 {
            let (offset, val) = arena.alloc_value_with_offset(i).unwrap();
            offsets.push(offset);
            assert_eq!(*val, i);
        }

        // Offsets must be strictly increasing.
        for window in offsets.windows(2) {
            assert!(window[0] < window[1]);
        }

        // Every value must be readable back through its offset.
        for (i, offset) in offsets.iter().enumerate() {
            let val: &u64 = unsafe { arena.read_at(*offset) };
            assert_eq!(*val, i as u64);
        }
    }

    #[test]
    fn test_arena_allocator_with_offset() {
        let allocator = ArenaAllocator::with_chunk_size(4096).unwrap();

        let epoch = allocator.current_epoch();
        let arena = allocator.arena(epoch).unwrap();

        let (offset, val) = arena.alloc_value_with_offset(42u64).unwrap();
        assert_eq!(*val, 42);

        let read: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*read, 42);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "exceeds chunk used bytes")]
    fn test_read_at_out_of_bounds() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Offset 4000 is far past the bytes actually written.
        unsafe {
            let _: &u64 = arena.read_at(4000);
        }
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "is not aligned")]
    fn test_read_at_misaligned() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(0xFFu8).unwrap();
        let _ = arena.alloc_value_with_offset(0u64).unwrap();

        // Offset 1 is in bounds but misaligned for u64.
        unsafe {
            let _: &u64 = arena.read_at(1);
        }
    }

    #[test]
    #[cfg(not(miri))] // threads plus many raw-pointer reads are very slow under Miri
    fn test_concurrent_read_stress() {
        use std::sync::Arc;

        let arena = Arc::new(Arena::with_chunk_size(EpochId::INITIAL, 1024 * 1024).unwrap());
        let num_threads = 8;
        let values_per_thread = 1000;

        // Allocate all values up front, single-threaded.
        let mut all_offsets = Vec::new();
        for t in 0..num_threads {
            let base = (t * values_per_thread) as u64;
            let mut offsets = Vec::with_capacity(values_per_thread);
            for i in 0..values_per_thread as u64 {
                let (offset, _) = arena.alloc_value_with_offset(base + i).unwrap();
                offsets.push(offset);
            }
            all_offsets.push(offsets);
        }

        // Then read them back concurrently from many threads.
        let mut handles = Vec::new();
        for (t, offsets) in all_offsets.into_iter().enumerate() {
            let arena = Arc::clone(&arena);
            let base = (t * values_per_thread) as u64;
            handles.push(std::thread::spawn(move || {
                for (i, offset) in offsets.iter().enumerate() {
                    let val: &u64 = unsafe { arena.read_at(*offset) };
                    assert_eq!(*val, base + i as u64);
                }
            }));
        }

        for handle in handles {
            handle.join().expect("Thread panicked");
        }
    }

    #[test]
    fn test_alloc_value_with_offset_insufficient_space() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 64).unwrap();

        // Fill most of the 64-byte chunk...
        let _ = arena.alloc_value_with_offset([0u8; 48]).unwrap();

        // ...so the next allocation cannot fit and must fail.
        let result = arena.alloc_value_with_offset([0u8; 32]);
        assert!(result.is_err());
    }

    #[test]
    fn test_multi_type_interleaved() {
        #[derive(Debug, Clone, PartialEq)]
        #[repr(C)]
        struct Record {
            id: u64,
            flags: u32,
            weight: f32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (off_u8, _) = arena.alloc_value_with_offset(0xAAu8).unwrap();
        let (off_u32, _) = arena.alloc_value_with_offset(0xBBBBu32).unwrap();
        let (off_u64, _) = arena.alloc_value_with_offset(0xCCCCCCCCu64).unwrap();
        let (off_rec, _) = arena
            .alloc_value_with_offset(Record {
                id: 42,
                flags: 0xFF,
                weight: std::f32::consts::PI,
            })
            .unwrap();

        unsafe {
            assert_eq!(*arena.read_at::<u8>(off_u8), 0xAA);
            assert_eq!(*arena.read_at::<u32>(off_u32), 0xBBBB);
            assert_eq!(*arena.read_at::<u64>(off_u64), 0xCCCCCCCC);

            let rec: &Record = arena.read_at(off_rec);
            assert_eq!(rec.id, 42);
            assert_eq!(rec.flags, 0xFF);
            assert!((rec.weight - std::f32::consts::PI).abs() < 0.001);
        }
    }
}