1#![allow(unsafe_code)]
12
13use std::alloc::{Layout, alloc, dealloc};
14use std::fmt;
15use std::ptr::NonNull;
16use std::sync::atomic::{AtomicUsize, Ordering};
17
18use parking_lot::RwLock;
19
20use crate::types::EpochId;
21
/// Default size of each arena chunk: 1 MiB.
const DEFAULT_CHUNK_SIZE: usize = 1024 * 1024;
24
/// Errors that can occur during arena allocation.
#[derive(Debug, Clone)]
pub enum AllocError {
    /// The system allocator failed to provide memory (or the layout was invalid).
    OutOfMemory,
    /// No arena is registered for the requested epoch.
    EpochNotFound(EpochId),
    /// A fixed-size chunk does not have enough room for the requested allocation.
    InsufficientSpace,
}
35
36impl fmt::Display for AllocError {
37 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
38 match self {
39 Self::OutOfMemory => write!(f, "arena allocation failed: out of memory"),
40 Self::EpochNotFound(id) => write!(f, "epoch {id} not found in arena allocator"),
41 Self::InsufficientSpace => {
42 write!(f, "arena chunk has insufficient space for allocation")
43 }
44 }
45 }
46}
47
// `AllocError` carries no underlying source error, so the default trait methods suffice.
impl std::error::Error for AllocError {}
49
impl From<AllocError> for crate::Error {
    /// Maps allocator errors onto the crate-wide error type.
    ///
    /// Capacity failures become a storage-full error; a missing epoch is
    /// reported as an internal invariant violation.
    fn from(e: AllocError) -> Self {
        match e {
            // Both variants mean "no room left", so they share the storage error.
            AllocError::OutOfMemory | AllocError::InsufficientSpace => {
                crate::Error::Storage(crate::utils::error::StorageError::Full)
            }
            AllocError::EpochNotFound(id) => {
                crate::Error::Internal(format!("epoch {id} not found in arena allocator"))
            }
        }
    }
}
62
/// A single contiguous bump-allocated region of memory.
///
/// Allocation only ever moves `offset` forward; individual allocations are
/// never freed. The whole region is released when the chunk is dropped.
struct Chunk {
    /// Base pointer of the region returned by the global allocator.
    ptr: NonNull<u8>,
    /// Total size of the region in bytes.
    capacity: usize,
    /// Bump pointer: number of bytes handed out so far (advanced lock-free via CAS).
    offset: AtomicUsize,
}
72
73impl Chunk {
74 fn new(capacity: usize) -> Result<Self, AllocError> {
80 let layout = Layout::from_size_align(capacity, 16).map_err(|_| AllocError::OutOfMemory)?;
81 let ptr = unsafe { alloc(layout) };
83 let ptr = NonNull::new(ptr).ok_or(AllocError::OutOfMemory)?;
84
85 Ok(Self {
86 ptr,
87 capacity,
88 offset: AtomicUsize::new(0),
89 })
90 }
91
92 fn try_alloc(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
95 self.try_alloc_with_offset(size, align).map(|(_, ptr)| ptr)
96 }
97
98 fn try_alloc_with_offset(&self, size: usize, align: usize) -> Option<(u32, NonNull<u8>)> {
102 loop {
103 let current = self.offset.load(Ordering::Relaxed);
104
105 let aligned = (current + align - 1) & !(align - 1);
107 let new_offset = aligned + size;
108
109 if new_offset > self.capacity {
110 return None;
111 }
112
113 match self.offset.compare_exchange_weak(
115 current,
116 new_offset,
117 Ordering::AcqRel,
118 Ordering::Relaxed,
119 ) {
120 Ok(_) => {
121 let ptr = unsafe { self.ptr.as_ptr().add(aligned) };
123 return Some((aligned as u32, NonNull::new(ptr)?));
124 }
125 Err(_) => continue, }
127 }
128 }
129
130 fn used(&self) -> usize {
132 self.offset.load(Ordering::Relaxed)
133 }
134}
135
impl Drop for Chunk {
    fn drop(&mut self) {
        // Must mirror the layout used in `Chunk::new` (alignment 16).
        let layout = Layout::from_size_align(self.capacity, 16).expect("Invalid layout");
        // SAFETY: `ptr` was allocated in `Chunk::new` with this exact layout
        // and is freed exactly once, here.
        unsafe { dealloc(self.ptr.as_ptr(), layout) };
    }
}
143
// SAFETY: `Chunk` exclusively owns its heap region; the raw pointer is never
// aliased outside the arena, and the bump offset is an atomic, so moving a
// chunk to, or sharing it between, threads is sound.
unsafe impl Send for Chunk {}
unsafe impl Sync for Chunk {}
147
/// An epoch-scoped bump allocator backed by a growable list of chunks.
///
/// Allocation is lock-free within a chunk; a new chunk is appended (under a
/// write lock) when existing chunks are exhausted. Memory is only reclaimed
/// when the whole arena is dropped.
pub struct Arena {
    /// Epoch this arena belongs to.
    epoch: EpochId,
    /// Chunks in creation order; allocation tries the newest first.
    chunks: RwLock<Vec<Chunk>>,
    /// Minimum size of each newly created chunk.
    chunk_size: usize,
    /// Total bytes reserved from the system allocator across all chunks.
    total_allocated: AtomicUsize,
}
165
impl Arena {
    /// Creates an arena for `epoch` using the default chunk size.
    ///
    /// # Errors
    ///
    /// Returns an error if the initial chunk cannot be allocated.
    pub fn new(epoch: EpochId) -> Result<Self, AllocError> {
        Self::with_chunk_size(epoch, DEFAULT_CHUNK_SIZE)
    }

    /// Creates an arena for `epoch` whose chunks are `chunk_size` bytes.
    ///
    /// The first chunk is allocated eagerly, so `total_allocated` starts at
    /// `chunk_size`.
    ///
    /// # Errors
    ///
    /// Returns an error if the initial chunk cannot be allocated.
    pub fn with_chunk_size(epoch: EpochId, chunk_size: usize) -> Result<Self, AllocError> {
        let initial_chunk = Chunk::new(chunk_size)?;
        Ok(Self {
            epoch,
            chunks: RwLock::new(vec![initial_chunk]),
            chunk_size,
            total_allocated: AtomicUsize::new(chunk_size),
        })
    }

    /// Returns the epoch this arena belongs to.
    #[must_use]
    pub fn epoch(&self) -> EpochId {
        self.epoch
    }

    /// Allocates `size` bytes with the given alignment.
    ///
    /// Fast path: try existing chunks (newest first) under the read lock.
    /// Slow path: allocate a fresh chunk under the write lock.
    ///
    /// # Errors
    ///
    /// Returns an error if a new chunk cannot be allocated.
    pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        {
            let chunks = self.chunks.read();
            // Newest chunks are the most likely to still have free space.
            for chunk in chunks.iter().rev() {
                if let Some(ptr) = chunk.try_alloc(size, align) {
                    return Ok(ptr);
                }
            }
        }

        // Read guard dropped above; the slow path needs the write lock.
        self.alloc_new_chunk(size, align)
    }

    /// Moves `value` into the arena and returns a mutable reference to it.
    ///
    /// NOTE(review): `T`'s destructor is never run — the arena only frees raw
    /// bytes on drop. Presumably callers only store `Copy`-ish/POD data here;
    /// confirm before storing types that own resources.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying allocation fails.
    pub fn alloc_value<T>(&self, value: T) -> Result<&mut T, AllocError> {
        let ptr = self.alloc(std::mem::size_of::<T>(), std::mem::align_of::<T>())?;
        Ok(unsafe {
            // SAFETY: `ptr` was just allocated with T's size and alignment
            // and is exclusively ours until returned.
            let typed_ptr = ptr.as_ptr() as *mut T;
            typed_ptr.write(value);
            &mut *typed_ptr
        })
    }

    /// Copies `values` into the arena and returns a mutable slice over the copy.
    ///
    /// `T: Copy` guarantees a plain byte copy is sufficient (no drop glue).
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying allocation fails.
    pub fn alloc_slice<T: Copy>(&self, values: &[T]) -> Result<&mut [T], AllocError> {
        // An empty slice needs no backing memory.
        if values.is_empty() {
            return Ok(&mut []);
        }

        let size = std::mem::size_of::<T>() * values.len();
        let align = std::mem::align_of::<T>();
        let ptr = self.alloc(size, align)?;

        Ok(unsafe {
            // SAFETY: destination was just allocated with room for
            // `values.len()` elements of `T` at the right alignment, and it
            // cannot overlap the caller-provided source.
            let typed_ptr = ptr.as_ptr() as *mut T;
            std::ptr::copy_nonoverlapping(values.as_ptr(), typed_ptr, values.len());
            std::slice::from_raw_parts_mut(typed_ptr, values.len())
        })
    }

    /// Moves `value` into the arena and returns its stable byte offset plus a
    /// mutable reference.
    ///
    /// Only the *first* chunk is used so that offsets remain valid addresses
    /// into a single region; once it fills up this returns
    /// [`AllocError::InsufficientSpace`] rather than spilling to a new chunk.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError::InsufficientSpace`] if the first chunk is full.
    #[cfg(feature = "tiered-storage")]
    pub fn alloc_value_with_offset<T>(&self, value: T) -> Result<(u32, &mut T), AllocError> {
        let size = std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();

        let chunks = self.chunks.read();
        // Invariant: the constructor always creates one chunk.
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        let (offset, ptr) = chunk
            .try_alloc_with_offset(size, align)
            .ok_or(AllocError::InsufficientSpace)?;

        Ok(unsafe {
            // SAFETY: `ptr` was just allocated with T's size and alignment.
            let typed_ptr = ptr.as_ptr().cast::<T>();
            typed_ptr.write(value);
            (offset, &mut *typed_ptr)
        })
    }

    /// Reads a `T` previously written at `offset` in the first chunk.
    ///
    /// # Safety
    ///
    /// `offset` must have been returned by [`Self::alloc_value_with_offset`]
    /// for a value of type `T` that is still live. Bounds and alignment are
    /// only checked via `debug_assert!` (debug builds).
    #[cfg(feature = "tiered-storage")]
    pub unsafe fn read_at<T>(&self, offset: u32) -> &T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        // Debug-only guard: the value must lie entirely within initialized bytes.
        debug_assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.used(),
            "read_at: offset {} + size_of::<{}>() = {} exceeds chunk used bytes {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.used()
        );
        // Debug-only guard: misaligned reads would be UB.
        debug_assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        unsafe {
            // SAFETY: per the caller contract, `offset` points at a live,
            // aligned `T` inside the first chunk.
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &*ptr
        }
    }

    /// Mutable variant of [`Self::read_at`].
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::read_at`]. Additionally, this takes `&self`
    /// but returns `&mut T`, so the caller must guarantee no other reference
    /// (shared or mutable) to the same offset exists for the returned
    /// borrow's lifetime — otherwise this is aliasing UB.
    #[cfg(feature = "tiered-storage")]
    pub unsafe fn read_at_mut<T>(&self, offset: u32) -> &mut T {
        let chunks = self.chunks.read();
        let chunk = chunks
            .first()
            .expect("Arena should have at least one chunk");

        // NOTE(review): this checks `capacity`, not `used()` like `read_at`
        // does — presumably to allow writing slots reserved but not yet
        // counted; confirm the asymmetry is intentional.
        debug_assert!(
            (offset as usize) + std::mem::size_of::<T>() <= chunk.capacity,
            "read_at_mut: offset {} + size_of::<{}>() = {} exceeds chunk capacity {}",
            offset,
            std::any::type_name::<T>(),
            (offset as usize) + std::mem::size_of::<T>(),
            chunk.capacity
        );
        debug_assert!(
            (offset as usize).is_multiple_of(std::mem::align_of::<T>()),
            "read_at_mut: offset {} is not aligned for {} (alignment {})",
            offset,
            std::any::type_name::<T>(),
            std::mem::align_of::<T>()
        );

        unsafe {
            // SAFETY: per the caller contract, `offset` points at a live,
            // aligned `T` with no other outstanding references.
            let ptr = chunk.ptr.as_ptr().add(offset as usize).cast::<T>();
            &mut *ptr
        }
    }

    /// Slow path of [`Self::alloc`]: create a chunk big enough for the
    /// request, serve the allocation from it, then publish the chunk.
    ///
    /// Two threads may race here and each append a chunk; that wastes a
    /// little memory but is benign.
    fn alloc_new_chunk(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
        // `size + align` leaves headroom for alignment padding at offset 0.
        let chunk_size = self.chunk_size.max(size + align);
        let chunk = Chunk::new(chunk_size)?;

        self.total_allocated
            .fetch_add(chunk_size, Ordering::Relaxed);

        // Cannot fail: the chunk was sized to fit this very request.
        let ptr = chunk
            .try_alloc(size, align)
            .expect("fresh chunk sized to fit");

        let mut chunks = self.chunks.write();
        chunks.push(chunk);

        Ok(ptr)
    }

    /// Total bytes reserved from the system allocator (capacity, not usage).
    #[must_use]
    pub fn total_allocated(&self) -> usize {
        self.total_allocated.load(Ordering::Relaxed)
    }

    /// Total bytes handed out across all chunks (racy snapshot).
    #[must_use]
    pub fn total_used(&self) -> usize {
        let chunks = self.chunks.read();
        chunks.iter().map(Chunk::used).sum()
    }

    /// Returns a point-in-time snapshot of this arena's usage statistics.
    #[must_use]
    pub fn stats(&self) -> ArenaStats {
        let chunks = self.chunks.read();
        ArenaStats {
            epoch: self.epoch,
            chunk_count: chunks.len(),
            total_allocated: self.total_allocated.load(Ordering::Relaxed),
            total_used: chunks.iter().map(Chunk::used).sum(),
        }
    }
}
406
/// Point-in-time usage statistics for a single [`Arena`].
#[derive(Debug, Clone)]
pub struct ArenaStats {
    /// Epoch the arena belongs to.
    pub epoch: EpochId,
    /// Number of chunks currently backing the arena.
    pub chunk_count: usize,
    /// Bytes reserved from the system allocator.
    pub total_allocated: usize,
    /// Bytes actually handed out to callers.
    pub total_used: usize,
}
419
/// Manages one [`Arena`] per epoch, allowing whole epochs of allocations to
/// be dropped at once.
pub struct ArenaAllocator {
    /// One arena per live epoch.
    arenas: RwLock<hashbrown::HashMap<EpochId, Arena>>,
    /// Id of the latest epoch (stored as usize for atomic access).
    current_epoch: AtomicUsize,
    /// Chunk size handed to every new arena.
    chunk_size: usize,
}
432
433impl ArenaAllocator {
434 pub fn new() -> Result<Self, AllocError> {
440 Self::with_chunk_size(DEFAULT_CHUNK_SIZE)
441 }
442
443 pub fn with_chunk_size(chunk_size: usize) -> Result<Self, AllocError> {
449 let allocator = Self {
450 arenas: RwLock::new(hashbrown::HashMap::new()),
451 current_epoch: AtomicUsize::new(0),
452 chunk_size,
453 };
454
455 let epoch = EpochId::INITIAL;
457 allocator
458 .arenas
459 .write()
460 .insert(epoch, Arena::with_chunk_size(epoch, chunk_size)?);
461
462 Ok(allocator)
463 }
464
465 #[must_use]
467 pub fn current_epoch(&self) -> EpochId {
468 EpochId::new(self.current_epoch.load(Ordering::Acquire) as u64)
469 }
470
471 pub fn new_epoch(&self) -> Result<EpochId, AllocError> {
477 let new_id = self.current_epoch.fetch_add(1, Ordering::AcqRel) as u64 + 1;
478 let epoch = EpochId::new(new_id);
479
480 let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
481 self.arenas.write().insert(epoch, arena);
482
483 Ok(epoch)
484 }
485
486 pub fn arena(
492 &self,
493 epoch: EpochId,
494 ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
495 let arenas = self.arenas.read();
496 if !arenas.contains_key(&epoch) {
497 return Err(AllocError::EpochNotFound(epoch));
498 }
499 Ok(parking_lot::RwLockReadGuard::map(arenas, |arenas| {
500 &arenas[&epoch]
501 }))
502 }
503
504 #[cfg(feature = "tiered-storage")]
511 pub fn ensure_epoch(&self, epoch: EpochId) -> Result<bool, AllocError> {
512 {
514 let arenas = self.arenas.read();
515 if arenas.contains_key(&epoch) {
516 return Ok(false);
517 }
518 }
519
520 let mut arenas = self.arenas.write();
522 if arenas.contains_key(&epoch) {
524 return Ok(false);
525 }
526
527 let arena = Arena::with_chunk_size(epoch, self.chunk_size)?;
528 arenas.insert(epoch, arena);
529 Ok(true)
530 }
531
532 #[cfg(feature = "tiered-storage")]
538 pub fn arena_or_create(
539 &self,
540 epoch: EpochId,
541 ) -> Result<impl std::ops::Deref<Target = Arena> + '_, AllocError> {
542 self.ensure_epoch(epoch)?;
543 self.arena(epoch)
544 }
545
546 pub fn alloc(&self, size: usize, align: usize) -> Result<NonNull<u8>, AllocError> {
552 let epoch = self.current_epoch();
553 let arenas = self.arenas.read();
554 arenas
555 .get(&epoch)
556 .expect("current epoch always exists")
557 .alloc(size, align)
558 }
559
560 pub fn drop_epoch(&self, epoch: EpochId) {
564 self.arenas.write().remove(&epoch);
565 }
566
567 #[must_use]
569 pub fn total_allocated(&self) -> usize {
570 self.arenas
571 .read()
572 .values()
573 .map(Arena::total_allocated)
574 .sum()
575 }
576}
577
impl Default for ArenaAllocator {
    fn default() -> Self {
        // Panics only if the initial chunk cannot be allocated, which is
        // treated as an unrecoverable startup failure.
        Self::new().expect("failed to allocate default arena")
    }
}
588
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arena_basic_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        // Two bump allocations must land at distinct addresses.
        let first = arena.alloc(100, 8).unwrap();
        let second = arena.alloc(200, 8).unwrap();
        assert_ne!(first.as_ptr(), second.as_ptr());
    }

    #[test]
    fn test_arena_value_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let slot = arena.alloc_value(42u64).unwrap();
        assert_eq!(*slot, 42);

        // The returned reference must be writable.
        *slot = 100;
        assert_eq!(*slot, 100);
    }

    #[test]
    fn test_arena_slice_allocation() {
        let arena = Arena::new(EpochId::INITIAL).unwrap();

        let data = arena.alloc_slice(&[1u32, 2, 3, 4, 5]).unwrap();
        assert_eq!(data, &[1, 2, 3, 4, 5]);

        // The copy is independent and mutable.
        data[0] = 10;
        assert_eq!(data[0], 10);
    }

    #[test]
    fn test_arena_large_allocation() {
        // A request larger than the chunk size must force a second chunk.
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 1024).unwrap();
        let _ptr = arena.alloc(2048, 8).unwrap();
        assert!(arena.stats().chunk_count >= 2);
    }

    #[test]
    fn test_arena_allocator_epochs() {
        let allocator = ArenaAllocator::new().unwrap();

        // Starts at the initial epoch, then increments monotonically.
        assert_eq!(allocator.current_epoch(), EpochId::INITIAL);

        let first = allocator.new_epoch().unwrap();
        assert_eq!(first, EpochId::new(1));

        let second = allocator.new_epoch().unwrap();
        assert_eq!(second, EpochId::new(2));

        assert_eq!(allocator.current_epoch(), second);
    }

    #[test]
    fn test_arena_allocator_allocation() {
        let allocator = ArenaAllocator::new().unwrap();

        let a = allocator.alloc(100, 8).unwrap();
        let b = allocator.alloc(100, 8).unwrap();
        assert_ne!(a.as_ptr(), b.as_ptr());
    }

    #[test]
    fn test_arena_drop_epoch() {
        let allocator = ArenaAllocator::new().unwrap();
        let baseline = allocator.total_allocated();

        // Allocate in a fresh epoch so its memory can be dropped wholesale.
        let epoch = allocator.new_epoch().unwrap();
        {
            let arena = allocator.arena(epoch).unwrap();
            arena.alloc(10000, 8).unwrap();
        }

        let grown = allocator.total_allocated();
        assert!(grown > baseline);

        allocator.drop_epoch(epoch);
        assert!(allocator.total_allocated() < grown);
    }

    #[test]
    fn test_arena_stats() {
        let arena = Arena::with_chunk_size(EpochId::new(5), 4096).unwrap();

        // Fresh arena: one empty chunk fully reserved, nothing used.
        let before = arena.stats();
        assert_eq!(before.epoch, EpochId::new(5));
        assert_eq!(before.chunk_count, 1);
        assert_eq!(before.total_allocated, 4096);
        assert_eq!(before.total_used, 0);

        arena.alloc(100, 8).unwrap();
        assert!(arena.stats().total_used >= 100);
    }
}
704
#[cfg(all(test, feature = "tiered-storage"))]
mod tiered_storage_tests {
    use super::*;

    #[test]
    fn test_alloc_value_with_offset_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset1, val1) = arena.alloc_value_with_offset(42u64).unwrap();
        let (offset2, val2) = arena.alloc_value_with_offset(100u64).unwrap();

        // The first allocation starts at the beginning of the chunk, and
        // offsets advance by at least the value's size.
        assert_eq!(offset1, 0);
        assert!(offset2 > offset1);
        assert!(offset2 >= std::mem::size_of::<u64>() as u32);

        assert_eq!(*val1, 42);
        assert_eq!(*val2, 100);

        // Returned references are writable.
        *val1 = 999;
        assert_eq!(*val1, 999);
    }

    #[test]
    fn test_read_at_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(12345u64).unwrap();

        // A stored value can be recovered through its offset alone.
        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 12345);
    }

    #[test]
    fn test_read_at_mut_basic() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        // Mutation through the offset is visible to later reads.
        let value: &mut u64 = unsafe { arena.read_at_mut(offset) };
        assert_eq!(*value, 42);
        *value = 100;

        let value: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*value, 100);
    }

    #[test]
    fn test_alloc_value_with_offset_struct() {
        // A multi-field struct round-trips through offset-based storage.
        #[derive(Debug, Clone, PartialEq)]
        struct TestNode {
            id: u64,
            name: [u8; 32],
            value: i32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let node = TestNode {
            id: 12345,
            name: [b'A'; 32],
            value: -999,
        };

        let (offset, stored) = arena.alloc_value_with_offset(node.clone()).unwrap();
        assert_eq!(stored.id, 12345);
        assert_eq!(stored.value, -999);

        let read: &TestNode = unsafe { arena.read_at(offset) };
        assert_eq!(read.id, node.id);
        assert_eq!(read.name, node.name);
        assert_eq!(read.value, node.value);
    }

    #[test]
    fn test_alloc_value_with_offset_alignment() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        // A 1-byte value lands at offset 0...
        let (offset1, _) = arena.alloc_value_with_offset(1u8).unwrap();
        assert_eq!(offset1, 0);

        // ...and the following u64 must be padded up to 8-byte alignment.
        let (offset2, val) = arena.alloc_value_with_offset(42u64).unwrap();

        assert_eq!(offset2 % 8, 0);
        assert_eq!(*val, 42);
    }

    #[test]
    fn test_alloc_value_with_offset_multiple() {
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let mut offsets = Vec::new();
        for i in 0..100u64 {
            let (offset, val) = arena.alloc_value_with_offset(i).unwrap();
            offsets.push(offset);
            assert_eq!(*val, i);
        }

        // Offsets are strictly increasing (bump allocation never reuses space).
        for window in offsets.windows(2) {
            assert!(window[0] < window[1]);
        }

        // Every stored value is recoverable through its offset.
        for (i, offset) in offsets.iter().enumerate() {
            let val: &u64 = unsafe { arena.read_at(*offset) };
            assert_eq!(*val, i as u64);
        }
    }

    #[test]
    fn test_arena_allocator_with_offset() {
        // The offset API also works through the allocator's arena guard.
        let allocator = ArenaAllocator::with_chunk_size(4096).unwrap();

        let epoch = allocator.current_epoch();
        let arena = allocator.arena(epoch).unwrap();

        let (offset, val) = arena.alloc_value_with_offset(42u64).unwrap();
        assert_eq!(*val, 42);

        let read: &u64 = unsafe { arena.read_at(offset) };
        assert_eq!(*read, 42);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "exceeds chunk used bytes")]
    fn test_read_at_out_of_bounds() {
        // Reading past the used region must trip `read_at`'s debug bounds
        // check (the expected string matches the debug_assert message).
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(42u64).unwrap();

        unsafe {
            let _: &u64 = arena.read_at(4000);
        }
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "is not aligned")]
    fn test_read_at_misaligned() {
        // Offset 1 is inside used bytes but misaligned for u64, so the
        // alignment debug_assert must fire.
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();
        let (_offset, _) = arena.alloc_value_with_offset(0xFFu8).unwrap();
        let _ = arena.alloc_value_with_offset(0u64).unwrap();

        unsafe {
            let _: &u64 = arena.read_at(1);
        }
    }

    #[test]
    // Skipped under miri: spawns real threads over a large arena.
    #[cfg(not(miri))]
    fn test_concurrent_read_stress() {
        use std::sync::Arc;

        let arena = Arc::new(Arena::with_chunk_size(EpochId::INITIAL, 1024 * 1024).unwrap());
        let num_threads = 8;
        let values_per_thread = 1000;

        // Populate single-threaded so each thread owns a known value range.
        let mut all_offsets = Vec::new();
        for t in 0..num_threads {
            let base = (t * values_per_thread) as u64;
            let mut offsets = Vec::with_capacity(values_per_thread);
            for i in 0..values_per_thread as u64 {
                let (offset, _) = arena.alloc_value_with_offset(base + i).unwrap();
                offsets.push(offset);
            }
            all_offsets.push(offsets);
        }

        // Concurrent readers must all observe their expected values.
        let mut handles = Vec::new();
        for (t, offsets) in all_offsets.into_iter().enumerate() {
            let arena = Arc::clone(&arena);
            let base = (t * values_per_thread) as u64;
            handles.push(std::thread::spawn(move || {
                for (i, offset) in offsets.iter().enumerate() {
                    let val: &u64 = unsafe { arena.read_at(*offset) };
                    assert_eq!(*val, base + i as u64);
                }
            }));
        }

        for handle in handles {
            handle.join().expect("Thread panicked");
        }
    }

    #[test]
    fn test_alloc_value_with_offset_insufficient_space() {
        // The offset API is pinned to the first chunk, so overflowing it
        // must error rather than spill into a new chunk.
        let arena = Arena::with_chunk_size(EpochId::INITIAL, 64).unwrap();

        let _ = arena.alloc_value_with_offset([0u8; 48]).unwrap();

        let result = arena.alloc_value_with_offset([0u8; 32]);
        assert!(result.is_err());
    }

    #[test]
    fn test_multi_type_interleaved() {
        // Values of different sizes/alignments interleaved in one chunk must
        // all remain readable at their recorded offsets.
        #[derive(Debug, Clone, PartialEq)]
        #[repr(C)]
        struct Record {
            id: u64,
            flags: u32,
            weight: f32,
        }

        let arena = Arena::with_chunk_size(EpochId::INITIAL, 4096).unwrap();

        let (off_u8, _) = arena.alloc_value_with_offset(0xAAu8).unwrap();
        let (off_u32, _) = arena.alloc_value_with_offset(0xBBBBu32).unwrap();
        let (off_u64, _) = arena.alloc_value_with_offset(0xCCCCCCCCu64).unwrap();
        let (off_rec, _) = arena
            .alloc_value_with_offset(Record {
                id: 42,
                flags: 0xFF,
                weight: std::f32::consts::PI,
            })
            .unwrap();

        unsafe {
            assert_eq!(*arena.read_at::<u8>(off_u8), 0xAA);
            assert_eq!(*arena.read_at::<u32>(off_u32), 0xBBBB);
            assert_eq!(*arena.read_at::<u64>(off_u64), 0xCCCCCCCC);

            let rec: &Record = arena.read_at(off_rec);
            assert_eq!(rec.id, 42);
            assert_eq!(rec.flags, 0xFF);
            assert!((rec.weight - std::f32::consts::PI).abs() < 0.001);
        }
    }
}