//! Implementation details for behavior specialization marker structs.
//!
//! End-users aren't meant to interact with traits defined in this module
//! directly; they exist solely to simplify the implementation of
//! [`ContiguousMemoryStorage`](crate::ContiguousMemoryStorage) by erasing
//! type details of different implementations.
//!
//! Changes to these traits aren't considered breaking changes and won't
//! be reflected in version numbers.

use core::{
    alloc::{Layout, LayoutError},
    cell::{Cell, RefCell, RefMut},
    marker::PhantomData,
    mem::size_of,
    ptr::null_mut,
};

#[cfg(feature = "no_std")]
use portable_atomic::{AtomicUsize, Ordering};
#[cfg(not(feature = "no_std"))]
use std::sync::atomic::{AtomicUsize, Ordering};

use crate::{
    error::{ContiguousMemoryError, LockSource, LockingError},
    range::ByteRange,
    refs::{sealed::*, ContiguousEntryRef, SyncContiguousEntryRef},
    tracker::AllocationTracker,
    types::*,
    BaseLocation, ContiguousMemoryState,
};

/// Implementation details shared between [storage](StorageDetails) and
/// [`reference`](ReferenceDetails) implementations.
pub trait ImplBase: Sized {
    /// The type representing a reference to the internal state.
    type StorageState: Clone;

    /// The type of reference returned by store operations.
    type ReferenceType<T: ?Sized>: Clone;

    /// The type representing the result of accessing data that is locked in
    /// an async context.
    type LockResult<T>;

    /// The type of the guard used to access the allocation tracker.
    type ATGuard<'a>;

    /// Indicates whether locks are used for synchronization, allowing the
    /// compiler to easily optimize away branches involving them.
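    ///
    /// A minimal, self-contained sketch of the idea (the names below are
    /// illustrative and not part of this crate):
    ///
    /// ```
    /// trait SyncPolicy {
    ///     const USES_LOCKS: bool;
    /// }
    /// struct NoLocks;
    /// impl SyncPolicy for NoLocks {
    ///     const USES_LOCKS: bool = false;
    /// }
    ///
    /// fn describe<P: SyncPolicy>() -> &'static str {
    ///     // `USES_LOCKS` is a constant, so this branch is resolved during
    ///     // monomorphization and the dead arm is compiled away.
    ///     if P::USES_LOCKS { "locking" } else { "lock-free" }
    /// }
    ///
    /// assert_eq!(describe::<NoLocks>(), "lock-free");
    /// ```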
    const USES_LOCKS: bool = false;
}

/// Implementation that's not thread-safe but performs better because it
/// avoids mutexes and locks.
///
/// For example usage of this implementation see
/// [`ContiguousMemory`](crate::ContiguousMemory).
#[cfg_attr(feature = "debug", derive(Debug))]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ImplDefault;
impl ImplBase for ImplDefault {
    type StorageState = Rc<ContiguousMemoryState<Self>>;
    type ReferenceType<T: ?Sized> = ContiguousEntryRef<T>;
    type LockResult<T> = T;
    type ATGuard<'a> = RefMut<'a, AllocationTracker>;
}

/// Thread-safe implementation utilizing mutexes and locks to prevent data
/// races.
///
/// For example usage of this implementation see
/// [`SyncContiguousMemory`](crate::SyncContiguousMemory).
#[cfg_attr(feature = "debug", derive(Debug))]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ImplConcurrent;
impl ImplBase for ImplConcurrent {
    type StorageState = Arc<ContiguousMemoryState<Self>>;
    type ReferenceType<T: ?Sized> = SyncContiguousEntryRef<T>;
    type LockResult<T> = Result<T, LockingError>;
    type ATGuard<'a> = MutexGuard<'a, AllocationTracker>;

    const USES_LOCKS: bool = true;
}

/// Implementation which provides direct (unsafe) access to stored entries.
///
/// For example usage of this implementation see
/// [`UnsafeContiguousMemory`](crate::UnsafeContiguousMemory).
#[cfg_attr(feature = "debug", derive(Debug))]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ImplUnsafe;
impl ImplBase for ImplUnsafe {
    type StorageState = ContiguousMemoryState<Self>;
    type ReferenceType<T: ?Sized> = *mut T;
    type LockResult<T> = T;
    type ATGuard<'a> = &'a mut AllocationTracker;
}

/// Implementation details of
/// [`ContiguousMemoryStorage`](crate::ContiguousMemoryStorage).
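///
/// A self-contained sketch of the pattern these traits rely on (illustrative
/// names, not part of this crate): each strategy picks concrete wrapper types
/// through associated types, so shared code stays generic over them.
///
/// ```
/// use std::cell::Cell;
/// use std::sync::atomic::{AtomicUsize, Ordering};
///
/// trait Storage {
///     type SizeType;
///     fn get_capacity(capacity: &Self::SizeType) -> usize;
/// }
///
/// struct SingleThreaded;
/// impl Storage for SingleThreaded {
///     type SizeType = Cell<usize>;
///     fn get_capacity(capacity: &Self::SizeType) -> usize {
///         capacity.get()
///     }
/// }
///
/// struct ThreadSafe;
/// impl Storage for ThreadSafe {
///     type SizeType = AtomicUsize;
///     fn get_capacity(capacity: &Self::SizeType) -> usize {
///         capacity.load(Ordering::Acquire)
///     }
/// }
///
/// assert_eq!(SingleThreaded::get_capacity(&Cell::new(8)), 8);
/// assert_eq!(ThreadSafe::get_capacity(&AtomicUsize::new(8)), 8);
/// ```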
pub trait StorageDetails: ImplBase {
    /// The type representing the base memory pointer.
    type Base;

    /// The concrete type of the allocation tracker.
    type AllocationTracker;

    /// The type representing the container capacity, with interior
    /// mutability where the implementation requires it.
    type SizeType;

    /// The type representing the result of storing data.
    type PushResult<T>;

    /// Builds a new internal state from the provided parameters.
    fn build_state(
        base: *mut u8,
        capacity: usize,
        alignment: usize,
    ) -> Result<Self::StorageState, LayoutError>;

    /// Dereferences the inner state smart pointer and returns it by reference.
    fn deref_state(state: &Self::StorageState) -> &ContiguousMemoryState<Self>;

    /// Retrieves the base pointer from the base instance.
    fn get_base(base: &Self::Base) -> Self::LockResult<*mut u8>;

    /// Retrieves the base pointer from the base instance. Non-blocking
    /// version.
    fn try_get_base(base: &Self::Base) -> Self::LockResult<*mut u8>;

    /// Retrieves the capacity from the state.
    fn get_capacity(capacity: &Self::SizeType) -> usize;

    /// Returns a writable reference to the [`AllocationTracker`].
    fn get_allocation_tracker<'a>(
        state: &'a mut Self::StorageState,
    ) -> Self::LockResult<Self::ATGuard<'a>>;

    /// Resizes and reallocates the base memory according to the new capacity.
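    ///
    /// Returns the new base pointer if the allocation moved, or `None` if it
    /// was resized in place.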
    fn resize_container(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<Option<*mut u8>, ContiguousMemoryError>;

    /// Deallocates the base memory using layout information.
    fn deallocate(base: &mut Self::Base, layout: Layout);

    /// Resizes the allocation tracker to the new capacity.
    fn resize_tracker(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<(), ContiguousMemoryError>;

    /// Shrinks the tracked area of the allocation tracker to the smallest
    /// size that can fit currently stored data.
    fn shrink_tracker(state: &mut Self::StorageState) -> Self::LockResult<Option<usize>>;

    /// Finds the next free memory region for the given layout in the tracker
    /// and marks it as taken.
    fn track_next(
        state: &mut Self::StorageState,
        layout: Layout,
    ) -> Result<ByteRange, ContiguousMemoryError>;

    /// Returns the next free memory region that can fit the given layout
    /// without marking it as taken, or an error if the [`AllocationTracker`]
    /// can't be accessed.
    fn peek_next(state: &Self::StorageState, layout: Layout)
        -> Self::LockResult<Option<ByteRange>>;
}

impl StorageDetails for ImplConcurrent {
    type Base = RwLock<*mut u8>;
    type AllocationTracker = Mutex<AllocationTracker>;
    type SizeType = AtomicUsize;
    type PushResult<T> = Result<Self::ReferenceType<T>, LockingError>;

    fn build_state(
        base: *mut u8,
        capacity: usize,
        alignment: usize,
    ) -> Result<Self::StorageState, LayoutError> {
        let layout = Layout::from_size_align(capacity, alignment)?;

        Ok(Arc::new(ContiguousMemoryState {
            base: BaseLocation(RwLock::new(base)),
            capacity: AtomicUsize::new(layout.size()),
            alignment: layout.align(),
            tracker: Mutex::new(AllocationTracker::new(capacity)),
        }))
    }

    #[inline]
    fn deref_state(state: &Self::StorageState) -> &ContiguousMemoryState<Self> {
        state
    }

    #[inline]
    fn get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        base.read_named(LockSource::BaseAddress)
            .map(|result| *result)
    }

    #[inline]
    fn try_get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        base.try_read_named(LockSource::BaseAddress)
            .map(|result| *result)
    }

    #[inline]
    fn get_capacity(capacity: &Self::SizeType) -> usize {
        capacity.load(Ordering::Acquire)
    }

    #[inline]
    fn get_allocation_tracker<'a>(
        state: &'a mut Self::StorageState,
    ) -> Self::LockResult<Self::ATGuard<'a>> {
        state.tracker.lock_named(LockSource::AllocationTracker)
    }

    fn resize_container(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<Option<*mut u8>, ContiguousMemoryError> {
        let layout =
            Layout::from_size_align(state.capacity.load(Ordering::Acquire), state.alignment)?;
        let mut base_addr = state.base.write_named(LockSource::BaseAddress)?;
        let prev_addr = *base_addr;
        *base_addr = unsafe { allocator::realloc(*base_addr, layout, new_capacity) };
        state.capacity.store(new_capacity, Ordering::Release);
        Ok(if *base_addr != prev_addr {
            Some(*base_addr)
        } else {
            None
        })
    }

    #[inline]
    fn deallocate(base: &mut Self::Base, layout: Layout) {
        if let Ok(mut lock) = base.write_named(LockSource::BaseAddress) {
            unsafe { allocator::dealloc(*lock, layout) };
            *lock = null_mut();
        }
    }

    #[inline]
    fn resize_tracker(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<(), ContiguousMemoryError> {
        let mut lock = state.tracker.lock_named(LockSource::AllocationTracker)?;
        lock.resize(new_capacity)?;
        Ok(())
    }

    #[inline]
    fn shrink_tracker(state: &mut Self::StorageState) -> Result<Option<usize>, LockingError> {
        let mut lock = state.tracker.lock_named(LockSource::AllocationTracker)?;
        Ok(lock.shrink_to_fit())
    }

    #[inline]
    fn track_next(
        state: &mut Self::StorageState,
        layout: Layout,
    ) -> Result<ByteRange, ContiguousMemoryError> {
        let base = Self::get_base(&state.base)? as usize;
        let mut lock = state.tracker.lock_named(LockSource::AllocationTracker)?;
        lock.take_next(base, layout)
    }

    #[inline]
    fn peek_next(
        state: &Self::StorageState,
        layout: Layout,
    ) -> Result<Option<ByteRange>, LockingError> {
        let lock = state.tracker.lock_named(LockSource::AllocationTracker)?;
        Ok(lock.peek_next(layout))
    }
}

impl StorageDetails for ImplDefault {
    type Base = Cell<*mut u8>;
    type AllocationTracker = RefCell<AllocationTracker>;
    type SizeType = Cell<usize>;
    type PushResult<T> = ContiguousEntryRef<T>;

    fn build_state(
        base: *mut u8,
        capacity: usize,
        alignment: usize,
    ) -> Result<Self::StorageState, LayoutError> {
        let layout: Layout = Layout::from_size_align(capacity, alignment)?;

        Ok(Rc::new(ContiguousMemoryState {
            base: BaseLocation(Cell::new(base)),
            capacity: Cell::new(layout.size()),
            alignment: layout.align(),
            tracker: RefCell::new(AllocationTracker::new(capacity)),
        }))
    }

    #[inline]
    fn deref_state(state: &Self::StorageState) -> &ContiguousMemoryState<Self> {
        state
    }

    #[inline]
    fn get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        base.get()
    }

    #[inline]
    fn try_get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        Self::get_base(base)
    }

    #[inline]
    fn get_capacity(capacity: &Self::SizeType) -> usize {
        capacity.get()
    }

    #[inline]
    fn get_allocation_tracker<'a>(
        state: &'a mut Self::StorageState,
    ) -> Self::LockResult<Self::ATGuard<'a>> {
        state.tracker.borrow_mut()
    }

    fn resize_container(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<Option<*mut u8>, ContiguousMemoryError> {
        let layout = Layout::from_size_align(state.capacity.get(), state.alignment)?;
        let prev_base = state.base.get();
        let new_base = unsafe { allocator::realloc(prev_base, layout, new_capacity) };
        state.base.set(new_base);
        state.capacity.set(new_capacity);
        Ok(if new_base != prev_base {
            Some(new_base)
        } else {
            None
        })
    }

    #[inline]
    fn deallocate(base: &mut Self::Base, layout: Layout) {
        unsafe { allocator::dealloc(base.get(), layout) };
        base.set(null_mut())
    }

    #[inline]
    fn resize_tracker(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<(), ContiguousMemoryError> {
        state.tracker.borrow_mut().resize(new_capacity)
    }

    #[inline]
    fn shrink_tracker(state: &mut Self::StorageState) -> Option<usize> {
        state.tracker.borrow_mut().shrink_to_fit()
    }

    #[inline]
    fn track_next(
        state: &mut Self::StorageState,
        layout: Layout,
    ) -> Result<ByteRange, ContiguousMemoryError> {
        let base = state.base.get() as usize;
        let mut tracker = state.tracker.borrow_mut();
        tracker.take_next(base, layout)
    }

    #[inline]
    fn peek_next(state: &Self::StorageState, layout: Layout) -> Option<ByteRange> {
        let tracker = state.tracker.borrow();
        tracker.peek_next(layout)
    }
}

impl StorageDetails for ImplUnsafe {
    type Base = *mut u8;
    type AllocationTracker = AllocationTracker;
    type SizeType = usize;
    type PushResult<T> = Result<*mut T, ContiguousMemoryError>;

    fn build_state(
        base: *mut u8,
        capacity: usize,
        alignment: usize,
    ) -> Result<Self::StorageState, LayoutError> {
        let layout = Layout::from_size_align(capacity, alignment)?;
        Ok(ContiguousMemoryState {
            base: BaseLocation(base),
            capacity: layout.size(),
            alignment: layout.align(),
            tracker: AllocationTracker::new(capacity),
        })
    }

    #[inline]
    fn deref_state(state: &Self::StorageState) -> &ContiguousMemoryState<Self> {
        state
    }

    #[inline]
    fn get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        *base
    }

    #[inline]
    fn try_get_base(base: &Self::Base) -> Self::LockResult<*mut u8> {
        Self::get_base(base)
    }

    #[inline]
    fn get_capacity(capacity: &Self::SizeType) -> usize {
        *capacity
    }

    #[inline]
    fn get_allocation_tracker<'a>(
        state: &'a mut Self::StorageState,
    ) -> Self::LockResult<Self::ATGuard<'a>> {
        &mut state.tracker
    }

    fn resize_container(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<Option<*mut u8>, ContiguousMemoryError> {
        let layout = Layout::from_size_align(state.capacity, state.alignment)?;
        let prev_base = *state.base;
        state.base = BaseLocation(unsafe { allocator::realloc(prev_base, layout, new_capacity) });
        state.capacity = new_capacity;
        Ok(if *state.base != prev_base {
            Some(*state.base)
        } else {
            None
        })
    }

    #[inline]
    fn deallocate(base: &mut Self::Base, layout: Layout) {
        unsafe {
            allocator::dealloc(*base, layout);
        }
        *base = null_mut();
    }

    #[inline]
    fn resize_tracker(
        state: &mut Self::StorageState,
        new_capacity: usize,
    ) -> Result<(), ContiguousMemoryError> {
        state.tracker.resize(new_capacity)
    }

    #[inline]
    fn shrink_tracker(state: &mut Self::StorageState) -> Option<usize> {
        state.tracker.shrink_to_fit()
    }

    #[inline]
    fn track_next(
        state: &mut Self::StorageState,
        layout: Layout,
    ) -> Result<ByteRange, ContiguousMemoryError> {
        let base = *state.base as usize;
        state.tracker.take_next(base, layout)
    }

    #[inline]
    fn peek_next(state: &Self::StorageState, layout: Layout) -> Option<ByteRange> {
        state.tracker.peek_next(layout)
    }
}

/// Implementation details of returned [reference types](crate::refs).
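///
/// The single-threaded implementation tracks borrows with a plain counter
/// behind a [`Cell`], similar in spirit to [`RefCell`]. A self-contained
/// sketch of that bookkeeping (illustrative names, not part of this crate):
///
/// ```
/// use std::cell::Cell;
///
/// #[derive(Clone, Copy, PartialEq, Debug)]
/// enum Borrow { Read(usize), Write }
///
/// let state = Cell::new(Borrow::Read(0));
/// state.set(Borrow::Read(1)); // a shared borrow is taken...
/// let next = match state.get() {
///     // ...and releasing it decrements the reader count,
///     Borrow::Read(n) => Borrow::Read(n - 1),
///     // while releasing an exclusive borrow resets to "no readers".
///     Borrow::Write => Borrow::Read(0),
/// };
/// state.set(next);
/// assert_eq!(state.get(), Borrow::Read(0));
/// ```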
pub trait ReferenceDetails: ImplBase {
    /// The type representing the internal state of the reference.
    type RefState<T: ?Sized>: Clone;

    /// The type handling concurrent mutable access exclusion.
    type BorrowLock;

    /// Type of the read guard for concurrent mutable access exclusion.
    type ReadGuard<'a>: DebugReq;
    /// Type of the write guard for concurrent mutable access exclusion.
    type WriteGuard<'a>: DebugReq;

    /// Releases the specified memory region back to the allocation tracker.
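    ///
    /// The returned pointer is the base address offset by the start of the
    /// released range. A self-contained sketch of that computation:
    ///
    /// ```
    /// let buffer = [0u8; 16];
    /// let base = buffer.as_ptr();
    /// let range_start = 4usize; // corresponds to `ByteRange.0`
    /// let addr = unsafe { base.add(range_start) };
    /// assert_eq!(addr as usize, base as usize + range_start);
    /// ```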
    fn free_region(
        tracker: Self::LockResult<Self::ATGuard<'_>>,
        base: Self::LockResult<*mut u8>,
        range: ByteRange,
    ) -> Option<*mut ()>;

    /// Builds a reference for the stored data.
    fn build_ref<T: StoreRequirements>(
        state: &Self::StorageState,
        addr: *mut T,
        range: ByteRange,
    ) -> Self::ReferenceType<T>;

    /// Marks reference state as no longer being borrowed.
    fn unborrow_ref<T: ?Sized>(_state: &Self::RefState<T>, _kind: BorrowKind) {}
}

impl ReferenceDetails for ImplConcurrent {
    type RefState<T: ?Sized> = Arc<ReferenceState<T, Self>>;
    type BorrowLock = RwLock<()>;
    type ReadGuard<'a> = RwLockReadGuard<'a, ()>;
    type WriteGuard<'a> = RwLockWriteGuard<'a, ()>;

    fn free_region(
        tracker: Self::LockResult<Self::ATGuard<'_>>,
        base: Self::LockResult<*mut u8>,
        range: ByteRange,
    ) -> Option<*mut ()> {
        if let Ok(mut lock) = tracker {
            let _ = lock.release(range);

            if let Ok(base) = base {
                unsafe { Some(base.add(range.0) as *mut ()) }
            } else {
                None
            }
        } else {
            None
        }
    }

    fn build_ref<T: StoreRequirements>(
        state: &Self::StorageState,
        _addr: *mut T,
        range: ByteRange,
    ) -> Self::ReferenceType<T> {
        SyncContiguousEntryRef {
            inner: Arc::new(ReferenceState {
                state: state.clone(),
                range,
                borrow_kind: RwLock::new(()),
                drop_fn: drop_fn::<T>(),
                _phantom: PhantomData,
            }),
            #[cfg(feature = "ptr_metadata")]
            metadata: (),
            #[cfg(not(feature = "ptr_metadata"))]
            _phantom: PhantomData,
        }
    }
}

impl ReferenceDetails for ImplDefault {
    type RefState<T: ?Sized> = Rc<ReferenceState<T, Self>>;
    type BorrowLock = Cell<BorrowState>;
    type ReadGuard<'a> = ();
    type WriteGuard<'a> = ();

    fn free_region(
        mut tracker: Self::LockResult<Self::ATGuard<'_>>,
        base: Self::LockResult<*mut u8>,
        range: ByteRange,
    ) -> Option<*mut ()> {
        let _ = tracker.release(range);
        unsafe { Some(base.add(range.0) as *mut ()) }
    }

    fn build_ref<T: StoreRequirements>(
        state: &Self::StorageState,
        _addr: *mut T,
        range: ByteRange,
    ) -> Self::ReferenceType<T> {
        ContiguousEntryRef {
            inner: Rc::new(ReferenceState {
                state: state.clone(),
                range,
                borrow_kind: Cell::new(BorrowState::Read(0)),
                drop_fn: drop_fn::<T>(),
                _phantom: PhantomData,
            }),
            #[cfg(feature = "ptr_metadata")]
            metadata: (),
            #[cfg(not(feature = "ptr_metadata"))]
            _phantom: PhantomData,
        }
    }

    fn unborrow_ref<T: ?Sized>(state: &Self::RefState<T>, _kind: BorrowKind) {
        let next = match state.borrow_kind.get() {
            BorrowState::Read(count) => BorrowState::Read(count - 1),
            BorrowState::Write => BorrowState::Read(0),
        };
        state.borrow_kind.set(next)
    }
}

impl ReferenceDetails for ImplUnsafe {
    type RefState<T: ?Sized> = ();
    type BorrowLock = ();
    type ReadGuard<'a> = ();
    type WriteGuard<'a> = ();

    fn free_region(
        tracker: Self::LockResult<Self::ATGuard<'_>>,
        base: Self::LockResult<*mut u8>,
        range: ByteRange,
    ) -> Option<*mut ()> {
        let _ = tracker.release(range);

        unsafe { Some(base.add(range.0) as *mut ()) }
    }

    fn build_ref<T>(
        _base: &Self::StorageState,
        addr: *mut T,
        _range: ByteRange,
    ) -> Self::ReferenceType<T> {
        addr
    }
}

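/// Implementation details for storing data in the container.
///
/// When no free region can fit the data, the growable implementations
/// increase the capacity to `max(2 * current, current + layout.size())`
/// before retrying (the unsafe implementation returns an error instead).
/// A worked sketch of that growth computation:
///
/// ```
/// fn grown_capacity(curr: usize, required: usize) -> usize {
///     curr.saturating_mul(2).max(curr + required)
/// }
/// // Doubling covers small requests...
/// assert_eq!(grown_capacity(64, 16), 128);
/// // ...while a single oversized request still fits after one grow step.
/// assert_eq!(grown_capacity(64, 200), 264);
/// ```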
pub trait StoreDataDetails: StorageDetails {
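    /// Stores `data` described by `layout` into the container, growing the
    /// underlying memory if required and supported by the implementation.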
    unsafe fn push_raw<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Self::PushResult<T>;

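    /// Like [`push_raw`](StoreDataDetails::push_raw), but leaks the returned
    /// reference's shared state so the entry is never dropped and lives for
    /// the remainder of the container's lifetime.
    ///
    /// A self-contained sketch of the leak-by-forget idiom the
    /// implementations use:
    ///
    /// ```
    /// use std::rc::Rc;
    ///
    /// let handle = Rc::new(5);
    /// let kept = handle.clone();
    /// // Forgetting one handle means the strong count can never reach zero,
    /// // so the shared state is never reclaimed.
    /// std::mem::forget(handle);
    /// assert_eq!(Rc::strong_count(&kept), 2);
    /// ```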
    unsafe fn push_raw_persisted<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Self::PushResult<T>;

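    /// Builds a reference for data assumed to already be stored at `position`
    /// without writing anything into the container.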
    fn assume_stored<T: StoreRequirements>(
        state: &Self::StorageState,
        position: usize,
    ) -> Self::LockResult<Self::ReferenceType<T>>;
}

impl StoreDataDetails for ImplConcurrent {
    unsafe fn push_raw<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Result<SyncContiguousEntryRef<T>, LockingError> {
        let (addr, range) = loop {
            match ImplConcurrent::track_next(state, layout) {
                Ok(taken) => {
                    let found = (taken.0
                        + *state.base.read_named(LockSource::BaseAddress)? as usize)
                        as *mut u8;
                    unsafe {
                        core::ptr::copy_nonoverlapping(data as *const u8, found, layout.size());
                    }
                    break (found, taken);
                }
                Err(ContiguousMemoryError::NoStorageLeft) => {
                    let curr_capacity = state.capacity.load(Ordering::Acquire);
                    let new_capacity = curr_capacity
                        .saturating_mul(2)
                        .max(curr_capacity + layout.size());
                    match ImplConcurrent::resize_container(state, new_capacity) {
                        Ok(_) => {
                            match ImplConcurrent::resize_tracker(state, new_capacity) {
                                Ok(_) => {}
                                Err(ContiguousMemoryError::Lock(locking_err)) => {
                                    return Err(locking_err)
                                }
                                Err(_) => unreachable!("unable to grow AllocationTracker"),
                            };
                        }
                        Err(ContiguousMemoryError::Lock(locking_err)) => return Err(locking_err),
                        Err(other) => unreachable!(
                            "reached unexpected error while growing the container to store data: {:?}",
                            other
                        ),
                    };
                }
                Err(ContiguousMemoryError::Lock(locking_err)) => return Err(locking_err),
                Err(other) => unreachable!(
                    "reached unexpected error while looking for next region to store data: {:?}",
                    other
                ),
            }
        };

        Ok(ImplConcurrent::build_ref(state, addr as *mut T, range))
    }

    #[inline(always)]
    unsafe fn push_raw_persisted<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Self::PushResult<T> {
        match Self::push_raw(state, data, layout) {
            Ok(it) => {
                let result = it.clone();
                core::mem::forget(it.inner);
                Ok(result)
            }
            err => err,
        }
    }

    #[inline(always)]
    fn assume_stored<T: StoreRequirements>(
        state: &Self::StorageState,
        position: usize,
    ) -> Result<SyncContiguousEntryRef<T>, LockingError> {
        let addr = unsafe {
            state
                .base
                .read_named(LockSource::BaseAddress)?
                .add(position)
        };
        Ok(ImplConcurrent::build_ref(
            state,
            addr as *mut T,
            ByteRange(position, position + size_of::<T>()),
        ))
    }
}

impl StoreDataDetails for ImplDefault {
    unsafe fn push_raw<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> ContiguousEntryRef<T> {
        let (addr, range) = loop {
            match ImplDefault::track_next(state, layout) {
                Ok(taken) => {
                    let found = (taken.0 + state.base.get() as usize) as *mut u8;
                    unsafe {
                        core::ptr::copy_nonoverlapping(data as *const u8, found, layout.size());
                    }
                    break (found, taken);
                }
                Err(ContiguousMemoryError::NoStorageLeft) => {
                    let curr_capacity = state.capacity.get();
                    let new_capacity = curr_capacity
                        .saturating_mul(2)
                        .max(curr_capacity + layout.size());
                    match ImplDefault::resize_container(state, new_capacity) {
                        Ok(_) => {
                            ImplDefault::resize_tracker(state, new_capacity)
                                .expect("unable to grow AllocationTracker");
                        }
                        Err(err) => unreachable!(
                            "reached unexpected error while growing the container to store data: {:?}",
                            err
                        ),
                    }
                }
                Err(other) => unreachable!(
                    "reached unexpected error while looking for next region to store data: {:?}",
                    other
                ),
            }
        };

        ImplDefault::build_ref(state, addr as *mut T, range)
    }

    #[inline(always)]
    unsafe fn push_raw_persisted<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Self::PushResult<T> {
        let value = Self::push_raw(state, data, layout);
        let result = value.clone();
        core::mem::forget(value.inner);
        result
    }

    #[inline(always)]
    fn assume_stored<T: StoreRequirements>(
        state: &Self::StorageState,
        position: usize,
    ) -> ContiguousEntryRef<T> {
        let addr = unsafe { state.base.get().add(position) };
        ImplDefault::build_ref(
            state,
            addr as *mut T,
            ByteRange(position, position + size_of::<T>()),
        )
    }
}

impl StoreDataDetails for ImplUnsafe {
    /// Returns a raw pointer (`*mut T`) to the stored value, or an error if
    /// no suitable free region remains.
    unsafe fn push_raw<T: StoreRequirements>(
        state: &mut Self::StorageState,
        data: *const T,
        layout: Layout,
    ) -> Result<*mut T, ContiguousMemoryError> {
        let (addr, range) = match ImplUnsafe::track_next(state, layout) {
            Ok(taken) => {
                let found = (taken.0 + *state.base as usize) as *mut u8;
                unsafe {
                    core::ptr::copy_nonoverlapping(data as *const u8, found, layout.size());
                }

                (found, taken)
            }
            Err(other) => return Err(other),
        };

        Ok(ImplUnsafe::build_ref(state, addr as *mut T, range))
    }

    unsafe fn push_raw_persisted<T: StoreRequirements>(
        _state: &mut Self::StorageState,
        _data: *const T,
        _layout: Layout,
    ) -> Self::PushResult<T> {
        unimplemented!()
    }

    #[inline(always)]
    fn assume_stored<T: StoreRequirements>(state: &Self::StorageState, position: usize) -> *mut T {
        let addr = unsafe { state.base.add(position) };
        ImplUnsafe::build_ref(
            state,
            addr as *mut T,
            ByteRange(position, position + size_of::<T>()),
        )
    }
}

/// Trait representing requirements for implementation details of the
/// [`ContiguousMemoryStorage`](crate::ContiguousMemoryStorage).
///
/// This trait is implemented by:
/// - [`ImplDefault`]
/// - [`ImplConcurrent`]
/// - [`ImplUnsafe`]
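///
/// The blanket implementation below makes any type implementing the component
/// traits automatically implement `ImplDetails`. A self-contained sketch of
/// that pattern, with illustrative names:
///
/// ```
/// trait A {}
/// trait B {}
/// trait Both: A + B {}
/// impl<T: A + B> Both for T {}
///
/// struct S;
/// impl A for S {}
/// impl B for S {}
///
/// fn needs_both<T: Both>(_value: T) {}
/// needs_both(S);
/// ```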
pub trait ImplDetails: ImplBase + StorageDetails + ReferenceDetails + StoreDataDetails {}
impl<Impl: ImplBase + StorageDetails + ReferenceDetails + StoreDataDetails> ImplDetails for Impl {}