contiguous_mem/
lib.rs

#![allow(incomplete_features)]
#![cfg_attr(feature = "no_std", no_std)]
#![cfg_attr(feature = "ptr_metadata", feature(ptr_metadata, unsize))]
#![cfg_attr(feature = "error_in_core", feature(error_in_core))]
#![cfg_attr(doc, feature(doc_auto_cfg))]
#![warn(missing_docs)]
#![doc = include_str!("../doc/crate.md")]

#[cfg(feature = "no_std")]
extern crate alloc;

mod details;
pub mod error;
pub mod range;
pub mod refs;
pub mod tracker;
mod types;

use details::*;
pub use details::{ImplConcurrent, ImplDefault, ImplUnsafe};
pub use range::ByteRange;
use refs::sealed::EntryRef;
pub use refs::{CERef, ContiguousEntryRef, SCERef, SyncContiguousEntryRef};
#[cfg(feature = "ptr_metadata")]
pub use types::static_metadata;
use types::*;

use core::{
    alloc::{Layout, LayoutError},
    mem::{size_of, ManuallyDrop},
    ops::Deref,
};

use error::{ContiguousMemoryError, LockingError};

/// A memory container for efficient allocation and storage of contiguous data.
///
/// This collection manages a contiguous block of memory, allowing for storage
/// of arbitrary data types while ensuring that stored items are placed
/// adjacently and properly aligned.
///
/// Type argument `Impl` specifies implementation details for the behavior of
/// this struct.
///
/// Note that this structure is a smart abstraction over the underlying data;
/// cloning it creates a copy which refers to the same internal state. If you
/// need to copy the memory region into a new container, see
/// [`ContiguousMemoryStorage::copy_data`].
pub struct ContiguousMemoryStorage<Impl: ImplDetails = ImplDefault> {
    inner: Impl::StorageState,
}

impl<Impl: ImplDetails> ContiguousMemoryStorage<Impl> {
    /// Creates a new `ContiguousMemory` instance with the specified `capacity`,
    /// aligned to the platform-dependent alignment of `usize`.
    pub fn new(capacity: usize) -> Self {
        Self::new_aligned(capacity, core::mem::align_of::<usize>())
            .expect("unable to create a ContiguousMemory with usize alignment")
    }

    /// Creates a new `ContiguousMemory` instance with the specified `capacity`
    /// and `alignment`.
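    ///
    /// # Example
    ///
    /// A minimal sketch of constructing storage with an explicit alignment:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// # use core::mem::align_of;
    /// let storage = ContiguousMemory::new_aligned(1024, align_of::<u64>())
    ///     .expect("invalid capacity/alignment combination");
    /// assert_eq!(storage.get_capacity(), 1024);
    /// ```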
    pub fn new_aligned(capacity: usize, alignment: usize) -> Result<Self, LayoutError> {
        let layout = Layout::from_size_align(capacity, alignment)?;
        let base = unsafe { allocator::alloc(layout) };
        Ok(ContiguousMemoryStorage {
            inner: Impl::build_state(base, capacity, alignment)?,
        })
    }

    /// Creates a new `ContiguousMemory` instance with the provided `layout`.
    pub fn new_for_layout(layout: Layout) -> Self {
        let base = unsafe { allocator::alloc(layout) };
        unsafe {
            // SAFETY: Impl::build_state won't return a LayoutError because
            // we're constructing it from a provided layout argument.
            ContiguousMemoryStorage {
                inner: Impl::build_state(base, layout.size(), layout.align()).unwrap_unchecked(),
            }
        }
    }

    /// Returns the current capacity of the memory container.
    ///
    /// The capacity represents the size of the memory block that has been
    /// allocated for storing data. It may be larger than the amount of data
    /// currently stored within the container.
    pub fn get_capacity(&self) -> usize {
        Impl::get_capacity(&self.capacity)
    }

    /// Returns the layout of the memory region containing stored data.
    pub fn get_layout(&self) -> Layout {
        Impl::deref_state(&self.inner).layout()
    }

    /// Resizes the memory container to the specified `new_capacity`, optionally
    /// returning the new base address of the stored items; if `None` is
    /// returned, the base address of the memory block is unchanged.
    ///
    /// Shrinking the container is generally performed in place by freeing
    /// trailing memory space, but growing it can move the data in memory to
    /// find a location that can fit it.
    ///
    /// [Unsafe implementation](ImplUnsafe) should match on the returned value
    /// and update any existing pointers accordingly.
    ///
    /// # Errors
    ///
    /// [`ContiguousMemoryError::Unshrinkable`] error is returned when
    /// attempting to shrink the memory container, but previously stored data
    /// prevents the container from being shrunk to the desired capacity.
    ///
    /// In a concurrent implementation [`ContiguousMemoryError::Lock`] is
    /// returned if the mutex holding the base address or the
    /// [`AllocationTracker`](crate::tracker::AllocationTracker) is poisoned.
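    ///
    /// # Example
    ///
    /// A minimal sketch using the default implementation, whose references
    /// remain usable across a grow:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let mut storage = ContiguousMemory::new(512);
    /// let stored = storage.push(1u32);
    ///
    /// storage.resize(1024).unwrap();
    /// assert_eq!(storage.get_capacity(), 1024);
    /// assert_eq!(*stored.get(), 1u32);
    /// ```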
    pub fn resize(
        &mut self,
        new_capacity: usize,
    ) -> Result<Option<*mut u8>, ContiguousMemoryError> {
        // TODO: (0.5.0) Change resize return type to *mut ()
        if new_capacity == Impl::get_capacity(&self.capacity) {
            return Ok(None);
        }

        let old_capacity = Impl::get_capacity(&self.capacity);
        Impl::resize_tracker(&mut self.inner, new_capacity)?;
        let moved = match Impl::resize_container(&mut self.inner, new_capacity) {
            Ok(it) => it,
            Err(ContiguousMemoryError::Lock(lock_err)) if Impl::USES_LOCKS => {
                Impl::resize_tracker(&mut self.inner, old_capacity)?;
                return Err(ContiguousMemoryError::Lock(lock_err));
            }
            Err(other) => return Err(other),
        };

        Ok(moved)
    }

    /// Reserves exactly `additional` bytes.
    /// After calling this function, the new capacity will be equal to
    /// `self.get_capacity() + additional`.
    ///
    /// # Errors
    ///
    /// See: [`ContiguousMemoryStorage::resize`]
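    ///
    /// # Example
    ///
    /// A minimal sketch of growing the container by a fixed byte count:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let mut storage = ContiguousMemory::new(8);
    /// storage.reserve(16).unwrap();
    /// assert_eq!(storage.get_capacity(), 24);
    /// ```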
    pub fn reserve(&mut self, additional: usize) -> Result<Option<*mut ()>, ContiguousMemoryError> {
        self.resize(self.get_capacity() + additional)
            .map(|it| it.map(|ptr| ptr as *mut ()))
    }

    /// Reserves exactly the additional bytes required to store a value of type
    /// `V`. After calling this function, the new capacity will be equal to
    /// `self.get_capacity() + size_of::<V>()`.
    ///
    /// # Errors
    ///
    /// See: [`ContiguousMemoryStorage::resize`]
    pub fn reserve_type<V>(&mut self) -> Result<Option<*mut ()>, ContiguousMemoryError> {
        self.reserve(size_of::<V>())
    }

    /// Reserves exactly the additional bytes required to store `count` values
    /// of type `V`. After calling this function, the new capacity will be
    /// equal to `self.get_capacity() + size_of::<V>() * count`.
    ///
    /// # Errors
    ///
    /// See: [`ContiguousMemoryStorage::resize`]
    pub fn reserve_type_count<V>(
        &mut self,
        count: usize,
    ) -> Result<Option<*mut ()>, ContiguousMemoryError> {
        self.reserve(size_of::<V>() * count)
    }

    /// Stores a `value` of type `T` in the contiguous memory block and returns
    /// a reference or a pointer pointing to it.
    ///
    /// Value type argument `T` is used to deduce the type size and the returned
    /// reference's dropping behavior.
    ///
    /// The returned value is implementation specific:
    ///
    /// | Implementation | Result | Alias |
    /// |-|:-:|:-:|
    /// |[Default](ImplDefault)|[`ContiguousEntryRef<T>`](refs::ContiguousEntryRef)|[`CERef`](refs::CERef)|
    /// |[Concurrent](ImplConcurrent)|[`SyncContiguousEntryRef<T>`](refs::SyncContiguousEntryRef)|[`SCERef`](refs::SCERef)|
    /// |[Unsafe](ImplUnsafe)|`*mut T`|_N/A_|
    ///
    /// # Errors
    ///
    /// ## Concurrent implementation
    ///
    /// Concurrent implementation returns a
    /// [`LockingError::Poisoned`](crate::error::LockingError::Poisoned) error
    /// when the `AllocationTracker` associated with the memory container is
    /// poisoned.
    ///
    /// ## Unsafe implementation
    ///
    /// Unsafe implementation returns a [`ContiguousMemoryError::NoStorageLeft`]
    /// error indicating that the container couldn't store the provided data
    /// with its current size.
    ///
    /// The memory block can still be grown by calling
    /// [`resize`](ContiguousMemoryStorage::resize), but it can't be done
    /// automatically as that would invalidate all the existing pointers
    /// without any indication.
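    ///
    /// # Example
    ///
    /// A minimal sketch using the default implementation:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let mut storage = ContiguousMemory::new(128);
    /// let stored = storage.push(42u64);
    /// assert_eq!(*stored.get(), 42u64);
    /// ```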
    pub fn push<T: StoreRequirements>(&mut self, value: T) -> Impl::PushResult<T> {
        let mut data = ManuallyDrop::new(value);
        let layout = Layout::for_value(&data);
        let pos = &mut *data as *mut T;

        unsafe { self.push_raw(pos, layout) }
    }

    /// Stores a `value` of type `T` in the contiguous memory block and returns
    /// a reference to it which doesn't mark the memory segment as free when
    /// dropped.
    ///
    /// See [`ContiguousMemoryStorage::push`] for details.
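    ///
    /// # Example
    ///
    /// A minimal sketch; unlike [`push`](ContiguousMemoryStorage::push), the
    /// stored segment stays occupied after the returned reference is dropped:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let mut storage = ContiguousMemory::new(4);
    /// {
    ///     let stored = storage.push_persisted(0xDEADBEEF_u32);
    ///     assert_eq!(*stored.get(), 0xDEADBEEF_u32);
    /// } // dropping `stored` does NOT mark its segment as free
    /// assert!(!storage.can_push::<u32>());
    /// ```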
    pub fn push_persisted<T: StoreRequirements>(&mut self, value: T) -> Impl::PushResult<T>
    where
        Impl::ReferenceType<T>: EntryRef,
    {
        let mut data = ManuallyDrop::new(value);
        let layout = Layout::for_value(&data);
        let pos = &mut *data as *mut T;

        unsafe { self.push_raw_persisted(pos, layout) }
    }

    /// Works the same as [`push`](ContiguousMemoryStorage::push) but takes a
    /// pointer and a layout.
    ///
    /// Pointer type is used to deduce the destruction behavior for
    /// implementations that return a reference, but can be disabled by casting
    /// the provided pointer into `*const ()` type and then calling
    /// [`transmute`](core::mem::transmute) on the returned reference:
    /// ```rust
    /// # use contiguous_mem::{ContiguousMemory, CERef};
    /// # use core::alloc::Layout;
    /// # use core::mem;
    /// # let mut storage = ContiguousMemory::new(0);
    /// let value = vec!["ignore", "drop", "for", "me"];
    /// let erased = &value as *const Vec<&str> as *const ();
    /// let layout = Layout::new::<Vec<&str>>();
    ///
    /// let stored: CERef<Vec<&str>> = unsafe {
    ///     mem::transmute(storage.push_raw(erased, layout))
    /// };
    /// ```
    ///
    /// # Safety
    ///
    /// This function is unsafe because it clones memory from the provided
    /// pointer, which means it could cause a segmentation fault if the pointer
    /// is invalid.
    ///
    /// Further, it also allows escaping the type's drop glue because it takes
    /// the [`Layout`] as a separate argument.
    pub unsafe fn push_raw<T: StoreRequirements>(
        &mut self,
        data: *const T,
        layout: Layout,
    ) -> Impl::PushResult<T> {
        Impl::push_raw(&mut self.inner, data, layout)
    }

    /// Variant of [`push_raw`](ContiguousMemoryStorage::push_raw) which
    /// returns a reference that doesn't mark the used memory segment as free
    /// when dropped.
    pub unsafe fn push_raw_persisted<T: StoreRequirements>(
        &mut self,
        data: *const T,
        layout: Layout,
    ) -> Impl::PushResult<T>
    where
        Impl::ReferenceType<T>: EntryRef,
    {
        Impl::push_raw_persisted(&mut self.inner, data, layout)
    }

    /// Assumes a value is stored at the provided _relative_ `position` in the
    /// managed memory and returns a pointer or a reference to it.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use contiguous_mem::UnsafeContiguousMemory;
    /// let mut storage = UnsafeContiguousMemory::new(128);
    /// let initial_position = storage.push(278u32).unwrap();
    ///
    /// // ...other code...
    ///
    /// let base_addr = storage.get_base();
    /// storage.resize(512);
    ///
    /// let new_position: *mut u32 = storage.assume_stored(
    ///     initial_position as usize - base_addr as usize
    /// );
    /// unsafe {
    ///     assert_eq!(*new_position, 278u32);
    /// }
    /// ```
    ///
    /// # Safety
    ///
    /// This function isn't unsafe because creating an invalid pointer isn't
    /// considered unsafe. Responsibility for guaranteeing safety falls on the
    /// code that dereferences the pointer.
    pub fn assume_stored<T: StoreRequirements>(
        &self,
        position: usize,
    ) -> Impl::LockResult<Impl::ReferenceType<T>> {
        Impl::assume_stored(&self.inner, position)
    }
}

impl ContiguousMemoryStorage<ImplDefault> {
    /// Returns the base address of the allocated memory.
    pub fn get_base(&self) -> *const () {
        ImplDefault::get_base(&self.base) as *const ()
    }

    /// Returns `true` if provided generic type `T` can be stored without
    /// growing the container.
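    ///
    /// # Example
    ///
    /// A minimal sketch (assumes the default base alignment of `usize`, i.e.
    /// at least 4 bytes on common targets):
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let storage = ContiguousMemory::new(4);
    /// assert!(storage.can_push::<u32>());
    /// assert!(!storage.can_push::<u64>());
    /// ```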
    pub fn can_push<T: StoreRequirements>(&self) -> bool {
        let layout = Layout::new::<T>();
        ImplDefault::peek_next(&self.inner, layout).is_some()
    }

    /// Returns `true` if the provided `value` can be stored without growing the
    /// container.
    pub fn can_push_value<T: StoreRequirements>(&self, value: &T) -> bool {
        let layout = Layout::for_value(value);
        ImplDefault::peek_next(&self.inner, layout).is_some()
    }

    /// Returns `true` if the provided `layout` can be stored without growing
    /// the container.
    pub fn can_push_layout(&self, layout: Layout) -> bool {
        ImplDefault::peek_next(&self.inner, layout).is_some()
    }

    /// Shrinks the allocated memory to fit the currently stored data and
    /// returns the new capacity.
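    ///
    /// # Example
    ///
    /// A minimal sketch; the exact shrunk capacity depends on item placement:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// let mut storage = ContiguousMemory::new(1024);
    /// let stored = storage.push(22u64);
    ///
    /// let new_capacity = storage.shrink_to_fit();
    /// assert!(new_capacity < 1024);
    /// assert_eq!(*stored.get(), 22u64);
    /// ```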
    pub fn shrink_to_fit(&mut self) -> usize {
        if let Some(shrunk) = ImplDefault::shrink_tracker(&mut self.inner) {
            self.resize(shrunk).expect("unable to shrink container");
            shrunk
        } else {
            self.capacity.get()
        }
    }

    /// Forgets this container without dropping it and returns its base address
    /// and [`Layout`].
    ///
    /// # Safety
    ///
    /// Calling this method will create a memory leak because the smart pointer
    /// to state will not be dropped even when all of the created references go
    /// out of scope. As this method takes ownership of the container, calling
    /// it also ensures that dereferencing pointers created by the
    /// [`as_ptr`](refs::ContiguousEntryRef::as_ptr),
    /// [`as_ptr_mut`](refs::ContiguousEntryRef::as_ptr_mut),
    /// [`into_ptr`](refs::ContiguousEntryRef::into_ptr), and
    /// [`into_ptr_mut`](refs::ContiguousEntryRef::into_ptr_mut)
    /// methods of `ContiguousEntryRef` is guaranteed to be safe.
    ///
    /// This method isn't unsafe as leaking data doesn't cause undefined
    /// behavior.
    /// ([_see details_](https://doc.rust-lang.org/nomicon/leaking.html))
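    ///
    /// # Example
    ///
    /// A minimal sketch; the returned region stays leaked unless deallocated
    /// manually:
    /// ```rust
    /// # use contiguous_mem::ContiguousMemory;
    /// # use core::alloc::Layout;
    /// let storage = ContiguousMemory::new(64);
    /// let (base, layout): (*const (), Layout) = storage.forget();
    /// assert_eq!(layout.size(), 64);
    /// # let _ = base;
    /// ```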
    pub fn forget(self) -> (*const (), Layout) {
        let base = ImplDefault::get_base(&self.base);
        let layout = self.get_layout();
        core::mem::forget(self);
        (base as *const (), layout)
    }
}

impl ContiguousMemoryStorage<ImplConcurrent> {
    /// Returns the base address of the allocated memory or a
    /// [`LockingError::Poisoned`] error if the mutex holding the base address
    /// has been poisoned.
    ///
    /// This function will block the current thread until the base address
    /// `RwLock` becomes readable.
    pub fn get_base(&self) -> Result<*const (), LockingError> {
        unsafe { core::mem::transmute(ImplConcurrent::get_base(&self.base)) }
    }

    /// Returns `true` if provided generic type `T` can be stored without
    /// growing the container or a [`LockingError::Poisoned`] error if the
    /// allocation tracker mutex has been poisoned.
    ///
    /// This function will block the current thread until the internal
    /// allocation tracker becomes available.
    pub fn can_push<T: StoreRequirements>(&self) -> Result<bool, LockingError> {
        let layout = Layout::new::<T>();
        ImplConcurrent::peek_next(&self.inner, layout).map(|it| it.is_some())
    }

    /// Returns `true` if the provided `value` can be stored without growing the
    /// container or a [`LockingError::Poisoned`] error if the allocation
    /// tracker mutex has been poisoned.
    ///
    /// This function will block the current thread until the internal
    /// allocation tracker becomes available.
    pub fn can_push_value<T: StoreRequirements>(&self, value: &T) -> Result<bool, LockingError> {
        let layout = Layout::for_value(value);
        ImplConcurrent::peek_next(&self.inner, layout).map(|it| it.is_some())
    }

    /// Returns `true` if the provided `layout` can be stored without growing
    /// the container or a [`LockingError::Poisoned`] error if the allocation
    /// tracker mutex has been poisoned.
    ///
    /// This function will block the current thread until the internal
    /// allocation tracker becomes available.
    pub fn can_push_layout(&self, layout: Layout) -> Result<bool, LockingError> {
        ImplConcurrent::peek_next(&self.inner, layout).map(|it| it.is_some())
    }

    /// Shrinks the allocated memory to fit the currently stored data and
    /// returns the new capacity.
    ///
    /// This function will block the current thread until the internal
    /// allocation tracker becomes available.
    pub fn shrink_to_fit(&mut self) -> Result<usize, LockingError> {
        if let Some(shrunk) = ImplConcurrent::shrink_tracker(&mut self.inner)? {
            self.resize(shrunk).expect("unable to shrink container");
            Ok(shrunk)
        } else {
            Ok(self.get_capacity())
        }
    }

    /// Forgets this container without dropping it and returns its base address
    /// and [`Layout`], or a [`LockingError::Poisoned`] error if the base
    /// address `RwLock` has been poisoned.
    ///
    /// For details on safety see the _Safety_ section of the
    /// [default implementation](ContiguousMemoryStorage<ImplDefault>::forget).
    pub fn forget(self) -> Result<(*const (), Layout), LockingError> {
        let base = ImplConcurrent::get_base(&self.base);
        let layout = self.get_layout();
        core::mem::forget(self);
        base.map(|it| (it as *const (), layout))
    }
}

impl ContiguousMemoryStorage<ImplUnsafe> {
    /// Returns the base address of the allocated memory.
    pub fn get_base(&self) -> *const () {
        self.base.0 as *const ()
    }

    /// Returns `true` if the provided value can be stored without growing the
    /// container.
    ///
    /// It's usually clearer to try storing the value directly and then handle
    /// the case where it wasn't stored through error matching.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use contiguous_mem::UnsafeContiguousMemory;
    /// # use core::mem::size_of_val;
    /// let mut storage = UnsafeContiguousMemory::new(0);
    /// let value = [2, 4, 8, 16];
    ///
    /// # assert_eq!(storage.can_push::<Vec<i32>>(), false);
    /// if !storage.can_push::<Vec<i32>>() {
    ///     storage.resize(storage.get_capacity() + size_of_val(&value));
    ///
    ///     // ...update old pointers...
    /// }
    ///
    /// let stored_value =
    ///   storage.push(value).expect("unable to store after growing the container");
    /// ```
    pub fn can_push<T: StoreRequirements>(&self) -> bool {
        let layout = Layout::new::<T>();
        ImplUnsafe::peek_next(&self.inner, layout).is_some()
    }

    /// Returns `true` if the provided `value` can be stored without growing the
    /// container.
    pub fn can_push_value<T: StoreRequirements>(&self, value: &T) -> bool {
        let layout = Layout::for_value(value);
        ImplUnsafe::peek_next(&self.inner, layout).is_some()
    }

    /// Returns `true` if the provided `layout` can be stored without growing
    /// the container.
    pub fn can_push_layout(&self, layout: Layout) -> bool {
        ImplUnsafe::peek_next(&self.inner, layout).is_some()
    }

    /// Shrinks the allocated memory to fit the currently stored data and
    /// returns the new capacity.
    pub fn shrink_to_fit(&mut self) -> usize {
        if let Some(shrunk) = ImplUnsafe::shrink_tracker(&mut self.inner) {
            self.resize(shrunk).expect("unable to shrink container");
            shrunk
        } else {
            self.capacity
        }
    }

    /// Clones the allocated memory region into a new `ContiguousMemoryStorage`.
    ///
    /// This function isn't unsafe, even though it copies stored data without
    /// requiring a `Copy` bound, because it doesn't create any pointers.
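    ///
    /// # Example
    ///
    /// A minimal sketch; the copy holds the same bytes at the same offsets:
    /// ```rust
    /// # use contiguous_mem::UnsafeContiguousMemory;
    /// let mut storage = UnsafeContiguousMemory::new(32);
    /// storage.push(0x11223344u32).unwrap();
    ///
    /// let copy = storage.copy_data();
    /// unsafe {
    ///     assert_eq!(*(copy.get_base() as *const u32), 0x11223344u32);
    /// }
    /// ```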
    #[must_use]
    pub fn copy_data(&self) -> Self {
        let current_layout = self.get_layout();
        let result = Self::new_for_layout(current_layout);
        unsafe {
            // Copy the raw bytes; pointers are cast to `u8` so the count is
            // interpreted in bytes.
            core::ptr::copy_nonoverlapping(
                self.get_base() as *const u8,
                result.get_base() as *mut u8,
                current_layout.size(),
            );
        }
        result
    }

    /// Allows freeing a memory range stored at the provided `position`.
    ///
    /// Type of the position pointer `T` determines the size of the freed chunk.
    ///
    /// # Safety
    ///
    /// This function is considered unsafe because it can mark a memory range
    /// as free while a valid reference is pointing to it from another place in
    /// code.
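    ///
    /// # Example
    ///
    /// A minimal sketch; freeing the slot makes it available for reuse:
    /// ```rust
    /// # use contiguous_mem::UnsafeContiguousMemory;
    /// let mut storage = UnsafeContiguousMemory::new(4);
    /// let stored: *mut u32 = storage.push(123u32).unwrap();
    /// assert!(!storage.can_push::<u32>());
    ///
    /// unsafe { storage.free_typed(stored); }
    /// assert!(storage.can_push::<u32>());
    /// ```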
    pub unsafe fn free_typed<T>(&mut self, position: *mut T) {
        Self::free(self, position, size_of::<T>())
    }

    /// Allows freeing a memory range stored at the provided `position` with
    /// the specified `size`.
    ///
    /// # Safety
    ///
    /// This function is considered unsafe because it can mark a memory range
    /// as free while a valid reference is pointing to it from another place in
    /// code.
    pub unsafe fn free<T>(&mut self, position: *mut T, size: usize) {
        // Compute the byte offset of `position` relative to the base address.
        let pos: usize = position as usize - self.get_base() as usize;
        let base = ImplUnsafe::get_base(&self.base);
        let tracker = ImplUnsafe::get_allocation_tracker(&mut self.inner);
        if let Some(freed) = ImplUnsafe::free_region(tracker, base, ByteRange(pos, pos + size)) {
            core::ptr::drop_in_place(freed as *mut T);
        }
    }

    /// Forgets this container without dropping it and returns its base address
    /// and [`Layout`].
    ///
    /// For details on safety see the _Safety_ section of the
    /// [default implementation](ContiguousMemoryStorage<ImplDefault>::forget).
    pub fn forget(self) -> (*const (), Layout) {
        let base = ImplUnsafe::get_base(&self.base);
        let layout = self.get_layout();
        core::mem::forget(self);
        (base as *const (), layout)
    }
}

#[cfg(feature = "debug")]
impl<Impl: ImplDetails> core::fmt::Debug for ContiguousMemoryStorage<Impl>
where
    Impl::StorageState: core::fmt::Debug,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ContiguousMemoryStorage")
            .field("inner", &self.inner)
            .finish()
    }
}

impl<Impl: ImplDetails> Clone for ContiguousMemoryStorage<Impl> {
    fn clone(&self) -> Self {
        ContiguousMemoryStorage {
            inner: self.inner.clone(),
        }
    }
}

impl<Impl: ImplDetails> Deref for ContiguousMemoryStorage<Impl> {
    type Target = ContiguousMemoryState<Impl>;

    fn deref(&self) -> &Self::Target {
        Impl::deref_state(&self.inner)
    }
}

pub(crate) mod sealed {
    use super::*;

    #[derive(Clone, PartialEq, Eq)]
    #[repr(transparent)]
    pub(crate) struct BaseLocation<Impl: StorageDetails>(pub(crate) Impl::Base);

    #[cfg(feature = "debug")]
    impl<Impl: StorageDetails> core::fmt::Debug for BaseLocation<Impl>
    where
        Impl::LockResult<*mut u8>: core::fmt::Debug,
    {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            f.debug_tuple("BaseLocation")
                .field(&Impl::get_base(&self.0))
                .finish()
        }
    }

    impl<Impl: ImplDetails> Deref for BaseLocation<Impl> {
        type Target = <Impl as StorageDetails>::Base;

        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl Copy for BaseLocation<ImplUnsafe> {}
    unsafe impl<Impl: ImplDetails> Send for BaseLocation<Impl> where Impl: PartialEq<ImplConcurrent> {}
    unsafe impl<Impl: ImplDetails> Sync for BaseLocation<Impl> where Impl: PartialEq<ImplConcurrent> {}

    #[repr(C)]
    pub struct ContiguousMemoryState<Impl: StorageDetails = ImplDefault> {
        pub(crate) base: BaseLocation<Impl>,
        pub(crate) capacity: Impl::SizeType,
        pub(crate) alignment: usize,
        pub(crate) tracker: Impl::AllocationTracker,
    }

    impl<Impl: StorageDetails> core::fmt::Debug for ContiguousMemoryState<Impl>
    where
        BaseLocation<Impl>: core::fmt::Debug,
        Impl::SizeType: core::fmt::Debug,
        Impl::AllocationTracker: core::fmt::Debug,
    {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            f.debug_struct("ContiguousMemoryState")
                .field("base", &self.base)
                .field("capacity", &self.capacity)
                .field("alignment", &self.alignment)
                .field("tracker", &self.tracker)
                .finish()
        }
    }

    impl<Impl: StorageDetails> ContiguousMemoryState<Impl> {
        /// Returns the layout of the managed memory.
        pub fn layout(&self) -> Layout {
            unsafe {
                let capacity = Impl::get_capacity(core::mem::transmute(self));
                Layout::from_size_align_unchecked(capacity, self.alignment)
            }
        }
    }

    impl Clone for ContiguousMemoryState<ImplUnsafe> {
        fn clone(&self) -> Self {
            Self {
                base: self.base,
                capacity: self.capacity,
                alignment: self.alignment,
                tracker: self.tracker.clone(),
            }
        }
    }

    impl<Impl: StorageDetails> Drop for ContiguousMemoryState<Impl> {
        fn drop(&mut self) {
            let layout = self.layout();
            Impl::deallocate(&mut self.base.0, layout)
        }
    }
}
use sealed::*;

/// Alias for `ContiguousMemoryStorage` that uses
/// [concurrent implementation](ImplConcurrent).
///
/// # Example
///
/// ```rust
#[doc = include_str!("../examples/sync_impl.rs")]
/// ```
pub type SyncContiguousMemory = ContiguousMemoryStorage<ImplConcurrent>;

/// Alias for `ContiguousMemoryStorage` that uses
/// [default implementation](ImplDefault).
///
/// # Example
///
/// ```rust
#[doc = include_str!("../examples/default_impl.rs")]
/// ```
pub type ContiguousMemory = ContiguousMemoryStorage<ImplDefault>;

/// Alias for `ContiguousMemoryStorage` that uses
/// [unsafe implementation](ImplUnsafe).
///
/// # Example
///
/// ```rust
#[doc = include_str!("../examples/unsafe_impl.rs")]
/// ```
pub type UnsafeContiguousMemory = ContiguousMemoryStorage<ImplUnsafe>;

#[cfg(all(test, not(feature = "no_std")))]
mod test {
    use core::mem::align_of;

    use super::*;

    #[derive(Debug, Clone, PartialEq, Eq)]
    #[repr(C)]
    struct Person {
        name: String,
        last_name: String,
    }

    #[derive(Debug, Clone, PartialEq, Eq)]
    #[repr(C)]
    struct Car {
        owner: Person,
        driver: Option<Person>,
        cost: u32,
        miles: u32,
    }

    #[test]
    fn construct_contiguous_memory() {
        let memory = ContiguousMemory::new(1024);
        assert_eq!(memory.get_capacity(), 1024);
    }

    #[test]
    fn store_and_get() {
        let mut memory = ContiguousMemory::new(1024);

        let person_a = Person {
            name: "Jerry".to_string(),
            last_name: "Taylor".to_string(),
        };

        let person_b = Person {
            name: "Larry".to_string(),
            last_name: "Taylor".to_string(),
        };

        let car_a = Car {
            owner: person_a.clone(),
            driver: Some(person_b.clone()),
            cost: 20_000,
            miles: 30123,
        };

        let car_b = Car {
            owner: person_b.clone(),
            driver: None,
            cost: 30_000,
            miles: 3780123,
        };

        let value_number = 248169u64;
        let value_string = "This is a test string".to_string();
        let value_byte = 0x41u8;

        let stored_ref_number = memory.push(value_number);
        let stored_ref_car_a = memory.push(car_a.clone());
        let stored_ref_string = memory.push(value_string.clone());
        let stored_ref_byte = memory.push(value_byte);
        let stored_ref_car_b = memory.push(car_b.clone());

        assert_eq!(*stored_ref_number.get(), value_number);
        assert_eq!(*stored_ref_car_a.get(), car_a);
        assert_eq!(*stored_ref_string.get(), value_string);
        assert_eq!(*stored_ref_car_b.get(), car_b);
        assert_eq!(*stored_ref_byte.get(), value_byte);
    }

    #[test]
    fn resize_manually() {
        let mut memory = ContiguousMemory::new(512);

        let person_a = Person {
            name: "Larry".to_string(),
            last_name: "Taylor".to_string(),
        };

        let car_a = Car {
            owner: person_a.clone(),
            driver: Some(person_a),
            cost: 20_000,
            miles: 30123,
        };

        let stored_car = memory.push(car_a.clone());

        assert!(memory.resize(32).is_err());
        memory.resize(1024).unwrap();
        assert_eq!(memory.get_capacity(), 1024);

        assert_eq!(*stored_car.get(), car_a);

        memory.resize(128).unwrap();
        assert_eq!(memory.get_capacity(), 128);

        assert_eq!(*stored_car.get(), car_a);
    }

    #[test]
    fn resize_automatically() {
        let mut memory = ContiguousMemory::new_aligned(12, align_of::<u64>()).unwrap();

        {
            let _a = memory.push(1u32);
            let _b = memory.push(2u32);
            let _c = memory.push(3u32);
            assert_eq!(memory.can_push::<u32>(), false);
            let _d = memory.push(4u32);
            assert_eq!(memory.get_capacity(), 24);
        }

        memory.resize(4).expect("can't shrink empty storage");
        {
            memory.push_persisted(1u16);
            memory.push_persisted(2u16);
            assert_eq!(memory.can_push::<u64>(), false);
            memory.push_persisted(3u64);
            // expecting 12, but due to alignment we're skipping two u16 slots
            // and then double the size as remaining (aligned) 4 bytes aren't
            // enough for u64
            assert_eq!(memory.get_capacity(), 24);
        }
    }

    #[test]
    fn add_to_zero_sized() {
        let mut memory = ContiguousMemory::new(0);

        let person = Person {
            name: "Jacky".to_string(),
            last_name: "Larsson".to_string(),
        };

        let stored_person = memory.push(person.clone());

        assert_eq!(memory.get_capacity(), 48);
        assert_eq!(*stored_person.get(), person);
    }
}