static_alloc/unsync/bump.rs

use core::{
    alloc::{Layout, LayoutError},
    cell::{Cell, UnsafeCell},
    mem::{self, MaybeUninit},
    ops,
    ptr::{self, NonNull},
};

use alloc_traits::AllocTime;

use crate::bump::{Allocation, Failure, Level};
use crate::leaked::LeakBox;

/// A bump allocator whose storage capacity and alignment is given by `T`.
///
/// This type dereferences to the generic `MemBump` that implements the allocation behavior. Note
/// that `MemBump` is an unsized type. In contrast this type is sized so it is possible to
/// construct an instance on the stack or leak one from another bump allocator such as a global
/// one.
///
/// # Usage
///
/// For on-stack usage this works the same as [`Bump`]. Note that it is not possible to use it as
/// a global allocator though.
///
/// [`Bump`]: ../bump/struct.Bump.html
///
/// One interesting use case for this struct is as scratch space for subroutines. This ensures good
/// locality and cache usage. It also allows such subroutines to use a dynamic amount of space
/// without the need to actually allocate. Contrary to other methods where the caller provides some
/// preallocated memory, it will also not 'leak' private data types. This could be used in handling
/// web requests.
///
/// ```
/// use static_alloc::unsync::Bump;
/// # use static_alloc::unsync::MemBump;
/// # fn subroutine_one(_: &MemBump) {}
/// # fn subroutine_two(_: &MemBump) {}
///
/// let mut stack_buffer: Bump<[usize; 64]> = Bump::uninit();
/// subroutine_one(&stack_buffer);
/// stack_buffer.reset();
/// subroutine_two(&stack_buffer);
/// ```
///
/// Note that you need not use the stack for the `Bump` itself. Indeed, you could allocate a large
/// contiguous instance from the global (synchronized) allocator and then do subsequent allocations
/// from the `Bump` you've obtained. This avoids potential contention on a lock of the global
/// allocator, especially in case you must do many small allocations. If you're writing an
/// allocator yourself you might use this technique as an internal optimization.
///
#[cfg_attr(feature = "alloc", doc = "```")]
#[cfg_attr(not(feature = "alloc"), doc = "```ignore")]
/// use static_alloc::unsync::{Bump, MemBump};
/// # struct Request;
/// # fn handle_request(_: &MemBump, _: Request) {}
/// # fn iterate_recv() -> Option<Request> { None }
/// let mut local_page: Box<Bump<[u64; 64]>> = Box::new(Bump::uninit());
///
/// for request in iterate_recv() {
///     local_page.reset();
///     handle_request(&local_page, request);
/// }
/// ```
#[repr(C)]
pub struct Bump<T> {
    /// The index used in allocation.
    _index: Cell<usize>,
    /// The backing storage for raw allocated data.
    _data: UnsafeCell<MaybeUninit<T>>,
    // Warning: when changing the data layout, you must change `MemBump` as well.
}

/// An error used when one could not re-use raw memory for a bump allocator.
#[derive(Debug)]
pub struct FromMemError {
    _inner: (),
}

/// A dynamically sized allocation block in which any type can be allocated.
#[repr(C)]
pub struct MemBump {
    /// An index into the data field. This index
    /// always points to the first byte that
    /// has not yet been allocated into.
    /// It is wrapped in a `Cell` to allow
    /// modification with just a
    /// `&self` reference.
    index: Cell<usize>,

    /// The data slice of a node. This slice
    /// may be of any arbitrary size. We use
    /// a `Cell<MaybeUninit>` to allow modification
    /// through a `&self` reference, and to allow
    /// writing uninit padding bytes.
    /// Note that the underlying memory is in one
    /// contiguous `UnsafeCell`; it is only represented
    /// here to make it easier to slice.
    data: UnsafeCell<[MaybeUninit<u8>]>,
}

impl<T> Bump<T> {
    /// Create an allocator with uninitialized memory.
    ///
    /// All allocations coming from the allocator will need to be initialized manually.
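    ///
    /// A minimal sketch of such manual initialization (the buffer size here is arbitrary):
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use static_alloc::leaked::LeakBox;
    ///
    /// let bump: Bump<[u8; 8]> = Bump::uninit();
    /// // The slot starts out uninitialized; write a value before reading it.
    /// let slot = bump.bump_box::<u32>().unwrap();
    /// let value = LeakBox::write(slot, 42u32);
    /// assert_eq!(*value, 42);
    /// ```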
    pub fn uninit() -> Self {
        Bump {
            _index: Cell::new(0),
            _data: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Create an allocator with zeroed memory.
    ///
    /// The caller can rely on all allocations to be zeroed.
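    ///
    /// A small sketch of what that guarantee buys; reading the fresh allocation below is sound
    /// only because the backing memory is zero-initialized:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u8; 16]> = Bump::zeroed();
    /// let alloc = bump.get::<u32>().unwrap();
    /// // Safety: the memory was zero-initialized by `zeroed` and is valid for a `u32` read.
    /// assert_eq!(unsafe { alloc.ptr.as_ptr().read() }, 0);
    /// ```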
    pub fn zeroed() -> Self {
        Bump {
            _index: Cell::new(0),
            _data: UnsafeCell::new(MaybeUninit::zeroed()),
        }
    }
}

#[cfg(feature = "alloc")]
impl MemBump {
    /// Allocate some space to use for a bump allocator.
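    ///
    /// A short usage sketch; `capacity` is the number of usable bytes, chosen arbitrarily here:
    ///
    /// ```
    /// # use static_alloc::unsync::MemBump;
    /// let bump = MemBump::new(256);
    /// assert!(bump.capacity() >= 256);
    /// ```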
    pub fn new(capacity: usize) -> alloc::boxed::Box<Self> {
        let layout = Self::layout_from_size(capacity).expect("Bad layout");
        let ptr = NonNull::new(unsafe { alloc::alloc::alloc(layout) })
            .unwrap_or_else(|| alloc::alloc::handle_alloc_error(layout));
        let ptr = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), capacity);
        unsafe { ptr::write(ptr as *mut Cell<usize>, Cell::new(0)) };
        unsafe { alloc::boxed::Box::from_raw(ptr as *mut MemBump) }
    }
}

impl MemBump {
    /// Initialize a bump allocator from existing memory.
    ///
    /// # Usage
    ///
    /// ```
    /// use core::mem::MaybeUninit;
    /// use static_alloc::unsync::MemBump;
    ///
    /// let mut backing = [MaybeUninit::new(0); 128];
    /// let alloc = MemBump::from_mem(&mut backing)?;
    ///
    /// # Ok::<(), static_alloc::unsync::FromMemError>(())
    /// ```
    pub fn from_mem(mem: &mut [MaybeUninit<u8>]) -> Result<LeakBox<'_, Self>, FromMemError> {
        let header = Self::header_layout();
        let offset = mem.as_ptr().align_offset(header.align());
        // Align the memory for the header.
        let mem = mem.get_mut(offset..).ok_or(FromMemError { _inner: () })?;
        mem.get_mut(..header.size())
            .ok_or(FromMemError { _inner: () })?
            .fill(MaybeUninit::new(0));
        Ok(unsafe { Self::from_mem_unchecked(mem) })
    }

    /// Construct a bump allocator from existing memory without reinitializing.
    ///
    /// This allows the caller to (unsafely) fall back to manual borrow checking of the memory
    /// region between regions of allocator use.
    ///
    /// # Safety
    ///
    /// The memory must contain data that has been previously wrapped as a `MemBump`, exactly. The
    /// only endorsed sound form of obtaining such memory is [`MemBump::into_mem`].
    ///
    /// Warning: Any _use_ of the memory will have invalidated all pointers to allocated objects,
    /// more specifically the provenance of these pointers is no longer valid! You _must_ derive
    /// new pointers based on their offsets.
    pub unsafe fn from_mem_unchecked(mem: &mut [MaybeUninit<u8>]) -> LeakBox<'_, Self> {
        let raw = Self::from_aligned_mem(mem);
        LeakBox::from_mut_unchecked(raw)
    }

    /// Cast pre-initialized, aligned memory into a bump allocator.
    #[allow(unused_unsafe)]
    unsafe fn from_aligned_mem(mem: &mut [MaybeUninit<u8>]) -> &mut Self {
        let header = Self::header_layout();
        // debug_assert!(mem.len() >= header.size());
        // debug_assert!(mem.as_ptr().align_offset(header.align()) == 0);

        let datasize = mem.len() - header.size();
        // Round down to the header alignment! The whole struct will occupy memory according to its
        // natural alignment. We must be prepared for the `pad_to_align` so to speak.
        let datasize = datasize - datasize % header.align();
        debug_assert!(Self::layout_from_size(datasize).map_or(false, |l| l.size() <= mem.len()));

        let raw = mem.as_mut_ptr() as *mut u8;
        // Turn it into a fat pointer with correct metadata for a `MemBump`.
        // Safety:
        // - The data is writable as we hold an exclusive reference to it.
        unsafe { &mut *(ptr::slice_from_raw_parts_mut(raw, datasize) as *mut MemBump) }
    }

    /// Unwrap the memory owned by an unsized bump allocator.
    ///
    /// This releases the memory used by the allocator, similar to `Box::leak`, with the difference
    /// of operating on unique references instead. It is necessary to own the bump allocator due to
    /// internal state contained within the memory region that the caller can subsequently
    /// invalidate.
    ///
    /// # Example
    ///
    /// ```rust
    /// use core::mem::MaybeUninit;
    /// use static_alloc::unsync::MemBump;
    ///
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing)?;
    /// let memory: &mut [_] = MemBump::into_mem(alloc);
    /// assert!(memory.len() <= 128, "Not guaranteed to use all memory");
    ///
    /// // Safety: We have not touched the memory itself.
    /// unsafe { MemBump::from_mem_unchecked(memory) };
    /// # Ok::<(), static_alloc::unsync::FromMemError>(())
    /// ```
    pub fn into_mem<'lt>(this: LeakBox<'lt, Self>) -> &'lt mut [MaybeUninit<u8>] {
        let layout = Layout::for_value(&*this);
        let mem_pointer = LeakBox::into_raw(this) as *mut MaybeUninit<u8>;
        unsafe { &mut *ptr::slice_from_raw_parts_mut(mem_pointer, layout.size()) }
    }

    /// Returns the layout for the `header` of a `MemBump`.
    /// The definition of `header` in this case is all the
    /// fields that come **before** the `data` field.
    /// If any of the fields of a `MemBump` are modified,
    /// this function likely has to be modified too.
    fn header_layout() -> Layout {
        Layout::new::<Cell<usize>>()
    }

    /// Returns the layout for an array with the size of `size`.
    fn data_layout(size: usize) -> Result<Layout, LayoutError> {
        Layout::array::<UnsafeCell<MaybeUninit<u8>>>(size)
    }

    /// Returns a layout for a `MemBump` where the length of the data field is `size`.
    /// This relies on the two functions defined above.
    pub(crate) fn layout_from_size(size: usize) -> Result<Layout, LayoutError> {
        let data_tail = Self::data_layout(size)?;
        let (layout, _) = Self::header_layout().extend(data_tail)?;
        Ok(layout.pad_to_align())
    }

    /// Returns the capacity of this `MemBump`.
    /// This is how many *bytes* can be allocated
    /// within this node.
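    ///
    /// For the sized `Bump<T>` wrapper this is simply the size of the backing storage `T`, as
    /// sketched below:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u8; 32]> = Bump::uninit();
    /// assert_eq!(bump.capacity(), 32);
    /// ```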
    pub const fn capacity(&self) -> usize {
        // Safety: just gets the pointer metadata `len` without invalidating any provenance,
        // accepting the pointer use itself. This may be replaced by a safe `pointer::len` as soon
        // as it is stable (#71146) and const, which would avoid any pointer use.
        unsafe { (*(self.data.get() as *const [UnsafeCell<u8>])).len() }
    }

    /// Get a raw pointer to the data.
    ///
    /// Note that *any* use of the pointer must be done with extreme care as it may invalidate
    /// existing references into the allocated region. Furthermore, bytes may not be initialized.
    /// The length of the valid region is [`MemBump::capacity`].
    ///
    /// Prefer [`MemBump::get_unchecked`] for reconstructing a prior allocation.
    pub fn data_ptr(&self) -> NonNull<u8> {
        NonNull::new(self.data.get() as *mut u8).expect("from a reference")
    }

    /// Allocate a region of memory.
    ///
    /// This is a safe alternative to [GlobalAlloc::alloc](#impl-GlobalAlloc).
    ///
    /// # Panics
    /// This function will panic if the requested layout has a size of `0`. For the use in a
    /// `GlobalAlloc` such a request is explicitly forbidden and could allow any behaviour, so we
    /// strictly check it instead.
    ///
    /// FIXME(breaking): this could well be a `Result<_, Failure>`.
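    ///
    /// A brief sketch of requesting a correctly aligned region (the sizes here are arbitrary):
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 64]> = Bump::uninit();
    /// let layout = Layout::new::<u32>();
    /// let ptr = bump.alloc(layout).expect("enough space for a u32");
    /// assert_eq!(ptr.as_ptr() as usize % layout.align(), 0);
    /// ```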
    pub fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        Some(self.try_alloc(layout)?.ptr)
    }

    /// Try to allocate some layout with a precise base location.
    ///
    /// The base location is the currently consumed byte count, without correction for the
    /// alignment of the allocation. This will succeed only if the allocation can be placed
    /// exactly at the expected location.
    ///
    /// # Panics
    /// This function may panic if the provided `level` is from a different slab.
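    ///
    /// A sketch of how a `Level` pins the base location; a stale level is rejected with
    /// `Failure::Mismatch` rather than being placed elsewhere:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 64]> = Bump::uninit();
    /// let level = bump.level();
    /// // The first allocation is placed exactly at the observed level.
    /// assert!(bump.alloc_at(Layout::new::<u8>(), level).is_ok());
    /// // Re-using the now stale level fails instead of allocating elsewhere.
    /// assert!(bump.alloc_at(Layout::new::<u8>(), level).is_err());
    /// ```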
    pub fn alloc_at(&self, layout: Layout, level: Level) -> Result<NonNull<u8>, Failure> {
        let Allocation { ptr, .. } = self.try_alloc_at(layout, level.0)?;
        Ok(ptr)
    }

    /// Get an allocation for a specific type.
    ///
    /// It is not yet initialized but provides an interface for that initialization.
    ///
    /// ## Usage
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::cell::{Ref, RefCell};
    ///
    /// let slab: Bump<[Ref<'static, usize>; 1]> = Bump::uninit();
    /// let data = RefCell::new(0xff);
    ///
    /// // We can place a `Ref` here but we did not yet.
    /// let alloc = slab.get::<Ref<usize>>().unwrap();
    /// let cell_ref = unsafe {
    ///     alloc.leak(data.borrow())
    /// };
    ///
    /// assert_eq!(**cell_ref, 0xff);
    /// ```
    ///
    /// FIXME(breaking): this could well be a `Result<_, Failure>`.
    pub fn get<V>(&self) -> Option<Allocation<V>> {
        let alloc = self.try_alloc(Layout::new::<V>())?;
        Some(Allocation {
            lifetime: alloc.lifetime,
            level: alloc.level,
            ptr: alloc.ptr.cast(),
        })
    }

    /// Get an allocation for a specific type at a specific level.
    ///
    /// See [`get`] for usage. This can be used to ensure that data is contiguous in concurrent
    /// access to the allocator.
    ///
    /// [`get`]: #method.get
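    ///
    /// A small sketch of that contiguity; passing the allocator's current level places the next
    /// allocation directly behind the previous one (the element type and count are arbitrary):
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u16; 8]> = Bump::uninit();
    /// let first = bump.get_at::<u16>(bump.level())?;
    /// let second = bump.get_at::<u16>(bump.level())?;
    /// // The second allocation follows immediately after the first.
    /// assert_eq!(first.ptr.as_ptr().wrapping_add(1), second.ptr.as_ptr());
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```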
    pub fn get_at<V>(&self, level: Level) -> Result<Allocation<V>, Failure> {
        let alloc = self.try_alloc_at(Layout::new::<V>(), level.0)?;
        Ok(Allocation {
            lifetime: alloc.lifetime,
            level: alloc.level,
            ptr: alloc.ptr.cast(),
        })
    }

    /// Reacquire an allocation that has been performed previously.
    ///
    /// This call won't invalidate any other allocations.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that no other pointers to this prior allocation are alive, or can
    /// be created. This is guaranteed if the allocation was performed previously, has since been
    /// discarded, and `reset` can not be called (for example, the caller holds a shared
    /// reference).
    ///
    /// # Usage
    ///
    /// ```
    /// # use core::mem::MaybeUninit;
    /// # use static_alloc::unsync::MemBump;
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing).unwrap();
    /// // Create an initial allocation.
    /// let level = alloc.level();
    /// let allocation = alloc.get_at::<usize>(level)?;
    /// let address = allocation.ptr.as_ptr() as usize;
    /// // pretend to lose the owning pointer of the allocation.
    /// let _ = { allocation };
    ///
    /// // Restore our access.
    /// let renewed = unsafe { alloc.get_unchecked::<usize>(level) };
    /// assert_eq!(address, renewed.ptr.as_ptr() as usize);
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```
    ///
    /// Critically, you can rely on *other* allocations to stay valid.
    ///
    /// ```
    /// # use core::mem::MaybeUninit;
    /// # use static_alloc::{leaked::LeakBox, unsync::MemBump};
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing).unwrap();
    /// let level = alloc.level();
    /// alloc.get_at::<usize>(level)?;
    ///
    /// let other_val = alloc.bump_box()?;
    /// let other_val = LeakBox::write(other_val, 0usize);
    ///
    /// let renew = unsafe { alloc.get_unchecked::<usize>(level) };
    /// assert_eq!(*other_val, 0); // Not UB!
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```
    pub unsafe fn get_unchecked<V>(&self, level: Level) -> Allocation<V> {
        debug_assert!(level.0 < self.capacity());
        let ptr = self.data_ptr().as_ptr();
        // Safety: guaranteed by the caller.
        let alloc = ptr.offset(level.0 as isize) as *mut V;

        Allocation {
            level,
            lifetime: AllocTime::default(),
            ptr: NonNull::new_unchecked(alloc),
        }
    }

    /// Allocate space for one `T` without initializing it.
    ///
    /// Note that the returned `MaybeUninit` can be unwrapped from `LeakBox`. Or you can store an
    /// arbitrary value and ensure it is safely dropped before the borrow ends.
    ///
    /// ## Usage
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::cell::RefCell;
    /// use static_alloc::leaked::LeakBox;
    ///
    /// let slab: Bump<[usize; 4]> = Bump::uninit();
    /// let data = RefCell::new(0xff);
    ///
    /// let slot = slab.bump_box().unwrap();
    /// let cell_box = LeakBox::write(slot, data.borrow());
    ///
    /// assert_eq!(**cell_box, 0xff);
    /// drop(cell_box);
    ///
    /// assert!(data.try_borrow_mut().is_ok());
    /// ```
    ///
    /// FIXME(breaking): should return evidence of the level (observed, and post). Something
    /// similar to `Allocation` but containing a `LeakBox<T>` instead? Introduce that to the sync
    /// `Bump` allocator as well.
    ///
    /// FIXME(breaking): align with sync `Bump::get` (probably rename get to bump_box).
    pub fn bump_box<'bump, T: 'bump>(
        &'bump self,
    ) -> Result<LeakBox<'bump, MaybeUninit<T>>, Failure> {
        let allocation = self.get_at(self.level())?;
        Ok(unsafe { allocation.uninit() }.into())
    }

    /// Allocate space for a slice of `T`s without initializing any.
    ///
    /// Retrieve individual `MaybeUninit` elements and wrap them as a `LeakBox` to store values. Or
    /// use the slice as backing memory for one of the containers from `without-alloc`. Or manually
    /// initialize them.
    ///
    /// ## Usage
    ///
    /// Quicksort, implemented recursively, requires a maximum of `log n` stack frames in the worst
    /// case when implemented optimally. Since each frame is quite large this is wasteful. We can
    /// use a properly sized buffer instead and implement an iterative solution. (Left as an
    /// exercise to the reader, or see the examples for `without-alloc` where we use such a dynamic
    /// allocation with an inline vector as our stack).
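    ///
    /// A minimal sketch of requesting a slice and initializing it element by element (the length
    /// and element type are arbitrary):
    ///
    /// ```
    /// # use core::mem::MaybeUninit;
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u32; 16]> = Bump::uninit();
    /// let mut slots = bump.bump_array::<u32>(4)?;
    /// for (index, slot) in slots.iter_mut().enumerate() {
    ///     *slot = MaybeUninit::new(index as u32);
    /// }
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```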
    pub fn bump_array<'bump, T: 'bump>(
        &'bump self,
        n: usize,
    ) -> Result<LeakBox<'bump, [MaybeUninit<T>]>, Failure> {
        let layout = Layout::array::<T>(n).map_err(|_| Failure::Exhausted)?;
        let raw = self.alloc(layout).ok_or(Failure::Exhausted)?;
        let slice = ptr::slice_from_raw_parts_mut(raw.cast().as_ptr(), n);
        let uninit = unsafe { &mut *slice };
        Ok(uninit.into())
    }

    /// Get the number of already allocated bytes.
    pub fn level(&self) -> Level {
        Level(self.index.get())
    }

    /// Reset the bump allocator.
    ///
    /// This requires a unique reference to the allocator hence no allocation can be alive at this
    /// point. It will reset the internal count of used bytes to zero.
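    ///
    /// A small sketch of reclaiming the space (the capacity of `4` bytes fits exactly one `u32`):
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let mut bump: Bump<[u8; 4]> = Bump::uninit();
    /// bump.get::<u32>().unwrap();
    /// // Exhausted: a second `u32` does not fit anymore.
    /// assert!(bump.get::<u32>().is_none());
    /// bump.reset();
    /// // After the reset the full capacity is available again.
    /// assert!(bump.get::<u32>().is_some());
    /// ```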
    pub fn reset(&mut self) {
        self.index.set(0)
    }

    fn try_alloc(&self, layout: Layout) -> Option<Allocation<'_>> {
        let consumed = self.index.get();
        match self.try_alloc_at(layout, consumed) {
            Ok(alloc) => Some(alloc),
            Err(Failure::Exhausted) => None,
            Err(Failure::Mismatch { observed: _ }) => {
                unreachable!("Count in Cell concurrently modified, this is UB")
            }
        }
    }

    fn try_alloc_at(
        &self,
        layout: Layout,
        expect_consumed: usize,
    ) -> Result<Allocation<'_>, Failure> {
        assert!(layout.size() > 0);
        let length = mem::size_of_val(&self.data);
        // We want to access a contiguous slice, so cast to a single cell.
        let data: &UnsafeCell<[MaybeUninit<u8>]> =
            unsafe { &*(&self.data as *const _ as *const UnsafeCell<_>) };
        let base_ptr = data.get() as *mut u8;

        let alignment = layout.align();
        let requested = layout.size();

        // Ensure no overflows when calculating offsets within.
        assert!(expect_consumed <= length, "{}/{}", expect_consumed, length);

        let available = length.checked_sub(expect_consumed).unwrap();
        let ptr_to = base_ptr.wrapping_add(expect_consumed);
        let offset = ptr_to.align_offset(alignment);

        if Some(requested) > available.checked_sub(offset) {
            return Err(Failure::Exhausted); // exhausted
        }

        // `requested` is at least `1`, so the bail-out above guarantees `offset < available`.
        assert!(offset < available);
        let at_aligned = expect_consumed.checked_add(offset).unwrap();
        let new_consumed = at_aligned.checked_add(requested).unwrap();
        // new_consumed
        //    = consumed + offset + requested  [lines above]
        //   <= consumed + available  [bail out: exhausted]
        //   <= length  [checked above]
        // So it's ok to store `new_consumed` into the index.
        assert!(new_consumed <= length);
        assert!(at_aligned < length);

        // Try to actually allocate.
        match self.bump(expect_consumed, new_consumed) {
            Ok(()) => (),
            Err(observed) => {
                // Someone else was faster, if you want it then recalculate again.
                return Err(Failure::Mismatch {
                    observed: Level(observed),
                });
            }
        }

        let aligned = unsafe {
            // SAFETY:
            // * `0 <= at_aligned < length` in bounds as checked above.
            (base_ptr as *mut u8).add(at_aligned)
        };

        Ok(Allocation {
            ptr: NonNull::new(aligned).unwrap(),
            lifetime: AllocTime::default(),
            level: Level(new_consumed),
        })
    }

    fn bump(&self, expect: usize, consume: usize) -> Result<(), usize> {
        debug_assert!(consume <= self.capacity());
        debug_assert!(expect <= consume);
        let prev = self.index.get();
        if prev != expect {
            Err(prev)
        } else {
            self.index.set(consume);
            Ok(())
        }
    }
}

impl<T> ops::Deref for Bump<T> {
    type Target = MemBump;
    fn deref(&self) -> &MemBump {
        let from_layout = Layout::for_value(self);
        let data_layout = Layout::new::<MaybeUninit<T>>();
        // Construct a pointer with the metadata of a slice to `data`, but pointing to the whole
        // struct instead. This metadata is later copied to the metadata of `bump` when cast.
        let ptr = self as *const Self as *const MaybeUninit<u8>;
        let mem: *const [MaybeUninit<u8>] = ptr::slice_from_raw_parts(ptr, data_layout.size());
        // Now we have a pointer to MemBump with the length metadata of the data slice.
        let bump = unsafe { &*(mem as *const MemBump) };
        debug_assert_eq!(from_layout, Layout::for_value(bump));
        bump
    }
}

impl<T> ops::DerefMut for Bump<T> {
    fn deref_mut(&mut self) -> &mut MemBump {
        let from_layout = Layout::for_value(self);
        let data_layout = Layout::new::<MaybeUninit<T>>();
        // Construct a pointer with the metadata of a slice to `data`, but pointing to the whole
        // struct instead. This metadata is later copied to the metadata of `bump` when cast.
        let ptr = self as *mut Self as *mut MaybeUninit<u8>;
        let mem: *mut [MaybeUninit<u8>] = ptr::slice_from_raw_parts_mut(ptr, data_layout.size());
        // Now we have a pointer to MemBump with the length metadata of the data slice.
        let bump = unsafe { &mut *(mem as *mut MemBump) };
        debug_assert_eq!(from_layout, Layout::for_value(bump));
        bump
    }
}

#[test]
fn mem_bump_derefs_correctly() {
    let bump = Bump::<usize>::zeroed();
    let mem: &MemBump = &bump;
    assert_eq!(mem::size_of_val(&bump), mem::size_of_val(mem));
}