static_alloc/unsync/bump.rs

use core::{
    alloc::{Layout, LayoutError},
    cell::{Cell, UnsafeCell},
    mem::{self, MaybeUninit},
    ops,
    ptr::{self, NonNull},
};

use alloc_traits::AllocTime;

use crate::bump::{Allocation, Failure, Level};
use crate::leaked::LeakBox;

/// A bump allocator whose storage capacity and alignment is given by `T`.
///
/// This type dereferences to the generic `MemBump` that implements the allocation behavior. Note
/// that `MemBump` is an unsized type. In contrast, this type is sized, so it is possible to
/// construct an instance on the stack or leak one from another bump allocator such as a global
/// one.
///
/// # Usage
///
/// For on-stack usage this works the same as [`Bump`]. Note that it is not possible to use it as
/// a global allocator, though.
///
/// [`Bump`]: ../bump/struct.Bump.html
///
/// One interesting use case for this struct is as scratch space for subroutines. This ensures good
/// locality and cache usage. It also allows such subroutines to use a dynamic amount of space
/// without the need to actually allocate. Contrary to other approaches where the caller provides
/// preallocated memory, it will also not 'leak' private data types. This could be used in handling
/// web requests.
///
/// ```
/// use static_alloc::unsync::Bump;
/// # use static_alloc::unsync::MemBump;
/// # fn subroutine_one(_: &MemBump) {}
/// # fn subroutine_two(_: &MemBump) {}
///
/// let mut stack_buffer: Bump<[usize; 64]> = Bump::uninit();
/// subroutine_one(&stack_buffer);
/// stack_buffer.reset();
/// subroutine_two(&stack_buffer);
/// ```
///
/// Note that you need not use the stack for the `Bump` itself. Indeed, you could allocate a large
/// contiguous instance from the global (synchronized) allocator and then do subsequent allocations
/// from the `Bump` you've obtained. This avoids potential contention on a lock of the global
/// allocator, especially in case you must do many small allocations. If you're writing an
/// allocator yourself you might use this technique as an internal optimization.
///
#[cfg_attr(feature = "alloc", doc = "```")]
#[cfg_attr(not(feature = "alloc"), doc = "```ignore")]
/// use static_alloc::unsync::{Bump, MemBump};
/// # struct Request;
/// # fn handle_request(_: &MemBump, _: Request) {}
/// # fn iterate_recv() -> Option<Request> { None }
/// let mut local_page: Box<Bump<[u64; 64]>> = Box::new(Bump::uninit());
///
/// for request in iterate_recv() {
///     local_page.reset();
///     handle_request(&local_page, request);
/// }
/// ```
#[repr(C)]
pub struct Bump<T> {
    /// The index used in allocation.
    _index: Cell<usize>,
    /// The backing storage for raw allocated data.
    _data: UnsafeCell<MaybeUninit<T>>,
    // Warning: when changing the data layout, you must change `MemBump` as well.
}

/// An error used when one could not re-use raw memory for a bump allocator.
#[derive(Debug)]
pub struct FromMemError {
    _inner: (),
}

/// A dynamically sized allocation block in which any type can be allocated.
#[repr(C)]
pub struct MemBump {
    /// An index into the data field. This index
    /// will always be an index to an element
    /// that has not been allocated into.
    /// Again this is wrapped in a Cell,
    /// to allow modification with just a
    /// &self reference.
    index: Cell<usize>,

    /// The data slice of a node. This slice
    /// may be of any arbitrary size. We use
    /// an UnsafeCell<[MaybeUninit<u8>]> to allow
    /// modification through a &self reference, and
    /// to allow writing uninit padding bytes.
    /// Note that the underlying memory is in one
    /// contiguous `UnsafeCell`, it's only represented
    /// here to make it easier to slice.
    data: UnsafeCell<[MaybeUninit<u8>]>,
}

impl<T> Bump<T> {
    /// Create an allocator with uninitialized memory.
    ///
    /// All allocations coming from the allocator will need to be initialized manually.
    pub fn uninit() -> Self {
        Bump {
            _index: Cell::new(0),
            _data: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Create an allocator with zeroed memory.
    ///
    /// The caller can rely on all allocations to be zeroed.
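    ///
    /// A small sketch of that guarantee:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 4]> = Bump::zeroed();
    /// let byte = bump.alloc(Layout::new::<u8>()).unwrap();
    /// // The backing memory was zeroed at construction and `alloc` does not write to it.
    /// assert_eq!(unsafe { byte.as_ptr().read() }, 0);
    /// ```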
    pub fn zeroed() -> Self {
        Bump {
            _index: Cell::new(0),
            _data: UnsafeCell::new(MaybeUninit::zeroed()),
        }
    }
}

#[cfg(feature = "alloc")]
impl MemBump {
    /// Allocate some space to use for a bump allocator.
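    ///
    /// A short usage sketch:
    ///
    /// ```
    /// use static_alloc::unsync::MemBump;
    ///
    /// let bump = MemBump::new(256);
    /// assert_eq!(bump.capacity(), 256);
    /// ```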
    pub fn new(capacity: usize) -> alloc::boxed::Box<Self> {
        let layout = Self::layout_from_size(capacity).expect("Bad layout");
        let ptr = NonNull::new(unsafe { alloc::alloc::alloc(layout) })
            .unwrap_or_else(|| alloc::alloc::handle_alloc_error(layout));
        let ptr = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), capacity);
        unsafe { alloc::boxed::Box::from_raw(ptr as *mut MemBump) }
    }
}

impl MemBump {
    /// Initialize a bump allocator from existing memory.
    ///
    /// # Usage
    ///
    /// ```
    /// use core::mem::MaybeUninit;
    /// use static_alloc::unsync::MemBump;
    ///
    /// let mut backing = [MaybeUninit::new(0); 128];
    /// let alloc = MemBump::from_mem(&mut backing)?;
    ///
    /// # Ok::<(), static_alloc::unsync::FromMemError>(())
    /// ```
    pub fn from_mem(mem: &mut [MaybeUninit<u8>]) -> Result<LeakBox<'_, Self>, FromMemError> {
        let header = Self::header_layout();
        let offset = mem.as_ptr().align_offset(header.align());
        // Align the memory for the header.
        let mem = mem.get_mut(offset..).ok_or(FromMemError { _inner: () })?;
        mem.get_mut(..header.size())
            .ok_or(FromMemError { _inner: () })?
            .fill(MaybeUninit::new(0));
        Ok(unsafe { Self::from_mem_unchecked(mem) })
    }

    /// Construct a bump allocator from existing memory without reinitializing.
    ///
    /// This allows the caller to (unsafely) fall back to manual borrow checking of the memory
    /// region between regions of allocator use.
    ///
    /// # Safety
    ///
    /// The memory must contain data that has been previously wrapped as a `MemBump`, exactly. The
    /// only endorsed sound form of obtaining such memory is [`MemBump::into_mem`].
    ///
    /// Warning: Any _use_ of the memory will have invalidated all pointers to allocated objects,
    /// more specifically the provenance of these pointers is no longer valid! You _must_ derive
    /// new pointers based on their offsets.
    pub unsafe fn from_mem_unchecked(mem: &mut [MaybeUninit<u8>]) -> LeakBox<'_, Self> {
        let raw = Self::from_aligned_mem(mem);
        LeakBox::from_mut_unchecked(raw)
    }

    /// Cast pre-initialized, aligned memory into a bump allocator.
    #[allow(unused_unsafe)]
    unsafe fn from_aligned_mem(mem: &mut [MaybeUninit<u8>]) -> &mut Self {
        let header = Self::header_layout();
        // debug_assert!(mem.len() >= header.size());
        // debug_assert!(mem.as_ptr().align_offset(header.align()) == 0);

        let datasize = mem.len() - header.size();
        // Round down to the header alignment! The whole struct will occupy memory according to its
        // natural alignment. We must be prepared for the `pad_to_align` so to speak.
        let datasize = datasize - datasize % header.align();
        debug_assert!(Self::layout_from_size(datasize).map_or(false, |l| l.size() <= mem.len()));

        let raw = mem.as_mut_ptr() as *mut u8;
        // Turn it into a fat pointer with correct metadata for a `MemBump`.
        // Safety:
        // - The data is writable as we hold a unique reference to it.
        unsafe { &mut *(ptr::slice_from_raw_parts_mut(raw, datasize) as *mut MemBump) }
    }

    /// Unwrap the memory owned by an unsized bump allocator.
    ///
    /// This releases the memory used by the allocator, similar to `Box::leak`, with the difference
    /// of operating on unique references instead. It is necessary to own the bump allocator due to
    /// internal state contained within the memory region that the caller can subsequently
    /// invalidate.
    ///
    /// # Example
    ///
    /// ```rust
    /// use core::mem::MaybeUninit;
    /// use static_alloc::unsync::MemBump;
    ///
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing)?;
    /// let memory: &mut [_] = MemBump::into_mem(alloc);
    /// assert!(memory.len() <= 128, "Not guaranteed to use all memory");
    ///
    /// // Safety: We have not touched the memory itself.
    /// unsafe { MemBump::from_mem_unchecked(memory) };
    /// # Ok::<(), static_alloc::unsync::FromMemError>(())
    /// ```
    pub fn into_mem<'lt>(this: LeakBox<'lt, Self>) -> &'lt mut [MaybeUninit<u8>] {
        let layout = Layout::for_value(&*this);
        let mem_pointer = LeakBox::into_raw(this) as *mut MaybeUninit<u8>;
        unsafe { &mut *ptr::slice_from_raw_parts_mut(mem_pointer, layout.size()) }
    }

    /// Returns the layout for the `header` of a `MemBump`.
    /// The definition of `header` in this case is all the
    /// fields that come **before** the `data` field.
    /// If any of the fields of a MemBump are modified,
    /// this function likely has to be modified too.
    fn header_layout() -> Layout {
        Layout::new::<Cell<usize>>()
    }

    /// Returns the layout for an array with the size of `size`.
    fn data_layout(size: usize) -> Result<Layout, LayoutError> {
        Layout::array::<UnsafeCell<MaybeUninit<u8>>>(size)
    }

    /// Returns a layout for a MemBump where the length of the data field is `size`.
    /// This relies on the two functions defined above.
    pub(crate) fn layout_from_size(size: usize) -> Result<Layout, LayoutError> {
        let data_tail = Self::data_layout(size)?;
        let (layout, _) = Self::header_layout().extend(data_tail)?;
        Ok(layout.pad_to_align())
    }

    /// Returns the capacity of this `MemBump`.
    /// This is how many *bytes* can be allocated
    /// within this node.
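    ///
    /// For example, the capacity of a `Bump<[u8; 32]>` is the size of its storage type:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u8; 32]> = Bump::uninit();
    /// assert_eq!(bump.capacity(), 32);
    /// ```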
    pub const fn capacity(&self) -> usize {
        // Safety: just gets the pointer metadata `len` without invalidating any provenance,
        // apart from the pointer use itself. This may be replaced by a safe `pointer::len` as
        // soon as stable (#71146) and const, which would avoid any pointer use.
        unsafe { (*(self.data.get() as *const [UnsafeCell<u8>])).len() }
    }

    /// Get a raw pointer to the data.
    ///
    /// Note that *any* use of the pointer must be done with extreme care as it may invalidate
    /// existing references into the allocated region. Furthermore, bytes may not be initialized.
    /// The length of the valid region is [`MemBump::capacity`].
    ///
    /// Prefer [`MemBump::get_unchecked`] for reconstructing a prior allocation.
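    ///
    /// A small sketch relating the base pointer to the first allocation:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 16]> = Bump::uninit();
    /// let base = bump.data_ptr().as_ptr() as usize;
    /// let first = bump.alloc(Layout::new::<u8>()).unwrap();
    /// // A one-byte allocation needs no alignment padding, so it starts at the base.
    /// assert_eq!(first.as_ptr() as usize, base);
    /// ```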
    pub fn data_ptr(&self) -> NonNull<u8> {
        NonNull::new(self.data.get() as *mut u8).expect("from a reference")
    }

    /// Allocate a region of memory.
    ///
    /// This is a safe alternative to [GlobalAlloc::alloc](#impl-GlobalAlloc).
    ///
    /// # Panics
    /// This function will panic if the requested layout has a size of `0`. For use in a
    /// `GlobalAlloc` such a request is explicitly forbidden and would allow any behaviour, so we
    /// strictly check it instead.
    ///
    /// FIXME(breaking): this could well be a `Result<_, Failure>`.
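    ///
    /// # Usage
    ///
    /// A short usage sketch:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 64]> = Bump::uninit();
    /// let layout = Layout::new::<u32>();
    /// let ptr = bump.alloc(layout).expect("enough space for a `u32`");
    /// // The returned pointer respects the requested alignment.
    /// assert_eq!(ptr.as_ptr() as usize % layout.align(), 0);
    /// ```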
    pub fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        Some(self.try_alloc(layout)?.ptr)
    }

    /// Try to allocate some layout with a precise base location.
    ///
    /// The base location is the currently consumed byte count, without correction for the
    /// alignment of the allocation. This will succeed only if the allocation can be placed
    /// exactly at the expected location.
    ///
    /// # Panics
    /// This function may panic if the provided `level` is from a different slab.
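    ///
    /// # Usage
    ///
    /// A short sketch of how a stale level is rejected:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 16]> = Bump::uninit();
    /// let level = bump.level();
    /// // Nothing was allocated in between, so the base location still matches.
    /// assert!(bump.alloc_at(Layout::new::<u8>(), level).is_ok());
    /// // The same level is now out of date and the allocation is refused.
    /// assert!(bump.alloc_at(Layout::new::<u8>(), level).is_err());
    /// ```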
    pub fn alloc_at(&self, layout: Layout, level: Level) -> Result<NonNull<u8>, Failure> {
        let Allocation { ptr, .. } = self.try_alloc_at(layout, level.0)?;
        Ok(ptr)
    }

    /// Get an allocation for a specific type.
    ///
    /// It is not yet initialized but provides an interface for that initialization.
    ///
    /// ## Usage
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::cell::{Ref, RefCell};
    ///
    /// let slab: Bump<[Ref<'static, usize>; 1]> = Bump::uninit();
    /// let data = RefCell::new(0xff);
    ///
    /// // We can place a `Ref` here but we did not yet.
    /// let alloc = slab.get::<Ref<usize>>().unwrap();
    /// let cell_ref = unsafe {
    ///     alloc.leak(data.borrow())
    /// };
    ///
    /// assert_eq!(**cell_ref, 0xff);
    /// ```
    ///
    /// FIXME(breaking): this could well be a `Result<_, Failure>`.
    pub fn get<V>(&self) -> Option<Allocation<V>> {
        let alloc = self.try_alloc(Layout::new::<V>())?;
        Some(Allocation {
            lifetime: alloc.lifetime,
            level: alloc.level,
            ptr: alloc.ptr.cast(),
        })
    }

    /// Get an allocation for a specific type at a specific level.
    ///
    /// See [`get`] for usage. This can be used to ensure that data is contiguous in concurrent
    /// access to the allocator.
    ///
    /// [`get`]: #method.get
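    ///
    /// A short sketch of allocating two adjacent values:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// let bump: Bump<[u8; 8]> = Bump::uninit();
    /// let first = bump.get_at::<u8>(bump.level())?;
    /// let second = bump.get_at::<u8>(bump.level())?;
    /// // Requesting the observed level places the two bytes back to back.
    /// assert_eq!(first.ptr.as_ptr() as usize + 1, second.ptr.as_ptr() as usize);
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```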
    pub fn get_at<V>(&self, level: Level) -> Result<Allocation<V>, Failure> {
        let alloc = self.try_alloc_at(Layout::new::<V>(), level.0)?;
        Ok(Allocation {
            lifetime: alloc.lifetime,
            level: alloc.level,
            ptr: alloc.ptr.cast(),
        })
    }

    /// Reacquire an allocation that has been performed previously.
    ///
    /// This call won't invalidate any other allocations.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that no other pointers to this prior allocation are alive, or can
    /// be created. This is guaranteed if the allocation was performed previously, has since been
    /// discarded, and `reset` can not be called (for example, the caller holds a shared
    /// reference).
    ///
    /// # Usage
    ///
    /// ```
    /// # use core::mem::MaybeUninit;
    /// # use static_alloc::unsync::MemBump;
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing).unwrap();
    /// // Create an initial allocation.
    /// let level = alloc.level();
    /// let allocation = alloc.get_at::<usize>(level)?;
    /// let address = allocation.ptr.as_ptr() as usize;
    /// // pretend to lose the owning pointer of the allocation.
    /// let _ = { allocation };
    ///
    /// // Restore our access.
    /// let renewed = unsafe { alloc.get_unchecked::<usize>(level) };
    /// assert_eq!(address, renewed.ptr.as_ptr() as usize);
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```
    ///
    /// Critically, you can rely on *other* allocations to stay valid.
    ///
    /// ```
    /// # use core::mem::MaybeUninit;
    /// # use static_alloc::{leaked::LeakBox, unsync::MemBump};
    /// # let mut backing = [MaybeUninit::new(0); 128];
    /// # let alloc = MemBump::from_mem(&mut backing).unwrap();
    /// let level = alloc.level();
    /// alloc.get_at::<usize>(level)?;
    ///
    /// let other_val = alloc.bump_box()?;
    /// let other_val = LeakBox::write(other_val, 0usize);
    ///
    /// let renew = unsafe { alloc.get_unchecked::<usize>(level) };
    /// assert_eq!(*other_val, 0); // Not UB!
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```
    pub unsafe fn get_unchecked<V>(&self, level: Level) -> Allocation<V> {
        debug_assert!(level.0 < self.capacity());
        let ptr = self.data_ptr().as_ptr();
        // Safety: guaranteed by the caller.
        let alloc = ptr.offset(level.0 as isize) as *mut V;

        Allocation {
            level,
            lifetime: AllocTime::default(),
            ptr: NonNull::new_unchecked(alloc),
        }
    }

    /// Allocate space for one `T` without initializing it.
    ///
    /// Note that the returned `MaybeUninit` can be unwrapped from `LeakBox`. Or you can store an
    /// arbitrary value and ensure it is safely dropped before the borrow ends.
    ///
    /// ## Usage
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::cell::RefCell;
    /// use static_alloc::leaked::LeakBox;
    ///
    /// let slab: Bump<[usize; 4]> = Bump::uninit();
    /// let data = RefCell::new(0xff);
    ///
    /// let slot = slab.bump_box().unwrap();
    /// let cell_box = LeakBox::write(slot, data.borrow());
    ///
    /// assert_eq!(**cell_box, 0xff);
    /// drop(cell_box);
    ///
    /// assert!(data.try_borrow_mut().is_ok());
    /// ```
    ///
    /// FIXME(breaking): should return evidence of the level (observed, and post). Something
    /// similar to `Allocation` but containing a `LeakBox<T>` instead? Introduce that to the sync
    /// `Bump` allocator as well.
    ///
    /// FIXME(breaking): align with sync `Bump::get` (probably rename get to bump_box).
    pub fn bump_box<'bump, T: 'bump>(
        &'bump self,
    ) -> Result<LeakBox<'bump, MaybeUninit<T>>, Failure> {
        let allocation = self.get_at(self.level())?;
        Ok(unsafe { allocation.uninit() }.into())
    }

    /// Allocate space for a slice of `T`s without initializing any.
    ///
    /// Retrieve individual `MaybeUninit` elements and wrap them as a `LeakBox` to store values. Or
    /// use the slice as backing memory for one of the containers from `without-alloc`. Or manually
    /// initialize them.
    ///
    /// ## Usage
    ///
    /// Quicksort, implemented recursively, requires a maximum of `log n` stack frames in the worst
    /// case when implemented optimally. Since each frame is quite large this is wasteful. We can
    /// use a properly sized buffer instead and implement an iterative solution. (Left as an
    /// exercise to the reader, or see the examples for `without-alloc` where we use such a dynamic
    /// allocation with an inline vector as our stack).
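    ///
    /// A minimal usage sketch (independent of the quicksort idea above):
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// # use core::mem::MaybeUninit;
    /// let bump: Bump<[u16; 16]> = Bump::uninit();
    /// let mut slots = bump.bump_array::<u16>(4)?;
    /// for (index, slot) in slots.iter_mut().enumerate() {
    ///     *slot = MaybeUninit::new(index as u16);
    /// }
    /// // Each slot was written above, so reading the first one back is sound.
    /// assert_eq!(unsafe { slots[0].assume_init() }, 0);
    /// # Ok::<_, static_alloc::bump::Failure>(())
    /// ```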
    pub fn bump_array<'bump, T: 'bump>(
        &'bump self,
        n: usize,
    ) -> Result<LeakBox<'bump, [MaybeUninit<T>]>, Failure> {
        let layout = Layout::array::<T>(n).map_err(|_| Failure::Exhausted)?;
        let raw = self.alloc(layout).ok_or(Failure::Exhausted)?;
        let slice = ptr::slice_from_raw_parts_mut(raw.cast().as_ptr(), n);
        let uninit = unsafe { &mut *slice };
        Ok(uninit.into())
    }

    /// Get the number of already allocated bytes.
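    ///
    /// For example, a level observed before an allocation no longer matches afterwards:
    ///
    /// ```
    /// # use static_alloc::unsync::Bump;
    /// use core::alloc::Layout;
    ///
    /// let bump: Bump<[u8; 8]> = Bump::uninit();
    /// let before = bump.level();
    /// bump.alloc(Layout::new::<u32>()).unwrap();
    /// let after = bump.level();
    /// // Only the level observed after the allocation still matches.
    /// assert!(bump.alloc_at(Layout::new::<u8>(), before).is_err());
    /// assert!(bump.alloc_at(Layout::new::<u8>(), after).is_ok());
    /// ```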
    pub fn level(&self) -> Level {
        Level(self.index.get())
    }

    /// Reset the bump allocator.
    ///
    /// This requires a unique reference to the allocator, hence no allocation can be alive at this
    /// point. It will reset the internal count of used bytes to zero.
    pub fn reset(&mut self) {
        self.index.set(0)
    }

    fn try_alloc(&self, layout: Layout) -> Option<Allocation<'_>> {
        let consumed = self.index.get();
        match self.try_alloc_at(layout, consumed) {
            Ok(alloc) => return Some(alloc),
            Err(Failure::Exhausted) => return None,
            Err(Failure::Mismatch { observed: _ }) => {
                unreachable!("Count in Cell concurrently modified, this is UB")
            }
        }
    }

    fn try_alloc_at(
        &self,
        layout: Layout,
        expect_consumed: usize,
    ) -> Result<Allocation<'_>, Failure> {
        assert!(layout.size() > 0);
        let length = mem::size_of_val(&self.data);
        // We want to access the contiguous slice, so cast to a single cell.
        let data: &UnsafeCell<[MaybeUninit<u8>]> =
            unsafe { &*(&self.data as *const _ as *const UnsafeCell<_>) };
        let base_ptr = data.get() as *mut u8;

        let alignment = layout.align();
        let requested = layout.size();

        // Ensure no overflows when calculating offsets within.
        assert!(expect_consumed <= length);

        let available = length.checked_sub(expect_consumed).unwrap();
        let ptr_to = base_ptr.wrapping_add(expect_consumed);
        let offset = ptr_to.align_offset(alignment);

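        // `checked_sub` is `None` when the alignment padding alone exceeds the remaining
        // space; since `Some(_) > None` for `Option`, that case is also treated as exhausted.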
        if Some(requested) > available.checked_sub(offset) {
            return Err(Failure::Exhausted); // exhausted
        }

        // `size` can not be zero, saturation will thus always make this true.
        assert!(offset < available);
        let at_aligned = expect_consumed.checked_add(offset).unwrap();
        let new_consumed = at_aligned.checked_add(requested).unwrap();
        // new_consumed
        //    = consumed + offset + requested  [lines above]
        //   <= consumed + available  [bail out: exhausted]
        //   <= length  [first line of loop]
        // So it's ok to store `allocated` into `consumed`.
        assert!(new_consumed <= length);
        assert!(at_aligned < length);

        // Try to actually allocate.
        match self.bump(expect_consumed, new_consumed) {
            Ok(()) => (),
            Err(observed) => {
                // Someone else was faster, if you want it then recalculate again.
                return Err(Failure::Mismatch {
                    observed: Level(observed),
                });
            }
        }

        let aligned = unsafe {
            // SAFETY:
            // * `0 <= at_aligned < length` in bounds as checked above.
            (base_ptr as *mut u8).add(at_aligned)
        };

        Ok(Allocation {
            ptr: NonNull::new(aligned).unwrap(),
            lifetime: AllocTime::default(),
            level: Level(new_consumed),
        })
    }

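    /// Try to advance the allocation index from `expect` to `consume`, reporting the
    /// actually observed value on mismatch. This mirrors a compare-and-swap, although
    /// with a `Cell` there is no real concurrency and a mismatch simply means the
    /// provided level was stale.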
    fn bump(&self, expect: usize, consume: usize) -> Result<(), usize> {
        debug_assert!(consume <= self.capacity());
        debug_assert!(expect <= consume);
        let prev = self.index.get();
        if prev != expect {
            Err(prev)
        } else {
            self.index.set(consume);
            Ok(())
        }
    }
}

impl<T> ops::Deref for Bump<T> {
    type Target = MemBump;
    fn deref(&self) -> &MemBump {
        let from_layout = Layout::for_value(self);
        let data_layout = Layout::new::<MaybeUninit<T>>();
        // Construct a pointer with the metadata of a slice over `data`, but pointing to the whole
        // struct instead. This metadata is later copied to the metadata of `bump` when cast.
        let ptr = self as *const Self as *const MaybeUninit<u8>;
        let mem: *const [MaybeUninit<u8>] = ptr::slice_from_raw_parts(ptr, data_layout.size());
        // Now we have a pointer to MemBump with the length metadata of the data slice.
        let bump = unsafe { &*(mem as *const MemBump) };
        debug_assert_eq!(from_layout, Layout::for_value(bump));
        bump
    }
}

impl<T> ops::DerefMut for Bump<T> {
    fn deref_mut(&mut self) -> &mut MemBump {
        let from_layout = Layout::for_value(self);
        let data_layout = Layout::new::<MaybeUninit<T>>();
        // Construct a pointer with the metadata of a slice over `data`, but pointing to the whole
        // struct instead. This metadata is later copied to the metadata of `bump` when cast.
        let ptr = self as *mut Self as *mut MaybeUninit<u8>;
        let mem: *mut [MaybeUninit<u8>] = ptr::slice_from_raw_parts_mut(ptr, data_layout.size());
        // Now we have a pointer to MemBump with the length metadata of the data slice.
        let bump = unsafe { &mut *(mem as *mut MemBump) };
        debug_assert_eq!(from_layout, Layout::for_value(bump));
        bump
    }
}

#[test]
fn mem_bump_derefs_correctly() {
    let bump = Bump::<usize>::zeroed();
    let mem: &MemBump = &bump;
    assert_eq!(mem::size_of_val(&bump), mem::size_of_val(mem));
}