secmem_alloc/sec_alloc.rs

//! An allocator designed to handle security sensitive allocations, i.e. heap
//! memory with confidential contents.
//!
//! This can be used to store e.g. passwords and secret cryptographic keys in
//! memory. It is not designed to be performant or light on system resources.
//!
//! The allocator tries to never get swapped out using `mlock` on Linux. The
//! amount of memory that can be `mlock`ed is very limited for unprivileged
//! processes, so use with care. Allocating too much memory using this allocator
//! (exceeding the `mlock` limit) causes the program to OOM abort using
//! [`alloc::alloc::handle_alloc_error`]. A process with `CAP_SYS_RESOURCE` can
//! change the `mlock` limit using `setrlimit` from libc (available in Rust
//! through the `secmem-proc` crate).
//!
//! Various security measures are implemented:
//! - Zeroization of memory on drop.
//! - Non-swappable locked memory.
//! - Memory is not in the program break or global allocator memory pool,
//!   therefore at a less predictable address (even when the address to memory
//!   in the global allocator leaks). This *could* make some exploits harder,
//!   but not impossible.
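//!
//! # Example
//!
//! A minimal usage sketch, not a doctest: the exact public path of
//! `SecStackSinglePageAlloc` and the `allocator_api2` `Box` import are
//! assumptions here and may differ, hence the block is marked `ignore`.
//!
//! ```ignore
//! use allocator_api2::boxed::Box;
//!
//! // One mlocked page backs every allocation made through this allocator.
//! let allocator = SecStackSinglePageAlloc::new().expect("could not allocate locked page");
//! // The key material lives in the locked page and is zeroized on drop.
//! let key = Box::new_in([0u8; 32], &allocator);
//! drop(key); // memory is wiped before it can be reused
//! ```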

use crate::internals::mem;
use crate::util::{
    align_up_ptr_mut, align_up_usize, is_aligned_ptr, large_offset_from, nonnull_as_mut_ptr,
    unlikely,
};
use crate::zeroize::zeroize_mem;
use allocator_api2::alloc::{AllocError, Allocator};
use core::alloc::Layout;
use core::cell::Cell;
use core::ptr::{self, NonNull};
use mirai_annotations::debug_checked_precondition;

/// Memory allocator for confidential memory. See the module level
/// documentation.
///
/// Memory allocator which is backed by a single page of memory. Allocation
/// works like in a bump allocator. This is very efficient for stacked
/// allocations, i.e. when a later allocation is dropped before an earlier one.
/// If allocations are deallocated in a different order, then memory cannot be
/// reused until everything is deallocated.
///
/// Since the allocator is backed by a single page, only 4 KiB of memory (on
/// Linux with default configuration) can be allocated with a single allocator
/// instance. Exceeding this limit causes the allocator to error on allocation
/// requests!
///
/// This is not a zero sized type and should not be dropped before all its
/// memory is deallocated. The same allocator instance must be used for
/// allocation and deallocation.
///
/// # Panics
/// If debug assertions are enabled, *some* of the safety requirements for using
/// the allocator are checked. In addition, memory leaks are then checked (at
/// drop). Therefore, memory allocated with this allocator should not leak!
///
/// # Errors
/// Allocation functions return errors when the requested allocation does not
/// fit what is left of the backing page of memory. In addition, zero sized
/// allocations are not allowed (but cause only an allocation error, no UB like
/// with `GlobalAlloc`).
///
/// # Memory fragmentation
/// This allocator is basically a bump allocator, and hence suffers from memory
/// fragmentation: memory can only be reused once all allocations are
/// deallocated, or if the allocator is used in a strictly first-in last-out
/// (FILO) stack-like manner with at most 8 byte aligned allocations. When
/// the allocator is used for a bunch of allocations which need to live for
/// approximately the same lifetime, memory fragmentation is not an issue.
/// Otherwise, it might be a good idea to use the allocator in a FILO stack-like
/// manner, that is, only ever deallocate, shrink or grow the
/// last created allocation, and request at most 8 byte alignment for all but
/// the first allocation.
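///
/// # Example
///
/// A sketch of the FILO usage pattern described above; the import paths are
/// assumptions, hence the block is marked `ignore`:
///
/// ```ignore
/// use allocator_api2::boxed::Box;
///
/// let allocator = SecStackSinglePageAlloc::new().expect("could not allocate locked page");
/// let first = Box::new_in([0u8; 64], &allocator);
/// let second = Box::new_in([0u8; 64], &allocator);
/// // Drop in reverse creation order so the bump pointer can rewind and the
/// // memory becomes immediately reusable.
/// drop(second);
/// drop(first);
/// ```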
pub struct SecStackSinglePageAlloc {
    /// The number of bytes currently allocated.
    bytes: Cell<usize>,
    /// Page of allocated mlocked memory.
    page: mem::Page,
    // /// Top of the stack, i.e. pointer to the first byte of available memory.
    // stack_ptr: Cell<NonNull<u8>>,
    /// Top of the stack, i.e. offset to the first byte of available memory.
    ///
    /// This is at most the page size.
    /// Page size always fits an `isize` so this can safely be cast to an
    /// `isize`.
    // SAFETY INVARIANT: always a multiple of 8
    // SAFETY INVARIANT: at most page size (`self.page.page_size()`)
    stack_offset: Cell<usize>,
}

impl SecStackSinglePageAlloc {
    #[cfg(test)]
    /// Panic on inconsistent internal state.
    fn consistency_check(&self) {
        let bytes = self.bytes.get();
        let stack_offset = self.stack_offset.get();
        assert!(
            stack_offset % 8 == 0,
            "safety critical SecStackSinglePageAlloc invariant: offset alignment"
        );
        assert!(
            stack_offset <= self.page.page_size(),
            "safety critical SecStackSinglePageAlloc invariant: offset in page size"
        );
        assert!(
            is_aligned_ptr(self.page.as_ptr(), 8),
            "safety critical SecStackSinglePageAlloc invariant: page alignment"
        );
        assert!(
            bytes <= stack_offset,
            "critical SecStackSinglePageAlloc consistency: allocated bytes in offset"
        );
        assert!(
            bytes % 8 == 0,
            "SecStackSinglePageAlloc consistency: allocated bytes 8 multiple"
        );
    }
}

#[cfg(debug_assertions)]
impl Drop for SecStackSinglePageAlloc {
    // panic in drop leads to abort, so we better just abort
    // however, abort is only stably available with `std` (not `core`)
    #[cfg(feature = "std")]
    fn drop(&mut self) {
        // check for leaks
        if self.bytes.get() != 0 {
            std::process::abort();
        }
        // check that the entire page contains only zeroized memory
        let page_ptr: *const u8 = self.page.as_ptr();
        for offset in 0..self.page.page_size() {
            // SAFETY: `page_ptr + offset` still points into the memory page, but `offset`
            // doesn't necessarily fit `isize` so we have to use `wrapping_add`
            let byte = unsafe { page_ptr.wrapping_add(offset).read() };
            if byte != 0 {
                std::process::abort();
            }
        }
    }

    #[cfg(not(feature = "std"))]
    fn drop(&mut self) {
        // check for leaks
        debug_assert!(self.bytes.get() == 0);
        // check that the entire page contains only zeroized memory
        let page_ptr: *const u8 = self.page.as_ptr();
        for offset in 0..self.page.page_size() {
            // SAFETY: `page_ptr + offset` still points into the memory page, but `offset`
            // doesn't necessarily fit `isize` so we have to use `wrapping_add`
            let byte = unsafe { page_ptr.wrapping_add(offset).read() };
            assert!(byte == 0);
        }
    }
}

#[cfg(any(unix, windows))]
impl SecStackSinglePageAlloc {
    /// Create a new `SecStackSinglePageAlloc` allocator. This allocates one
    /// page of memory to be used by the allocator. This page is only
    /// released once the allocator is dropped.
    ///
    /// # Errors
    /// The function returns a `PageAllocError` if no page could be allocated
    /// by the system or if the page could not be locked. The latter can be
    /// caused either by memory starvation of the system or by the process
    /// exceeding the amount of memory it is allowed to lock.
    ///
    /// For unprivileged processes, the amount of memory that can be locked is
    /// very limited on Linux. A process with `CAP_SYS_RESOURCE` can change the
    /// `mlock` limit using `setrlimit` from libc.
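    ///
    /// # Example
    ///
    /// A sketch of handling the error case (type paths are assumptions, hence
    /// `ignore`):
    ///
    /// ```ignore
    /// match SecStackSinglePageAlloc::new() {
    ///     Ok(allocator) => { /* use the allocator */ }
    ///     Err(_) => eprintln!("could not allocate or lock a memory page"),
    /// }
    /// ```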
    pub fn new() -> Result<Self, mem::PageAllocError> {
        let page = mem::Page::alloc_new_lock()?;
        //let stack_ptr = page.page_ptr_nonnull();
        Ok(Self {
            bytes: Cell::new(0),
            page,
            //stack_ptr,
            stack_offset: Cell::new(0),
        })
    }
}

impl SecStackSinglePageAlloc {
    /// Returns `true` iff `ptr` points to the final allocation on the memory
    /// page of `self`.
    ///
    /// # SAFETY
    /// This function cannot cause UB on its own, but for the result to be
    /// correct and the function not to panic, the following statements must
    /// hold:
    /// - `ptr` must have been allocated with the allocator `self`
    /// - `rounded_size` must be a size fitting the allocation pointed to by
    ///   `ptr` and must be a multiple of 8 (note that allocation sizes are
    ///   always a multiple of 8)
    ///
    /// In addition, `rounded_size` must be the maximal value satisfying the
    /// second point. If this cannot be assured then the result can be
    /// `false` even if the allocation pointed to by `ptr` is actually the
    /// final allocation.
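    ///
    /// For example, if `self.stack_offset` is 40 and `ptr` points at page
    /// offset 16 with `rounded_size == 24`, the allocation ends exactly at
    /// offset 40 and is therefore the final allocation.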
    fn ptr_is_last_allocation(&self, ptr: NonNull<u8>, rounded_size: usize) -> bool {
        // SAFETY: this doesn't overflow as `ptr` was returned by a previous allocation
        // request so lies in our memory page, so `ptr` is larger than the page
        // pointer
        let alloc_start_offset = unsafe { large_offset_from(ptr.as_ptr(), self.page.as_ptr()) };
        // this doesn't overflow since `rounded_size` fits the allocation pointed to by
        // `ptr`
        let alloc_end_offset = alloc_start_offset + rounded_size;
        // `alloc_end_offset` is the stack offset directly after its allocation
        alloc_end_offset == self.stack_offset.get()
    }

    /// Create a zero-sized allocation.
    ///
    /// # Safety
    /// `align` must be a power of 2
    #[must_use]
    pub unsafe fn allocate_zerosized(align: usize) -> NonNull<[u8]> {
        debug_checked_precondition!(align.is_power_of_two());

        // SAFETY: creating a pointer is safe, using it is not; `dangling` is non-null
        let dangling: *mut u8 = ptr::without_provenance_mut(align);
        let zerosized_slice: *mut [u8] = ptr::slice_from_raw_parts_mut(dangling, 0);
        // SAFETY: zerosized_slice has a non-null pointer part since `align` > 0
        unsafe { NonNull::new_unchecked(zerosized_slice) }
    }

    /// Reallocate allocation into a smaller one.
    ///
    /// This won't try to reuse the existing allocation but forces a new
    /// allocation. Useful if the existing allocation e.g. doesn't have the
    /// correct alignment.
    ///
    /// [`Self::shrink`] falls back to this function if the current allocation
    /// cannot be reused.
    ///
    /// # Safety
    /// Safety contract of this function is identical to that of
    /// [`Allocator::shrink`].
    pub unsafe fn realloc_shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // like the default implementation of `Allocator::shrink` in the standard
        // library
        debug_checked_precondition!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        let new_ptr = self.allocate(new_layout)?;

        // SAFETY: because `new_layout.size()` must be lower than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for
        // reads and writes for `new_layout.size()` bytes. Also, because the old
        // allocation wasn't yet deallocated, it cannot overlap `new_ptr`. Thus,
        // the call to `copy_nonoverlapping` is safe. The safety contract for
        // `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), nonnull_as_mut_ptr(new_ptr), new_layout.size());
            self.deallocate(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Reallocate allocation into a larger one.
    ///
    /// This won't try to reuse the existing allocation but forces a new
    /// allocation. Useful if the existing allocation e.g. doesn't have the
    /// correct alignment, or is not the last one on the memory page.
    ///
    /// [`Self::grow`] and [`Self::grow_zeroed`] fall back to this function if
    /// the current allocation cannot be reused.
    ///
    /// # Safety
    /// Safety contract of this function is identical to that of
    /// [`Allocator::grow`].
    pub unsafe fn realloc_grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // like the default implementation of `Allocator::grow` in the standard library
        debug_checked_precondition!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        let new_ptr = self.allocate(new_layout)?;

        // SAFETY: because `new_layout.size()` must be greater than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for
        // reads and writes for `old_layout.size()` bytes. Also, because the old
        // allocation wasn't yet deallocated, it cannot overlap `new_ptr`. Thus,
        // the call to `copy_nonoverlapping` is safe. The safety contract for
        // `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), nonnull_as_mut_ptr(new_ptr), old_layout.size());
            self.deallocate(ptr, old_layout);
        }

        Ok(new_ptr)
    }
}

unsafe impl Allocator for SecStackSinglePageAlloc {
    // The backing memory is zeroed on deallocation and `mmap` initialises the
    // memory with zeros so every allocation has zeroed memory.
    // We always return a multiple of 8 bytes and a minimal alignment of 8. This
    // allows for fast zeroization and reduces the chance for (external) memory
    // fragmentation, at the cost of increased internal memory fragmentation.
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        debug_checked_precondition!(layout.align().is_power_of_two());

        // catch zero sized allocations immediately so we do not have to bother with
        // them
        if layout.size() == 0 {
            // SAFETY: `layout.align()` is a power of 2 since that is required by the
            // `Layout` type
            return Ok(unsafe { Self::allocate_zerosized(layout.align()) });
        }
        // if rounding up to a multiple of 8 wraps a usize, the result will be 0 and
        // layout clearly doesn't fit our page, so we return an error
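        // Worked example (illustrative): a requested size of 9 rounds up to 16
        // and 16 stays 16, while any size greater than `usize::MAX - 7` wraps
        // around to 0 and is rejected by the check below.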
        let rounded_req_size = layout.size().wrapping_add(7usize) & !7usize;
        if unlikely(rounded_req_size == 0) {
            return Err(AllocError);
        }
        // error if we do not have enough space for this allocation
        if rounded_req_size > self.page.page_size() - self.stack_offset.get() {
            return Err(AllocError);
        }

        // SAFETY: `self.stack_offset` is at most the page size so fits an `isize` and
        // the addition does not wrap.
        // SAFETY: `self.stack_offset` is at most the page size so the result of `add`
        // still points into the mapped memory page or one byte after it
        // SAFETY: hence the use of `add` is sound
        let stack_ptr: *mut u8 = unsafe { self.page.as_ptr_mut().add(self.stack_offset.get()) };
        // also the pointer is 8 byte aligned since `self.stack_offset` is a multiple of
        // 8 and the page pointer is page aligned, so also 8 byte aligned

        // we use a minimum alignment of 8 since this allows a fast path for many
        // zeroizers and reduces external memory fragmentation
        if layout.align() <= 8 {
            // fast path for low align
            debug_assert!(
                layout.align() == 1
                    || layout.align() == 2
                    || layout.align() == 4
                    || layout.align() == 8
            );

            let alloc_slice_ptr: *mut [u8] =
                ptr::slice_from_raw_parts_mut(stack_ptr, rounded_req_size);
            // SAFETY: the page pointer is nonnull and the addition doesn't wrap so the
            // result is nonnull
            let alloc_slice_ptr: NonNull<[u8]> = unsafe { NonNull::new_unchecked(alloc_slice_ptr) };

            // SAFETY: rounded_req_size is a multiple of 8 (by rounding) so that
            // `self.stack_offset` stays a multiple of 8
            self.stack_offset
                .set(self.stack_offset.get() + rounded_req_size);

            self.bytes.set(self.bytes.get() + rounded_req_size);
            Ok(alloc_slice_ptr)
        } else {
            // slower path for large align
            // first pointer >= `stack_ptr` which is `layout.align()` bytes aligned
            // SAFETY: `layout.align()` is a power of 2
            let next_aligned_ptr = unsafe { align_up_ptr_mut(stack_ptr, layout.align()) };
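            // Worked example (illustrative): if `stack_ptr` sits at page offset 8
            // and `layout.align()` is 64, the aligned pointer lands at page offset
            // 64 (the page itself is page aligned); the 56 byte gap in between is
            // only reclaimed once the allocator fully resets.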
            // if this wraps the address space, then the result is null and the layout
            // doesn't fit the remaining memory of our page, so error
            if unlikely(next_aligned_ptr.is_null()) {
                return Err(AllocError);
            }
            // offset of `next_aligned_ptr` relative to our base page pointer
            // SAFETY: `next_aligned_ptr` is higher in the memory than `stack_ptr`
            let next_align_pageoffset =
                unsafe { large_offset_from(next_aligned_ptr, self.page.as_ptr()) };
            // error if `next_aligned_ptr` falls outside of our page
            if next_align_pageoffset >= self.page.page_size() {
                return Err(AllocError);
            }
            // the new allocation will start at `next_aligned_ptr` and be `rounded_req_size`
            // long; error if we do not have enough space for this allocation
            // by the previous branch `self.page.page_size() - next_align_pageoffset` won't
            // wrap (`self.page.page_size() - next_align_pageoffset` is the
            // number of bytes available)
            if rounded_req_size > self.page.page_size() - next_align_pageoffset {
                return Err(AllocError);
            }

            // if we reach here then [next_aligned_ptr .. next_aligned_ptr +
            // rounded_req_size] lies entirely within our memory page
            let alloc_slice_ptr: *mut [u8] =
                ptr::slice_from_raw_parts_mut(next_aligned_ptr, rounded_req_size);
            // SAFETY: the page pointer is nonnull and the addition doesn't wrap so the
            // result is nonnull
            let alloc_slice_ptr: NonNull<[u8]> = unsafe { NonNull::new_unchecked(alloc_slice_ptr) };

            // SAFETY: `rounded_req_size` is a multiple of 8 (by rounding) and
            // `next_align_pageoffset` is too, therefore `self.stack_offset` stays a
            // multiple of 8
            // SAFETY: `next_align_pageoffset + rounded_req_size` is the first offset
            // after the currently created allocation (`alloc_slice_ptr`)
            self.stack_offset
                .set(next_align_pageoffset + rounded_req_size);

            self.bytes.set(self.bytes.get() + rounded_req_size);
            Ok(alloc_slice_ptr)
        }
    }

    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // zero initialisation doesn't come at a cost, see `allocate_zeroed`
        self.allocate_zeroed(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // catch zero sized allocations immediately so we do not have to bother with
        // them
        if layout.size() == 0 {
            return;
        }

        // `ptr` must be returned by this allocator, so it lies in the currently used
        // part of the memory page
        debug_checked_precondition!(self.page.as_ptr().addr() <= ptr.as_ptr().addr());
        debug_checked_precondition!(
            ptr.as_ptr().addr() <= self.page.as_ptr().addr() + self.stack_offset.get()
        );

        // SAFETY: this `rounded_req_size` is identical to the value of
        // `rounded_req_size` in `self.allocate_zeroed` when the block was first
        // allocated, since `layout` must fit the block returned by that function;
        // the current `layout.size()` is therefore in the range
        // `layout.size() ..= rounded_req_size` of the values back then. This is
        // important for safety and correct functioning.
        let rounded_req_size = align_up_usize(layout.size(), 8);

        // The pointer we got from the caller might have provenance for only
        // `layout.size()` bytes. We reconstruct the pointer with our full page
        // provenance, so that `ptr` is valid for `rounded_req_size` byte writes.
        let ptr = self.page.as_ptr_mut().with_addr(ptr.as_ptr().addr());

        // securely wipe the deallocated memory
        // SAFETY: `ptr` is valid for writes of `rounded_req_size` bytes since it was
        // previously successfully allocated (by the safety contract for this
        // function) and not yet deallocated
        unsafe {
            zeroize_mem(ptr, rounded_req_size);
        }
        // `self.bytes - rounded_req_size` doesn't overflow since the memory has
        // previously been allocated
        self.bytes.set(self.bytes.get() - rounded_req_size);

        // if `self.bytes` is now 0 then this was the last allocation
        // hence we can reset the allocator: reset the stack offset
        if self.bytes.get() == 0 {
            self.stack_offset.set(0);
            return;
        }

        // otherwise, if this allocation was the last one on the stack, rewind the stack
        // offset so we can reuse the memory for later allocation requests

        // SAFETY: this doesn't overflow as `ptr` was returned by a previous allocation
        // request so lies in our memory page, so `ptr` is larger than the page
        // pointer
        let alloc_start_offset = unsafe { large_offset_from(ptr, self.page.as_ptr()) };
        let alloc_end_offset = alloc_start_offset + rounded_req_size;
        // `alloc_end_offset` is the stack offset directly after its allocation
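        // Worked example (illustrative): if the page holds allocations at offsets
        // 0..16 and 16..40 and the 16..40 allocation is deallocated, then
        // `alloc_end_offset == 40 == stack_offset`, so the offset rewinds to 16.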
        if alloc_end_offset == self.stack_offset.get() {
            // SAFETY: `alloc_start_offset` is a multiple of 8 since both `ptr` and the page
            // pointer are 8 byte aligned
            self.stack_offset.set(alloc_start_offset);
        }
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_checked_precondition!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        // catch zero sized allocations immediately so we do not have to bother with
        // them
        if new_layout.size() == 0 {
            // SAFETY: safety contract must be upheld by the caller
            unsafe {
                self.deallocate(ptr, old_layout);
            }
            // SAFETY: `new_layout.align()` is a power of 2 since that is required by the
            // `Layout` type
            return Ok(unsafe { Self::allocate_zerosized(new_layout.align()) });
        }

        // `ptr` must be returned by this allocator, so it lies in the currently used
        // part of the memory page
        debug_checked_precondition!(self.page.as_ptr().addr() <= ptr.as_ptr().addr());
        debug_checked_precondition!(
            ptr.as_ptr().addr() <= self.page.as_ptr().addr() + self.stack_offset.get()
        );

        // check whether the existing allocation has the requested alignment
        if is_aligned_ptr(ptr.as_ptr(), new_layout.align()) {
            // old allocation has the (new) required alignment
            // we can shrink the allocation in place
            // for a non-final allocation (not last allocation on the memory page) this
            // unfortunately fragments memory; we could as well just not shrink, but we want
            // to zeroize memory as early as possible (and guarantee zeroization)
            // so we do shrink

            // round old layout size to a multiple of 8, since allocation sizes are
            // multiples of 8
            let rounded_size: usize = align_up_usize(old_layout.size(), 8);

            // shrink in place; only the stack offset rewind further below additionally
            // requires the allocation to be the final one on the memory page
            let new_rounded_size: usize = align_up_usize(new_layout.size(), 8);
            // SAFETY: `ptr` points to an allocation of size at least `rounded_size`, and
            // `new_rounded_size` not larger, so `ptr + new_rounded_size` still points
            // inside our memory page
            // SAFETY: `new_rounded_size` is a multiple of 8 and `ptr` is 8 byte aligned so
            // `new_alloc_end` is so too
            let new_alloc_end: *mut u8 = unsafe { ptr.as_ptr().add(new_rounded_size) };
            // doesn't wrap since `old_layout.size() >= new_layout.size()`, and the
            // inequality is invariant under rounding up to a multiple of 8;
            // also `size_decrease` is therefore a multiple of 8
            let size_decrease: usize = rounded_size - new_rounded_size;
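            // Worked example (illustrative): shrinking from an old size of 17
            // (rounded to 24) to a new size of 9 (rounded to 16) gives
            // `size_decrease == 8`, and those 8 bytes at `ptr + 16` are wiped below.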
            // securely wipe the deallocated memory
            // SAFETY: `new_alloc_end` is valid for writes of `rounded_size -
            // new_rounded_size` bytes since it is only `new_rounded_size` past
            // `ptr`, which was successfully allocated (by the safety contract
            // for this function) and not yet deallocated
            unsafe {
                zeroize_mem(new_alloc_end, size_decrease);
            }
            // decrement the number of allocated bytes by the allocation size reduction
            self.bytes.set(self.bytes.get() - size_decrease);

            // if the allocation is the final allocation in our memory page, then we can
            // rewind the stack offset to limit memory fragmentation
            // `ptr` is allocated with `self` and `rounded_size` fits it and is a multiple
            // of 8
            if self.ptr_is_last_allocation(ptr, rounded_size) {
                // SAFETY: `size_decrease` is a multiple of 8 so `self.stack_offset` remains so
                self.stack_offset
                    .set(self.stack_offset.get() - size_decrease);
            }

            // create the pointer to the shrunken allocation
            let alloc_slice_ptr: *mut [u8] =
                ptr::slice_from_raw_parts_mut(ptr.as_ptr(), new_rounded_size);
            // SAFETY: `ptr.as_ptr()` is non-null by the type of `ptr`
            let alloc_slice_ptr: NonNull<[u8]> = unsafe { NonNull::new_unchecked(alloc_slice_ptr) };

            Ok(alloc_slice_ptr)
        } else {
            // wrong alignment, we have to reallocate
            // SAFETY: safety contract must be upheld by the caller
            unsafe { self.realloc_shrink(ptr, old_layout, new_layout) }
        }
    }

    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_checked_precondition!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        // catch zero sized allocations immediately so we do not have to bother with
        // them
        if old_layout.size() == 0 {
            // old allocation was zero sized so no need for deallocation
            return self.allocate(new_layout);
        }

        // `ptr` must be returned by this allocator, so it lies in the currently used
        // part of the memory page
        debug_checked_precondition!(self.page.as_ptr().addr() <= ptr.as_ptr().addr());
        debug_checked_precondition!(
            ptr.as_ptr().addr() <= self.page.as_ptr().addr() + self.stack_offset.get()
        );

        // check whether the existing allocation has the requested alignment
        if is_aligned_ptr(ptr.as_ptr(), new_layout.align()) {
            // old allocation has the (new) required alignment
            // if the allocation is the final allocation in our memory page, then we can
            // increase the allocation in-place

            // round old layout size to a multiple of 8, since allocation sizes are
            // multiples of 8
            let rounded_size: usize = align_up_usize(old_layout.size(), 8);
            // `ptr` is allocated with `self` and `rounded_size` fits it and is a multiple
            // of 8
            if self.ptr_is_last_allocation(ptr, rounded_size) {
                // increase allocation in-place

                let new_rounded_size: usize = align_up_usize(new_layout.size(), 8);
                // if rounding wraps a usize, then the result is 0 and the layout doesn't
                // fit the remaining memory of our page, so error
                if unlikely(new_rounded_size == 0) {
                    return Err(AllocError);
                }

                // SAFETY: this doesn't overflow as `ptr` was returned by a previous allocation
                // request so lies in our memory page, so `ptr` is larger than
                // the page pointer
                let alloc_start_offset =
                    unsafe { large_offset_from(ptr.as_ptr(), self.page.as_ptr()) };
                // if the requested allocation size doesn't fit the rest of our page, error
                // the subtraction doesn't wrap since `alloc_start_offset` is the part of the
                // page that is used (without counting the allocation currently
                // being resized)
                if new_rounded_size > self.page.page_size() - alloc_start_offset {
                    return Err(AllocError);
                }

                // if we get here then the resized allocation fits the rest of our memory page
                // this doesn't wrap since `new_layout.size() >= old_layout.size()` so after
                // rounding both to a multiple of 8, `new_rounded_size >= rounded_size`
                // since both values are multiples of 8, `size_increase` is so too
                let size_increase: usize = new_rounded_size - rounded_size;
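                // Worked example (illustrative): growing from an old size of 9
                // (rounded to 16) to a new size of 20 (rounded to 24) gives
                // `size_increase == 8`; `bytes` and `stack_offset` advance by 8.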
                // increase the number of allocated bytes by the allocation size increase
                self.bytes.set(self.bytes.get() + size_increase);
                // and the stack offset
                // SAFETY: `size_increase` is a multiple of 8 so `self.stack_offset` remains so
                self.stack_offset
                    .set(self.stack_offset.get() + size_increase);

                // create the pointer to the grown allocation
                let alloc_slice_ptr: *mut [u8] =
                    ptr::slice_from_raw_parts_mut(ptr.as_ptr(), new_rounded_size);
                // SAFETY: `ptr.as_ptr()` is non-null by the type of `ptr`
                let alloc_slice_ptr: NonNull<[u8]> =
                    unsafe { NonNull::new_unchecked(alloc_slice_ptr) };

                return Ok(alloc_slice_ptr);
            }
        }
        // if the alignment of the old allocation is not enough or the allocation is
        // not the last one on our memory page, then fall back to making a new
        // allocation and deallocating the old one
        // SAFETY: caller must uphold the safety contract
        unsafe { self.realloc_grow(ptr, old_layout, new_layout) }
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: caller must uphold safety contract of `Allocator::grow_zeroed`
        unsafe { self.grow_zeroed(ptr, old_layout, new_layout) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::allocator_api::{Box, Vec};
    use std::mem::drop;

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    #[repr(align(16))]
    struct Align16(u128);

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    #[repr(align(16))]
    struct ByteAlign16(u8);

    #[test]
    fn create_consistency() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
    }

    #[test]
    fn box_allocation_8b() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([1u8; 8], &allocator);
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn box_allocation_9b() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([1u8; 9], &allocator);
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn box_allocation_zst() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([(); 8], &allocator);
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn multiple_box_allocations() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([1u8; 9], &allocator);
            allocator.consistency_check();
            {
                let _heap_mem2 = Box::new_in([1u8; 9], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2`
            allocator.consistency_check();
            {
                let _heap_mem2prime = Box::new_in([1u8; 9], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2prime`
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn multiple_box_allocations_high_align() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([Align16(1); 5], &allocator);
            allocator.consistency_check();
            {
                let _heap_mem2 = Box::new_in([Align16(1); 9], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2`
            allocator.consistency_check();
            {
                let _heap_mem2prime = Box::new_in([Align16(1); 2], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2prime`
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn multiple_box_allocations_mixed_align() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Box::new_in([1u8; 17], &allocator);
            allocator.consistency_check();
            {
                let _heap_mem2 = Box::new_in([Align16(1); 9], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2`
            allocator.consistency_check();
            {
                let _heap_mem2prime = Box::new_in([Align16(1); 2], &allocator);
                allocator.consistency_check();
            } // drop `_heap_mem2prime`
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn many_box_allocations_mixed_align_nonstacked_drop() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let heap_mem1 = Box::new_in([Align16(1); 11], &allocator);
            allocator.consistency_check();
            let heap_mem2 = Box::new_in([ByteAlign16(1); 51], &allocator);
            allocator.consistency_check();
            let heap_mem3 = Box::new_in([1u8; 143], &allocator);
            allocator.consistency_check();
            drop(heap_mem3);
            allocator.consistency_check();
            let heap_mem4 = Box::new_in(ByteAlign16(1), &allocator);
            allocator.consistency_check();
            let heap_mem5 = Box::new_in(Align16(1), &allocator);
            allocator.consistency_check();
            drop(heap_mem2);
            allocator.consistency_check();
            drop(heap_mem1);
            allocator.consistency_check();
            drop(heap_mem4);
            allocator.consistency_check();
            drop(heap_mem5);
            allocator.consistency_check();
        } // all boxes dropped above
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn vec_allocation_9b() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let _heap_mem = Vec::<u8, _>::with_capacity_in(9, &allocator);
            allocator.consistency_check();
        } // drop `_heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn vec_allocation_grow_repeated() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let mut heap_mem = Vec::<u8, _>::with_capacity_in(9, &allocator);
            allocator.consistency_check();
            heap_mem.reserve(10);
            allocator.consistency_check();
            heap_mem.reserve(17);
            allocator.consistency_check();
        } // drop `heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn vec_allocation_nonfinal_grow() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let mut heap_mem = Vec::<u8, _>::with_capacity_in(9, &allocator);
            allocator.consistency_check();
            {
                let _heap_mem2 = Box::new_in(37_u64, &allocator);
                allocator.consistency_check();
                heap_mem.reserve(10);
                allocator.consistency_check();
                heap_mem.reserve(17);
                allocator.consistency_check();
            } // drop `_heap_mem2`
            allocator.consistency_check();
        } // drop `heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn vec_allocation_shrink() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let mut heap_mem = Vec::<u8, _>::with_capacity_in(9, &allocator);
            allocator.consistency_check();
            heap_mem.push(255);
            allocator.consistency_check();
            heap_mem.shrink_to_fit();
            allocator.consistency_check();
        } // drop `heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn vec_allocation_nonfinal_shrink() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        {
            let mut heap_mem = Vec::<u8, _>::with_capacity_in(9, &allocator);
            allocator.consistency_check();
            {
                let _heap_mem2 = Box::new_in(37_u64, &allocator);
                allocator.consistency_check();
                heap_mem.push(1);
                allocator.consistency_check();
                heap_mem.shrink_to_fit();
                allocator.consistency_check();
            } // drop `_heap_mem2`
            allocator.consistency_check();
        } // drop `heap_mem`
        allocator.consistency_check();
        // drop `allocator`
    }

    #[test]
    fn allocate_zeroed() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");

        let layout = Layout::new::<[u8; 16]>();
        let ptr = allocator
            .allocate_zeroed(layout)
            .expect("allocation failed");
        for i in 0..16 {
            let val: u8 = unsafe { (ptr.as_ptr() as *const u8).add(i).read() };
            assert_eq!(val, 0_u8);
        }
        unsafe {
            allocator.deallocate(ptr.cast(), layout);
        }
    }
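
    // Additional sketch of a test exercising the FILO pattern from the type-level
    // docs; it only uses APIs already used above, but is illustrative rather than
    // exhaustive.
    #[test]
    fn stacked_box_allocations_filo_drop() {
        let allocator = SecStackSinglePageAlloc::new().expect("allocator creation failed");
        allocator.consistency_check();
        let heap_mem1 = Box::new_in([1u8; 16], &allocator);
        allocator.consistency_check();
        let heap_mem2 = Box::new_in([2u8; 32], &allocator);
        allocator.consistency_check();
        // drop in reverse creation order so the stack offset can rewind each time
        drop(heap_mem2);
        allocator.consistency_check();
        drop(heap_mem1);
        allocator.consistency_check();
        // drop `allocator`
    }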
}