outsource_heap/
bounded.rs

1//! A port of my `bounded_alloc` module from *Monadic Bot*.
2//! Unlike the original, this one is more tightly bound to the
3//! closures it is used with.
4//! Also unlike the original, this one does not rely on *undefined behavior*.
5//! By default, that is.
6//! There are two ways to make this bounded allocator useful:
7//! - Rely on `std` library UB and make alloc() unwind
8//! - Rely on a Nightly feature and set an unwinding alloc_error_hook
9//!
10//! There is a secret CFG option for the first thing, and you can do the second thing yourself like:
//! ```ignore
//! #![feature(alloc_error_hook)]
//! use ::std::alloc::{Layout, set_alloc_error_hook};
//! fn your_alloc_error_hook(layout: Layout) {
//!    panic!("memory allocation of {} bytes failed", layout.size());
//! }
//! set_alloc_error_hook(your_alloc_error_hook);
//! ```
19//! In either case, the [`with_max_alloc`](./macro.with_max_alloc.html) macro simplifies basic usage
20//! by performing the `catch_unwind` for you.
21use ::std::alloc::{System, Layout};
22use ::core::sync::atomic::{AtomicUsize, Ordering};
23use super::Store;
24
/// A [`Store`] wrapper which provides an allocation limit on top of whatever allocator it is given.
#[derive(Debug)]
pub struct Bounded<T> {
    // The running total of live bytes must stay strictly below this value.
    exclusive_upper: usize,
    // Running sum of currently-allocated bytes, maintained atomically.
    total: AtomicUsize,
    // The underlying allocator every request is forwarded to.
    alloc: T,
}
31impl Bounded<System> {
32    pub const fn system(exclusive_upper: usize) -> Bounded<System> {
33        Bounded { exclusive_upper, total: AtomicUsize::new(0), alloc: System }
34    }
35}
36impl<T> Bounded<T> {
37    pub const fn new(exclusive_upper: usize, alloc: T) -> Self {
38        Self { exclusive_upper, total: AtomicUsize::new(0), alloc }
39    }
40    fn attempt_add(&self, amt: usize) -> Result<usize, usize> {
41        self.total.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
42            // Alter the stored value of total if and only if
43            // the new total is a permitted amount.
44            // Note that the saturating addition + exclusive upper bound
45            // entails that any case that would cause overflow is necessarily disallowed.
46            let x = x.saturating_add(amt);
47            if x >= self.exclusive_upper {
48                None
49            } else {
50                Some(x)
51            }
52        })
53    }
54}
55
/// Panic payload thrown when the allocation bound is exceeded
/// (only under the UB-reliant strategy).
#[cfg(OUTSOURCE_HEAP_BOUNDED_UB)]
pub struct Oom;

/// Signal allocation failure by unwinding with an [`Oom`] payload.
/// Relies on `std` treating an unwinding allocator as acceptable, which is UB.
#[cfg(OUTSOURCE_HEAP_BOUNDED_UB)]
fn fail_alloc() -> *mut u8 {
    ::std::panic::panic_any(Oom)
}

/// Signal allocation failure by returning null, the well-defined
/// `GlobalAlloc` way of reporting an allocation error.
#[cfg(not(OUTSOURCE_HEAP_BOUNDED_UB))]
fn fail_alloc() -> *mut u8 {
    ::core::ptr::null_mut()
}
62
63unsafe impl<T: Store> Store for Bounded<T> {
64    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
65        if layout.size() > 0 {
66            // A first pass outline of what we *want* to do here is:
67            // 1. Saturating increment the allocation total.
68            // 2. Check if we went over our capacity.
69            // 3. If we did go over, fail allocation and restore total to what it was.
70            // But we're in a multithreaded context, and making the intermediate
71            // state of total visible to other threads will cause spurious failures.
72            // So, instead, we load the total first, perform our checks,
73            // then save whatever the new value is.
74            // Except, doing that will cause a Time of Check to Time of Use bug.
75            // Since another allocation could be performed on another thread
76            // at roughly the same time, and finish allocating something that would
77            // make our current allocation jump over the allowed capacity.
78            // Even further instead, we're going to use the fetch_update method
79            // on AtomicUsize, which appears to do what we want (which is a compare_exchange loop).
80            let total = self.attempt_add(layout.size());
81            match total {
82                Ok(_) => unsafe { Store::alloc(&self.alloc, layout) },
83                Err(_) => fail_alloc(),
84            }
85        } else {
86            unreachable!("calling GlobalAlloc::alloc with a layout.size == 0 is library UB")
87        }
88    }
89    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
90        // It is impossible for this subtraction to overflow,
91        // since the corresponding addition already passed alloc.
92        // There is a proof in alloc that no overflowing adds to
93        // the stored total ever occur, which means subtracting
94        // that which has been successfully added to the stored total
95        // cannot possibly overflow past 0.
96        let prev = self.total.fetch_sub(layout.size(), Ordering::Relaxed);
97        debug_assert!(prev >= layout.size());
98        unsafe { Store::dealloc(&self.alloc, ptr, layout) }
99    }
100    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
101        match self.attempt_add(layout.size()) {
102            Ok(_) => unsafe { Store::alloc_zeroed(&self.alloc, layout) },
103            Err(_) => fail_alloc(),
104        }
105    }
106    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
107        if new_size > layout.size() {
108            let diff = new_size - layout.size();
109            match self.attempt_add(diff) {
110                Ok(_) => unsafe { Store::realloc(&self.alloc, ptr, layout, new_size) },
111                Err(_) => fail_alloc(),
112            }
113        } else if new_size < layout.size() {
114            let diff = layout.size() - new_size;
115            // See proof in dealloc. Shrinking an existing allocation is analogous.
116            let prev = self.total.fetch_sub(diff, Ordering::Relaxed);
117            debug_assert!(prev > diff);
118            unsafe { Store::realloc(&self.alloc, ptr, layout, new_size) }
119        } else {
120            // do nothing lmao
121            ptr
122        }
123    }
124}
125
/// Run a task with a statically known bound on allocation.
///
/// Expands to a `catch_unwind` around `$crate::run`, using a `static`
/// `Bounded<System>` store so an out-of-bounds allocation surfaces as an
/// `Err` instead of crashing the process.
#[macro_export]
macro_rules! with_max_alloc {
    ($amt:expr, || $blk:expr) => {
        {
            // `System` must be spelled with a fully-qualified path: macro
            // bodies are not hygienic for type paths, so a bare `System`
            // would only resolve if the *call site* happened to import it.
            static BOUND_PROVIDER: $crate::bounded::Bounded<::std::alloc::System> =
                $crate::bounded::Bounded::system($amt);
            ::std::panic::catch_unwind(
                ::core::panic::AssertUnwindSafe(|| $crate::run(&BOUND_PROVIDER, || $blk)))
        }
    }
}
// Re-export so the macro is also reachable as `crate::bounded::with_max_alloc`,
// in addition to the crate-root path that `#[macro_export]` creates.
#[doc(inline)]
pub use crate::with_max_alloc;