// slabmalloc/sc.rs

//! An `SCAllocator` that can allocate fixed-size objects.

use crate::*;

/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create a boolean expression that picks between them (here `a > b`)
/// * cast said expression to a `usize`
/// * use that value to index into the array created above
///
/// # Source
/// https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time
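///
/// # Worked example
/// For `cmin(3, 5)`: `(3 > 5) as usize == 0`, so `[3, 5][0]` selects `3`;
/// for `cmin(5, 3)`: `(5 > 3) as usize == 1`, so `[5, 3][1]` again selects `3`.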
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
    [a, b][(a > b) as usize]
}

/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
    core::cmp::min(a, b)
}

/// A slab allocator that allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
///  * `empty_slabs`: A list of pages the SCAllocator maintains that currently
///    hold zero allocations; these can be given back to a requestor during
///    reclamation.
///  * `slabs`: A list of partially allocated pages that still have room for more objects.
///  * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we serve memory from `slabs`; if that list is empty, we try
/// to take a page from `empty_slabs` before returning an out-of-memory error.
/// If a page becomes full after an allocation, we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, deallocation may cause a page to migrate from `full_slabs` to
/// `slabs`, or from `slabs` to `empty_slabs` (the actual list moves are
/// deferred to `allocate`; see `check_page_assignments`).
///
/// If an allocation returns `OutOfMemory`, a client using `SCAllocator` can
/// refill it using the `refill` function (see the usage sketch below).
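///
/// # Usage sketch
/// A minimal, non-doctest sketch: it assumes an `ObjectPage` backing type and
/// a hypothetical `acquire_page()` that hands out a page-aligned
/// `&'static mut ObjectPage`:
///
/// ```ignore
/// let mut sc: SCAllocator<ObjectPage> = SCAllocator::new(64);
/// let layout = Layout::from_size_align(64, 8).unwrap();
///
/// // Give the allocator one empty page to carve objects out of.
/// unsafe { sc.refill(acquire_page()) };
///
/// let obj = sc.allocate(layout).expect("the fresh page has free slots");
/// sc.deallocate(obj, layout).expect("same layout as the allocation");
/// ```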
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of succeeded allocations.
    pub(crate) allocation_count: usize,
    /// Maximum number of objects per page.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPages (some objects allocated but pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything is allocated in these, so we don't need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
}

/// Creates an instance of an SCAllocator. We do this in a macro because we
/// re-use the code in the const and non-const constructors.
macro_rules! new_sc_allocator {
    ($size:expr) => {
        SCAllocator {
            size: $size,
            allocation_count: 0,
            // Cap at 8 * 64 objects (presumably the number of bits the
            // page's allocation bitfield can track).
            obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64),
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
        }
    };
}

impl<'a, P: AllocablePage> SCAllocator<'a, P> {
    /// Rebalance the page lists every `REBALANCE_COUNT` allocations
    /// (see `check_page_assignments`).
    const REBALANCE_COUNT: usize = 64;

    /// Create a new SCAllocator.
    #[cfg(feature = "unstable")]
    pub const fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Create a new SCAllocator (non-const variant).
    #[cfg(not(feature = "unstable"))]
    pub fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Returns the maximum supported object size of this allocator.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Add a page to the list of partially used slabs.
    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
        self.slabs.insert_front(new_head);
    }

    /// Add a page to the empty list.
    fn insert_empty(&mut self, new_head: &'a mut P) {
        assert_eq!(
            new_head as *const P as usize % P::SIZE,
            0,
            "Inserted page is not aligned to page-size."
        );
        self.empty_slabs.insert_front(new_head);
    }

    /// Since `dealloc` cannot reassign pages without requiring a lock,
    /// we check the partial and full slab lists periodically as part of `alloc`
    /// and move pages to the empty or partially allocated slab lists.
    pub(crate) fn check_page_assignments(&mut self) {
        for slab_page in self.full_slabs.iter_mut() {
            if !slab_page.is_full() {
                // We need to move it from self.full_slabs -> self.slabs
                trace!("move {:p} full -> partial", slab_page);
                self.move_full_to_partial(slab_page);
            }
        }

        for slab_page in self.slabs.iter_mut() {
            if slab_page.is_empty(self.obj_per_page) {
                // We need to move it from self.slabs -> self.empty_slabs
                trace!("move {:p} partial -> empty", slab_page);
                self.move_to_empty(slab_page);
            }
        }
    }

    /// Move a page from `slabs` to `empty_slabs`.
    fn move_to_empty(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(
            !self.empty_slabs.contains(page_ptr),
            "Page {:p} already in empty_slabs",
            page_ptr
        );

        self.slabs.remove_from_list(page);
        self.empty_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.empty_slabs.contains(page_ptr));
    }

    /// Move a page from `slabs` to `full_slabs`.
    fn move_partial_to_full(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));

        self.slabs.remove_from_list(page);
        self.full_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));
    }

    /// Move a page from `full_slabs` to `slabs`.
    fn move_full_to_partial(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));

        self.full_slabs.remove_from_list(page);
        self.slabs.insert_front(page);

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));
    }

    /// Tries to allocate a block of memory with respect to the `layout`.
    /// Searches only the partially allocated slab pages; returns a null
    /// pointer if none of them has a suitable spot (the caller then falls
    /// back to the empty page list).
    ///
    /// # Arguments
    ///  * `sc_layout`: This is not the original layout but adjusted for the
    ///     SCAllocator size (>= original).
    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
        // TODO: Do we really need to check multiple slab pages (due to alignment)?
        // If not we can get away with a singly-linked list and have 8 more bytes
        // for the bitfield in an ObjectPage.

        for slab_page in self.slabs.iter_mut() {
            let ptr = slab_page.allocate(sc_layout);
            if !ptr.is_null() {
                if slab_page.is_full() {
                    trace!("move {:p} partial -> full", slab_page);
                    self.move_partial_to_full(slab_page);
                }
                self.allocation_count += 1;
                return ptr;
            }
        }

        // Periodically rebalance the page lists (since dealloc can't do it for us).
        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
            self.check_page_assignments();
            self.allocation_count = 0;
        }

        ptr::null_mut()
    }

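    /// Tries to reclaim up to `to_reclaim` empty pages, handing ownership of
    /// each reclaimed page to the `dealloc` callback. Returns how many pages
    /// were actually reclaimed (which may be fewer than `to_reclaim`).
    ///
    /// A non-doctest sketch; `pager` stands in for a hypothetical page
    /// allocator that takes the pages back:
    ///
    /// ```ignore
    /// let freed = sc.try_reclaim_pages(4, &mut |p: *mut ObjectPage| unsafe {
    ///     pager.release(p);
    /// });
    /// ```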
    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
    where
        F: FnMut(*mut P),
    {
        self.check_page_assignments();
        let mut reclaimed = 0;
        while reclaimed < to_reclaim {
            if let Some(page) = self.empty_slabs.pop() {
                dealloc(page as *mut P);
                reclaimed += 1;
            } else {
                break;
            }
        }

        reclaimed
    }

    /// Refill the SCAllocator with a fresh page.
    ///
    /// # Safety
    /// `page` must be page-aligned, must not be linked into any other page
    /// list, and must contain no live allocations: its bitfield and list
    /// pointers are re-initialized here.
    pub unsafe fn refill(&mut self, page: &'a mut P) {
        page.bitfield_mut()
            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
        *page.prev() = Rawlink::none();
        *page.next() = Rawlink::none();
        trace!("adding page to SCAllocator {:p}", page);
        self.insert_empty(page);
    }

    /// Allocates a block of memory described by `layout`.
    ///
    /// Returns a pointer to a valid region of memory or an
    /// `AllocationError`.
    ///
    /// The function may also move pages between lists
    /// (empty -> partial or partial -> full).
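    ///
    /// # Example (sketch)
    /// A non-doctest sketch of the refill-on-OOM pattern; `acquire_page()` is
    /// a hypothetical source of fresh, empty pages:
    ///
    /// ```ignore
    /// let ptr = match sc.allocate(layout) {
    ///     Ok(nptr) => nptr,
    ///     Err(AllocationError::OutOfMemory) => {
    ///         unsafe { sc.refill(acquire_page()) };
    ///         sc.allocate(layout).expect("freshly refilled")
    ///     }
    ///     Err(e) => panic!("allocation failed: {:?}", e),
    /// };
    /// ```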
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        trace!(
            "SCAllocator({}) is trying to allocate {:?}",
            self.size,
            layout
        );
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
        assert!(new_layout.size() >= layout.size());

        let ptr = {
            // Try to allocate from partial slabs,
            // if we fail check if we have empty pages and allocate from there
            let ptr = self.try_allocate_from_pagelist(new_layout);
            if ptr.is_null() && self.empty_slabs.head.is_some() {
                // Re-try allocation in empty page
                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
                debug_assert!(!self.empty_slabs.contains(empty_page));

                let ptr = empty_page.allocate(layout);
                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");

                trace!(
                    "move {:p} empty -> partial empty count {}",
                    empty_page,
                    self.empty_slabs.elements
                );
                // Move empty page to partial pages
                self.insert_partial_slab(empty_page);
                ptr
            } else {
                ptr
            }
        };

        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);

        if !ptr.is_null() {
            trace!(
                "SCAllocator({}) allocated ptr=0x{:x}",
                self.size,
                ptr as usize
            );
        }

        res
    }

    /// Deallocates a previously allocated `ptr` described by `layout`.
    ///
    /// May return an error in case an invalid `layout` is provided.
    /// Note that `deallocate` only takes `&self`, so it does not move slab
    /// pages between lists itself; rebalancing (partial -> empty,
    /// full -> partial) is deferred to `allocate` via `check_page_assignments`.
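    ///
    /// # Page lookup
    /// The owning page is found by masking off the low bits of `ptr`. For
    /// example, with `P::SIZE == 4096` (0x1000), a pointer `0x1000_1234`
    /// masks to the page base `0x1000_1000` via `ptr & !(P::SIZE - 1)`.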
    pub fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        trace!(
            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
            self.size,
            ptr,
            layout,
            P::SIZE
        );

        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);

        // Figure out which page we are on and construct a reference to it
        // TODO: The linked list will have another &mut reference
        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };

        let ret = slab_page.deallocate(ptr, new_layout);
        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
        ret
    }
}
327}