// buddy_slab_allocator/slab/cache.rs

//! Per-size-class slab cache.
//!
//! Maintains three intrusive doubly-linked lists of slab pages:
//! - **partial**: some objects free (preferred for allocation)
//! - **full**: no objects free
//! - **empty**: all objects free (at most one is cached; the rest are
//!   returned to the buddy allocator)
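//!
//! Slab lifecycle, as implemented below:
//! - a fresh slab starts on **partial** (`add_slab`)
//! - partial -> full when the last object is handed out
//!   (`try_alloc_from_partial`)
//! - full -> partial when an object is freed locally or remote frees are
//!   drained (`reclaim_full_with_remote_frees`)
//! - partial -> empty once every object is free; one empty slab is cached,
//!   and further empty slabs are returned via `CacheDeallocResult::FreeSlab`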
use super::page::SlabPageHeader;
use super::size_class::SizeClass;

/// Intrusive list head (address of the first `SlabPageHeader`; 0 = empty).
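///
/// The links live inside each page's `SlabPageHeader`, so list membership
/// needs no extra allocation and a page can be unlinked in O(1) given only
/// its base address.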
#[derive(Debug, Clone, Copy)]
struct ListHead {
    first: usize,
}

impl ListHead {
    const fn empty() -> Self {
        Self { first: 0 }
    }

    fn is_empty(&self) -> bool {
        self.first == 0
    }

    /// Push a slab page onto the front of the list.
    ///
    /// # Safety
    /// `base` must point to a valid `SlabPageHeader`.
    unsafe fn push_front(&mut self, base: usize) {
        unsafe {
            let hdr = &mut *(base as *mut SlabPageHeader);
            hdr.list_prev = 0;
            hdr.list_next = self.first;
            if self.first != 0 {
                let old = &mut *(self.first as *mut SlabPageHeader);
                old.list_prev = base;
            }
            self.first = base;
        }
    }

    /// Remove a slab page from this list.
    ///
    /// # Safety
    /// `base` must be a member of this list.
    unsafe fn remove(&mut self, base: usize) {
        unsafe {
            let hdr = &*(base as *const SlabPageHeader);
            let prev = hdr.list_prev;
            let next = hdr.list_next;

            // Unlink: fix up the predecessor (or the list head) and the successor.
            if prev != 0 {
                (*(prev as *mut SlabPageHeader)).list_next = next;
            } else {
                self.first = next;
            }
            if next != 0 {
                (*(next as *mut SlabPageHeader)).list_prev = prev;
            }
            // Clear the removed page's links so stale pointers cannot leak.
            let hdr = &mut *(base as *mut SlabPageHeader);
            hdr.list_prev = 0;
            hdr.list_next = 0;
        }
    }

    /// Pop the first page from the list. Returns 0 if the list is empty.
    ///
    /// # Safety
    /// Every page on the list must point to a valid `SlabPageHeader`.
    unsafe fn pop_front(&mut self) -> usize {
        unsafe {
            if self.first == 0 {
                return 0;
            }
            let base = self.first;
            self.remove(base);
            base
        }
    }
}

/// Cache for a single [`SizeClass`].
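///
/// # Example (sketch)
///
/// A minimal allocation flow, assuming the cache lock is held, 4 KiB pages,
/// a `size_class` already in scope, and a hypothetical `buddy_alloc_pages`
/// hook for fresh slabs:
///
/// ```ignore
/// let mut cache = SlabCache::new(size_class);
/// let obj = match cache.alloc_object::<4096>() {
///     Some(addr) => addr,
///     None => {
///         // No slab has a free object: pull fresh pages from the buddy
///         // allocator, register them, and retry.
///         let pages = cache.size_class.slab_pages(4096);
///         let base = buddy_alloc_pages(pages); // hypothetical buddy hook
///         cache.add_slab(base, pages * 4096, /* owner_cpu */ 0);
///         cache.alloc_object::<4096>().expect("fresh slab has a free object")
///     }
/// };
/// ```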
pub struct SlabCache {
    pub size_class: SizeClass,
    partial: ListHead,
    full: ListHead,
    empty: ListHead,
    /// Number of empty slabs cached (at most 1 is kept).
    empty_count: usize,
}

/// Result of a per-cache deallocation.
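///
/// A sketch of how a caller might consume this, assuming 4 KiB pages and a
/// hypothetical `buddy_free_pages` hook:
///
/// ```ignore
/// match cache.dealloc_object::<4096>(obj_addr) {
///     CacheDeallocResult::Done => {}
///     // The slab drained completely and an empty slab is already cached:
///     // hand these pages back to the buddy allocator.
///     CacheDeallocResult::FreeSlab { base, pages } => buddy_free_pages(base, pages),
/// }
/// ```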
pub enum CacheDeallocResult {
    /// Object freed; the slab stays in the cache.
    Done,
    /// The slab became empty and should be returned to the page allocator.
    FreeSlab { base: usize, pages: usize },
}

impl SlabCache {
    pub const fn new(size_class: SizeClass) -> Self {
        Self {
            size_class,
            partial: ListHead::empty(),
            full: ListHead::empty(),
            empty: ListHead::empty(),
            empty_count: 0,
        }
    }

    /// Try to allocate one object. Returns `Some(obj_addr)`, or `None` if no
    /// slab has a free object (the caller can then register a fresh slab via
    /// [`Self::add_slab`] and retry).
    pub fn alloc_object<const PAGE_SIZE: usize>(&mut self) -> Option<usize> {
        // 1. Try the first partial slab (remote frees are drained there first).
        if let Some(addr) = self.try_alloc_from_partial::<PAGE_SIZE>() {
            return Some(addr);
        }

        // 2. A full slab may have gained free objects via lock-free remote frees.
        if let Some(base) = self.reclaim_full_with_remote_frees() {
            unsafe { self.partial.push_front(base) };
            return self.try_alloc_from_partial::<PAGE_SIZE>();
        }

        // 3. Try recycling the cached empty slab.
        if !self.empty.is_empty() {
            let base = unsafe { self.empty.pop_front() };
            self.empty_count -= 1;
            // Move it to the partial list and allocate from it.
            unsafe { self.partial.push_front(base) };
            return self.try_alloc_from_partial::<PAGE_SIZE>();
        }

        None
    }

    /// Drain remote frees from the first full slab that has them and unlink
    /// it from the full list; the caller then moves it back onto the partial
    /// list.
    fn reclaim_full_with_remote_frees(&mut self) -> Option<usize> {
        let mut base = self.full.first;
        while base != 0 {
            // Grab the next link before `remove` clears this page's links.
            let next = unsafe { (*(base as *const SlabPageHeader)).list_next };
            let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
            if hdr.has_remote_frees() {
                hdr.drain_remote_frees(base);
                unsafe { self.full.remove(base) };
                return Some(base);
            }
            base = next;
        }
        None
    }

    /// Attempt an allocation from the first partial slab.
    fn try_alloc_from_partial<const PAGE_SIZE: usize>(&mut self) -> Option<usize> {
        let base = self.partial.first;
        if base == 0 {
            return None;
        }

        let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };

        // Drain any remote frees first so the local free count is accurate.
        if hdr.has_remote_frees() {
            hdr.drain_remote_frees(base);
        }

        if let Some(idx) = hdr.local_alloc() {
            let obj_addr = hdr.object_addr(base, idx);
            // If the slab is now genuinely full (no pending remote frees
            // either), move it to the full list.
            if hdr.is_local_full() && !hdr.has_remote_frees() {
                unsafe {
                    self.partial.remove(base);
                    self.full.push_front(base);
                }
            }
            return Some(obj_addr);
        }
        None
    }

    /// Free an object back to this cache (local CPU path, called with the
    /// cache lock held).
    ///
    /// Returns whether the slab should be returned to the page allocator.
    pub fn dealloc_object<const PAGE_SIZE: usize>(
        &mut self,
        obj_addr: usize,
    ) -> CacheDeallocResult {
        let slab_bytes = self.size_class.slab_pages(PAGE_SIZE) * PAGE_SIZE;
        let base = SlabPageHeader::base_from_obj_addr::<PAGE_SIZE>(obj_addr, slab_bytes);
        let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
        // Mirrors the condition under which `try_alloc_from_partial` moves a
        // slab onto the full list.
        let was_full = hdr.is_local_full() && !hdr.has_remote_frees();

        let idx = hdr.object_index(base, obj_addr);
        hdr.local_free(idx);

        if was_full {
            // The slab has a free object again; move it from full to partial.
            unsafe {
                self.full.remove(base);
                self.partial.push_front(base);
            }
        }

        // Check whether the slab is now completely empty. Drain remote frees
        // first so the free count is accurate.
        if hdr.has_remote_frees() {
            hdr.drain_remote_frees(base);
        }

        if hdr.is_all_free() {
            if self.empty_count == 0 {
                // Cache one empty slab for quick reuse.
                unsafe {
                    self.partial.remove(base);
                    self.empty.push_front(base);
                }
                self.empty_count += 1;
                CacheDeallocResult::Done
            } else {
                // An empty slab is already cached; return this one to the
                // page allocator.
                unsafe { self.partial.remove(base) };
                CacheDeallocResult::FreeSlab {
                    base,
                    pages: self.size_class.slab_pages(PAGE_SIZE),
                }
            }
        } else {
            CacheDeallocResult::Done
        }
    }

    /// Register a newly allocated slab page (from the buddy allocator).
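    ///
    /// `dealloc_object` recovers the slab base assuming a footprint of
    /// `size_class.slab_pages(PAGE_SIZE) * PAGE_SIZE`, so `bytes` should
    /// match it. A minimal sketch, assuming 4 KiB pages and a hypothetical
    /// `buddy_alloc_pages` hook:
    ///
    /// ```ignore
    /// let pages = cache.size_class.slab_pages(4096);
    /// let base = buddy_alloc_pages(pages); // hypothetical buddy-allocator call
    /// cache.add_slab(base, pages * 4096, /* owner_cpu */ 0);
    /// ```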
    pub fn add_slab(&mut self, base: usize, bytes: usize, owner_cpu: u16) {
        let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
        hdr.init(self.size_class, bytes, owner_cpu);
        unsafe { self.partial.push_front(base) };
    }
}