buddy_slab_allocator/slab/mod.rs

//! Slab allocator — bitmap-based with lock-free cross-CPU freeing.
//!
//! The [`SlabAllocator`] is a standalone component that manages object allocation
//! within pre-supplied slab pages.  It does **not** allocate pages itself; instead
//! it returns [`SlabAllocResult::NeedsSlab`] to request pages from the caller.
//!
//! Cross-CPU frees go through the lock-free [`SlabPageHeader::remote_free`] path.
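//!
//! # Example
//!
//! A minimal sketch of the request-and-retry protocol, assuming a hypothetical
//! `buddy_alloc_pages` helper that maps `pages` pages and returns their base
//! address:
//!
//! ```ignore
//! let mut slab = SlabAllocator::<0x1000>::new();
//! let layout = Layout::from_size_align(64, 8).unwrap();
//!
//! let ptr = match slab.alloc(layout)? {
//!     SlabAllocResult::Allocated(p) => p,
//!     SlabAllocResult::NeedsSlab { size_class, pages } => {
//!         // Page acquisition is the caller's job; `buddy_alloc_pages` is illustrative.
//!         let base = buddy_alloc_pages(pages);
//!         slab.add_slab(size_class, base, pages * 0x1000, /* owner_cpu */ 0);
//!         match slab.alloc(layout)? {
//!             SlabAllocResult::Allocated(p) => p,
//!             SlabAllocResult::NeedsSlab { .. } => unreachable!("slab was just refilled"),
//!         }
//!     }
//! };
//! ```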

pub mod cache;
pub mod page;
pub mod size_class;

pub use page::SlabPageHeader;
pub use size_class::SizeClass;

use cache::{CacheDeallocResult, SlabCache};
use core::alloc::Layout;
use core::ptr::NonNull;
use spin::Mutex as SpinMutex;

use crate::error::{AllocError, AllocResult};

/// Result of a slab allocation attempt.
pub enum SlabAllocResult {
    /// Object successfully allocated.
    Allocated(NonNull<u8>),
    /// The slab cache for this size class has no free objects.
    /// The caller should allocate `pages` pages from the buddy allocator,
    /// call [`SlabAllocator::add_slab`], and retry.
    NeedsSlab { size_class: SizeClass, pages: usize },
}

/// Result of a slab deallocation.
pub enum SlabDeallocResult {
    /// Object freed, nothing else to do.
    Done,
    /// The slab page at `base` became empty and should be returned to the buddy.
    FreeSlab { base: usize, pages: usize },
}

/// Result of a pool-mediated slab deallocation.
pub enum SlabPoolDeallocResult {
    /// Object freed on the local CPU path.
    Done,
    /// Object was queued onto the owner's remote-free list.
    RemoteQueued,
    /// The slab page at `base` became empty and should be returned to the buddy.
    FreeSlab { base: usize, pages: usize },
}

/// Object-safe slab interface used by [`crate::GlobalAllocator`] EII hooks.
pub trait SlabTrait: Sync {
    /// Logical CPU id this slab belongs to.
    fn cpu_id(&self) -> usize;

    /// Page size used by this slab.
    fn page_size(&self) -> usize;

    /// Allocate one object.
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult>;

    /// Register a freshly allocated slab page.
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize);

    /// Free an object on the owner CPU path.
    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult;

    /// Free an object on the remote CPU path.
    fn dealloc_remote(&self, ptr: NonNull<u8>) {
        let owner_cpu = u16::try_from(self.cpu_id()).expect("CPU id exceeds slab owner range");
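        // SAFETY: the caller guarantees `ptr` came from a slab page owned by this
        // slab's CPU; `remote_free_object` queues it onto that page's lock-free
        // remote-free list.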
        unsafe { SlabPageHeader::remote_free_object(ptr, owner_cpu, self.page_size()) };
    }
}

/// Object-safe slab-pool interface used by [`crate::GlobalAllocator`] EII hooks.
pub trait SlabPoolTrait: Sync {
    /// Return the slab belonging to the current CPU.
    fn current_slab(&self) -> &dyn SlabTrait;

    /// Return the owner slab for the given CPU.
    fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait;

    /// Logical CPU id of the current CPU.
    fn current_cpu_id(&self) -> usize {
        self.current_slab().cpu_id()
    }

    /// Allocate one object from the current CPU's slab.
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        self.current_slab().alloc(layout)
    }

    /// Register a freshly allocated slab page in the current CPU's slab.
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        self.current_slab().add_slab(size_class, base, bytes)
    }

    /// Free an object, routing to local or remote slab ownership as needed.
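    ///
    /// A sketch of how a caller might consume the result; `buddy_free_pages` is a
    /// hypothetical stand-in for returning an empty slab to the buddy allocator:
    ///
    /// ```ignore
    /// match pool.dealloc(ptr, layout, owner_cpu) {
    ///     SlabPoolDeallocResult::Done | SlabPoolDeallocResult::RemoteQueued => {}
    ///     SlabPoolDeallocResult::FreeSlab { base, pages } => buddy_free_pages(base, pages),
    /// }
    /// ```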
    fn dealloc(&self, ptr: NonNull<u8>, layout: Layout, owner_cpu: usize) -> SlabPoolDeallocResult {
        if owner_cpu == self.current_cpu_id() {
            match self.current_slab().dealloc_local(ptr, layout) {
                SlabDeallocResult::Done => SlabPoolDeallocResult::Done,
                SlabDeallocResult::FreeSlab { base, pages } => {
                    SlabPoolDeallocResult::FreeSlab { base, pages }
                }
            }
        } else {
            self.owner_slab(owner_cpu).dealloc_remote(ptr);
            SlabPoolDeallocResult::RemoteQueued
        }
    }
}

/// Convenience helpers for callback-style slab access.
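///
/// For example (a sketch; `pool` is any [`SlabPoolTrait`] implementor):
///
/// ```ignore
/// let page_size = pool.with_current_slab(|slab| slab.page_size());
/// ```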
pub trait SlabPoolExt: SlabPoolTrait {
    /// Access the current CPU's slab via a callback.
    fn with_current_slab<R>(&self, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
        f(self.current_slab())
    }

    /// Access the given owner's slab via a callback.
    fn with_owner_slab<R>(&self, cpu_idx: usize, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
        f(self.owner_slab(cpu_idx))
    }
}

impl<T: ?Sized + SlabPoolTrait> SlabPoolExt for T {}

/// Standalone slab allocator (typically one instance per CPU, but usable on its own).
pub struct SlabAllocator<const PAGE_SIZE: usize = 0x1000> {
    caches: [SlabCache; SizeClass::COUNT],
}

/// Default per-CPU slab wrapper used by EII integrators.
pub struct PerCpuSlab<const PAGE_SIZE: usize = 0x1000> {
    cpu_id: u16,
    inner: SpinMutex<SlabAllocator<PAGE_SIZE>>,
}

/// Default static slab-pool wrapper used by EII integrators.
pub struct StaticSlabPool<const PAGE_SIZE: usize = 0x1000, const N: usize = 1> {
    slabs: [PerCpuSlab<PAGE_SIZE>; N],
    current_cpu_id: fn() -> usize,
}

impl<const PAGE_SIZE: usize> PerCpuSlab<PAGE_SIZE> {
    /// Create an empty per-CPU slab wrapper for `cpu_id`.
    pub const fn new(cpu_id: u16) -> Self {
        Self {
            cpu_id,
            inner: SpinMutex::new(SlabAllocator::new()),
        }
    }

    /// Reset the inner slab allocator to an empty state.
    pub fn reset(&self) {
        *self.inner.lock() = SlabAllocator::new();
    }

    /// Return this slab's logical CPU id.
    pub const fn cpu_id(&self) -> usize {
        self.cpu_id as usize
    }

    /// Allocate one object.
    pub fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        self.inner.lock().alloc(layout)
    }

    /// Register a freshly allocated slab page.
    pub fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        self.inner
            .lock()
            .add_slab(size_class, base, bytes, self.cpu_id);
    }

    /// Free an object on the owner CPU path.
    pub fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        self.inner.lock().dealloc(ptr, layout)
    }

    /// Queue an object onto this slab's remote-free list.
    pub fn dealloc_remote(&self, ptr: NonNull<u8>) {
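        // SAFETY: the caller guarantees `ptr` points into a live slab page owned by
        // this CPU; the object is pushed onto the page's lock-free remote-free list.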
        unsafe { SlabPageHeader::remote_free_object(ptr, self.cpu_id, PAGE_SIZE) };
    }
}

impl<const PAGE_SIZE: usize, const N: usize> StaticSlabPool<PAGE_SIZE, N> {
    /// Create a static slab pool from pre-built per-CPU slabs and a CPU-id hook.
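    ///
    /// A minimal two-CPU sketch; `my_cpu_id` is an assumed platform hook returning
    /// the current logical CPU index:
    ///
    /// ```ignore
    /// fn my_cpu_id() -> usize {
    ///     0 // e.g. read from a per-CPU register or CPU-local storage
    /// }
    ///
    /// static POOL: StaticSlabPool<0x1000, 2> =
    ///     StaticSlabPool::new([PerCpuSlab::new(0), PerCpuSlab::new(1)], my_cpu_id);
    /// ```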
    pub const fn new(slabs: [PerCpuSlab<PAGE_SIZE>; N], current_cpu_id: fn() -> usize) -> Self {
        Self {
            slabs,
            current_cpu_id,
        }
    }
}

impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
    /// Create a new (empty) slab allocator.  No pages are owned yet.
    pub const fn new() -> Self {
        Self {
            caches: [
                SlabCache::new(SizeClass::Bytes8),
                SlabCache::new(SizeClass::Bytes16),
                SlabCache::new(SizeClass::Bytes32),
                SlabCache::new(SizeClass::Bytes64),
                SlabCache::new(SizeClass::Bytes128),
                SlabCache::new(SizeClass::Bytes256),
                SlabCache::new(SizeClass::Bytes512),
                SlabCache::new(SizeClass::Bytes1024),
                SlabCache::new(SizeClass::Bytes2048),
            ],
        }
    }
}

impl<const PAGE_SIZE: usize> Default for SlabAllocator<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
    /// Try to allocate an object matching `layout`.
    ///
    /// If the matching cache is exhausted, [`SlabAllocResult::NeedsSlab`] is returned
    /// so the caller can supply pages and retry.
    pub fn alloc(&mut self, layout: Layout) -> AllocResult<SlabAllocResult> {
        let sc = SizeClass::from_layout(layout).ok_or(AllocError::InvalidParam)?;
        let cache = &mut self.caches[sc.index()];

        match cache.alloc_object::<PAGE_SIZE>() {
            Some(addr) => {
                // SAFETY: `addr` is non-null, aligned, and within a live slab page.
                let ptr = unsafe { NonNull::new_unchecked(addr as *mut u8) };
                Ok(SlabAllocResult::Allocated(ptr))
            }
            None => Ok(SlabAllocResult::NeedsSlab {
                size_class: sc,
                pages: sc.slab_pages(PAGE_SIZE),
            }),
        }
    }

    /// Free an object previously allocated with [`alloc`](Self::alloc).
    ///
    /// This is the **local** (owner-CPU) path.  Cross-CPU frees should go through
    /// [`SlabPageHeader::remote_free`] directly (see [`crate::GlobalAllocator`]).
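    ///
    /// A sketch of handling the result; `buddy_free_pages` is a hypothetical helper
    /// that returns the now-empty slab pages to the buddy allocator:
    ///
    /// ```ignore
    /// if let SlabDeallocResult::FreeSlab { base, pages } = slab.dealloc(ptr, layout) {
    ///     buddy_free_pages(base, pages);
    /// }
    /// ```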
    pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        let sc = SizeClass::from_layout(layout).expect("layout exceeds slab size");
        let cache = &mut self.caches[sc.index()];

        match cache.dealloc_object::<PAGE_SIZE>(ptr.as_ptr() as usize) {
            CacheDeallocResult::Done => SlabDeallocResult::Done,
            CacheDeallocResult::FreeSlab { base, pages } => {
                SlabDeallocResult::FreeSlab { base, pages }
            }
        }
    }

    /// Supply a freshly allocated slab page to the given size class.
    ///
    /// `base` is the virtual address of the page(s); `bytes` is `pages * PAGE_SIZE`.
    pub fn add_slab(&mut self, size_class: SizeClass, base: usize, bytes: usize, owner_cpu: u16) {
        self.caches[size_class.index()].add_slab(base, bytes, owner_cpu);
    }
}

impl<const PAGE_SIZE: usize> SlabTrait for PerCpuSlab<PAGE_SIZE> {
    fn cpu_id(&self) -> usize {
        PerCpuSlab::cpu_id(self)
    }

    fn page_size(&self) -> usize {
        PAGE_SIZE
    }

    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        PerCpuSlab::alloc(self, layout)
    }

    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        PerCpuSlab::add_slab(self, size_class, base, bytes)
    }

    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        PerCpuSlab::dealloc_local(self, ptr, layout)
    }
}

impl<const PAGE_SIZE: usize, const N: usize> SlabPoolTrait for StaticSlabPool<PAGE_SIZE, N> {
    fn current_slab(&self) -> &dyn SlabTrait {
        &self.slabs[(self.current_cpu_id)()]
    }

    fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait {
        &self.slabs[cpu_idx]
    }
}