// buddy_slab_allocator/slab/mod.rs
pub mod cache;
10pub mod page;
11pub mod size_class;
12
13pub use page::SlabPageHeader;
14pub use size_class::SizeClass;
15
16use cache::{CacheDeallocResult, SlabCache};
17use core::alloc::Layout;
18use core::ptr::NonNull;
19use spin::Mutex as SpinMutex;
20
21use crate::error::{AllocError, AllocResult};
22
/// Outcome of a slab allocation attempt.
pub enum SlabAllocResult {
    /// An object was carved out of an existing slab; the pointer is the
    /// object's address.
    Allocated(NonNull<u8>),
    /// No free object was available: the caller must supply `pages` backing
    /// pages for `size_class` (via `add_slab`) and retry.
    NeedsSlab { size_class: SizeClass, pages: usize },
}
32
/// Outcome of freeing an object on its owning CPU.
pub enum SlabDeallocResult {
    /// Object returned to its cache; nothing further to do.
    Done,
    /// The free emptied an entire slab: the `pages` pages starting at `base`
    /// can be handed back to the page-level allocator.
    FreeSlab { base: usize, pages: usize },
}
40
/// Outcome of freeing an object through a slab pool, which may route the
/// free to a different CPU's slab.
pub enum SlabPoolDeallocResult {
    /// Object freed locally; nothing further to do.
    Done,
    /// Object belonged to another CPU and was queued on that owner's
    /// remote-free path.
    RemoteQueued,
    /// The local free emptied an entire slab: the `pages` pages starting at
    /// `base` can be handed back to the page-level allocator.
    FreeSlab { base: usize, pages: usize },
}
50
/// One CPU's slab front-end, object-safe so pools can hold `&dyn SlabTrait`.
pub trait SlabTrait: Sync {
    /// Index of the CPU that owns this slab.
    fn cpu_id(&self) -> usize;

    /// Size in bytes of the pages backing this slab's caches.
    fn page_size(&self) -> usize;

    /// Allocates one object for `layout`; may report that a new slab is
    /// needed instead (see [`SlabAllocResult`]).
    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult>;

    /// Registers `bytes` of backing memory at `base` with the cache for
    /// `size_class`.
    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize);

    /// Frees `ptr` from the owning CPU itself.
    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult;

    /// Frees `ptr` on behalf of another CPU by pushing it through the slab
    /// page header's remote-free path.
    ///
    /// # Panics
    /// If this slab's CPU id does not fit in `u16`.
    fn dealloc_remote(&self, ptr: NonNull<u8>) {
        let owner_cpu = u16::try_from(self.cpu_id()).expect("CPU id exceeds slab owner range");
        // NOTE(review): assumes `ptr` points into a slab page managed by this
        // allocator — contract inherited from callers; confirm at call sites.
        unsafe { SlabPageHeader::remote_free_object(ptr, owner_cpu, self.page_size()) };
    }
}
74
75pub trait SlabPoolTrait: Sync {
77 fn current_slab(&self) -> &dyn SlabTrait;
79
80 fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait;
82
83 fn current_cpu_id(&self) -> usize {
85 self.current_slab().cpu_id()
86 }
87
88 fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
90 self.current_slab().alloc(layout)
91 }
92
93 fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
95 self.current_slab().add_slab(size_class, base, bytes)
96 }
97
98 fn dealloc(&self, ptr: NonNull<u8>, layout: Layout, owner_cpu: usize) -> SlabPoolDeallocResult {
100 if owner_cpu == self.current_cpu_id() {
101 match self.current_slab().dealloc_local(ptr, layout) {
102 SlabDeallocResult::Done => SlabPoolDeallocResult::Done,
103 SlabDeallocResult::FreeSlab { base, pages } => {
104 SlabPoolDeallocResult::FreeSlab { base, pages }
105 }
106 }
107 } else {
108 self.owner_slab(owner_cpu).dealloc_remote(ptr);
109 SlabPoolDeallocResult::RemoteQueued
110 }
111 }
112}
113
114pub trait SlabPoolExt: SlabPoolTrait {
116 fn with_current_slab<R>(&self, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
118 f(self.current_slab())
119 }
120
121 fn with_owner_slab<R>(&self, cpu_idx: usize, f: impl FnOnce(&dyn SlabTrait) -> R) -> R {
123 f(self.owner_slab(cpu_idx))
124 }
125}
126
127impl<T: ?Sized + SlabPoolTrait> SlabPoolExt for T {}
128
/// Per-size-class slab caches for one CPU. Not internally synchronized:
/// all mutating methods take `&mut self`.
pub struct SlabAllocator<const PAGE_SIZE: usize = 0x1000> {
    /// One cache per `SizeClass`, indexed by `SizeClass::index()`.
    caches: [SlabCache; SizeClass::COUNT],
}
133
/// A [`SlabAllocator`] bound to one CPU, made shareable via a spinlock.
pub struct PerCpuSlab<const PAGE_SIZE: usize = 0x1000> {
    /// Owning CPU index; `u16` matches what
    /// `SlabPageHeader::remote_free_object` expects.
    cpu_id: u16,
    /// Lock-protected allocator state.
    inner: SpinMutex<SlabAllocator<PAGE_SIZE>>,
}
139
/// A fixed-size pool of per-CPU slabs plus a callback identifying the
/// currently executing CPU.
pub struct StaticSlabPool<const PAGE_SIZE: usize = 0x1000, const N: usize = 1> {
    /// One slab per CPU, indexed by CPU id.
    slabs: [PerCpuSlab<PAGE_SIZE>; N],
    /// Returns the index of the CPU currently executing.
    current_cpu_id: fn() -> usize,
}
145
146impl<const PAGE_SIZE: usize> PerCpuSlab<PAGE_SIZE> {
147 pub const fn new(cpu_id: u16) -> Self {
149 Self {
150 cpu_id,
151 inner: SpinMutex::new(SlabAllocator::new()),
152 }
153 }
154
155 pub fn reset(&self) {
157 *self.inner.lock() = SlabAllocator::new();
158 }
159
160 pub const fn cpu_id(&self) -> usize {
162 self.cpu_id as usize
163 }
164
165 pub fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
167 self.inner.lock().alloc(layout)
168 }
169
170 pub fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
172 self.inner
173 .lock()
174 .add_slab(size_class, base, bytes, self.cpu_id);
175 }
176
177 pub fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
179 self.inner.lock().dealloc(ptr, layout)
180 }
181
182 pub fn dealloc_remote(&self, ptr: NonNull<u8>) {
184 unsafe { SlabPageHeader::remote_free_object(ptr, self.cpu_id, PAGE_SIZE) };
185 }
186}
187
188impl<const PAGE_SIZE: usize, const N: usize> StaticSlabPool<PAGE_SIZE, N> {
189 pub const fn new(slabs: [PerCpuSlab<PAGE_SIZE>; N], current_cpu_id: fn() -> usize) -> Self {
191 Self {
192 slabs,
193 current_cpu_id,
194 }
195 }
196}
197
impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
    /// Creates an allocator with one empty cache per size class.
    ///
    /// NOTE(review): the array literal must list the variants in exactly
    /// `SizeClass::index()` order and its length must equal
    /// `SizeClass::COUNT` — re-check both when a size class is added.
    pub const fn new() -> Self {
        Self {
            caches: [
                SlabCache::new(SizeClass::Bytes8),
                SlabCache::new(SizeClass::Bytes16),
                SlabCache::new(SizeClass::Bytes32),
                SlabCache::new(SizeClass::Bytes64),
                SlabCache::new(SizeClass::Bytes128),
                SlabCache::new(SizeClass::Bytes256),
                SlabCache::new(SizeClass::Bytes512),
                SlabCache::new(SizeClass::Bytes1024),
                SlabCache::new(SizeClass::Bytes2048),
            ],
        }
    }
}
216
217impl<const PAGE_SIZE: usize> Default for SlabAllocator<PAGE_SIZE> {
218 fn default() -> Self {
219 Self::new()
220 }
221}
222
223impl<const PAGE_SIZE: usize> SlabAllocator<PAGE_SIZE> {
224 pub fn alloc(&mut self, layout: Layout) -> AllocResult<SlabAllocResult> {
229 let sc = SizeClass::from_layout(layout).ok_or(AllocError::InvalidParam)?;
230 let cache = &mut self.caches[sc.index()];
231
232 match cache.alloc_object::<PAGE_SIZE>() {
233 Some(addr) => {
234 let ptr = unsafe { NonNull::new_unchecked(addr as *mut u8) };
236 Ok(SlabAllocResult::Allocated(ptr))
237 }
238 None => Ok(SlabAllocResult::NeedsSlab {
239 size_class: sc,
240 pages: sc.slab_pages(PAGE_SIZE),
241 }),
242 }
243 }
244
245 pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
250 let sc = SizeClass::from_layout(layout).expect("layout exceeds slab size");
251 let cache = &mut self.caches[sc.index()];
252
253 match cache.dealloc_object::<PAGE_SIZE>(ptr.as_ptr() as usize) {
254 CacheDeallocResult::Done => SlabDeallocResult::Done,
255 CacheDeallocResult::FreeSlab { base, pages } => {
256 SlabDeallocResult::FreeSlab { base, pages }
257 }
258 }
259 }
260
261 pub fn add_slab(&mut self, size_class: SizeClass, base: usize, bytes: usize, owner_cpu: u16) {
265 self.caches[size_class.index()].add_slab(base, bytes, owner_cpu);
266 }
267}
268
/// `PerCpuSlab` as an object-safe [`SlabTrait`].
///
/// Delegation uses fully-qualified `PerCpuSlab::…` calls to make explicit
/// that the inherent methods are invoked, not these trait methods of the
/// same name. `dealloc_remote` uses the trait's default implementation.
impl<const PAGE_SIZE: usize> SlabTrait for PerCpuSlab<PAGE_SIZE> {
    fn cpu_id(&self) -> usize {
        PerCpuSlab::cpu_id(self)
    }

    fn page_size(&self) -> usize {
        PAGE_SIZE
    }

    fn alloc(&self, layout: Layout) -> AllocResult<SlabAllocResult> {
        PerCpuSlab::alloc(self, layout)
    }

    fn add_slab(&self, size_class: SizeClass, base: usize, bytes: usize) {
        PerCpuSlab::add_slab(self, size_class, base, bytes)
    }

    fn dealloc_local(&self, ptr: NonNull<u8>, layout: Layout) -> SlabDeallocResult {
        PerCpuSlab::dealloc_local(self, ptr, layout)
    }
}
290
291impl<const PAGE_SIZE: usize, const N: usize> SlabPoolTrait for StaticSlabPool<PAGE_SIZE, N> {
292 fn current_slab(&self) -> &dyn SlabTrait {
293 &self.slabs[(self.current_cpu_id)()]
294 }
295
296 fn owner_slab(&self, cpu_idx: usize) -> &dyn SlabTrait {
297 &self.slabs[cpu_idx]
298 }
299}