// buddy_slab_allocator/slab/cache.rs
use super::page::SlabPageHeader;
use super::size_class::SizeClass;
/// Intrusive doubly-linked list of slab pages, addressed by base address.
///
/// `first` is the base address of the head slab; `0` means the list is
/// empty. The actual link pointers (`list_prev`/`list_next`) live inside
/// each slab's `SlabPageHeader`, so the list itself stores only the head.
#[derive(Debug, Clone, Copy)]
struct ListHead {
    first: usize,
}
15
16impl ListHead {
17 const fn empty() -> Self {
18 Self { first: 0 }
19 }
20
21 fn is_empty(&self) -> bool {
22 self.first == 0
23 }
24
25 unsafe fn push_front(&mut self, base: usize) {
30 unsafe {
31 let hdr = &mut *(base as *mut SlabPageHeader);
32 hdr.list_prev = 0;
33 hdr.list_next = self.first;
34 if self.first != 0 {
35 let old = &mut *(self.first as *mut SlabPageHeader);
36 old.list_prev = base;
37 }
38 self.first = base;
39 }
40 }
41
42 unsafe fn remove(&mut self, base: usize) {
47 unsafe {
48 let hdr = &*(base as *const SlabPageHeader);
49 let prev = hdr.list_prev;
50 let next = hdr.list_next;
51
52 if prev != 0 {
53 (*(prev as *mut SlabPageHeader)).list_next = next;
54 } else {
55 self.first = next;
56 }
57 if next != 0 {
58 (*(next as *mut SlabPageHeader)).list_prev = prev;
59 }
60 let hdr = &mut *(base as *mut SlabPageHeader);
62 hdr.list_prev = 0;
63 hdr.list_next = 0;
64 }
65 }
66
67 unsafe fn pop_front(&mut self) -> usize {
69 unsafe {
70 if self.first == 0 {
71 return 0;
72 }
73 let base = self.first;
74 self.remove(base);
75 base
76 }
77 }
78}
79
80pub struct SlabCache {
82 pub size_class: SizeClass,
83 partial: ListHead,
84 full: ListHead,
85 empty: ListHead,
86 empty_count: usize,
88}
89
/// Outcome of [`SlabCache::dealloc_object`].
///
/// `FreeSlab` hands a now-empty slab back to the caller, which must return
/// `pages` pages starting at `base` to the underlying page allocator.
/// Dropping the result unexamined would leak those pages, hence
/// `#[must_use]`.
#[must_use = "a FreeSlab result carries pages that must be returned to the page allocator"]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CacheDeallocResult {
    /// The object was returned to its slab; nothing further to do.
    Done,
    /// The slab at `base` is fully free and has been unlinked from the
    /// cache; the caller must release its `pages` pages.
    FreeSlab { base: usize, pages: usize },
}
97
98impl SlabCache {
99 pub const fn new(size_class: SizeClass) -> Self {
100 Self {
101 size_class,
102 partial: ListHead::empty(),
103 full: ListHead::empty(),
104 empty: ListHead::empty(),
105 empty_count: 0,
106 }
107 }
108
109 pub fn alloc_object<const PAGE_SIZE: usize>(&mut self) -> Option<usize> {
111 if let Some(addr) = self.try_alloc_from_partial::<PAGE_SIZE>() {
113 return Some(addr);
114 }
115
116 if let Some(base) = self.reclaim_full_with_remote_frees() {
118 unsafe { self.partial.push_front(base) };
119 return self.try_alloc_from_partial::<PAGE_SIZE>();
120 }
121
122 if !self.empty.is_empty() {
124 let base = unsafe { self.empty.pop_front() };
125 self.empty_count -= 1;
126 unsafe { self.partial.push_front(base) };
128 return self.try_alloc_from_partial::<PAGE_SIZE>();
129 }
130
131 None
132 }
133
134 fn reclaim_full_with_remote_frees(&mut self) -> Option<usize> {
137 let mut base = self.full.first;
138 while base != 0 {
139 let next = unsafe { (*(base as *const SlabPageHeader)).list_next };
140 let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
141 if hdr.has_remote_frees() {
142 hdr.drain_remote_frees(base);
143 unsafe { self.full.remove(base) };
144 return Some(base);
145 }
146 base = next;
147 }
148 None
149 }
150
151 fn try_alloc_from_partial<const PAGE_SIZE: usize>(&mut self) -> Option<usize> {
153 let base = self.partial.first;
154 if base == 0 {
155 return None;
156 }
157
158 let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
159
160 if hdr.has_remote_frees() {
162 hdr.drain_remote_frees(base);
163 }
164
165 if let Some(idx) = hdr.local_alloc() {
166 let obj_addr = hdr.object_addr(base, idx);
167 if hdr.is_local_full() && !hdr.has_remote_frees() {
169 unsafe {
170 self.partial.remove(base);
171 self.full.push_front(base);
172 }
173 }
174 return Some(obj_addr);
175 }
176 None
177 }
178
179 pub fn dealloc_object<const PAGE_SIZE: usize>(
183 &mut self,
184 obj_addr: usize,
185 ) -> CacheDeallocResult {
186 let slab_bytes = self.size_class.slab_pages(PAGE_SIZE) * PAGE_SIZE;
187 let base = SlabPageHeader::base_from_obj_addr::<PAGE_SIZE>(obj_addr, slab_bytes);
188 let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
189 let was_full = hdr.is_local_full() && !hdr.has_remote_frees();
190
191 let idx = hdr.object_index(base, obj_addr);
192 hdr.local_free(idx);
193
194 if was_full {
195 unsafe {
197 self.full.remove(base);
198 self.partial.push_front(base);
199 }
200 }
201
202 if hdr.has_remote_frees() {
205 hdr.drain_remote_frees(base);
206 }
207
208 if hdr.is_all_free() {
209 if self.empty_count == 0 {
210 unsafe {
212 self.partial.remove(base);
213 self.empty.push_front(base);
214 }
215 self.empty_count += 1;
216 CacheDeallocResult::Done
217 } else {
218 unsafe { self.partial.remove(base) };
220 CacheDeallocResult::FreeSlab {
221 base,
222 pages: self.size_class.slab_pages(PAGE_SIZE),
223 }
224 }
225 } else {
226 CacheDeallocResult::Done
227 }
228 }
229
230 pub fn add_slab(&mut self, base: usize, bytes: usize, owner_cpu: u16) {
232 let hdr = unsafe { &mut *(base as *mut SlabPageHeader) };
233 hdr.init(self.size_class, bytes, owner_cpu);
234 unsafe { self.partial.push_front(base) };
235 }
236}