buddy_slab_allocator/slab/slab_node.rs

#[cfg(feature = "log")]
use log::error;

pub use super::slab_byte_allocator::SizeClass;

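/// In-place metadata stored at the start of every slab.
///
/// The header is followed (after aligning its size up to the object size)
/// by the object storage area. A set bit in `free_bitmap` marks a free
/// slot; `prev`/`next` hold the base addresses of neighbouring slabs
/// (0 means "none").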
#[repr(C)]
pub(crate) struct SlabHeader {
    magic: u32,
    size_class: u16,
    object_count: u16,
    free_count: u16,
    _reserved: u16,
    slab_bytes: usize,
    prev: usize,
    next: usize,
    free_bitmap: [u64; FREE_BITMAP_WORDS],
}

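// "SLAB" in ASCII; a magic value used to sanity-check that an address really
// points at an initialized slab header. Eight 64-bit bitmap words cap a slab
// at 8 * 64 = 512 objects.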
const SLAB_HEADER_MAGIC: u32 = 0x534c_4142;
const FREE_BITMAP_WORDS: usize = 8;

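/// Lightweight handle to one slab: its base address plus the size class it
/// serves. All bookkeeping lives in the `SlabHeader` written at `addr`, so
/// the handle itself is trivially `Copy`.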
#[derive(Debug, Clone, Copy)]
pub struct SlabNode {
    pub addr: usize,
    pub size_class: SizeClass,
}

impl SlabNode {
    pub const MAX_OBJECTS: usize = FREE_BITMAP_WORDS * 64;

    pub const fn new(addr: usize, size_class: SizeClass) -> Self {
        Self { addr, size_class }
    }

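    // The header size is rounded up to the object size so that object 0
    // starts at an offset that is a multiple of the object size, keeping
    // `object_addr`/`object_index_from_addr` simple modular arithmetic.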
    fn header_size_aligned(&self) -> usize {
        crate::align_up(core::mem::size_of::<SlabHeader>(), self.size_class.size())
    }

    fn object_base(&self) -> usize {
        self.addr + self.header_size_aligned()
    }

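    // Reinterpret the slab's base address as its header. Callers must only
    // use these on a slab that has already been through `init_header`,
    // otherwise the header contents are garbage.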
    fn header(&self) -> &SlabHeader {
        unsafe { &*(self.addr as *const SlabHeader) }
    }

    fn header_mut(&mut self) -> &mut SlabHeader {
        unsafe { &mut *(self.addr as *mut SlabHeader) }
    }

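    /// Writes a fresh header into the slab: computes how many objects fit
    /// after the aligned header (capped at `MAX_OBJECTS`), marks them all
    /// free, and clears the list links. A slab too small to hold even one
    /// object is left untouched.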
    pub fn init_header(&mut self, slab_bytes: usize) {
        let object_size = self.size_class.size();
        let header_size_aligned = self.header_size_aligned();
        let object_count = if slab_bytes > header_size_aligned {
            (slab_bytes - header_size_aligned) / object_size
        } else {
            0
        }
        .min(Self::MAX_OBJECTS);

        if object_count == 0 {
            return;
        }

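        // Start with every bit set (free) and clear the bits beyond
        // `object_count`, so `alloc_object` can never hand out a slot that
        // lies past the end of the slab.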
        let mut free_bitmap = [u64::MAX; FREE_BITMAP_WORDS];
        if object_count < Self::MAX_OBJECTS {
            let full_words = object_count / 64;
            let rem_bits = object_count % 64;
            #[allow(clippy::needless_range_loop)]
            for i in 0..FREE_BITMAP_WORDS {
                if i < full_words {
                    continue;
                }
                if i == full_words {
                    if rem_bits == 0 {
                        free_bitmap[i] = 0;
                    } else {
                        free_bitmap[i] &= (1u64 << rem_bits) - 1;
                    }
                } else {
                    free_bitmap[i] = 0;
                }
            }
        }

        let header = self.header_mut();
        *header = SlabHeader {
            magic: SLAB_HEADER_MAGIC,
            size_class: object_size as u16,
            object_count: object_count as u16,
            free_count: object_count as u16,
            _reserved: 0,
            slab_bytes,
            prev: 0,
            next: 0,
            free_bitmap,
        };
    }

    pub fn is_valid_for_size_class(&self) -> bool {
        let header = self.header();
        header.magic == SLAB_HEADER_MAGIC && header.size_class as usize == self.size_class.size()
    }

    pub fn in_use(&self) -> u32 {
        let header = self.header();
        header.object_count as u32 - header.free_count as u32
    }

    pub fn free_count(&self) -> u32 {
        self.header().free_count as u32
    }

    pub fn is_full(&self) -> bool {
        self.header().free_count == 0
    }

    pub fn is_empty(&self) -> bool {
        self.header().free_count == self.header().object_count
    }

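    /// Claims the lowest free slot, returning its object index, or `None`
    /// if the slab is full. The scan is a first-fit walk over the bitmap
    /// words using `trailing_zeros` to find the lowest set bit.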
    pub fn alloc_object(&mut self) -> Option<usize> {
        let header = self.header_mut();
        if header.free_count == 0 {
            return None;
        }
        let object_count = header.object_count as usize;
        // Scan by index so the bitmap word can be mutated inside the loop.
        for word_idx in 0..FREE_BITMAP_WORDS {
            let word = header.free_bitmap[word_idx];
            if word != 0 {
                let bit_pos = word.trailing_zeros() as usize;
                let object_index = word_idx * 64 + bit_pos;

                if object_index >= object_count {
                    continue;
                }

                header.free_bitmap[word_idx] &= !(1u64 << bit_pos);
                header.free_count -= 1;
                return Some(object_index);
            }
        }
        None
    }

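    /// Marks `object_index` free again. Returns `false` if the index is out
    /// of range or the slot was already free (a double free), otherwise
    /// `true`.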
    pub fn dealloc_object(&mut self, object_index: usize) -> bool {
        let header = self.header_mut();
        if object_index < header.object_count as usize {
            let word_idx = object_index / 64;
            let bit_idx = object_index % 64;
            let mask = 1u64 << bit_idx;
            let was_free = (header.free_bitmap[word_idx] & mask) != 0;

            if was_free {
                return false;
            }

            header.free_bitmap[word_idx] |= mask;
            header.free_count = header.free_count.saturating_add(1);
            true
        } else {
            false
        }
    }

    pub fn object_addr(&self, object_index: usize) -> usize {
        self.object_base() + object_index * self.size_class.size()
    }

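    /// Maps an object address back to its index within this slab, returning
    /// `None` if the address lies before the object area, is not aligned to
    /// the object size, or falls past the last object.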
    pub fn object_index_from_addr(&self, obj_addr: usize) -> Option<usize> {
        let base = self.object_base();
        if obj_addr < base {
            return None;
        }

        let offset = obj_addr - base;
        if offset % self.size_class.size() != 0 {
            // Only logged when the crate's `log` feature is enabled.
            #[cfg(feature = "log")]
            error!("Invalid object address: {obj_addr:x}");
            return None;
        }

        let object_index = offset / self.size_class.size();

        if object_index < self.header().object_count as usize {
            Some(object_index)
        } else {
            None
        }
    }

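    /// Number of pages to request for one slab of this size class: enough
    /// room for `MAX_OBJECTS` objects. The in-place header then consumes a
    /// little of that space, so the usable object count ends up slightly
    /// below `MAX_OBJECTS`.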
    pub fn page_count(&self, page_size: usize) -> usize {
        let object_size = self.size_class.size();
        let bytes_needed = Self::MAX_OBJECTS * object_size;
        bytes_needed.div_ceil(page_size)
    }

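    // `prev`/`next` store raw base addresses of neighbouring slabs, forming
    // an intrusive doubly linked list; 0 serves as the "no neighbour"
    // sentinel, hence the Option mapping below.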
    pub fn prev(&self) -> Option<usize> {
        let prev = self.header().prev;
        if prev == 0 {
            None
        } else {
            Some(prev)
        }
    }

    pub fn next(&self) -> Option<usize> {
        let next = self.header().next;
        if next == 0 {
            None
        } else {
            Some(next)
        }
    }

    pub fn set_prev(&mut self, prev: Option<usize>) {
        self.header_mut().prev = prev.unwrap_or(0);
    }

    pub fn set_next(&mut self, next: Option<usize>) {
        self.header_mut().next = next.unwrap_or(0);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::alloc::{alloc, dealloc};
    use core::alloc::Layout;

    #[test]
    fn test_slab_node() {
        let mut node = SlabNode::new(0, SizeClass::Bytes64);
        let slab_pages = node.page_count(4096);
        let slab_bytes = slab_pages * 4096;
        let layout = Layout::from_size_align(slab_bytes, slab_bytes).unwrap();
        let base = unsafe { alloc(layout) } as usize;
        assert_ne!(base, 0);
        node.addr = base;
        node.init_header(slab_bytes);

        assert!(node.is_empty());
        assert!(!node.is_full());
        assert!(node.free_count() <= SlabNode::MAX_OBJECTS as u32);
        assert_eq!(node.in_use(), 0);

        let obj_idx = node.alloc_object().unwrap();
        assert_eq!(node.object_addr(obj_idx), node.object_base());
        assert_eq!(node.in_use(), 1);
        assert_eq!(
            node.free_count(),
            (node.header().object_count as u32).saturating_sub(1)
        );

        node.dealloc_object(obj_idx);
        assert!(node.is_empty());
        assert_eq!(node.in_use(), 0);
        assert_eq!(node.free_count(), node.header().object_count as u32);

        unsafe { dealloc(base as *mut u8, layout) };
    }

    #[test]
    fn test_object_index_from_addr() {
        let mut node = SlabNode::new(0, SizeClass::Bytes64);
        let slab_pages = node.page_count(4096);
        let slab_bytes = slab_pages * 4096;
        let layout = Layout::from_size_align(slab_bytes, slab_bytes).unwrap();
        let base = unsafe { alloc(layout) } as usize;
        assert_ne!(base, 0);
        node.addr = base;
        node.init_header(slab_bytes);

        let obj0 = node.object_addr(0);
        assert_eq!(node.object_index_from_addr(obj0), Some(0));
        assert_eq!(node.object_index_from_addr(obj0 + 64), Some(1));
        assert_eq!(node.object_index_from_addr(obj0 + 63), None);
        assert_eq!(node.object_index_from_addr(obj0 + slab_bytes), None);

        unsafe { dealloc(base as *mut u8, layout) };
    }

    #[test]
    fn test_page_count() {
        let node8 = SlabNode::new(0, SizeClass::Bytes8);
        assert_eq!(node8.page_count(4096), 1);

        let node64 = SlabNode::new(0, SizeClass::Bytes64);
        assert_eq!(node64.page_count(4096), 8);

        let node2048 = SlabNode::new(0, SizeClass::Bytes2048);
        assert_eq!(node2048.page_count(4096), 256);
    }
}