buddy_slab_allocator/slab/slab_cache.rs

//! Slab cache implementation for a single size class.
//!
//! This module implements `SlabCache`, which manages three lists (empty, partial,
//! full) of slab nodes for a specific size class.
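//!
//! Allocation prefers a partially used slab, then a cached empty slab, and only
//! then requests fresh pages from the backing page allocator. A slab that becomes
//! completely free is either kept on the empty list (at most two empty slabs are
//! retained) or handed back to the page allocator.
//!
//! A minimal usage sketch; `MyPages` stands in for some `PageAllocatorForSlab`
//! implementation (see the unit tests for a mock):
//!
//! ```ignore
//! let mut cache = SlabCache::new(SizeClass::Bytes64);
//! let mut pages = MyPages::new();
//! // `bytes` reports how much memory was pulled from the page allocator.
//! let (obj, bytes) = cache.alloc_object(&mut pages, 4096).unwrap();
//! cache.dealloc_object(obj, &mut pages, 4096);
//! ```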

#[cfg(feature = "log")]
use log::{error, warn};

use super::slab_byte_allocator::{PageAllocatorForSlab as BytePageAllocator, SizeClass};
use super::slab_node::SlabNode;
use crate::{AllocError, AllocResult};

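// `warn!` and `error!` are used unconditionally further down in this module.
// Assuming the crate does not already provide no-op fallbacks for them, these
// definitions keep those call sites compiling when the `log` feature is disabled.
#[cfg(not(feature = "log"))]
macro_rules! warn {
    ($($arg:tt)*) => {{}};
}
#[cfg(not(feature = "log"))]
macro_rules! error {
    ($($arg:tt)*) => {{}};
}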
/// Round `pos` down to the nearest multiple of `align` (not restricted to
/// power-of-two alignments); `pos` is returned unchanged when `align` is zero.
fn align_down_any(pos: usize, align: usize) -> usize {
    if align == 0 {
        return pos;
    }
    (pos / align) * align
}

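/// Intrusive doubly linked list of slabs for one size class. The prev/next
/// links live in each slab's own in-memory header (accessed through `SlabNode`),
/// so the list itself only tracks head, tail, and length.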
struct SlabIntrusiveList {
    head: Option<usize>,
    tail: Option<usize>,
    len: usize,
}

impl SlabIntrusiveList {
    pub const fn new() -> Self {
        Self {
            head: None,
            tail: None,
            len: 0,
        }
    }

    pub fn len(&self) -> usize {
        self.len
    }

    pub fn back(&self) -> Option<usize> {
        self.tail
    }

    pub fn push_back(&mut self, size_class: SizeClass, slab_base: usize) {
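        // `SlabNode::new` only wraps the slab header already stored at the given
        // base address; constructing it here does not allocate anything.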
        let mut node = SlabNode::new(slab_base, size_class);
        node.set_prev(self.tail);
        node.set_next(None);

        if let Some(tail) = self.tail {
            let mut tail_node = SlabNode::new(tail, size_class);
            tail_node.set_next(Some(slab_base));
        } else {
            self.head = Some(slab_base);
        }

        self.tail = Some(slab_base);
        self.len += 1;
    }

    pub fn pop_back(&mut self, size_class: SizeClass) -> Option<usize> {
        let tail = self.tail?;
        self.remove(size_class, tail);
        Some(tail)
    }

    pub fn remove(&mut self, size_class: SizeClass, slab_base: usize) {
        let mut node = SlabNode::new(slab_base, size_class);
        let prev = node.prev();
        let next = node.next();

        if let Some(prev_base) = prev {
            let mut prev_node = SlabNode::new(prev_base, size_class);
            prev_node.set_next(next);
        } else {
            self.head = next;
        }

        if let Some(next_base) = next {
            let mut next_node = SlabNode::new(next_base, size_class);
            next_node.set_prev(prev);
        } else {
            self.tail = prev;
        }

        node.set_prev(None);
        node.set_next(None);
        self.len = self.len.saturating_sub(1);
    }
}

/// Slab cache for a specific size class
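/// Slabs migrate between the three lists as their occupancy changes: a slab with
/// both used and free objects lives on `partial`, a completely used slab on `full`,
/// and a completely free slab on `empty`.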
pub struct SlabCache {
    size_class: SizeClass,
    empty: SlabIntrusiveList,
    partial: SlabIntrusiveList,
    full: SlabIntrusiveList,
}

impl SlabCache {
    pub const fn new(size_class: SizeClass) -> Self {
        Self {
            size_class,
            empty: SlabIntrusiveList::new(),
            partial: SlabIntrusiveList::new(),
            full: SlabIntrusiveList::new(),
        }
    }

    /// Allocate an object from this cache
    /// Returns (object_addr, bytes_allocated_from_page_allocator)
    pub fn alloc_object(
        &mut self,
        page_allocator: &mut dyn BytePageAllocator,
        page_size: usize,
    ) -> AllocResult<(usize, usize)> {
        // 1. Try to allocate from partial list
        if let Some(slab_base) = self.partial.back() {
            let mut node = SlabNode::new(slab_base, self.size_class);
            if !node.is_valid_for_size_class() {
                return Err(AllocError::InvalidParam);
            }
            if let Some(obj_idx) = node.alloc_object() {
                let obj_addr = node.object_addr(obj_idx);
                if node.is_full() {
                    self.partial.remove(self.size_class, slab_base);
                    self.full.push_back(self.size_class, slab_base);
                }
                return Ok((obj_addr, 0));
            }
            panic!("Allocation from partial slab failed despite free_count > 0, bitmap inconsistency detected");
        }

        // 2. Try to allocate from empty list
        if let Some(slab_base) = self.empty.pop_back(self.size_class) {
            let mut node = SlabNode::new(slab_base, self.size_class);
            if !node.is_valid_for_size_class() {
                return Err(AllocError::InvalidParam);
            }
            if let Some(obj_idx) = node.alloc_object() {
                let obj_addr = node.object_addr(obj_idx);
                self.partial.push_back(self.size_class, slab_base);

                let prealloc_bytes = self.preallocate_empty_slab(page_allocator, page_size);
                return Ok((obj_addr, prealloc_bytes));
            }
            panic!("Allocation from empty slab failed despite all objects being free, bitmap inconsistency detected");
        }

        // 3. Allocate a new slab from the page allocator
        let (obj_addr, bytes) = self.allocate_new_slab(page_allocator, page_size)?;
        Ok((obj_addr, bytes))
    }

    /// Allocate a new slab from page allocator
    /// Returns (object_addr, bytes_allocated_from_page_allocator)
    fn allocate_new_slab(
        &mut self,
        page_allocator: &mut dyn BytePageAllocator,
        page_size: usize,
    ) -> AllocResult<(usize, usize)> {
        let object_size = self.size_class.size();
        let bytes_needed = SlabNode::MAX_OBJECTS * object_size;
        let page_count = bytes_needed.div_ceil(page_size);
        let slab_bytes = page_count * page_size;

        let start_addr = page_allocator.alloc_pages(page_count, slab_bytes)?;

        let mut new_node = SlabNode::new(start_addr, self.size_class);
        new_node.init_header(slab_bytes);

        if let Some(obj_idx) = new_node.alloc_object() {
            let obj_addr = new_node.object_addr(obj_idx);
            self.partial.push_back(self.size_class, start_addr);

            let prealloc_bytes = self.preallocate_empty_slab(page_allocator, page_size);
            return Ok((obj_addr, slab_bytes + prealloc_bytes));
        }

        // This should never happen - newly initialized slab must have at least one free object
        panic!("Failed to allocate from newly initialized slab: bitmap inconsistency or corruption detected");
    }

    /// Pre-allocate an empty slab for future allocations
    /// Returns bytes allocated from the page allocator (0 if an empty slab is already cached)
    fn preallocate_empty_slab(
        &mut self,
        page_allocator: &mut dyn BytePageAllocator,
        page_size: usize,
    ) -> usize {
        if self.empty.len() > 0 {
            return 0;
        }

        let object_size = self.size_class.size();
        let bytes_needed = SlabNode::MAX_OBJECTS * object_size;
        let page_count = bytes_needed.div_ceil(page_size);
        let slab_bytes = page_count * page_size;

        if let Ok(start_addr) = page_allocator.alloc_pages(page_count, slab_bytes) {
            let mut new_node = SlabNode::new(start_addr, self.size_class);
            new_node.init_header(slab_bytes);
            self.empty.push_back(self.size_class, start_addr);
            return slab_bytes;
        }

        0
    }

    /// Deallocate an object
    /// Returns (bytes_freed_from_page_allocator, actually_deallocated)
    /// actually_deallocated is false if this was a double-free
    pub fn dealloc_object(
        &mut self,
        obj_addr: usize,
        page_allocator: &mut dyn BytePageAllocator,
        page_size: usize,
    ) -> (usize, bool) {
        let object_size = self.size_class.size();
        let bytes_needed = SlabNode::MAX_OBJECTS * object_size;
        let page_count = bytes_needed.div_ceil(page_size);
        let slab_bytes = page_count * page_size;

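        // Slabs are allocated with `slab_bytes` alignment (see `allocate_new_slab`),
        // so rounding the object address down by that amount recovers the slab base.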
        let slab_base = align_down_any(obj_addr, slab_bytes);
        let mut node = SlabNode::new(slab_base, self.size_class);
        if !node.is_valid_for_size_class() {
            // This can happen if the slab was already returned to the page allocator
            // and the memory was reused, or if the pointer is completely invalid.
            // For robustness, especially in double-free tests, we return false.
            warn!(
                "slab allocator: Invalid slab base {:#x} for size class {:?}",
                slab_base, self.size_class
            );
            warn!(
                "this can happen if the slab was already returned to the page allocator \
                 and the memory was reused, or if the pointer is completely invalid"
            );
            return (0, false);
        }

        let was_full = node.is_full();
        let (should_dealloc_slab, actually_freed) =
            if let Some(obj_idx) = node.object_index_from_addr(obj_addr) {
                // dealloc_object returns true if object was allocated, false if already free (double-free)
                let actually_freed = node.dealloc_object(obj_idx);
                (node.is_empty() && actually_freed, actually_freed)
            } else {
                error!("Invalid address {obj_addr:x} in slab at {slab_base:x}: not a valid object");
                return (0, true); // Not a double-free, just invalid address (treat as no-op)
            };

        // Only manipulate lists if this was not a double-free
        if actually_freed {
            // Remove slab from its current list before moving or deallocating it
            if was_full {
                self.full.remove(self.size_class, slab_base);
            } else {
                self.partial.remove(self.size_class, slab_base);
            }

            if should_dealloc_slab {
                // Slab became empty - either deallocate or move to empty list
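                // At most two fully empty slabs are kept cached; beyond that, the
                // slab's pages go back to the page allocator.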
                if self.empty.len() >= 2 {
                    page_allocator.dealloc_pages(slab_base, page_count);
                    return (slab_bytes, true);
                } else {
                    self.empty.push_back(self.size_class, slab_base);
                    return (0, true);
                }
            }

            // Slab still has objects - if it was full, it's now partial
            if was_full {
                self.partial.push_back(self.size_class, slab_base);
            }
        }

        (0, actually_freed)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::alloc::{alloc, dealloc};
    use core::alloc::Layout;

    // Re-import SizeClass for tests
    use super::super::slab_byte_allocator::SizeClass;

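    /// Test-only page allocator backed by the global heap; it records each
    /// allocation's layout so pages can later be freed with a matching layout.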
    struct MockPageAllocator {
        allocated: alloc::vec::Vec<(usize, Layout, usize)>,
    }

    impl MockPageAllocator {
        fn new() -> Self {
            Self {
                allocated: alloc::vec::Vec::new(),
            }
        }
    }

    impl BytePageAllocator for MockPageAllocator {
        fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
            let size = num_pages * 4096;
            let layout =
                Layout::from_size_align(size, alignment).map_err(|_| AllocError::InvalidParam)?;
            let addr = unsafe { alloc(layout) } as usize;
            if addr == 0 {
                return Err(AllocError::NoMemory);
            }
            self.allocated.push((addr, layout, num_pages));
            Ok(addr)
        }

        fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
            if let Some(idx) = self
                .allocated
                .iter()
                .position(|&(addr, _layout, count)| addr == pos && count == num_pages)
            {
                let (_addr, layout, _count) = self.allocated.swap_remove(idx);
                unsafe { dealloc(pos as *mut u8, layout) };
            }
        }
    }

    #[test]
    fn test_alloc_dealloc() {
        let mut cache = SlabCache::new(SizeClass::Bytes64);
        let mut page_allocator = MockPageAllocator::new();

        let (obj_addr, _) = cache.alloc_object(&mut page_allocator, 4096).unwrap();

        assert_ne!(obj_addr, 0);

        cache.dealloc_object(obj_addr, &mut page_allocator, 4096);
    }

    #[test]
    fn test_multiple_allocs() {
        let mut cache = SlabCache::new(SizeClass::Bytes64);
        let mut page_allocator = MockPageAllocator::new();

        let mut addrs = alloc::vec::Vec::new();
        for _ in 0..10 {
            let (addr, _) = cache.alloc_object(&mut page_allocator, 4096).unwrap();
            addrs.push(addr);
        }

        assert_eq!(addrs.len(), 10);

        for addr in addrs {
            cache.dealloc_object(addr, &mut page_allocator, 4096);
        }
    }

    #[test]
    fn test_empty_node_management() {
        let mut cache = SlabCache::new(SizeClass::Bytes64);
        let mut page_allocator = MockPageAllocator::new();

        let (addr1, _) = cache.alloc_object(&mut page_allocator, 4096).unwrap();
        cache.dealloc_object(addr1, &mut page_allocator, 4096);

        let (addr2, _) = cache.alloc_object(&mut page_allocator, 4096).unwrap();
        cache.dealloc_object(addr2, &mut page_allocator, 4096);

        let (addr3, _) = cache.alloc_object(&mut page_allocator, 4096).unwrap();
        cache.dealloc_object(addr3, &mut page_allocator, 4096);

        assert!(cache.empty.len() <= 2);
    }
}