// citadel_buffer/allocator.rs

//! Page allocator with two-phase pending-free model for CoW B+ tree.
use citadel_core::types::PageId;
5pub struct PageAllocator {
6    /// Next page ID to allocate from (high water mark).
7    next_page_id: u32,
8    /// Pages reclaimed from pending-free chain (safe to reuse).
9    ready_to_use: Vec<PageId>,
10    /// Pages freed in the current write transaction.
11    freed_this_txn: Vec<PageId>,
12    /// In-place CoW mode (SyncMode::Off + no readers): reuse page IDs.
13    in_place: bool,
14}
15
16impl PageAllocator {
17    pub fn new(high_water_mark: u32) -> Self {
18        Self {
19            next_page_id: high_water_mark,
20            ready_to_use: Vec::new(),
21            freed_this_txn: Vec::new(),
22            in_place: false,
23        }
24    }
25
26    pub fn set_in_place(&mut self, enabled: bool) {
27        self.in_place = enabled;
28    }
29
30    pub fn in_place(&self) -> bool {
31        self.in_place
32    }
33
34    /// Prefers reusing reclaimed pages over incrementing the high water mark.
35    pub fn allocate(&mut self) -> PageId {
36        if let Some(id) = self.ready_to_use.pop() {
37            id
38        } else {
39            let id = PageId(self.next_page_id);
40            self.next_page_id += 1;
41            id
42        }
43    }
44
45    /// Not immediately reusable - goes into pending-free list.
46    pub fn free(&mut self, page_id: PageId) {
47        self.freed_this_txn.push(page_id);
48    }
49
50    pub fn high_water_mark(&self) -> u32 {
51        self.next_page_id
52    }
53
54    pub fn freed_this_txn(&self) -> &[PageId] {
55        &self.freed_this_txn
56    }
57
58    pub fn add_ready_to_use(&mut self, pages: Vec<PageId>) {
59        self.ready_to_use.extend(pages);
60    }
61
62    pub fn commit(&mut self) -> Vec<PageId> {
63        std::mem::take(&mut self.freed_this_txn)
64    }
65
66    pub fn rollback(&mut self) {
67        self.freed_this_txn.clear();
68    }
69
70    pub fn ready_count(&self) -> usize {
71        self.ready_to_use.len()
72    }
73
74    pub fn freed_count(&self) -> usize {
75        self.freed_this_txn.len()
76    }
77}
78
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocate_from_hwm() {
        let mut pages = PageAllocator::new(0);
        // Fresh allocator hands out sequential IDs from the HWM.
        for expected in 0..3 {
            assert_eq!(pages.allocate(), PageId(expected));
        }
        assert_eq!(pages.high_water_mark(), 3);
    }

    #[test]
    fn allocate_from_ready_to_use() {
        let mut pages = PageAllocator::new(10);
        pages.add_ready_to_use(vec![PageId(3), PageId(7)]);
        // Reclaimed pages come back LIFO before the HWM is touched.
        assert_eq!(pages.allocate(), PageId(7));
        assert_eq!(pages.allocate(), PageId(3));
        // Ready list drained: next allocation bumps the HWM.
        assert_eq!(pages.allocate(), PageId(10));
    }

    #[test]
    fn free_and_commit() {
        let mut pages = PageAllocator::new(5);
        for id in [PageId(1), PageId(3)] {
            pages.free(id);
        }
        assert_eq!(pages.freed_count(), 2);

        // Commit drains the pending-free list and returns it.
        let pending = pages.commit();
        assert_eq!(pending.len(), 2);
        assert_eq!(pages.freed_count(), 0);
    }

    #[test]
    fn rollback_clears_freed() {
        let mut pages = PageAllocator::new(5);
        pages.free(PageId(1));
        pages.free(PageId(3));
        pages.rollback();
        assert_eq!(pages.freed_count(), 0);
    }
}
123}