// citadel_buffer/allocator.rs
//! Page allocator with two-phase pending-free model for CoW B+ tree.

use citadel_core::types::PageId;
/// Allocates page IDs for a copy-on-write B+ tree using a two-phase
/// pending-free model: pages freed during a write transaction are
/// quarantined in `freed_this_txn` until the transaction ends, and only
/// become reusable once handed back through `add_ready_to_use`.
#[derive(Clone)]
pub struct PageAllocator {
    /// Next page ID to allocate from (high water mark).
    next_page_id: u32,
    /// Pages reclaimed from pending-free chain (safe to reuse).
    ready_to_use: Vec<PageId>,
    /// Pages freed in the current write transaction.
    freed_this_txn: Vec<PageId>,
    /// In-place CoW mode (SyncMode::Off + no readers): reuse page IDs.
    in_place: bool,
}
16
17impl PageAllocator {
18    pub fn new(high_water_mark: u32) -> Self {
19        Self {
20            next_page_id: high_water_mark,
21            ready_to_use: Vec::new(),
22            freed_this_txn: Vec::new(),
23            in_place: false,
24        }
25    }
26
27    pub fn set_in_place(&mut self, enabled: bool) {
28        self.in_place = enabled;
29    }
30
31    pub fn in_place(&self) -> bool {
32        self.in_place
33    }
34
35    /// Prefers reusing reclaimed pages over incrementing the high water mark.
36    pub fn allocate(&mut self) -> PageId {
37        if let Some(id) = self.ready_to_use.pop() {
38            id
39        } else {
40            let id = PageId(self.next_page_id);
41            self.next_page_id += 1;
42            id
43        }
44    }
45
46    /// Not immediately reusable - goes into pending-free list.
47    pub fn free(&mut self, page_id: PageId) {
48        self.freed_this_txn.push(page_id);
49    }
50
51    pub fn high_water_mark(&self) -> u32 {
52        self.next_page_id
53    }
54
55    pub fn freed_this_txn(&self) -> &[PageId] {
56        &self.freed_this_txn
57    }
58
59    pub fn add_ready_to_use(&mut self, pages: Vec<PageId>) {
60        self.ready_to_use.extend(pages);
61    }
62
63    pub fn commit(&mut self) -> Vec<PageId> {
64        std::mem::take(&mut self.freed_this_txn)
65    }
66
67    pub fn rollback(&mut self) {
68        self.freed_this_txn.clear();
69    }
70
71    pub fn ready_count(&self) -> usize {
72        self.ready_to_use.len()
73    }
74
75    pub fn freed_count(&self) -> usize {
76        self.freed_this_txn.len()
77    }
78}
79
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocate_from_hwm() {
        let mut allocator = PageAllocator::new(0);
        // Fresh allocations count up from the initial high water mark.
        for expected in 0..3u32 {
            assert_eq!(allocator.allocate(), PageId(expected));
        }
        assert_eq!(allocator.high_water_mark(), 3);
    }

    #[test]
    fn allocate_from_ready_to_use() {
        let mut allocator = PageAllocator::new(10);
        allocator.add_ready_to_use(vec![PageId(3), PageId(7)]);
        // Reclaimed pages are handed out first, most recently added first.
        assert_eq!(allocator.allocate(), PageId(7));
        assert_eq!(allocator.allocate(), PageId(3));
        // With the reclaim list drained, allocation resumes at the HWM.
        assert_eq!(allocator.allocate(), PageId(10));
    }

    #[test]
    fn free_and_commit() {
        let mut allocator = PageAllocator::new(5);
        for id in [PageId(1), PageId(3)] {
            allocator.free(id);
        }
        assert_eq!(allocator.freed_count(), 2);

        // Commit drains the per-transaction freed list.
        let drained = allocator.commit();
        assert_eq!(drained.len(), 2);
        assert_eq!(allocator.freed_count(), 0);
    }

    #[test]
    fn rollback_clears_freed() {
        let mut allocator = PageAllocator::new(5);
        allocator.free(PageId(1));
        allocator.free(PageId(3));
        allocator.rollback();
        assert_eq!(allocator.freed_count(), 0);
    }
}