// citadel_buffer/allocator.rs

//! Page allocator with two-phase pending-free model for CoW B+ tree.

use citadel_core::types::PageId;
5#[derive(Clone)]
6pub struct PageAllocator {
7    /// Next page ID to allocate from (high water mark).
8    next_page_id: u32,
9    /// Pages reclaimed from pending-free chain (safe to reuse).
10    ready_to_use: Vec<PageId>,
11    /// Pages freed in the current write transaction.
12    freed_this_txn: Vec<PageId>,
13    /// All page IDs allocated this txn (in allocation order). Used to bound
14    /// O(allocated) page-cache cleanup on ROLLBACK TO SAVEPOINT.
15    allocated_this_txn: Vec<PageId>,
16    /// In-place CoW mode (SyncMode::Off + no readers): reuse page IDs.
17    in_place: bool,
18}
19
20impl PageAllocator {
21    pub fn new(high_water_mark: u32) -> Self {
22        Self {
23            next_page_id: high_water_mark,
24            ready_to_use: Vec::new(),
25            freed_this_txn: Vec::new(),
26            allocated_this_txn: Vec::new(),
27            in_place: false,
28        }
29    }
30
31    pub fn set_in_place(&mut self, enabled: bool) {
32        self.in_place = enabled;
33    }
34
35    pub fn in_place(&self) -> bool {
36        self.in_place
37    }
38
39    /// Prefers reusing reclaimed pages over incrementing the high water mark.
40    pub fn allocate(&mut self) -> PageId {
41        let id = if let Some(id) = self.ready_to_use.pop() {
42            id
43        } else {
44            let id = PageId(self.next_page_id);
45            self.next_page_id += 1;
46            id
47        };
48        self.allocated_this_txn.push(id);
49        id
50    }
51
52    /// Not immediately reusable - goes into pending-free list.
53    pub fn free(&mut self, page_id: PageId) {
54        self.freed_this_txn.push(page_id);
55    }
56
57    pub fn high_water_mark(&self) -> u32 {
58        self.next_page_id
59    }
60
61    pub fn freed_this_txn(&self) -> &[PageId] {
62        &self.freed_this_txn
63    }
64
65    pub fn allocated_this_txn(&self) -> &[PageId] {
66        &self.allocated_this_txn
67    }
68
69    pub fn add_ready_to_use(&mut self, pages: Vec<PageId>) {
70        self.ready_to_use.extend(pages);
71    }
72
73    pub fn commit(&mut self) -> Vec<PageId> {
74        self.allocated_this_txn.clear();
75        std::mem::take(&mut self.freed_this_txn)
76    }
77
78    pub fn rollback(&mut self) {
79        self.freed_this_txn.clear();
80        self.allocated_this_txn.clear();
81    }
82
83    pub fn ready_count(&self) -> usize {
84        self.ready_to_use.len()
85    }
86
87    pub fn freed_count(&self) -> usize {
88        self.freed_this_txn.len()
89    }
90}
91
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocate_from_hwm() {
        let mut pa = PageAllocator::new(0);
        // With an empty reclaimed pool, IDs come straight off the HWM.
        for expected in 0..3u32 {
            assert_eq!(pa.allocate(), PageId(expected));
        }
        assert_eq!(pa.high_water_mark(), 3);
    }

    #[test]
    fn allocate_from_ready_to_use() {
        let mut pa = PageAllocator::new(10);
        pa.add_ready_to_use(vec![PageId(3), PageId(7)]);
        // Reclaimed pages are popped LIFO from the back of the pool.
        assert_eq!(pa.allocate(), PageId(7));
        assert_eq!(pa.allocate(), PageId(3));
        // Pool exhausted: fall back to the high water mark.
        assert_eq!(pa.allocate(), PageId(10));
    }

    #[test]
    fn free_and_commit() {
        let mut pa = PageAllocator::new(5);
        pa.free(PageId(1));
        pa.free(PageId(3));
        assert_eq!(pa.freed_count(), 2);

        // Commit surrenders the freed pages and resets the per-txn list.
        let pending = pa.commit();
        assert_eq!(pending.len(), 2);
        assert_eq!(pa.freed_count(), 0);
    }

    #[test]
    fn rollback_clears_freed() {
        let mut pa = PageAllocator::new(5);
        pa.free(PageId(1));
        pa.free(PageId(3));
        pa.rollback();
        assert_eq!(pa.freed_count(), 0);
    }
}