// citadel_buffer/allocator.rs
//! Page allocator with two-phase pending-free model for CoW B+ tree.

use citadel_core::types::PageId;
4
/// Allocates and recycles page IDs for a copy-on-write B+ tree.
///
/// Two-phase free model: pages freed during a write transaction first land in
/// a pending list (`freed_this_txn`) and only become reusable after being
/// handed back via `add_ready_to_use` — they are never recycled within the
/// transaction that freed them.
#[derive(Clone)]
pub struct PageAllocator {
    /// Next page ID to allocate from (high water mark).
    next_page_id: u32,
    /// Pages reclaimed from pending-free chain (safe to reuse).
    ready_to_use: Vec<PageId>,
    /// Pages freed in the current write transaction.
    freed_this_txn: Vec<PageId>,
    /// All page IDs allocated this txn (in allocation order). Used to bound
    /// O(allocated) page-cache cleanup on ROLLBACK TO SAVEPOINT.
    allocated_this_txn: Vec<PageId>,
    /// In-place CoW mode (SyncMode::Off + no readers): reuse page IDs.
    /// NOTE(review): this flag is only stored and exposed here; the in-place
    /// allocation policy presumably lives in the caller — confirm.
    in_place: bool,
}
19
20impl PageAllocator {
21    pub fn new(high_water_mark: u32) -> Self {
22        Self {
23            next_page_id: high_water_mark,
24            ready_to_use: Vec::new(),
25            freed_this_txn: Vec::new(),
26            allocated_this_txn: Vec::new(),
27            in_place: false,
28        }
29    }
30
31    pub fn set_in_place(&mut self, enabled: bool) {
32        self.in_place = enabled;
33    }
34
35    pub fn in_place(&self) -> bool {
36        self.in_place
37    }
38
39    /// Prefers reusing reclaimed pages over incrementing the high water mark.
40    pub fn allocate(&mut self) -> PageId {
41        let id = if let Some(id) = self.ready_to_use.pop() {
42            id
43        } else {
44            let id = PageId(self.next_page_id);
45            self.next_page_id += 1;
46            id
47        };
48        self.allocated_this_txn.push(id);
49        id
50    }
51
52    /// Not immediately reusable - goes into pending-free list.
53    pub fn free(&mut self, page_id: PageId) {
54        self.freed_this_txn.push(page_id);
55    }
56
57    pub fn high_water_mark(&self) -> u32 {
58        self.next_page_id
59    }
60
61    pub fn freed_this_txn(&self) -> &[PageId] {
62        &self.freed_this_txn
63    }
64
65    pub fn allocated_this_txn(&self) -> &[PageId] {
66        &self.allocated_this_txn
67    }
68
69    pub fn add_ready_to_use(&mut self, pages: Vec<PageId>) {
70        self.ready_to_use.extend(pages);
71    }
72
73    pub fn commit(&mut self) -> Vec<PageId> {
74        self.allocated_this_txn.clear();
75        std::mem::take(&mut self.freed_this_txn)
76    }
77
78    pub fn rollback(&mut self) {
79        self.freed_this_txn.clear();
80        self.allocated_this_txn.clear();
81    }
82
83    pub fn ready_count(&self) -> usize {
84        self.ready_to_use.len()
85    }
86
87    pub fn freed_count(&self) -> usize {
88        self.freed_this_txn.len()
89    }
90
91    pub fn checkpoint(&self) -> AllocCheckpoint {
92        AllocCheckpoint {
93            next_page_id: self.next_page_id,
94            ready_to_use: self.ready_to_use.clone(),
95            freed_this_txn_len: self.freed_this_txn.len(),
96            allocated_this_txn_len: self.allocated_this_txn.len(),
97            in_place: self.in_place,
98        }
99    }
100
101    pub fn restore(&mut self, cp: AllocCheckpoint) {
102        self.next_page_id = cp.next_page_id;
103        self.ready_to_use = cp.ready_to_use;
104        self.freed_this_txn.truncate(cp.freed_this_txn_len);
105        self.allocated_this_txn.truncate(cp.allocated_this_txn_len);
106        self.in_place = cp.in_place;
107    }
108
109    pub fn allocated_since(&self, checkpoint_len: usize) -> &[PageId] {
110        &self.allocated_this_txn[checkpoint_len..]
111    }
112}
113
/// Snapshot of a `PageAllocator`'s state, taken at a savepoint.
///
/// The per-transaction vectors are recorded as lengths only: within a
/// transaction they are append-only, so truncating back to the recorded
/// length restores them exactly. `ready_to_use` is cloned in full because
/// `allocate` pops from it.
#[derive(Clone)]
pub struct AllocCheckpoint {
    // High water mark at checkpoint time.
    next_page_id: u32,
    // Full copy of the reusable-page stack at checkpoint time.
    ready_to_use: Vec<PageId>,
    // Length of `freed_this_txn` at checkpoint time.
    freed_this_txn_len: usize,
    // Length of `allocated_this_txn` at checkpoint time.
    allocated_this_txn_len: usize,
    // In-place CoW flag at checkpoint time.
    in_place: bool,
}
122
impl AllocCheckpoint {
    /// Allocation-log length recorded when this checkpoint was taken.
    /// Pair with `PageAllocator::allocated_since` to enumerate pages
    /// allocated after the savepoint.
    pub fn allocated_this_txn_len(&self) -> usize {
        self.allocated_this_txn_len
    }
}
128
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocate_from_hwm() {
        // With no reclaimed pages, IDs come straight off the high water mark.
        let mut pa = PageAllocator::new(0);
        for expected in 0..3u32 {
            assert_eq!(pa.allocate(), PageId(expected));
        }
        assert_eq!(pa.high_water_mark(), 3);
    }

    #[test]
    fn allocate_from_ready_to_use() {
        let mut pa = PageAllocator::new(10);
        pa.add_ready_to_use(vec![PageId(3), PageId(7)]);
        // Reclaimed pages come back LIFO; once drained, the HWM advances.
        assert_eq!(pa.allocate(), PageId(7));
        assert_eq!(pa.allocate(), PageId(3));
        assert_eq!(pa.allocate(), PageId(10));
    }

    #[test]
    fn free_and_commit() {
        let mut pa = PageAllocator::new(5);
        pa.free(PageId(1));
        pa.free(PageId(3));
        assert_eq!(pa.freed_count(), 2);

        // Commit drains the pending-free list and returns it to the caller.
        let drained = pa.commit();
        assert_eq!(drained.len(), 2);
        assert_eq!(pa.freed_count(), 0);
    }

    #[test]
    fn rollback_clears_freed() {
        let mut pa = PageAllocator::new(5);
        pa.free(PageId(1));
        pa.free(PageId(3));
        // Rollback discards the pending frees without returning them.
        pa.rollback();
        assert_eq!(pa.freed_count(), 0);
    }
}
171}