// citadel_buffer/allocator.rs
//! Page allocator for the CoW B+ tree.
//!
//! Uses a two-phase pending-free model:
//! - New pages are allocated from `ready_to_use` (reclaimed) or the high water mark
//! - Freed pages go into `freed_this_txn` (not reusable until committed + no older readers)
//! - On-disk pending-free chain persistence happens during commit
use citadel_core::types::PageId;

/// Tracks page-ID allocation state for the copy-on-write B+ tree.
///
/// Pages freed during a write transaction are quarantined in
/// `freed_this_txn` and only become reusable (moved into `ready_to_use`)
/// once the caller decides it is safe — see the commit/reclaim flow in
/// the `impl` below.
pub struct PageAllocator {
    /// Next page ID to allocate from (high water mark).
    /// Invariant: no page with an ID >= this value has ever been handed out.
    next_page_id: u32,
    /// Pages reclaimed from the pending-free chain (safe to reuse).
    /// Used as a LIFO stack by `allocate`.
    ready_to_use: Vec<PageId>,
    /// Pages freed in the current write transaction.
    /// Drained by `commit`, discarded by `rollback`.
    freed_this_txn: Vec<PageId>,
}
18
19impl PageAllocator {
20    pub fn new(high_water_mark: u32) -> Self {
21        Self {
22            next_page_id: high_water_mark,
23            ready_to_use: Vec::new(),
24            freed_this_txn: Vec::new(),
25        }
26    }
27
28    /// Prefers reusing reclaimed pages over incrementing the high water mark.
29    pub fn allocate(&mut self) -> PageId {
30        if let Some(id) = self.ready_to_use.pop() {
31            id
32        } else {
33            let id = PageId(self.next_page_id);
34            self.next_page_id += 1;
35            id
36        }
37    }
38
39    /// Not immediately reusable - goes into pending-free list.
40    pub fn free(&mut self, page_id: PageId) {
41        self.freed_this_txn.push(page_id);
42    }
43
44    pub fn high_water_mark(&self) -> u32 {
45        self.next_page_id
46    }
47
48    pub fn freed_this_txn(&self) -> &[PageId] {
49        &self.freed_this_txn
50    }
51
52    pub fn add_ready_to_use(&mut self, pages: Vec<PageId>) {
53        self.ready_to_use.extend(pages);
54    }
55
56    pub fn commit(&mut self) -> Vec<PageId> {
57        std::mem::take(&mut self.freed_this_txn)
58    }
59
60    pub fn rollback(&mut self) {
61        self.freed_this_txn.clear();
62    }
63
64    pub fn ready_count(&self) -> usize {
65        self.ready_to_use.len()
66    }
67
68    pub fn freed_count(&self) -> usize {
69        self.freed_this_txn.len()
70    }
71}
72
#[cfg(test)]
mod tests {
    use super::*;

    /// Fresh allocator hands out sequential IDs from the high water mark.
    #[test]
    fn allocate_from_hwm() {
        let mut alloc = PageAllocator::new(0);
        assert_eq!(alloc.allocate(), PageId(0));
        assert_eq!(alloc.allocate(), PageId(1));
        assert_eq!(alloc.allocate(), PageId(2));
        assert_eq!(alloc.high_water_mark(), 3);
    }

    /// Reclaimed pages are consumed LIFO before the HWM advances.
    #[test]
    fn allocate_from_ready_to_use() {
        let mut alloc = PageAllocator::new(10);
        alloc.add_ready_to_use(vec![PageId(3), PageId(7)]);
        assert_eq!(alloc.ready_count(), 2);
        // Should use ready_to_use first (LIFO)
        assert_eq!(alloc.allocate(), PageId(7));
        assert_eq!(alloc.allocate(), PageId(3));
        // Now falls back to HWM
        assert_eq!(alloc.allocate(), PageId(10));
        // Reusing reclaimed pages must not advance the HWM.
        assert_eq!(alloc.high_water_mark(), 11);
    }

    /// Commit drains the pending-free list and returns the exact pages
    /// in free order (not just the right count, as the old test checked).
    #[test]
    fn free_and_commit() {
        let mut alloc = PageAllocator::new(5);
        alloc.free(PageId(1));
        alloc.free(PageId(3));
        assert_eq!(alloc.freed_count(), 2);
        assert_eq!(alloc.freed_this_txn(), &[PageId(1), PageId(3)]);

        let freed = alloc.commit();
        assert_eq!(freed, vec![PageId(1), PageId(3)]);
        assert_eq!(alloc.freed_count(), 0);
        // Freeing and committing never moves the high water mark.
        assert_eq!(alloc.high_water_mark(), 5);
    }

    /// Rollback discards the pending-free list without returning it.
    #[test]
    fn rollback_clears_freed() {
        let mut alloc = PageAllocator::new(5);
        alloc.free(PageId(1));
        alloc.free(PageId(3));
        alloc.rollback();
        assert_eq!(alloc.freed_count(), 0);
        // A subsequent commit has nothing to persist.
        assert!(alloc.commit().is_empty());
    }
}