// featherdb_storage/freelist.rs

use crate::BufferPool;
use bitvec::prelude::*;
use featherdb_core::{constants, Error, PageId, Result};
use std::sync::atomic::{AtomicU64, Ordering};

/// Tracks which pages of the database file are allocated, using a bitmap
/// persisted in a single on-disk page.
pub struct FreeList {
    /// Page that persistently stores the allocation bitmap.
    root_page: PageId,
    /// One bit per page: `true` = allocated, `false` = free.
    bitmap: BitVec<u8, Lsb0>,
    /// Index from which `allocate` starts scanning; lowered by `free` so
    /// recently released pages are found quickly.
    next_free_hint: AtomicU64,
    /// Whether `bitmap` has been loaded from `root_page` yet (lazy load).
    loaded: bool,
}
19
20impl FreeList {
21 pub fn new(root_page: PageId) -> Self {
23 FreeList {
24 root_page,
25 bitmap: BitVec::new(),
26 next_free_hint: AtomicU64::new(constants::FIRST_DATA_PAGE),
27 loaded: false,
28 }
29 }
30
31 fn load(&mut self, buffer_pool: &BufferPool) -> Result<()> {
33 if self.loaded {
34 return Ok(());
35 }
36
37 let guard = buffer_pool.get_page(self.root_page)?;
42 let page = guard.read();
43
44 let data = page.as_bytes();
46 let bitmap_start = constants::PAGE_HEADER_SIZE;
47 let bitmap_data = &data[bitmap_start..];
48
49 self.bitmap = BitVec::from_slice(bitmap_data);
50 self.loaded = true;
51
52 Ok(())
53 }
54
55 fn save(&self, buffer_pool: &BufferPool) -> Result<()> {
57 let guard = buffer_pool.get_page_for_write(self.root_page)?;
58 let mut page = guard.write();
59
60 let data = page.as_bytes_mut();
61 let bitmap_start = constants::PAGE_HEADER_SIZE;
62
63 let bitmap_bytes = self.bitmap.as_raw_slice();
65 let copy_len = bitmap_bytes.len().min(data.len() - bitmap_start);
66 data[bitmap_start..bitmap_start + copy_len].copy_from_slice(&bitmap_bytes[..copy_len]);
67
68 Ok(())
69 }
70
71 pub fn allocate(&mut self, buffer_pool: &BufferPool) -> Result<PageId> {
73 self.load(buffer_pool)?;
74
75 let hint = self.next_free_hint.load(Ordering::Relaxed) as usize;
76
77 for i in hint..self.bitmap.len() {
79 if !self.bitmap[i] {
80 self.bitmap.set(i, true);
81 self.next_free_hint.store((i + 1) as u64, Ordering::Relaxed);
82 self.save(buffer_pool)?;
83 return Ok(PageId(i as u64));
84 }
85 }
86
87 for i in constants::FIRST_DATA_PAGE as usize..hint {
89 if !self.bitmap[i] {
90 self.bitmap.set(i, true);
91 self.next_free_hint.store((i + 1) as u64, Ordering::Relaxed);
92 self.save(buffer_pool)?;
93 return Ok(PageId(i as u64));
94 }
95 }
96
97 let new_page = PageId(self.bitmap.len() as u64);
99
100 while self.bitmap.len() <= new_page.0 as usize {
102 self.bitmap.push(false);
103 }
104 self.bitmap.set(new_page.0 as usize, true);
105 self.next_free_hint.store(new_page.0 + 1, Ordering::Relaxed);
106
107 self.save(buffer_pool)?;
108 Ok(new_page)
109 }
110
111 pub fn free(&mut self, page_id: PageId, buffer_pool: &BufferPool) -> Result<()> {
113 self.load(buffer_pool)?;
114
115 let idx = page_id.0 as usize;
116
117 while self.bitmap.len() <= idx {
119 self.bitmap.push(false);
120 }
121
122 if !self.bitmap[idx] {
123 return Err(Error::DoubleFree(page_id));
124 }
125
126 self.bitmap.set(idx, false);
127
128 let hint = self.next_free_hint.load(Ordering::Relaxed) as usize;
130 if idx < hint {
131 self.next_free_hint.store(idx as u64, Ordering::Relaxed);
132 }
133
134 self.save(buffer_pool)?;
135 Ok(())
136 }
137
138 pub fn is_allocated(&mut self, page_id: PageId, buffer_pool: &BufferPool) -> Result<bool> {
140 self.load(buffer_pool)?;
141
142 let idx = page_id.0 as usize;
143 if idx >= self.bitmap.len() {
144 return Ok(false);
145 }
146
147 Ok(self.bitmap[idx])
148 }
149
150 pub fn allocated_count(&mut self, buffer_pool: &BufferPool) -> Result<usize> {
152 self.load(buffer_pool)?;
153 Ok(self.bitmap.count_ones())
154 }
155
156 pub fn free_count(&mut self, buffer_pool: &BufferPool) -> Result<usize> {
158 self.load(buffer_pool)?;
159 Ok(self.bitmap.count_zeros())
160 }
161}
162
#[cfg(test)]
mod tests {
    use super::*;
    use crate::FileManager;
    use featherdb_core::Config;
    use std::sync::Arc;
    use tempfile::TempDir;

    /// Builds a fresh on-disk database plus a free list rooted at the
    /// standard freelist page. The TempDir is returned so the file outlives
    /// the test body.
    fn setup() -> (TempDir, Arc<BufferPool>, FreeList) {
        let dir = TempDir::new().unwrap();
        let db_path = dir.path().join("test.db");
        let config = Config::new(&db_path);

        let file_manager = Arc::new(FileManager::open(&config).unwrap());
        file_manager.init_if_needed(&config).unwrap();

        let pool = Arc::new(BufferPool::new(100, file_manager));
        let freelist = FreeList::new(PageId::from(constants::FREELIST_ROOT_PAGE));

        (dir, pool, freelist)
    }

    #[test]
    fn test_freelist_allocate() {
        let (_dir, pool, mut freelist) = setup();

        // Two consecutive allocations must yield distinct, live pages.
        let first = freelist.allocate(&pool).unwrap();
        let second = freelist.allocate(&pool).unwrap();

        assert_ne!(first, second);
        assert!(freelist.is_allocated(first, &pool).unwrap());
        assert!(freelist.is_allocated(second, &pool).unwrap());
    }

    #[test]
    fn test_freelist_free() {
        let (_dir, pool, mut freelist) = setup();

        // Allocate, then release: the page must transition to free.
        let page = freelist.allocate(&pool).unwrap();
        assert!(freelist.is_allocated(page, &pool).unwrap());

        freelist.free(page, &pool).unwrap();
        assert!(!freelist.is_allocated(page, &pool).unwrap());
    }

    #[test]
    fn test_freelist_reuse() {
        let (_dir, pool, mut freelist) = setup();

        // A freed page is the preferred candidate for the next allocation.
        let released = freelist.allocate(&pool).unwrap();
        freelist.free(released, &pool).unwrap();

        let reused = freelist.allocate(&pool).unwrap();
        assert_eq!(released, reused);
    }
}