// sqlitegraph/backend/native/v2/free_space/manager.rs
use super::block::FreeBlock;
2use super::stats::{AllocationStats, CompactionReport, FreeSpaceAnalysis};
3use crate::backend::native::{NativeBackendError, NativeResult};
4use crate::debug::debug_log;
5
/// Freed regions smaller than this are dropped instead of tracked
/// (see `add_free_block`); `compact` also prunes sub-minimum fragments.
pub const MIN_BLOCK_SIZE: u32 = 32;
/// Fragmentation-ratio threshold above which `needs_compaction` reports true.
const MAX_FRAGMENTATION_RATIO: f64 = 0.3;
8
/// Policy used by [`FreeSpaceManager`] to choose among the free blocks that
/// can satisfy an allocation request.
#[derive(Debug, Clone, Copy)]
pub enum AllocationStrategy {
    /// Take the first block large enough (cheapest scan).
    FirstFit,
    /// Take the smallest block that fits (minimizes leftover tail space).
    BestFit,
    /// Take the largest block that fits (keeps leftover tails large).
    WorstFit,
}
15
/// Tracks the free regions of the backing store and serves allocation
/// requests from them using a configurable [`AllocationStrategy`].
#[derive(Debug, Clone)]
pub struct FreeSpaceManager {
    // Tracked free regions; sorted by offset during merging, but order is
    // otherwise unspecified (splits push remainders at the end).
    free_blocks: Vec<FreeBlock>,
    // Policy applied when several blocks can satisfy a request.
    strategy: AllocationStrategy,
    // Lifetime counters plus the current fragmentation ratio.
    stats: AllocationStats,
}
22
23impl FreeSpaceManager {
24 pub fn new(strategy: AllocationStrategy) -> Self {
25 Self {
26 free_blocks: Vec::new(),
27 strategy,
28 stats: AllocationStats::default(),
29 }
30 }
31
32 pub fn add_free_block(&mut self, offset: u64, size: u32) {
33 if size < MIN_BLOCK_SIZE {
34 return;
35 }
36 self.free_blocks.push(FreeBlock::new(offset, size));
37 self.stats.total_deallocations += 1;
38 self.stats.total_deallocated_bytes += size as u64;
39 self.try_merge_adjacent_blocks();
40 self.update_fragmentation_ratio();
41 }
42
43 pub fn remove_from_free_list(&mut self, offset: u64, size: u32) -> NativeResult<()> {
48 let original_len = self.free_blocks.len();
49
50 self.free_blocks.retain(|block| {
51 !(block.offset == offset && block.size >= size)
53 });
54
55 if self.free_blocks.len() < original_len {
56 debug_log!(
57 "Removed block offset={} size={} from free list ({} blocks removed)",
58 offset,
59 size,
60 original_len - self.free_blocks.len()
61 );
62 self.update_fragmentation_ratio();
63 Ok(())
64 } else {
65 Err(NativeBackendError::CorruptFreeSpace {
66 reason: format!(
67 "Block at offset {} (size {}) not found in free list",
68 offset, size
69 ),
70 })
71 }
72 }
73
74 pub fn allocate(&mut self, requested_size: u32) -> NativeResult<u64> {
75 if requested_size == 0 {
76 return Ok(0);
77 }
78
79 let index = self.find_suitable_block(requested_size)?;
80 let mut block = self.free_blocks.remove(index);
81 let allocated_offset = block.offset;
82
83 if let Some(remaining) = block.split_if_needed(requested_size) {
84 self.free_blocks.push(remaining);
85 self.stats.block_splits += 1;
86 }
87
88 self.stats.total_allocations += 1;
89 self.stats.total_allocated_bytes += requested_size as u64;
90 self.update_fragmentation_ratio();
91 Ok(allocated_offset)
92 }
93
94 fn find_suitable_block(&self, requested_size: u32) -> NativeResult<usize> {
95 let candidates: Vec<usize> = self
96 .free_blocks
97 .iter()
98 .enumerate()
99 .filter(|(_, block)| block.can_accommodate(requested_size))
100 .map(|(index, _)| index)
101 .collect();
102
103 if candidates.is_empty() {
104 return Err(NativeBackendError::OutOfSpace);
105 }
106
107 let selected = match self.strategy {
108 AllocationStrategy::FirstFit => candidates[0],
109 AllocationStrategy::BestFit => candidates
110 .iter()
111 .min_by_key(|&&i| self.free_blocks[i].size)
112 .copied()
113 .ok_or_else(|| NativeBackendError::CorruptFreeSpace {
114 reason: "BestFit strategy failed to find minimum block in non-empty candidates"
115 .to_string(),
116 })?,
117 AllocationStrategy::WorstFit => candidates
118 .iter()
119 .max_by_key(|&&i| self.free_blocks[i].size)
120 .copied()
121 .ok_or_else(|| NativeBackendError::CorruptFreeSpace {
122 reason:
123 "WorstFit strategy failed to find maximum block in non-empty candidates"
124 .to_string(),
125 })?,
126 };
127 Ok(selected)
128 }
129
130 fn try_merge_adjacent_blocks(&mut self) {
131 if self.free_blocks.len() < 2 {
132 return;
133 }
134
135 self.free_blocks.sort_by_key(|block| block.offset);
136 let mut merged = Vec::new();
137 let mut current = self.free_blocks[0].clone();
138
139 for next in self.free_blocks[1..].iter() {
140 if current.can_merge_with(next) {
141 current.merge_with(next);
142 self.stats.block_merges += 1;
143 } else {
144 merged.push(current);
145 current = next.clone();
146 }
147 }
148
149 merged.push(current);
150 self.free_blocks = merged;
151 }
152
153 fn update_fragmentation_ratio(&mut self) {
154 if self.free_blocks.is_empty() {
155 self.stats.fragmentation_ratio = 0.0;
156 return;
157 }
158
159 let total_free: u64 = self.free_blocks.iter().map(|b| b.size as u64).sum();
160 let largest: u64 = self
161 .free_blocks
162 .iter()
163 .map(|b| b.size as u64)
164 .max()
165 .unwrap_or(0);
166
167 self.stats.fragmentation_ratio = if total_free > 0 {
168 1.0 - (largest as f64 / total_free as f64)
169 } else {
170 0.0
171 };
172 }
173
174 pub fn needs_compaction(&self) -> bool {
175 self.stats.fragmentation_ratio > MAX_FRAGMENTATION_RATIO && self.free_blocks.len() > 10
176 }
177
178 pub fn compact(&mut self) -> CompactionReport {
179 let start_fragments = self.free_blocks.len();
180 let start_ratio = self.stats.fragmentation_ratio;
181 self.try_merge_adjacent_blocks();
182
183 let before = self.free_blocks.len();
184 self.free_blocks
185 .retain(|block| block.size >= MIN_BLOCK_SIZE);
186 let removed_small = before - self.free_blocks.len();
187 self.update_fragmentation_ratio();
188
189 CompactionReport {
190 initial_fragments: start_fragments,
191 final_fragments: self.free_blocks.len(),
192 initial_fragmentation_ratio: start_ratio,
193 final_fragmentation_ratio: self.stats.fragmentation_ratio,
194 small_fragments_removed: removed_small,
195 blocks_merged: self.stats.block_merges,
196 }
197 }
198
199 pub fn stats(&self) -> &AllocationStats {
200 &self.stats
201 }
202
203 pub fn free_blocks(&self) -> &[FreeBlock] {
204 &self.free_blocks
205 }
206
207 pub fn total_free_space(&self) -> u64 {
208 self.free_blocks.iter().map(|block| block.size as u64).sum()
209 }
210
211 pub fn largest_free_block(&self) -> Option<u32> {
212 self.free_blocks.iter().map(|block| block.size).max()
213 }
214
215 pub fn validate(&self) -> NativeResult<()> {
216 let mut sorted = self.free_blocks.clone();
217 sorted.sort_by_key(|block| block.offset);
218
219 for i in 0..sorted.len().saturating_sub(1) {
220 let current = &sorted[i];
221 let next = &sorted[i + 1];
222 if current.offset + current.size as u64 > next.offset {
223 return Err(NativeBackendError::CorruptFreeSpace {
224 reason: format!(
225 "Overlapping free blocks: {}-{} and {}-{}",
226 current.offset,
227 current.offset + current.size as u64,
228 next.offset,
229 next.offset + next.size as u64
230 ),
231 });
232 }
233 }
234
235 if !(0.0..=1.0).contains(&self.stats.fragmentation_ratio) {
236 return Err(NativeBackendError::CorruptFreeSpace {
237 reason: format!(
238 "Invalid fragmentation ratio: {}",
239 self.stats.fragmentation_ratio
240 ),
241 });
242 }
243
244 Ok(())
245 }
246
247 pub fn reset(&mut self) {
248 self.free_blocks.clear();
249 self.stats = AllocationStats::default();
250 }
251
252 pub fn export_analysis(&self) -> FreeSpaceAnalysis {
253 let histogram = self.create_size_histogram();
254 FreeSpaceAnalysis {
255 total_blocks: self.free_blocks.len(),
256 total_free_bytes: self.total_free_space(),
257 largest_block: self.largest_free_block().unwrap_or(0),
258 average_block_size: if !self.free_blocks.is_empty() {
259 self.total_free_space() / self.free_blocks.len() as u64
260 } else {
261 0
262 },
263 fragmentation_ratio: self.stats.fragmentation_ratio,
264 blocks_by_size: self
265 .free_blocks
266 .iter()
267 .map(|block| (block.size, block.offset))
268 .collect(),
269 size_histogram: histogram,
270 }
271 }
272
273 fn create_size_histogram(&self) -> Vec<(u32, usize)> {
274 const SIZE_RANGES: [(u32, u32); 6] = [
275 (32, 64),
276 (64, 256),
277 (256, 1024),
278 (1024, 4096),
279 (4096, 16384),
280 (16384, u32::MAX),
281 ];
282
283 let mut histogram = vec![(0, 0); SIZE_RANGES.len()];
284 for block in &self.free_blocks {
285 for (i, &(min, max)) in SIZE_RANGES.iter().enumerate() {
286 if block.size >= min && block.size <= max {
287 histogram[i].1 += 1;
288 break;
289 }
290 }
291 }
292 histogram
293 }
294}