kstone_core/extent.rs

//! Extent allocation for the SST heap.
//!
//! The SST heap region is managed as a collection of variable-size extents.
//! Each extent represents a contiguous range of blocks for an SST file.
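//!
//! A minimal usage sketch (hypothetical doctest; it assumes this module is
//! reachable as `kstone_core::extent` and that `layout::BLOCK_SIZE` is 4096,
//! as the tests below imply):
//!
//! ```ignore
//! use kstone_core::extent::ExtentAllocator;
//!
//! let alloc = ExtentAllocator::new(1024 * 1024); // SST heap starts 1 MiB into the file
//! let ext = alloc.allocate(1000).unwrap();       // rounded up to one 4 KiB block
//! assert_eq!(ext.size, 4096);
//! alloc.free(ext.id).unwrap();                   // space is reclaimed later, by compaction
//! ```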

use parking_lot::Mutex;
use std::collections::BTreeMap;
use crate::{Error, Result, layout::BLOCK_SIZE};

/// Extent ID - unique identifier for an extent
pub type ExtentId = u64;

/// An allocated extent in the SST heap
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct Extent {
    /// Unique identifier for this extent
    pub id: ExtentId,
    /// Byte offset from start of file
    pub offset: u64,
    /// Size in bytes
    pub size: u64,
}

impl Extent {
    pub fn new(id: ExtentId, offset: u64, size: u64) -> Self {
        Self { id, offset, size }
    }

    /// Byte offset one past the end of this extent
    pub fn end(&self) -> u64 {
        self.offset + self.size
    }

    /// Number of blocks this extent spans (partial blocks round up)
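    ///
    /// A short sketch (assuming 4 KiB blocks, as the tests below do):
    ///
    /// ```ignore
    /// assert_eq!(Extent::new(3, 0, 5000).num_blocks(), 2); // partial block counts as one
    /// ```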
    pub fn num_blocks(&self) -> u64 {
        (self.size + BLOCK_SIZE as u64 - 1) / BLOCK_SIZE as u64
    }
}

/// Extent allocator for the SST heap
///
/// Uses a simple bump allocator strategy:
/// - Allocations always extend the file
/// - Free space tracking is deferred to compaction
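///
/// A contiguity sketch (hypothetical doctest; assumes 4 KiB blocks):
///
/// ```ignore
/// let alloc = ExtentAllocator::new(0);
/// let a = alloc.allocate(4096).unwrap();
/// let b = alloc.allocate(4096).unwrap();
/// // Bump allocation: each extent starts exactly where the previous one ends.
/// assert_eq!(b.offset, a.end());
/// ```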
pub struct ExtentAllocator {
    inner: Mutex<ExtentAllocatorInner>,
}

struct ExtentAllocatorInner {
    /// Base offset of the SST heap in the file
    base_offset: u64,
    /// Next extent ID
    next_id: ExtentId,
    /// Current end of allocated space (relative to base_offset)
    allocated_end: u64,
    /// Map of extent_id -> extent
    extents: BTreeMap<ExtentId, Extent>,
}

impl ExtentAllocator {
    /// Create a new allocator
    pub fn new(base_offset: u64) -> Self {
        Self {
            inner: Mutex::new(ExtentAllocatorInner {
                base_offset,
                next_id: 1,
                allocated_end: 0,
                extents: BTreeMap::new(),
            }),
        }
    }

    /// Allocate a new extent of at least `size` bytes
    ///
    /// The size is rounded up to the next `BLOCK_SIZE` boundary, so the
    /// returned extent may be larger than requested.
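    ///
    /// A rounding sketch (hypothetical doctest; assumes 4 KiB blocks):
    ///
    /// ```ignore
    /// let allocator = ExtentAllocator::new(0);
    /// let ext = allocator.allocate(5000).unwrap();
    /// assert_eq!(ext.size, 8192); // 5000 bytes rounds up to two blocks
    /// ```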
    pub fn allocate(&self, size: u64) -> Result<Extent> {
        if size == 0 {
            return Err(Error::InvalidArgument("Extent size must be > 0".to_string()));
        }

        let mut inner = self.inner.lock();

        let id = inner.next_id;
        inner.next_id += 1;

        // Align size to block boundary
        let aligned_size = align_to_block(size);

        let offset = inner.base_offset + inner.allocated_end;
        let extent = Extent::new(id, offset, aligned_size);

        inner.allocated_end += aligned_size;
        inner.extents.insert(id, extent);

        Ok(extent)
    }

    /// Free an extent (mark its space for future compaction)
    ///
    /// This removes the extent from the live map but does not shrink
    /// `allocated_size()`; the space is reclaimed later, during compaction.
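    ///
    /// A sketch of the deferred reclamation (hypothetical doctest):
    ///
    /// ```ignore
    /// let allocator = ExtentAllocator::new(0);
    /// let ext = allocator.allocate(4096).unwrap();
    /// allocator.free(ext.id).unwrap();
    /// assert_eq!(allocator.num_extents(), 0);       // gone from the live map
    /// assert_eq!(allocator.allocated_size(), 4096); // space not yet reclaimed
    /// ```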
    pub fn free(&self, id: ExtentId) -> Result<()> {
        let mut inner = self.inner.lock();

        if inner.extents.remove(&id).is_none() {
            return Err(Error::InvalidArgument(format!("Extent {} not found", id)));
        }

        // Note: We don't reclaim space immediately. This will be done during compaction.
        Ok(())
    }

    /// Get an extent by ID
    pub fn get(&self, id: ExtentId) -> Option<Extent> {
        let inner = self.inner.lock();
        inner.extents.get(&id).copied()
    }

    /// Get total allocated size (in bytes)
    pub fn allocated_size(&self) -> u64 {
        let inner = self.inner.lock();
        inner.allocated_end
    }

    /// Get number of allocated extents
    pub fn num_extents(&self) -> usize {
        let inner = self.inner.lock();
        inner.extents.len()
    }

    /// List all allocated extents
    pub fn list_extents(&self) -> Vec<Extent> {
        let inner = self.inner.lock();
        inner.extents.values().copied().collect()
    }
}

/// Align size to block boundary (round up)
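///
/// For example, with 4 KiB blocks (as the tests below assume):
/// `align_to_block(1) == 4096`, `align_to_block(4096) == 4096`, and
/// `align_to_block(4097) == 8192`.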
fn align_to_block(size: u64) -> u64 {
    let block_size = BLOCK_SIZE as u64;
    ((size + block_size - 1) / block_size) * block_size
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_to_block() {
        assert_eq!(align_to_block(0), 0);
        assert_eq!(align_to_block(1), BLOCK_SIZE as u64);
        assert_eq!(align_to_block(4096), BLOCK_SIZE as u64);
        assert_eq!(align_to_block(4097), 2 * BLOCK_SIZE as u64);
        assert_eq!(align_to_block(8192), 2 * BLOCK_SIZE as u64);
    }

    #[test]
    fn test_extent_allocator_basic() {
        let base = 1024 * 1024; // 1 MiB base offset
        let allocator = ExtentAllocator::new(base);

        // Allocate first extent
        let ext1 = allocator.allocate(1000).unwrap();
        assert_eq!(ext1.id, 1);
        assert_eq!(ext1.offset, base);
        assert_eq!(ext1.size, BLOCK_SIZE as u64); // Aligned

        // Allocate second extent
        let ext2 = allocator.allocate(5000).unwrap();
        assert_eq!(ext2.id, 2);
        assert_eq!(ext2.offset, base + BLOCK_SIZE as u64);
        assert_eq!(ext2.size, 2 * BLOCK_SIZE as u64); // 5000 rounds to 8192

        assert_eq!(allocator.num_extents(), 2);
    }

    #[test]
    fn test_extent_allocator_free() {
        let allocator = ExtentAllocator::new(0);

        let ext1 = allocator.allocate(1000).unwrap();
        let ext2 = allocator.allocate(2000).unwrap();

        assert_eq!(allocator.num_extents(), 2);

        allocator.free(ext1.id).unwrap();
        assert_eq!(allocator.num_extents(), 1);

        // Can't free twice
        let result = allocator.free(ext1.id);
        assert!(matches!(result, Err(Error::InvalidArgument(_))));

        // Can get remaining extent
        let retrieved = allocator.get(ext2.id).unwrap();
        assert_eq!(retrieved, ext2);
    }

    #[test]
    fn test_extent_allocator_allocated_size() {
        let allocator = ExtentAllocator::new(1000);

        assert_eq!(allocator.allocated_size(), 0);

        allocator.allocate(4096).unwrap();
        assert_eq!(allocator.allocated_size(), 4096);

        allocator.allocate(100).unwrap(); // Aligns to 4096
        assert_eq!(allocator.allocated_size(), 8192);
    }

    #[test]
    fn test_extent_num_blocks() {
        let ext1 = Extent::new(1, 0, 4096);
        assert_eq!(ext1.num_blocks(), 1);

        let ext2 = Extent::new(2, 0, 8192);
        assert_eq!(ext2.num_blocks(), 2);

        let ext3 = Extent::new(3, 0, 5000);
        assert_eq!(ext3.num_blocks(), 2); // 5000 bytes = 2 blocks
    }

    #[test]
    fn test_extent_list() {
        let allocator = ExtentAllocator::new(0);

        let ext1 = allocator.allocate(1000).unwrap();
        let ext2 = allocator.allocate(2000).unwrap();
        let ext3 = allocator.allocate(3000).unwrap();

        let extents = allocator.list_extents();
        assert_eq!(extents.len(), 3);
        assert!(extents.contains(&ext1));
        assert!(extents.contains(&ext2));
        assert!(extents.contains(&ext3));
    }

    #[test]
    fn test_extent_zero_size() {
        let allocator = ExtentAllocator::new(0);
        let result = allocator.allocate(0);
        assert!(matches!(result, Err(Error::InvalidArgument(_))));
    }
}
237}