chunked_range_alloc/
lib.rs

/*!
A simple range allocator for chunked external memory

[`ChunkedRangeAlloc`] was created for two use cases:

1. packing game assets into archive files
2. serving as the basis of a specialized Vulkan memory allocator

## Features:

- [`Allocation`] includes [`Allocation::chunk_index`] in addition to [`Allocation::offset`] and [`Allocation::len`]
- [`ChunkedRangeAlloc::from_allocations`] constructor for loading existing allocations (example: a game asset index)
- optional `bincode` and `serde` support, see [`Allocation`]
- simple, safe code
- _good enough_ performance: the allocator uses `BTree`s internally, searches best-fit, and coalesces immediately on free

## Non-goals:

- blazingly fast constant O(🚀) time complexity

## Example

```
use std::num::NonZeroU32;

use chunked_range_alloc::ChunkedRangeAlloc;

// create an allocator with chunk size = 4
let mut alloc = ChunkedRangeAlloc::new(NonZeroU32::new(4).unwrap());

// allocate size 1 with align = 1
let one = alloc.alloc(NonZeroU32::MIN, NonZeroU32::MIN);
assert_eq!(one.chunk_index, 0);
assert_eq!(one.offset, 0);
assert_eq!(one.len.get(), 1);

// allocate size 1 with align = 2
let two = alloc.alloc(NonZeroU32::MIN, NonZeroU32::new(2).unwrap());
assert_eq!(two.chunk_index, 0);
assert_eq!(two.offset, 2); // offset is not 1 because of alignment
assert_eq!(two.len.get(), 1);

let free_chunk = alloc.free(one);
assert!(!free_chunk);

// `free` returns true if the chunk becomes completely free;
// external memory can be eagerly released in that case
let free_chunk = alloc.free(two);
assert!(free_chunk);
```
*/

#![forbid(unsafe_code)]
#![warn(clippy::pedantic)]

use std::num::NonZeroU32;

use std::collections::{BTreeMap, BTreeSet};

/**
[`ChunkedRangeAlloc`] allocation

Serialization support:

- `bincode` feature: [`bincode::Encode`] and [`bincode::Decode`]
- `serde` feature: [`serde::Serialize`] and [`serde::Deserialize`]

Size is limited to [`u32::MAX`] (4GB) for a few reasons:

- packed 12-byte struct with 4-byte alignment
- _chunked_ allocation implies relatively small chunk sizes, not 4GB ones

Provides a niche so that `Option<Allocation>` is the same size as `Allocation`

```
use std::mem::{align_of, size_of};
use chunked_range_alloc::Allocation;

assert_eq!(size_of::<Allocation>(), 12);
assert_eq!(align_of::<Allocation>(), 4);
assert_eq!(size_of::<Option<Allocation>>(), size_of::<Allocation>());
```
*/
#[must_use]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "bincode", derive(bincode::Encode, bincode::Decode))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Allocation {
    /// Index of the chunk this allocation belongs to
    pub chunk_index: u32,
    /// Offset within the chunk
    pub offset: u32,
    /// Size of the allocation
    pub len: NonZeroU32,
}

/// A simple range allocator for chunked external memory, see [`Self::alloc`]
pub struct ChunkedRangeAlloc {
    chunk_size: NonZeroU32,
    /// Free ranges sorted by len -> chunk -> offset
    free_ranges: BTreeSet<FreeRange>,
    chunks: Vec<Chunk>,
    active_chunks: usize,
    allocated_memory: u64,
}

#[derive(Default)]
struct Chunk {
    free_offsets: BTreeMap<u32, NonZeroU32>,
}

impl Chunk {
    /// Last free range starting at or before `offset`
    #[inline]
    fn previous_free_range(&self, offset: u32) -> Option<(u32, NonZeroU32)> {
        self.free_offsets
            .range(..=offset)
            .next_back()
            .map(|(&off, &len)| (off, len))
    }

    /// First free range starting at or after `offset`
    #[inline]
    fn next_free_range(&self, offset: u32) -> Option<(u32, NonZeroU32)> {
        self.free_offsets
            .range(offset..)
            .next()
            .map(|(&off, &len)| (off, len))
    }
}

/// Sorted by field order: len -> chunk -> offset
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct FreeRange {
    /// sort: first
    len: NonZeroU32,
    /// sort: second
    chunk: u32,
    /// sort: third
    offset: u32,
}

struct AlignedFreeRange {
    free_range: FreeRange,
    end: u32,
    aligned_offset: u32,
    aligned_end: u32,
}

impl ChunkedRangeAlloc {
    /// Constructs a new, empty [`ChunkedRangeAlloc`] with the specified `chunk_size`
    #[must_use]
    #[inline]
    pub fn new(chunk_size: NonZeroU32) -> Self {
        ChunkedRangeAlloc {
            chunk_size,
            free_ranges: BTreeSet::new(),
            chunks: Vec::new(),
            active_chunks: 0,
            allocated_memory: 0,
        }
    }

    /// Constructs a [`ChunkedRangeAlloc`] with the specified `chunk_size` and inserts `allocations`
    ///
    /// Returns [`None`] if any allocation is invalid or overlaps another
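    ///
    /// A minimal round-trip sketch (values chosen for illustration): allocate,
    /// record the returned [`Allocation`]s, then rebuild an equivalent allocator
    /// from them
    ///
    /// ```
    /// use std::num::NonZeroU32;
    ///
    /// use chunked_range_alloc::ChunkedRangeAlloc;
    ///
    /// let chunk_size = NonZeroU32::new(8).unwrap();
    ///
    /// let mut alloc = ChunkedRangeAlloc::new(chunk_size);
    /// let a = alloc.alloc(NonZeroU32::new(3).unwrap(), NonZeroU32::MIN);
    /// let b = alloc.alloc(NonZeroU32::new(2).unwrap(), NonZeroU32::new(2).unwrap());
    ///
    /// // rebuild from recorded allocations, e.g. after loading an asset index
    /// let restored = ChunkedRangeAlloc::from_allocations(chunk_size, [&a, &b]).unwrap();
    ///
    /// assert_eq!(restored.chunk_count(), alloc.chunk_count());
    /// assert_eq!(restored.allocated_memory(), alloc.allocated_memory());
    /// ```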
    #[must_use]
    #[inline]
    pub fn from_allocations<'a>(
        chunk_size: NonZeroU32,
        allocations: impl IntoIterator<Item = &'a Allocation>,
    ) -> Option<Self> {
        let mut a = Self::new(chunk_size);

        for allocation in allocations {
            let chunk_index = usize::try_from(allocation.chunk_index).ok()?;

            // create chunks up to and including `chunk_index`
            let chunk = loop {
                if let Some(chunk) = a.chunks.get(chunk_index) {
                    break chunk;
                }

                let chunk = u32::try_from(a.chunks.len()).ok()?;

                // create new uninitialized chunk
                a.chunks.push(Chunk::default());

                // immediately insert free range covering entire chunk
                a.insert_free_range(FreeRange {
                    len: chunk_size,
                    chunk,
                    offset: 0,
                });
            };

            let (free_offset, free_len) = chunk.previous_free_range(allocation.offset)?;

            // ensure the allocation lies entirely within the free range
            let free_end = free_offset + free_len.get();
            let allocation_end = allocation.offset.checked_add(allocation.len.get())?;

            if !(free_offset <= allocation.offset && free_end >= allocation_end) {
                return None;
            }

            a.alloc_from(&AlignedFreeRange {
                free_range: FreeRange {
                    len: free_len,
                    chunk: allocation.chunk_index,
                    offset: free_offset,
                },
                end: free_end,
                aligned_offset: allocation.offset,
                aligned_end: allocation_end,
            });
        }

        Some(a)
    }

    /// Chunk size
    #[must_use]
    #[inline]
    pub fn chunk_size(&self) -> NonZeroU32 {
        self.chunk_size
    }

    /**
    Allocates memory with the specified `size` and `align`; alignment MUST be a power of two

    Use [`NonZeroU32::MIN`] as `align` if alignment is not required

    Attempts to find a suitable contiguous range using a best-fit search, otherwise creates a new chunk

    The caller is responsible for allocating external memory when it encounters a new chunk index; chunk indices are contiguous

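    A sketch of how chunks grow (sizes chosen for illustration): when the best-fit
    search finds no free range that fits, the allocation lands in a fresh chunk

    ```
    use std::num::NonZeroU32;

    use chunked_range_alloc::ChunkedRangeAlloc;

    let mut alloc = ChunkedRangeAlloc::new(NonZeroU32::new(4).unwrap());

    // the first allocation fills most of chunk 0
    let first = alloc.alloc(NonZeroU32::new(3).unwrap(), NonZeroU32::MIN);
    assert_eq!(first.chunk_index, 0);

    // size 2 no longer fits into chunk 0, so a new chunk is created
    let second = alloc.alloc(NonZeroU32::new(2).unwrap(), NonZeroU32::MIN);
    assert_eq!(second.chunk_index, 1);
    assert_eq!(second.offset, 0);
    ```
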
    # Panics

    - panics if `size` exceeds [`Self::chunk_size`]
    - panics if `align` is not a power of two
    - panics if chunk count exceeds [`u32::MAX`]
    */
    #[inline]
    pub fn alloc(&mut self, size: NonZeroU32, align: NonZeroU32) -> Allocation {
        if let Some(aligned_free_range) = self.find_free_range(size, align) {
            let allocation = Allocation {
                chunk_index: aligned_free_range.free_range.chunk,
                offset: aligned_free_range.aligned_offset,
                len: size,
            };

            self.alloc_from(&aligned_free_range);

            return allocation;
        }

        let len = size;
        let size = size.get();

        let chunk = u32::try_from(self.chunks.len()).expect("too many chunks");

        let tail_len = self
            .chunk_size
            .get()
            .checked_sub(size)
            .expect("size exceeds chunk_size");

        // create new uninitialized chunk
        self.chunks.push(Chunk::default());

        // insert tail [size, end) if required
        if let Some(len) = NonZeroU32::new(tail_len) {
            self.insert_free_range(FreeRange {
                len,
                chunk,
                offset: size,
            });
        }

        self.active_chunks += 1;

        self.allocated_memory += u64::from(size);

        Allocation {
            chunk_index: chunk,
            offset: 0,
            len,
        }
    }

    /**
    Frees the specified `allocation`

    Returns `true` if the chunk referenced by `allocation` becomes completely free
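
    A coalescing sketch (sizes chosen for illustration): freeing adjacent
    allocations merges them back into a single range, which can then serve a
    chunk-sized allocation again

    ```
    use std::num::NonZeroU32;

    use chunked_range_alloc::ChunkedRangeAlloc;

    let mut alloc = ChunkedRangeAlloc::new(NonZeroU32::new(8).unwrap());

    let a = alloc.alloc(NonZeroU32::new(4).unwrap(), NonZeroU32::MIN);
    let b = alloc.alloc(NonZeroU32::new(4).unwrap(), NonZeroU32::MIN);
    assert_eq!((a.chunk_index, b.chunk_index), (0, 0));

    // the two halves are coalesced back into one chunk-sized free range
    assert!(!alloc.free(a));
    assert!(alloc.free(b));

    // the coalesced range can hold a full-chunk allocation without a new chunk
    let c = alloc.alloc(NonZeroU32::new(8).unwrap(), NonZeroU32::MIN);
    assert_eq!((c.chunk_index, c.offset), (0, 0));
    ```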

    # Panics

    Panics if the allocation is invalid or double-freed
    */
    #[must_use]
    #[inline]
    pub fn free(&mut self, allocation: Allocation) -> bool {
        let chunk_index = usize::try_from(allocation.chunk_index).expect("invalid chunk index");
        let chunk = self.chunks.get(chunk_index).expect("invalid chunk index");

        let previous_free_range = chunk.previous_free_range(allocation.offset);
        let next_free_range = chunk.next_free_range(allocation.offset);

        let mut free_range = FreeRange {
            len: allocation.len,
            chunk: allocation.chunk_index,
            offset: allocation.offset,
        };

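        // e.g. freeing [2, 4) with neighbouring free ranges [0, 2) and [4, 6)
        // ends up inserting a single free range [0, 6)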
        // coalesce before
        if let Some((offset, len)) = previous_free_range {
            let previous_end = offset + len.get();

            assert!(previous_end <= free_range.offset, "double free");

            if previous_end == free_range.offset {
                free_range.offset = offset;
                free_range.len = free_range
                    .len
                    .checked_add(len.get())
                    .expect("invalid allocation");

                self.remove_free_range(FreeRange {
                    len,
                    chunk: free_range.chunk,
                    offset,
                });
            }
        }

        // coalesce after
        if let Some((offset, len)) = next_free_range {
            let end = free_range.offset + free_range.len.get();

            assert!(end <= offset, "double free");

            if end == offset {
                free_range.len = free_range
                    .len
                    .checked_add(len.get())
                    .expect("invalid allocation");

                self.remove_free_range(FreeRange {
                    len,
                    chunk: free_range.chunk,
                    offset,
                });
            }
        }

        self.insert_free_range(free_range);

        self.allocated_memory -= u64::from(allocation.len.get());

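        // a coalesced free range spanning the whole chunk means the chunk no
        // longer holds any live allocation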
        let free_chunk = free_range.len >= self.chunk_size;
        self.active_chunks -= usize::from(free_chunk);

        free_chunk
    }

    /// Number of tracked chunks, both active and free/unused
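    ///
    /// A bookkeeping sketch (values chosen for illustration) relating the chunk
    /// counters to allocations:
    ///
    /// ```
    /// use std::num::NonZeroU32;
    ///
    /// use chunked_range_alloc::ChunkedRangeAlloc;
    ///
    /// let mut alloc = ChunkedRangeAlloc::new(NonZeroU32::new(4).unwrap());
    /// let a = alloc.alloc(NonZeroU32::new(3).unwrap(), NonZeroU32::MIN);
    ///
    /// assert_eq!(alloc.chunk_count(), 1);
    /// assert_eq!(alloc.active_chunks(), 1);
    /// assert_eq!(alloc.unused_chunks(), 0);
    ///
    /// // freeing the only allocation leaves the chunk tracked but unused
    /// assert!(alloc.free(a));
    /// assert_eq!(alloc.chunk_count(), 1);
    /// assert_eq!(alloc.active_chunks(), 0);
    /// assert_eq!(alloc.unused_chunks(), 1);
    /// ```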
    #[must_use]
    #[inline]
    pub fn chunk_count(&self) -> usize {
        self.chunks.len()
    }

    /// Number of chunks containing live allocations
    #[must_use]
    #[inline]
    pub fn active_chunks(&self) -> usize {
        self.active_chunks
    }

    /// Shorthand for `chunk_count() - active_chunks()`
    #[must_use]
    #[inline]
    pub fn unused_chunks(&self) -> usize {
        self.chunk_count() - self.active_chunks
    }

    /// Shorthand for `chunk_count() * chunk_size()`
    #[must_use]
    #[inline]
    pub fn capacity(&self) -> u64 {
        u64::try_from(self.chunk_count()).unwrap_or(u64::MAX) * u64::from(self.chunk_size.get())
    }

    /// Sum of currently allocated [`Allocation::len`]s
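    ///
    /// A bookkeeping sketch (values chosen for illustration) relating the memory
    /// counters to allocations:
    ///
    /// ```
    /// use std::num::NonZeroU32;
    ///
    /// use chunked_range_alloc::ChunkedRangeAlloc;
    ///
    /// let mut alloc = ChunkedRangeAlloc::new(NonZeroU32::new(4).unwrap());
    /// let _a = alloc.alloc(NonZeroU32::new(3).unwrap(), NonZeroU32::MIN);
    ///
    /// assert_eq!(alloc.capacity(), 4);
    /// assert_eq!(alloc.allocated_memory(), 3);
    /// assert_eq!(alloc.remaining(), 1);
    /// assert_eq!(alloc.used_memory(), 4);
    /// assert_eq!(alloc.unused_memory(), 1);
    /// ```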
    #[must_use]
    #[inline]
    pub fn allocated_memory(&self) -> u64 {
        self.allocated_memory
    }

    /// Shorthand for `capacity() - allocated_memory()`
    #[must_use]
    #[inline]
    pub fn remaining(&self) -> u64 {
        self.capacity() - self.allocated_memory
    }

    /// Shorthand for `active_chunks() * chunk_size()`
    #[must_use]
    #[inline]
    pub fn used_memory(&self) -> u64 {
        u64::try_from(self.active_chunks).unwrap_or(u64::MAX) * u64::from(self.chunk_size.get())
    }

    /// Shorthand for `used_memory() - allocated_memory()`
    #[must_use]
    #[inline]
    pub fn unused_memory(&self) -> u64 {
        self.used_memory() - self.allocated_memory
    }

    #[inline]
    fn alloc_from(&mut self, aligned_free_range: &AlignedFreeRange) {
        let &AlignedFreeRange {
            free_range,
            end,
            aligned_offset,
            aligned_end,
        } = aligned_free_range;

        self.remove_free_range(free_range);

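        // e.g. allocating [4, 6) out of a free range [2, 8) leaves a
        // head [2, 4) and a tail [6, 8)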
        // insert head [free_range.offset, aligned_offset) if required
        if let Some(len) = NonZeroU32::new(aligned_offset - free_range.offset) {
            self.insert_free_range(FreeRange {
                len,
                chunk: free_range.chunk,
                offset: free_range.offset,
            });
        }

        // insert tail [aligned_end, end) if required
        if let Some(len) = NonZeroU32::new(end - aligned_end) {
            self.insert_free_range(FreeRange {
                len,
                chunk: free_range.chunk,
                offset: aligned_end,
            });
        }

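        // a free range spanning the whole chunk means the chunk held no live
        // allocations before this one, so it now becomes active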
        let free_chunk = free_range.len >= self.chunk_size;
        self.active_chunks += usize::from(free_chunk);

        self.allocated_memory += u64::from(aligned_end - aligned_offset);
    }

    /**
    Finds the smallest free range (best fit) that can hold an aligned allocation of `size`

    # Panics

    - panics if `size` exceeds [`Self::chunk_size`]
    - panics if `align` is not a power of two
    */
    #[inline]
    fn find_free_range(&self, size: NonZeroU32, align: NonZeroU32) -> Option<AlignedFreeRange> {
        assert!(size <= self.chunk_size);
        assert!(align.is_power_of_two());

        let align = align.get();

        let start = FreeRange {
            len: size,
            chunk: 0,
            offset: 0,
        };

        let size = size.get();

        // finding a free range suitable for an aligned allocation is not as simple as
        // picking the first one that is big enough: alignment padding may eat into it
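        // e.g. with align = 4, a free range [2, 6) has len 4 but the aligned
        // placement can only start at offset 4, leaving just 2 usable bytes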
        self.free_ranges
            .range(start..)
            .copied()
            .find_map(|free_range| {
                let end = free_range.offset + free_range.len.get();

                // align offset and end within the free range, skip on overflow
                let aligned_offset = free_range.offset.checked_next_multiple_of(align)?;
                let aligned_end = aligned_offset.checked_add(size)?;

                // check aligned fit
                if aligned_end > end {
                    return None;
                }

                Some(AlignedFreeRange {
                    free_range,
                    end,
                    aligned_offset,
                    aligned_end,
                })
            })
    }

    /// Inserts free range into `free_ranges` and the chunk's `free_offsets`
    ///
    /// Panics if the range is already present
    #[inline]
    fn insert_free_range(&mut self, free_range: FreeRange) {
        let chunk_index = usize::try_from(free_range.chunk).expect("invalid chunk index");
        let chunk = self
            .chunks
            .get_mut(chunk_index)
            .expect("invalid chunk index");

        let inserted = self.free_ranges.insert(free_range);
        assert!(inserted);

        let last = chunk.free_offsets.insert(free_range.offset, free_range.len);
        assert!(last.is_none());
    }

    /// Removes free range from `free_ranges` and the chunk's `free_offsets`
    ///
    /// Panics if the range is not present
    #[inline]
    fn remove_free_range(&mut self, free_range: FreeRange) {
        let chunk_index = usize::try_from(free_range.chunk).expect("invalid chunk index");
        let chunk = self
            .chunks
            .get_mut(chunk_index)
            .expect("invalid chunk index");

        let removed = self.free_ranges.remove(&free_range);
        assert!(removed);

        let removed = chunk.free_offsets.remove(&free_range.offset);
        assert_eq!(removed, Some(free_range.len));
    }
}