orderly_allocator/
lib.rs

#![doc = include_str!("../README.md")]
#![no_std]
extern crate alloc;
use {
  ::alloc::collections::{BTreeMap, BTreeSet},
  ::core::{cmp::Ordering, error::Error, fmt, num::NonZero, ops::Range},
};

type Size = u32;
type Location = Size;

/// Metadata containing information about an allocation
///
/// This is a small `Copy` type. It also provides a niche, so that
/// `Option<Allocation>` has the same size as `Allocation`.
/// ```
/// # use {::core::mem::size_of, ::orderly_allocator::Allocation};
/// assert_eq!(size_of::<Allocation>(), size_of::<u64>());
/// assert_eq!(size_of::<Option<Allocation>>(), size_of::<Allocation>());
/// ```
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct Allocation {
  /// The location of this allocation within the buffer
  pub offset: Location,
  /// The size of this allocation
  pub size: NonZero<Size>,
}

impl Allocation {
  /// Get the offset of the allocation
  ///
  /// This is just a wrapper for `allocation.offset` for symmetry with `size`.
  pub fn offset(&self) -> Location {
    self.offset
  }

  /// Get the size of the allocation
  ///
  /// This is just sugar for `allocation.size.get()`.
  pub fn size(&self) -> Size {
    self.size.get()
  }

  /// Get a [`Range<usize>`] from `offset` to `offset + size`
  ///
  /// This can be used to directly index a buffer.
  ///
  /// For example:
  /// ```
  /// # use {::core::num::NonZero, ::orderly_allocator::Allocation};
  /// let buffer: Vec<usize> = (0..100).collect();
  /// let allocation = Allocation {
  ///   offset: 25,
  ///   size: NonZero::new(4).unwrap()
  /// };
  ///
  /// let region = &buffer[allocation.range()];
  ///
  /// assert_eq!(region, &[25, 26, 27, 28]);
  /// ```
  pub fn range(&self) -> Range<usize> {
    (self.offset as usize)..((self.offset + self.size.get()) as usize)
  }
}

/// A super-simple soft-realtime allocator for managing an external pool of
/// memory
#[derive(Clone)]
pub struct Allocator {
  /// An ordered collection of free-regions, sorted primarily by size, then by
  /// location
  free: BTreeSet<FreeRegion>,
  /// An ordered collection of free-regions, sorted by location
  location_map: BTreeMap<Location, NonZero<Size>>,
  /// The total capacity
  capacity: NonZero<Size>,
  /// The amount of free memory
  available: Size,
}

// This type has an explicit implementation of Ord, since we rely on properties
// of its behaviour to find and select free regions.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
struct FreeRegion {
  location: Location,
  size: NonZero<Size>,
}

impl PartialOrd for FreeRegion {
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    Some(self.cmp(other))
  }
}

impl Ord for FreeRegion {
  fn cmp(&self, other: &Self) -> Ordering {
    use Ordering as O;
    match (
      self.size.cmp(&other.size),
      self.location.cmp(&other.location),
    ) {
      (O::Equal, O::Equal) => O::Equal,
      (O::Equal, O::Less) | (O::Less, _) => O::Less,
      (O::Equal, O::Greater) | (O::Greater, _) => O::Greater,
    }
  }
}

impl Allocator {
  /// Create a new allocator to manage a pool of memory
  ///
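
// For example, with this ordering:
//   FreeRegion { location: 9, size: 4 } < FreeRegion { location: 0, size: 5 }
// and regions of equal size sort by location. `find_free_region` relies on
// this: taking the first region at or after `(size, location: 0)` yields the
// smallest sufficiently-large region, with ties broken by lowest location
// (a best-fit search).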
  /// # Panics
  ///
  /// Panics if `capacity == 0`.
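  ///
  /// A small usage sketch, constructing an allocator over a 1024-byte pool:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let allocator = Allocator::new(1024);
  ///
  /// assert_eq!(allocator.capacity(), 1024);
  /// assert!(allocator.is_empty());
  /// ```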
  pub fn new(capacity: Size) -> Self {
    let capacity = NonZero::new(capacity).expect("`capacity == 0`");

    let mut allocator = Allocator {
      free: BTreeSet::new(),
      location_map: BTreeMap::new(),
      capacity,
      available: capacity.get(),
    };

    allocator.reset();

    allocator
  }

  /// Try to allocate a region with the provided size
  ///
  /// Uses a *best-fit* strategy, and returns [`Allocation`]s with arbitrary
  /// alignment.
  ///
  /// Returns `None` if:
  /// - there is no free-region with at least `size` available space, or
  /// - `size == 0`.
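  ///
  /// A small usage sketch; allocating from a fresh pool, the best-fit search
  /// finds the single free-region at offset `0`:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let allocation = allocator.alloc(10).unwrap();
  ///
  /// assert_eq!(allocation.offset(), 0);
  /// assert_eq!(allocation.size(), 10);
  /// assert_eq!(allocator.total_available(), 90);
  /// ```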
  pub fn alloc(&mut self, size: Size) -> Option<Allocation> {
    self.alloc_with_align(size, 1)
  }

  /// Try to allocate a region with the provided size & alignment
  ///
  /// Implements the following strategy (not quite *best-fit*):
  /// - Search for a free-region of at least `size + align - 1`, then trim the
  ///   start of the region until the requested alignment is reached.
  ///
  /// This is more prone to causing fragmentation compared to an unaligned
  /// [`alloc`](Self::alloc).
  ///
  /// Returns `None` if:
  /// - there are no free-regions with `size + align - 1` available space, or
  /// - `size == 0`, or
  /// - `align == 0`, or
  /// - `size + align - 1` overflows.
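  ///
  /// A small usage sketch; after a 3-byte allocation, the start of the
  /// remaining free-region is trimmed by one byte to reach 4-byte alignment:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let _first = allocator.alloc(3).unwrap(); // occupies offsets 0..3
  /// let aligned = allocator.alloc_with_align(8, 4).unwrap();
  ///
  /// assert_eq!(aligned.offset() % 4, 0);
  /// assert_eq!(aligned.offset(), 4);
  /// ```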
  pub fn alloc_with_align(
    &mut self,
    size: Size,
    align: Size,
  ) -> Option<Allocation> {
    let size = NonZero::new(size)?;
    let align = NonZero::new(align)?;

    let FreeRegion {
      location: mut free_region_location,
      size: free_region_size,
    } = self.find_free_region(size.checked_add(align.get() - 1)?)?;

    self.remove_free_region(free_region_location, free_region_size);

    let mut free_region_size = free_region_size.get();

    if let Some(misalignment) =
      NonZero::new((align.get() - (free_region_location % align)) % align)
    {
      self.insert_free_region(free_region_location, misalignment);
      free_region_location += misalignment.get();
      free_region_size -= misalignment.get();
    }

    if let Some(size_leftover) = NonZero::new(free_region_size - size.get()) {
      self
        .insert_free_region(free_region_location + size.get(), size_leftover);
    }

    self.available -= size.get();

    Some(Allocation {
      size,
      offset: free_region_location,
    })
  }

  /// Free the given allocation
  ///
  /// # Panics
  ///
  /// - May panic if the allocation's location is freed twice without first
  ///   being re-allocated.
  ///
  ///   Note: This panic will not catch all double frees.
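  ///
  /// A small sketch; freeing returns the space to the pool and coalesces it
  /// with the neighbouring free-region:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let allocation = allocator.alloc(40).unwrap();
  /// assert_eq!(allocator.largest_available(), 60);
  ///
  /// allocator.free(allocation);
  /// assert_eq!(allocator.total_available(), 100);
  /// assert_eq!(allocator.largest_available(), 100);
  /// ```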
  pub fn free(&mut self, alloc: Allocation) {
    let mut free_region = FreeRegion {
      location: alloc.offset,
      size: alloc.size,
    };

    // coalesce
    {
      if let Some(FreeRegion { location, size }) =
        self.previous_free_region(alloc.offset)
      {
        if location + size.get() == free_region.location {
          self.remove_free_region(location, size);
          free_region.location = location;
          // note: this unwrap is ok because the sum of all free-regions cannot
          // be larger than the total size of the allocator, which we know is
          // some `Size`.
          free_region.size = free_region.size.checked_add(size.get()).unwrap();
        }
      }

      if let Some(FreeRegion { location, size }) =
        self.following_free_region(alloc.offset)
      {
        if free_region.location + free_region.size.get() == location {
          self.remove_free_region(location, size);
          // note: this unwrap is ok because the sum of all free-regions cannot
          // be larger than the total size of the allocator, which we know is
          // some `Size`.
          free_region.size = free_region.size.checked_add(size.get()).unwrap();
        }
      }
    }

    self.insert_free_region(free_region.location, free_region.size);
    self.available += alloc.size.get();
  }

  /// Free ***all*** allocations
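  ///
  /// A small sketch; afterwards the whole capacity is one free-region again:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let _allocation = allocator.alloc(30).unwrap();
  ///
  /// allocator.reset();
  /// assert!(allocator.is_empty());
  /// assert_eq!(allocator.largest_available(), 100);
  /// ```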
  pub fn reset(&mut self) {
    self.free.clear();
    self.location_map.clear();
    self.available = self.capacity.get();
    self.insert_free_region(0, self.capacity);
  }

  /// Add new free space at the end of the allocator
  ///
  /// Returns `Err(Overflow)` if `self.capacity + additional` would overflow.
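  ///
  /// A small sketch; the added space is freed into the pool, coalescing with
  /// any trailing free-region:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(64);
  /// allocator.grow_capacity(64).unwrap();
  ///
  /// assert_eq!(allocator.capacity(), 128);
  /// assert_eq!(allocator.largest_available(), 128);
  /// ```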
  pub fn grow_capacity(&mut self, additional: Size) -> Result<(), Overflow> {
    let Some(additional) = NonZero::new(additional) else {
      return Ok(()); // `additional` is zero, so do nothing
    };

    let current_capacity = self.capacity;
    let Some(new_capacity) = current_capacity.checked_add(additional.get())
    else {
      return Err(Overflow {
        current_capacity,
        additional,
      });
    };

    self.capacity = new_capacity;
    self.free(Allocation {
      offset: current_capacity.get(),
      size: additional,
    });
    Ok(())
  }

  /// Try to re-size an existing allocation in-place
  ///
  /// Will not change the offset of the allocation and tries to expand the
  /// allocation to the right if there is sufficient free space.
  ///
  /// Returns:
  /// - `Ok(Allocation)` on success.
  /// - `Err(InsufficientSpace)` if there is not enough contiguous free space
  ///   following the allocation to expand it to `new_size`. In this case, the
  ///   existing allocation is left untouched.
  /// - `Err(Invalid)` if `new_size == 0`.
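  ///
  /// A small sketch growing an allocation in place:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let allocation = allocator.alloc(10).unwrap();
  ///
  /// let grown = allocator.try_reallocate(allocation, 25).unwrap();
  /// assert_eq!(grown.offset(), allocation.offset());
  /// assert_eq!(grown.size(), 25);
  /// assert_eq!(allocator.total_available(), 75);
  /// ```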
  pub fn try_reallocate(
    &mut self,
    alloc: Allocation,
    new_size: Size,
  ) -> Result<Allocation, ReallocateError> {
    let Some(new_size) = NonZero::new(new_size) else {
      return Err(ReallocateError::Invalid);
    };

    match new_size.cmp(&alloc.size) {
      Ordering::Greater => {
        let required_additional = NonZero::new(new_size.get() - alloc.size())
          .unwrap_or_else(|| unreachable!());
        // find the next free-region
        let Some(next_free) = self.following_free_region(alloc.offset) else {
          return Err(ReallocateError::InsufficientSpace {
            required_additional,
            available: 0,
          });
        };
        // Check that the free-region we found is actually contiguous with our
        // allocation, and that it has enough space
        if next_free.location != alloc.offset + alloc.size() {
          return Err(ReallocateError::InsufficientSpace {
            required_additional,
            available: 0,
          });
        }
        if next_free.size < required_additional {
          return Err(ReallocateError::InsufficientSpace {
            required_additional,
            available: next_free.size.get(),
          });
        }
        // all good, take what we need and return the rest
        let new_alloc = Allocation {
          offset: alloc.offset,
          size: new_size,
        };
        self.remove_free_region(next_free.location, next_free.size);
        if let Some(new_free_region_size) =
          NonZero::new(next_free.size.get() - required_additional.get())
        {
          self.insert_free_region(
            new_alloc.offset + new_alloc.size(),
            new_free_region_size,
          );
        }
        self.available -= required_additional.get();

        Ok(new_alloc)
      },
      Ordering::Less => {
        // free the additional space
        let additional = NonZero::new(alloc.size() - new_size.get())
          .unwrap_or_else(|| unreachable!());
        self.free(Allocation {
          offset: alloc.offset + alloc.size() - additional.get(),
          size: additional,
        });

        Ok(Allocation {
          offset: alloc.offset,
          size: new_size,
        })
      },
      Ordering::Equal => {
        // do nothing
        Ok(alloc)
      },
    }
  }

  /// Get the total capacity of the pool
  pub fn capacity(&self) -> Size {
    self.capacity.get()
  }

  /// Get the total available memory in this pool
  ///
  /// Note: The memory may be fragmented, so it may not be possible to allocate
  /// an object of this size.
  pub fn total_available(&self) -> Size {
    self.available
  }

  /// Get the size of the largest available memory region in this pool
  pub fn largest_available(&self) -> Size {
    self.free.last().map_or(0, |region| region.size.get())
  }

  /// Returns true if there are no allocations
  pub fn is_empty(&self) -> bool {
    self.capacity.get() == self.available
  }

  /// Returns an iterator over the unallocated regions
  ///
  /// This should be used **only** for gathering metadata about the internal
  /// state of the allocator for debugging purposes.
  ///
  /// You must not use this instead of allocating; subsequent calls to `alloc`
  /// will freely allocate from the reported regions.
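  ///
  /// A small sketch; note the regions are yielded in the internal order
  /// (size, then location), not by offset:
  /// ```
  /// # use ::orderly_allocator::Allocator;
  /// let mut allocator = Allocator::new(100);
  /// let _allocation = allocator.alloc(10).unwrap();
  ///
  /// let free: Vec<_> = allocator.report_free_regions().collect();
  /// assert_eq!(free.len(), 1);
  /// assert_eq!(free[0].offset(), 10);
  /// assert_eq!(free[0].size(), 90);
  /// ```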
  pub fn report_free_regions(
    &self,
  ) -> impl Iterator<Item = Allocation> + use<'_> {
    self.free.iter().map(|free_region| Allocation {
      offset: free_region.location,
      size: free_region.size,
    })
  }

  /// Try to find a region with at least `size`
  fn find_free_region(&self, size: NonZero<Size>) -> Option<FreeRegion> {
    self
      .free
      .range(FreeRegion { size, location: 0 }..)
      .copied()
      .next()
  }

  /// Get the first free-region before `location`
  fn previous_free_region(&self, location: Location) -> Option<FreeRegion> {
    self
      .location_map
      .range(..location)
      .next_back()
      .map(|(&location, &size)| FreeRegion { location, size })
  }

  /// Get the first free-region after `location`
  fn following_free_region(&self, location: Location) -> Option<FreeRegion> {
    use ::core::ops::Bound as B;
    self
      .location_map
      .range((B::Excluded(location), B::Unbounded))
      .next()
      .map(|(&location, &size)| FreeRegion { location, size })
  }

  /// Remove a region from the internal free lists
  fn remove_free_region(&mut self, location: Location, size: NonZero<Size>) {
    self.location_map.remove(&location);
    let region_existed = self.free.remove(&FreeRegion { location, size });

    assert!(
      region_existed,
      "tried to remove a FreeRegion which did not exist: {:?}",
      FreeRegion { location, size }
    );
  }

  /// Add a region to the internal free lists
  fn insert_free_region(&mut self, location: Location, size: NonZero<Size>) {
    self.free.insert(FreeRegion { location, size });
    let existing_size = self.location_map.insert(location, size);

    assert!(
      existing_size.is_none(),
      "Double free. Tried to add {new:?}, but {existing:?} was already there",
      new = FreeRegion { location, size },
      existing = FreeRegion {
        location,
        size: existing_size.unwrap_or_else(|| unreachable!())
      }
    )
  }
}

impl fmt::Debug for Allocator {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    f.debug_struct("Allocator")
      .field("capacity", &self.capacity)
      .field("total_available", &self.available)
      .field("largest_available", &self.largest_available())
      .finish()
  }
}

/// The error returned by [`Allocator::grow_capacity`] when the allocator's
/// capacity would overflow `Size`
#[derive(Debug, Copy, Clone)]
pub struct Overflow {
  /// The capacity of the allocator at the time of the failed call
  pub current_capacity: NonZero<Size>,
  /// The additional capacity which was requested
  pub additional: NonZero<Size>,
}
impl Error for Overflow {}
impl fmt::Display for Overflow {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    f.write_fmt(format_args!(
      "Overflow Error: Allocator with capacity {} could not grow by additional {}.",
      self.current_capacity, self.additional
    ))
  }
}

/// The error returned by [`Allocator::try_reallocate`]
#[derive(Debug, Copy, Clone)]
pub enum ReallocateError {
  /// There was not enough contiguous free space following the allocation to
  /// expand it to the requested size
  InsufficientSpace {
    /// The additional space required to satisfy the request
    required_additional: NonZero<Size>,
    /// The contiguous free space available directly after the allocation
    available: Size,
  },
  /// The requested `new_size` was `0`
  Invalid,
}

impl Error for ReallocateError {}
impl fmt::Display for ReallocateError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      ReallocateError::InsufficientSpace {
        required_additional,
        available,
      } => f.write_fmt(format_args!(
        "InsufficientSpace Error: Unable to expand allocation: \
          required_additional:{required_additional}, available:{available}."
      )),
      ReallocateError::Invalid => {
        f.write_str("Invalid allocation or `new_size` was 0")
      },
    }
  }
}