teaspoon/
lib.rs

1// Copyright © 2024 Andrea Corbellini and contributors
2// SPDX-License-Identifier: BSD-3-Clause
3
4//! Teaspoon: an allocator for when all you have is a teaspoon of memory.
5//!
6//! Teaspoon is a lightweight memory allocator designed for minimal overhead. It's meant to be used
7//! in situations where you have very limited memory available, or when you want to allocate
8//! objects on the stack.
9//!
10//! Teaspoon is optimized for low memory overhead first, and performance second.
11//!
12//! This is a no-`std` and no-`alloc` crate, as such it does not interact with the operating system
13//! to reserve memory pages; the allocatable memory needs to be provided by you as an input to
14//! Teaspoon.
15//!
16//! # Features
17//!
18//! * Small memory overhead: starting at 4 bytes per allocated object
19//! * Compatible with `no_std` environments
20//! * Support for the nightly [`Allocator`](`core::alloc::Allocator`) API
21//!
22//! # Quick start & examples
23//!
24//! ## Initialization
25//!
26//! There are 3 variants of the allocator to choose from:
27//!
28//! * [`Teaspoon4KiB`]
29//! * [`Teaspoon128KiB`]
30//! * [`Teaspoon16MiB`]
31//!
32//! The size in the name (4KiB, 128KiB, 16MiB) refers to the **maximum size for an individual
33//! allocated object**, and the total memory that can be allocated may be greater than that. For
34//! example, with `Teaspoon4KiB`, you *cannot* allocate a 5000-byte object (because that exceeds
35//! the 4 KiB = 4096 byte limit), but you *can* allocate two 3000-byte objects.
36//!
37//! Choosing the right variant is a matter of how much memory you have available, how much memory
38//! you're willing to sacrifice for overhead, and performance. Smaller variants have smaller
//! overheads. Note that the smaller variants may not necessarily be the fastest. All the
40//! differences between the 3 allocator variants are described in [Allocator
41//! limits](#allocator-limits).
42//!
43//! Because Teaspoon does not interact with the operating system, you'll need to initialize the
44//! allocator with some contiguous memory area that you already have. If you know the address, you
45//! can use one of [`Teaspoon::from_ptr`], [`Teaspoon::from_ptr_size`],
46//! [`Teaspoon::from_addr_size`], like this:
47//!
48//! ```no_run
49//! use teaspoon::Teaspoon4KiB;
50//! # #[allow(unused_variables)]
51//! let spoon = unsafe { Teaspoon4KiB::<'static>::from_addr_size(0xdeadbeef, 1024) };
52//! ```
53//!
54//! You can also construct the allocator from a byte slice. This can be useful for example to
55//! construct the allocator from an array on the stack:
56//!
57//! ```
58//! use teaspoon::Teaspoon4KiB;
59//! let mut memory = [0u8; 1024];
60//! # #[allow(unused_variables)]
61//! let spoon = Teaspoon4KiB::from(&mut memory);
62//! ```
63//!
64//! You could also initialize the Teaspoon allocator from memory obtained from the operating system
65//! like this:
66//!
67//! ```
68//! use std::alloc::GlobalAlloc;
69//! use std::alloc::Layout;
70//! use std::alloc::System;
71//! use teaspoon::Teaspoon4KiB;
72//!
73//! let size = 1024;
74//! let ptr =
75//!     unsafe { System.alloc(Layout::from_size_align(size, 4).expect("layout creation failed")) };
76//! # #[allow(unused_variables)]
77//! let spoon = unsafe { Teaspoon4KiB::from_ptr_size(ptr, size) };
78//! # // dealloc to make miri checks pass
79//! # unsafe { System.dealloc(ptr, Layout::from_size_align(size, 4).unwrap()) }
80//! ```
81//!
82//! Regardless of how you initialize the Teaspoon allocator, you have two choices for using it:
83//! using it as a [global allocator](core::alloc::GlobalAlloc) for your entire Rust program, or
84//! using it through the new [Allocator API](core::alloc::Allocator) (currently available on the
//! nightly compiler only).
86//!
87//! ## Using as a global allocator
88//!
89//! Teaspoon can be used as a [custom global allocator via the `#[global_allocator]`
90//! attribute](https://doc.rust-lang.org/stable/std/alloc/index.html#the-global_allocator-attribute).
91//! Because `#[global_allocator]` requires a `static` item, it's not possible to use [`Teaspoon`]
92//! objects directly, and instead lazy initialization is required. To aid with this, this crate
93//! provides a [`LazyTeaspoon`](lazy::LazyTeaspoon) type that can be used as follows:
94//!
95//! `Cargo.toml`:
96//!
97//! ```toml
98//! teaspoon = { version = "0.1", features = ["lazy"] }
99//! ```
100//!
101//! `main.rs`:
102//!
103//! ```
104//! # #[allow(static_mut_refs)]
105//! # #[cfg(feature = "lazy")]
106//! # {
107//! use teaspoon::lazy::LazyTeaspoon4KiB;
108//! use teaspoon::Teaspoon4KiB;
109//!
110//! #[global_allocator]
111//! static SPOON: LazyTeaspoon4KiB = LazyTeaspoon4KiB::new(|| {
112//!     static mut MEMORY: [u8; 1024] = [0u8; 1024];
113//!     // SAFETY: This closure is called only once, therefore `MEMORY` is entirely owned by
114//!     // this `Teaspoon4KiB`, and no other reference can be created.
115//!     Teaspoon4KiB::from(unsafe { &mut MEMORY })
116//! });
117//! # }
118//! ```
119//!
120//! [`LazyTeaspoon`](lazy::LazyTeaspoon) is initialized on first use by calling the initialization
121//! function passed to [`new`](lazy::LazyTeaspoon::new).
122//!
123//! [`LazyTeaspoon`](lazy::LazyTeaspoon) is a simple wrapper around [`spin::Lazy`] (which is a
124//! `no_std` equivalent to [`std::sync::LazyLock`]) that implements the [`GlobalAlloc`] and
125//! [`Allocator`] traits. There's nothing too special about it--you can write your own custom
126//! wrapper if you need to.
127//!
128//! [`std::sync::LazyLock`]: https://doc.rust-lang.org/stable/std/sync/struct.LazyLock.html
129//!
130//! ## Using via the Allocator API
131//!
132//! Teaspoon can be used as a custom allocator to be passed to the "`new_in`" methods of various
133//! container types (such as [`Box::new_in`], [`Vec::new_in`], ...). Because the Allocator API is
134//! currently experimental, it is only available in the Rust nightly compiler, and with
135//! `#![feature(allocator_api)]`. It can be used as follows:
136//!
137//! [`Box::new_in`]: https://doc.rust-lang.org/stable/std/boxed/struct.Box.html#method.new_in
138//! [`Vec::new_in`]: https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.new_in
139//!
140//! `Cargo.toml`:
141//!
142//! ```toml
143//! teaspoon = { version = "0.1", features = ["allocator-api"] }
144//! ```
145//!
146//! `main.rs`:
147//!
148//! ```
149//! #![feature(allocator_api)]
150//!
151//! # #[cfg(feature = "allocator-api")]
152//! # {
153//! use teaspoon::Teaspoon4KiB;
154//!
155//! let mut memory = [0u8; 1024];
156//! let spoon = Teaspoon4KiB::from(&mut memory);
157//!
158//! let mut vec = Vec::<i32, _>::new_in(&spoon);
159//! vec.push(1);
160//! vec.push(2);
161//! vec.push(3);
162//! # }
163//! ```
164//!
165//! # Allocator limits
166//!
167//! * **Arena Overhead:** amount of memory that is reserved for Teaspoon internal structures. This
168//!   amount of memory is always used by Teaspoon, even when no objects are allocated.
169//!
170//! * **Object Overhead:** amount of extra memory that is allocated for every allocated object
171//!   (with the exception of [zero-sized types], which have no overhead).
172//!
173//! * **Minimum Object Size:** minimum size that is always allocated for every object (with the
174//!   exception of [zero-sized types]). If an allocation requests a size less than the minimum
175//!   size, it is rounded up to the minimum size.
176//!
177//! * **Maximum Object Size:** maximum size that can be allocated to a single object. Allocations
178//!   larger than the maximum size always fail. This does not mean that all allocations up to this
179//!   maximum size will succeed: factors like available memory and memory fragmentation may result
180//!   in an actual lower limit at runtime.
181//!
182//! * **Maximum Total Memory<sup>[note 1]</sup>:** maximum total memory that can be addressed by a
183//!   Teaspoon object.
184//!
185//! |                    | Arena Overhead | Object Overhead | Minimum Object Size | Maximum Object Size     | Maximum Total Memory<sup>[note 1]</sup> |
186//! |--------------------|----------------|-----------------|---------------------|-------------------------|-----------------------------------------|
187//! | [`Teaspoon4KiB`]   | 4 bytes        | 4 bytes         | 4 bytes             | 4096 bytes (4 KiB)      | 8192 bytes (8 KiB)                      |
188//! | [`Teaspoon128KiB`] | 4 bytes        | 6 bytes         | 2 bytes             | 131072 bytes (128 KiB)  | 131072 bytes (128 KiB)                  |
189//! | [`Teaspoon16MiB`]  | 8 bytes        | 8 bytes         | 8 bytes             | 16777216 bytes (16 MiB) | 16777216 bytes (16 MiB)                 |
190//!
191//! **[note 1]:** this restriction may be lifted in a future version of this crate.
192//!
193//! [zero-sized types]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts
194//!
195//! # Internal details
196//!
197//! Teaspoon is a compact memory allocator using a doubly-linked list to track allocated objects,
198//! and a [spin lock](https://en.wikipedia.org/wiki/Spinlock) to ensure thread safety.
199//!
200//! The "Object Overhead" listed in [Allocator limits](#allocator-limits) is used to store the
201//! previous/next pointers of the linked list, and the size of the object. The "Arena Overhead" is
202//! used to store the head/tail pointers of the linked list.
203//!
204//! # Cargo feature flags
205//!
206//! * `allocator-api`: enables the implementation of the [`core::alloc::Allocator`] trait (requires
207//!   a nightly compiler).
208//! * `lazy`: enables the [`LazyTeaspoon`](lazy::LazyTeaspoon) type along with its sized variants.
209
210#![no_std]
211#![cfg_attr(feature = "allocator-api", feature(allocator_api))]
212#![warn(clippy::dbg_macro)]
213#![warn(clippy::print_stderr)]
214#![warn(clippy::print_stdout)]
215#![warn(missing_debug_implementations)]
216#![warn(missing_docs)]
217#![warn(unreachable_pub)]
218#![warn(unused_crate_dependencies)]
219#![warn(unused_macro_rules)]
220#![warn(unused_qualifications)]
221#![doc(test(attr(deny(warnings))))]
222
223mod arena;
224mod iter;
225mod ptr;
226mod segment;
227mod sizing;
228mod usage;
229
230#[cfg(test)]
231mod tests;
232
233#[cfg(feature = "lazy")]
234pub mod lazy;
235
236use crate::arena::Arena;
237use crate::iter::ArenaChunks;
238use crate::iter::Chunk;
239use crate::ptr::SegmentDataPtr;
240use crate::segment::Segment;
241use core::alloc::GlobalAlloc;
242use core::alloc::Layout;
243use core::cmp;
244use core::ptr::NonNull;
245use spin::Mutex;
246
247#[cfg(feature = "allocator-api")]
248use core::alloc::AllocError;
249#[cfg(feature = "allocator-api")]
250use core::alloc::Allocator;
251
252pub use crate::sizing::Sizing;
253pub use crate::sizing::Sizing128KiB;
254pub use crate::sizing::Sizing16MiB;
255pub use crate::sizing::Sizing4KiB;
256pub use crate::usage::Usage;
257
/// Allocator that supports allocating objects up to 128 KiB.
///
/// Alias for [`Teaspoon`] parameterized with [`Sizing128KiB`].
///
/// See the [module-level documentation](crate#allocator-limits) for more information about the
/// Teaspoon allocator variants and their sizing limits.
pub type Teaspoon128KiB<'a> = Teaspoon<'a, Sizing128KiB>;

/// Allocator that supports allocating objects up to 16 MiB.
///
/// Alias for [`Teaspoon`] parameterized with [`Sizing16MiB`].
///
/// See the [module-level documentation](crate#allocator-limits) for more information about the
/// Teaspoon allocator variants and their sizing limits.
pub type Teaspoon16MiB<'a> = Teaspoon<'a, Sizing16MiB>;

/// Allocator that supports allocating objects up to 4 KiB.
///
/// Alias for [`Teaspoon`] parameterized with [`Sizing4KiB`].
///
/// See the [module-level documentation](crate#allocator-limits) for more information about the
/// Teaspoon allocator variants and their sizing limits.
pub type Teaspoon4KiB<'a> = Teaspoon<'a, Sizing4KiB>;
275
276/// The Teaspoon allocator.
277///
278/// The allocator comes in 3 variants that set different memory overheads and limits. The `S`
279/// parameter specifies the variant, which may be:
280///
281/// * [`Sizing4KiB`]: allows allocating objects up to 4 KiB
282/// * [`Sizing128KiB`]: allows allocating objects up to 128 KiB
283/// * [`Sizing16MiB`]: allows allocating objects up to 16 MiB
284///
285/// See the [module-level documentation](crate#allocator-limits) for more information about
286/// overheads and sizing limits.
287///
288/// `Teaspoon` can be constructed from either a pointer (unsafe) or a slice, and may be accessed
/// using either the [`GlobalAlloc`] or [`Allocator`] traits. See the [module-level
/// documentation](crate#quick-start--examples) for details and examples.
#[derive(Debug)]
pub struct Teaspoon<'a, S: Sizing> {
    // All allocator state is kept behind a spin lock so that the `&self` methods required by the
    // `GlobalAlloc`/`Allocator` traits can mutate it from multiple threads.
    inner: Mutex<TeaspoonInner<'a, S>>,
}
296
297impl<'a, S: Sizing> Teaspoon<'a, S> {
298    /// Constructs a Teaspoon memory allocator from a slice.
299    ///
300    /// # Examples
301    ///
302    /// ```
303    /// use teaspoon::Teaspoon4KiB;
304    ///
305    /// let mut memory = [0u8; 1024];
306    /// # #[allow(unused_variables)]
307    /// let spoon = Teaspoon4KiB::from_slice(&mut memory);
308    /// ```
309    #[inline]
310    #[must_use]
311    pub fn from_slice(slice: &'a mut [u8]) -> Self {
312        Self {
313            inner: Mutex::new(TeaspoonInner::from_slice(slice)),
314        }
315    }
316
317    /// Constructs a Teaspoon memory allocator from a slice pointer.
318    ///
319    /// The pointer must be valid for both reads and writes, and must be alive for the lifetime of
320    /// `'a`. Note that because there's no connection between the pointer and the lifetime `'a`,
321    /// you must ensure that the pointer lives long enough; you cannot rely on the compiler to
322    /// check that for you.
323    ///
324    /// # Panics
325    ///
326    /// If `ptr` is a null pointer.
327    ///
328    /// # Safety
329    ///
330    /// - `ptr` must be
331    ///   ["dereferenceable"](https://doc.rust-lang.org/stable/std/ptr/index.html#safety).
332    /// - `ptr` must be alive for the lifetime of `'a`.
333    /// - `ptr` must not be an [*alias*](https://doc.rust-lang.org/nomicon/aliasing.html) for
334    ///   another reference or pointer (in other words, `ptr` is a *unique* pointer).
335    ///
336    /// An exception to those rules is if the length of `ptr` is 0. In that case, `ptr` may be a
337    /// dangling non-null pointer.
338    ///
339    /// # Examples
340    ///
341    /// ```
342    /// use teaspoon::Teaspoon4KiB;
343    ///
344    /// let mut memory = [0u8; 1024];
345    /// let ptr = std::ptr::slice_from_raw_parts_mut(memory.as_mut_ptr(), memory.len());
346    /// # #[allow(unused_variables)]
347    /// let spoon = unsafe { Teaspoon4KiB::from_ptr(ptr) };
348    /// ```
349    #[inline]
350    #[must_use]
351    pub unsafe fn from_ptr(ptr: *mut [u8]) -> Self {
352        Self::from_ptr_size(ptr.cast(), ptr.len())
353    }
354
355    /// Constructs a Teaspoon memory allocator from a pointer and a size.
356    ///
357    /// The pointer must be valid for both reads and writes, and must be alive for the lifetime of
358    /// `'a`. Note that because there's no connection between the pointer and the lifetime `'a`,
359    /// you must ensure that the pointer lives long enough; you cannot rely on the compiler to
360    /// check that for you.
361    ///
362    /// # Panics
363    ///
364    /// If `ptr` is a null pointer.
365    ///
366    /// # Safety
367    ///
368    /// - `ptr` must be
369    ///   ["dereferenceable"](https://doc.rust-lang.org/stable/std/ptr/index.html#safety).
370    /// - `ptr` must be alive for the lifetime of `'a`.
371    /// - `ptr` must not be an [*alias*](https://doc.rust-lang.org/nomicon/aliasing.html) for
372    ///   another reference or pointer (in other words, `ptr` is a *unique* pointer).
373    ///
374    /// An exception to those rules is if the `size` is 0. In that case, `ptr` may be a dangling
375    /// non-null pointer.
376    ///
377    /// # Examples
378    ///
379    /// ```
380    /// use teaspoon::Teaspoon4KiB;
381    ///
382    /// let mut memory = [0u8; 1024];
383    /// # #[allow(unused_variables)]
384    /// let spoon = unsafe { Teaspoon4KiB::from_ptr_size(memory.as_mut_ptr(), memory.len()) };
385    /// ```
386    #[inline]
387    #[must_use]
388    pub unsafe fn from_ptr_size(ptr: *mut u8, size: usize) -> Self {
389        Self {
390            inner: Mutex::new(TeaspoonInner::from_ptr_size(ptr, size)),
391        }
392    }
393
394    /// Constructs a Teaspoon memory allocator from an address and a size.
395    ///
396    /// The memory pointed by address must be valid for both reads and writes, and must be alive
397    /// for the lifetime of `'a`. Note that because there's no connection between the address and
398    /// the lifetime `'a`, you must ensure that the memory pointed by address lives long enough;
399    /// you cannot rely on the compiler to check that for you.
400    ///
401    /// # Panics
402    ///
403    /// If `addr` is 0.
404    ///
405    /// # Safety
406    ///
407    /// - the memory pointed by `addr` must be
408    ///   ["dereferenceable"](https://doc.rust-lang.org/stable/std/ptr/index.html#safety).
409    /// - the memory pointed by `addr` must be alive for the lifetime of `'a`.
410    /// - the memory pointed by `addr` must not be an
411    ///   [*alias*](https://doc.rust-lang.org/nomicon/aliasing.html) for another reference or
412    ///   address (in other words, `addr` is a *unique* address).
413    ///
414    /// An exception to those rules is if the `size` is 0. In that case, `addr` may be a dangling
415    /// non-null address.
416    ///
417    /// # Examples
418    ///
419    /// ```
420    /// use teaspoon::Teaspoon4KiB;
421    ///
422    /// let mut memory = [0u8; 1024];
423    /// # #[allow(unused_variables)]
424    /// let spoon = unsafe { Teaspoon4KiB::from_addr_size(memory.as_mut_ptr() as usize, memory.len()) };
425    /// ```
426    #[inline]
427    #[must_use]
428    pub unsafe fn from_addr_size(addr: usize, size: usize) -> Self {
429        Self::from_ptr_size(addr as *mut u8, size)
430    }
431
432    /// Returns memory usage information for this Teaspoon allocator.
433    ///
434    /// The returned value contains some basic information about the memory currently used by the
435    /// allocator, such as: the total memory available, the total memory used, the total memory
436    /// usable, and the number of objects allocated.
437    ///
438    /// See the [`Usage`] documentation for the exact meaning of each field returned.
439    ///
440    /// Note that the usage computation is not cached or optimized in any way, and requires
441    /// visiting all the objects currently allocated. As such, `usage()` is not a constant-time
442    /// operation (`O(1)`), but it's a linear-time operation (`O(n)`, where `n` is the number of
443    /// objects currently allocated). This is because the Teaspoon allocator is optimized to
444    /// minimize overhead, and a faster `usage()` would require more overhead.
445    ///
446    /// # Examples
447    ///
448    /// ```
449    /// #![feature(allocator_api)]
450    ///
451    /// # #[cfg(feature = "allocator-api")]
452    /// # {
453    /// use std::alloc::Allocator;
454    /// use std::alloc::Layout;
455    /// use teaspoon::Teaspoon4KiB;
456    /// use teaspoon::Usage;
457    ///
458    /// let mut memory = [0u8; 1024];
459    /// let spoon = Teaspoon4KiB::from(&mut memory);
460    ///
461    /// assert_eq!(
462    ///     spoon.usage(),
463    ///     Usage {
464    ///         total: 1024,
465    ///         used: 0,
466    ///         free: 1020,
467    ///         objects: 0
468    ///     }
469    /// );
470    ///
471    /// let _ = spoon.allocate(Layout::new::<u128>());
472    ///
473    /// assert_eq!(
474    ///     spoon.usage(),
475    ///     Usage {
476    ///         total: 1024,
477    ///         used: 16,
478    ///         free: 1000,
479    ///         objects: 1
480    ///     }
481    /// );
482    /// # }
483    /// ```
484    #[inline]
485    #[must_use]
486    pub fn usage(&self) -> Usage {
487        self.inner.lock().usage()
488    }
489}
490
491impl<'a, S: Sizing> From<&'a mut [u8]> for Teaspoon<'a, S> {
492    #[inline]
493    fn from(slice: &'a mut [u8]) -> Self {
494        Self::from_slice(slice)
495    }
496}
497
498impl<'a, S: Sizing, const N: usize> From<&'a mut [u8; N]> for Teaspoon<'a, S> {
499    #[inline]
500    fn from(array: &'a mut [u8; N]) -> Self {
501        Self::from(array.as_mut_slice())
502    }
503}
504
#[cfg(feature = "allocator-api")]
// Every trait method simply takes the spin lock and delegates to `TeaspoonInner`, translating
// `Option` results into the `Result<_, AllocError>` shape the `Allocator` trait requires.
unsafe impl<'a, S: Sizing> Allocator for Teaspoon<'a, S> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        match self.inner.lock().allocate(layout) {
            Some(data) => Ok(data),
            None => Err(AllocError),
        }
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        let mut inner = self.inner.lock();
        inner.deallocate(SegmentDataPtr::new(ptr), layout)
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let mut inner = self.inner.lock();
        inner
            .grow(SegmentDataPtr::new(ptr), old_layout, new_layout)
            .ok_or(AllocError)
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let mut inner = self.inner.lock();
        inner
            .shrink(SegmentDataPtr::new(ptr), old_layout, new_layout)
            .ok_or(AllocError)
    }
}
542
543unsafe impl<'a, S: Sizing> GlobalAlloc for Teaspoon<'a, S> {
544    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
545        self.inner
546            .lock()
547            .allocate(layout)
548            .map(|ptr| ptr.cast().as_ptr())
549            .unwrap_or_else(core::ptr::null_mut)
550    }
551
552    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
553        let data_ptr = SegmentDataPtr::new(NonNull::new_unchecked(ptr));
554        self.inner.lock().deallocate(data_ptr, layout)
555    }
556
557    unsafe fn realloc(&self, ptr: *mut u8, old_layout: Layout, new_size: usize) -> *mut u8 {
558        let data_ptr = SegmentDataPtr::new(NonNull::new_unchecked(ptr));
559        let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
560        self.inner
561            .lock()
562            .resize(data_ptr, old_layout, new_layout)
563            .map(|ptr| ptr.cast().as_ptr())
564            .unwrap_or_else(core::ptr::null_mut)
565    }
566}
567
// The lock-free core of the allocator; `Teaspoon` wraps this in a `Mutex`.
// `repr(transparent)` keeps `TeaspoonInner` layout-identical to its single `Arena` field.
#[repr(transparent)]
#[derive(Debug)]
struct TeaspoonInner<'a, S: Sizing> {
    // Backing memory plus the head/tail pointers of the allocated-segment list.
    arena: Arena<'a, S>,
}
573
impl<'a, S: Sizing> TeaspoonInner<'a, S> {
    /// Constructs the inner allocator state over the memory of `slice`.
    #[inline]
    #[must_use]
    fn from_slice(slice: &'a mut [u8]) -> Self {
        Self {
            arena: Arena::from(slice),
        }
    }

    /// Constructs the inner allocator state from a raw pointer and size.
    ///
    /// # Panics
    ///
    /// Panics if `ptr` is null.
    ///
    /// # Safety
    ///
    /// Same contract as [`Teaspoon::from_ptr_size`]: `ptr` must be dereferenceable, unique, and
    /// alive for `'a` (unless `size` is 0).
    #[inline]
    #[must_use]
    unsafe fn from_ptr_size(ptr: *mut u8, size: usize) -> Self {
        let ptr = NonNull::new(ptr).expect("expected non-null pointer");
        let slice = NonNull::slice_from_raw_parts(ptr, size);
        let arena = Arena::new(slice);
        Self { arena }
    }

    /// Allocates memory for `layout`, returning `None` if no suitable space is available.
    ///
    /// Zero-sized layouts consume no arena memory and are served with a well-aligned dangling
    /// pointer. Non-zero layouts are placed (in order of preference): as the first segment of an
    /// empty arena, after the current tail segment, or in any unused gap between segments.
    fn allocate(&mut self, layout: Layout) -> Option<NonNull<[u8]>> {
        if layout.size() == 0 {
            // SAFETY: `Layout` guarantees that `align` is non-zero
            // TODO switch to `layout.dangling()` once it's stabilized
            let dangling = unsafe { NonNull::new_unchecked(layout.align() as *mut u8) };
            return Some(NonNull::slice_from_raw_parts(dangling, 0));
        }

        let data = match self.arena.head() {
            None => self.allocate_first(layout),
            Some(_) => self
                .allocate_tail(layout)
                .or_else(|| self.allocate_anywhere(layout)),
        };

        // Sanity-check (debug builds only) that whatever placement strategy succeeded honored
        // the requested size and alignment.
        if let Some(data) = data {
            debug_assert!(
                data.len() >= layout.size(),
                "allocation returned fewer bytes than requested"
            );
            debug_assert!(
                // TODO switch to `data.is_aligned_to(layout.align())` once it's stabilized
                data.cast::<u8>().align_offset(layout.align()) == 0,
                "allocation returned data with wrong alignment"
            );
        }

        data
    }

    /// Places the very first segment into an empty arena, initializing both the head and tail
    /// pointers of the segment list.
    fn allocate_first(&mut self, layout: Layout) -> Option<NonNull<[u8]>> {
        debug_assert!(
            layout.size() > 0,
            "`layout.size()` must be greater than zero"
        );
        debug_assert!(
            self.arena.head().is_none(),
            "arena is expected to be empty, but has a head pointer"
        );
        debug_assert!(
            self.arena.tail().is_none(),
            "arena is expected to be empty, but has a tail pointer"
        );

        let segment = unsafe { Segment::new_in(self.arena, self.arena.usable(), layout)? };
        segment.write();

        self.arena.set_head(Some(segment.ptr()));
        self.arena.set_tail(Some(segment.ptr()));

        Some(segment.data(layout))
    }

    /// Fast path for a non-empty arena: tries to place the new segment in the space trailing the
    /// current tail segment, linking it as the new tail.
    fn allocate_tail(&mut self, layout: Layout) -> Option<NonNull<[u8]>> {
        debug_assert!(
            layout.size() > 0,
            "`layout.size()` must be greater than zero"
        );
        debug_assert!(
            self.arena.head().is_some(),
            "arena is expected to be non-empty, but does not have a head pointer"
        );
        debug_assert!(
            self.arena.tail().is_some(),
            "arena is expected to be non-empty, but does not have a tail pointer"
        );

        // SAFETY of `unwrap_unchecked`: the debug assertion above documents the invariant that a
        // non-empty arena always has a tail pointer.
        let mut tail_segment =
            unsafe { Segment::read(self.arena, self.arena.tail().unwrap_unchecked()) };
        let mut new_segment =
            unsafe { Segment::new_in(self.arena, tail_segment.trailing(), layout)? };
        Segment::connect(&mut tail_segment, &mut new_segment);

        self.arena.set_tail(Some(new_segment.ptr()));

        Some(new_segment.data(layout))
    }

    /// Slow path: scans the arena chunk-by-chunk and places the new segment in the first unused
    /// gap large enough for `layout`, splicing it into the doubly-linked segment list between
    /// its (possibly absent) used neighbors.
    fn allocate_anywhere(&mut self, layout: Layout) -> Option<NonNull<[u8]>> {
        debug_assert!(
            layout.size() > 0,
            "`layout.size()` must be greater than zero"
        );

        let mut iter = ArenaChunks::new(self.arena);
        // Most recently seen used segment; becomes the new segment's `prev` link.
        let mut prev_segment: Option<Segment<'a, S>> = None;

        while let Some(chunk) = iter.next() {
            match chunk {
                Chunk::Used(segment) => {
                    prev_segment = Some(segment);
                }
                Chunk::Unused(unused) => {
                    if let Some(mut new_segment) =
                        unsafe { Segment::new_in(self.arena, unused, layout) }
                    {
                        // The chunk immediately after an unused gap, if any, is the used segment
                        // that becomes the new segment's `next` link.
                        let next_segment = match iter.next() {
                            Some(Chunk::Used(segment)) => Some(segment),
                            _ => None,
                        };

                        // No previous segment means we are inserting at the front of the list.
                        match prev_segment {
                            None => self.arena.set_head(Some(new_segment.ptr())),
                            Some(mut prev_segment) => {
                                Segment::connect(&mut prev_segment, &mut new_segment)
                            }
                        }
                        // No next segment means we are inserting at the end of the list.
                        match next_segment {
                            None => self.arena.set_tail(Some(new_segment.ptr())),
                            Some(mut next_segment) => {
                                Segment::connect(&mut new_segment, &mut next_segment)
                            }
                        }

                        return Some(new_segment.data(layout));
                    }
                }
            }
        }

        None
    }

    /// Frees the allocation identified by `data_ptr`.
    ///
    /// Zero-sized layouts are a no-op because `allocate` served them with a dangling pointer
    /// that has no backing segment.
    fn deallocate(&mut self, data_ptr: SegmentDataPtr<S>, layout: Layout) {
        if layout.size() == 0 {
            // `data_ptr` is a dangling pointer previously returned by `allocate()`; it doesn't
            // have a corresponding segment
            return;
        }

        let segment = unsafe { Segment::read(self.arena, data_ptr.to_header_ptr()) };
        self.remove_segment(segment)
    }

    /// Unlinks `segment` from the doubly-linked segment list, fixing up the arena's head/tail
    /// pointers when the segment was at either end of the list.
    fn remove_segment(&mut self, segment: Segment<'a, S>) {
        debug_assert!(
            self.arena.head().is_some(),
            "arena is expected to be non-empty, but does not have a head pointer"
        );
        debug_assert!(
            self.arena.tail().is_some(),
            "arena is expected to be non-empty, but does not have a tail pointer"
        );

        if segment.prev_ptr().is_none() {
            self.arena.set_head(segment.next_ptr());
        }
        if segment.next_ptr().is_none() {
            self.arena.set_tail(segment.prev_ptr());
        }

        segment.disconnect();
    }

    /// `Allocator::grow` entry point: a `resize` whose new layout must not be smaller.
    #[cfg(feature = "allocator-api")]
    fn grow(
        &mut self,
        data_ptr: SegmentDataPtr<S>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Option<NonNull<[u8]>> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout` must be bigger than or equal to `old_layout`"
        );
        self.resize(data_ptr, old_layout, new_layout)
    }

    /// `Allocator::shrink` entry point: a `resize` whose new layout must not be bigger.
    #[cfg(feature = "allocator-api")]
    fn shrink(
        &mut self,
        data_ptr: SegmentDataPtr<S>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Option<NonNull<[u8]>> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout` must be smaller than or equal to `old_layout`"
        );
        self.resize(data_ptr, old_layout, new_layout)
    }

    /// Reallocates the object at `data_ptr` from `old_layout` to `new_layout`, preserving the
    /// first `min(old, new)` bytes of data. Returns `None` (leaving the old allocation intact)
    /// if no space for the new layout can be found.
    fn resize(
        &mut self,
        data_ptr: SegmentDataPtr<S>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Option<NonNull<[u8]>> {
        if old_layout.size() == 0 || new_layout.size() == 0 {
            // If `old_layout` is zero-sized, then `data_ptr` is a dangling pointer, and it doesn't
            // have a corresponding segment. If `new_layout` is zero-sized, then we need to return
            // a dangling pointer.
            self.deallocate(data_ptr, old_layout);
            return self.allocate(new_layout);
        }

        debug_assert!(
            self.arena.head().is_some(),
            "arena is expected to be non-empty, but does not have a head pointer"
        );
        debug_assert!(
            self.arena.tail().is_some(),
            "arena is expected to be non-empty, but does not have a tail pointer"
        );

        let copy_size = cmp::min(old_layout.size(), new_layout.size());
        let old_segment = unsafe { Segment::read(self.arena, data_ptr.to_header_ptr()) };
        let old_data = old_segment.data(old_layout);

        // First try to rebuild the segment in place, using the space available at its current
        // position; if that fails, move the object to a fresh allocation elsewhere.
        match unsafe { Segment::new_in(self.arena, old_segment.available(), new_layout) } {
            None => {
                let new_data = self.allocate(new_layout)?;
                // The old segment is still linked while `allocate` runs, so the new data region
                // cannot overlap the old one: `copy_nonoverlapping` is sound here.
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        old_data.cast::<u8>().as_ptr(),
                        new_data.cast::<u8>().as_ptr(),
                        copy_size,
                    )
                };
                self.remove_segment(old_segment);
                Some(new_data)
            }
            Some(mut new_segment) => {
                let new_data = new_segment.data(new_layout);
                // In-place resize: old and new data regions may overlap, so the overlap-safe
                // `copy` (memmove) is required.
                unsafe {
                    core::ptr::copy(
                        old_data.cast::<u8>().as_ptr(),
                        new_data.cast::<u8>().as_ptr(),
                        copy_size,
                    )
                };

                // Re-link the rebuilt segment in the old segment's position in the list.
                match old_segment.prev() {
                    None => self.arena.set_head(Some(new_segment.ptr())),
                    Some(mut prev) => Segment::connect(&mut prev, &mut new_segment),
                }
                match old_segment.next() {
                    None => self.arena.set_tail(Some(new_segment.ptr())),
                    Some(mut next) => Segment::connect(&mut new_segment, &mut next),
                }

                new_segment.write();

                Some(new_data)
            }
        }
    }

    /// Computes usage statistics by walking the arena; `O(n)` in the number of allocations.
    #[inline]
    #[must_use]
    fn usage(&self) -> Usage {
        Usage::get(self.arena)
    }
}