musli/alloc/slice.rs

use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::mem::{MaybeUninit, align_of, forget, replace, size_of};
use core::num::NonZeroU16;
use core::ops::{Deref, DerefMut};
use core::ptr;

use super::{Alloc, AllocError, Allocator, SliceBuffer};

#[cfg(test)]
mod tests;

// We cap the maximum number of bytes at 2^31 - 1, since that ensures that
// addition between two magnitudes never overflows.
const MAX_BYTES: usize = i32::MAX as usize;

/// A no-std compatible slice-based allocator that can be used with the `musli`
/// crate.
///
/// It is geared towards handling few allocations, but they can be arbitrarily
/// large. It is optimized to work best when allocations are short lived and
/// "merged back" into one previously allocated region through
/// `Buffer::extend`.
///
/// It's also optimized to write to one allocation "at a time". So once an
/// allocation has been grown, it will be put in a region where it is unlikely
/// to need to be moved again, usually the last region which has access to the
/// remainder of the provided buffer.
///
/// For the moment, this allocator only supports 65535 unique allocations, which
/// is fine for use with the `musli` crate, but might be a limitation for other
/// use-cases.
///
/// ## Examples
///
/// ```
/// use musli::alloc::{ArrayBuffer, Slice, Vec};
///
/// let mut buf = ArrayBuffer::new();
/// let alloc = Slice::new(&mut buf);
///
/// let mut a = Vec::new_in(&alloc);
/// let mut b = Vec::new_in(&alloc);
///
/// b.extend_from_slice(b"He11o")?;
/// a.extend_from_slice(b.as_slice())?;
///
/// assert_eq!(a.as_slice(), b"He11o");
/// assert_eq!(a.len(), 5);
///
/// a.extend_from_slice(b" W0rld")?;
///
/// assert_eq!(a.as_slice(), b"He11o W0rld");
/// assert_eq!(a.len(), 11);
///
/// let mut c = Vec::new_in(&alloc);
/// c.extend_from_slice(b"!")?;
/// a.extend_from_slice(c.as_slice())?;
///
/// assert_eq!(a.as_slice(), b"He11o W0rld!");
/// assert_eq!(a.len(), 12);
/// # Ok::<_, musli::alloc::AllocError>(())
/// ```
///
/// ## Design
///
/// The allocator takes a buffer of contiguous memory. This is dynamically
/// divided into two parts:
///
/// * One part which grows upwards from the base, constituting the memory being
///   allocated.
/// * Another part, the metadata, which grows downward from the end of the
///   buffer, containing headers for all allocated regions.
///
/// By designing the allocator so that the allocated memory and its metadata
/// are separate, neighbouring regions can efficiently be merged as they are
/// written or freed.
///
/// Each allocation is sparse, meaning it does not try to over-allocate memory.
/// This ensures that subsequent regions with initialized memory can be merged
/// efficiently, but degrades performance for many small writes performed across
/// multiple allocations concurrently.
///
/// Below is an illustration of this, where `a` and `b` are two allocations
/// where we write one byte at a time to each. Here `x` indicates an occupied
/// gap in the memory regions.
///
/// ```text
/// a
/// ab
/// # a moved to end
/// xbaa
/// # b moved to 0
/// bbaa
/// # aa not moved
/// bbaaa
/// # bb moved to end
/// xxaaabbb
/// # aaa moved to 0
/// aaaaxbbb
/// # bbb not moved
/// aaaaxbbbb
/// # aaaa not moved
/// aaaaabbbb
/// # bbbbb not moved
/// aaaaabbbbb
/// # aaaaa moved to end
/// xxxxxbbbbbaaaaaa
/// # bbbbb moved to 0
/// bbbbbbxxxxaaaaaa
/// ```
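///
/// The following is a minimal sketch of the interleaved-write pattern
/// illustrated above, reusing the `Vec` API shown in the example further up:
///
/// ```
/// use musli::alloc::{ArrayBuffer, Slice, Vec};
///
/// let mut buf = ArrayBuffer::new();
/// let alloc = Slice::new(&mut buf);
///
/// let mut a = Vec::new_in(&alloc);
/// let mut b = Vec::new_in(&alloc);
///
/// // Interleave writes so that both regions grow side by side.
/// for n in 0u8..5 {
///     a.push(n)?;
///     b.push(n)?;
/// }
///
/// assert_eq!(a.as_slice(), &[0, 1, 2, 3, 4]);
/// assert_eq!(b.as_slice(), &[0, 1, 2, 3, 4]);
/// # Ok::<_, musli::alloc::AllocError>(())
/// ```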
pub struct Slice<'a> {
    // This must be an unsafe cell, since it's mutably accessed through
    // immutable references. We simply make sure that those accesses do not
    // clobber each other, which we can do since the API is restricted through
    // the `Alloc` trait.
    internal: UnsafeCell<Internal>,
    // The underlying buffer being borrowed.
    _marker: PhantomData<&'a mut [MaybeUninit<u8>]>,
}

impl<'a> Slice<'a> {
    /// Construct a new slice allocator around a [`SliceBuffer`].
    ///
    /// See [type-level documentation][Slice] for more information.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli::alloc::{ArrayBuffer, Slice, Vec};
    ///
    /// let mut buf = ArrayBuffer::new();
    /// let alloc = Slice::new(&mut buf);
    /// let mut vec = Vec::new_in(&alloc);
    /// vec.push(42u32).unwrap();
    /// assert_eq!(vec.as_slice(), &[42]);
    /// ```
    ///
    /// # Panics
    ///
    /// This panics if called with a buffer larger than `2^31 - 1` bytes.
    pub fn new<B>(buffer: &'a mut B) -> Self
    where
        B: ?Sized + SliceBuffer,
    {
        let buffer = buffer.as_uninit_bytes();
        let size = buffer.len();

        assert!(
            size <= MAX_BYTES,
            "Buffer of {size} bytes is larger than the maximum {MAX_BYTES}"
        );

        let mut data = Range::new(buffer.as_mut_ptr_range());

        // The region of allocated headers grows downwards from the end of the
        // buffer, so in order to ensure headers are aligned, we simply align
        // the end pointer of the buffer preemptively here. Then we don't have
        // to worry about it.
        let align = data.end.align_offset(align_of::<Header>());

        if align != 0 {
            let sub = align_of::<Header>() - align;

            if sub <= size {
                // SAFETY: We've ensured that the adjustment is less than the
                // size of the buffer.
                unsafe {
                    data.end = data.end.sub(sub);
                }
            } else {
                data.end = data.start;
            }
        }

        Self {
            internal: UnsafeCell::new(Internal {
                free_head: None,
                tail: None,
                occupied: None,
                full: data,
                free: data,
            }),
            _marker: PhantomData,
        }
    }
}

unsafe impl<'a> Allocator for &'a Slice<'_> {
    #[inline]
    fn __do_not_implement() {}

    const IS_GLOBAL: bool = false;

    type Alloc<T> = SliceAlloc<'a, T>;

    #[inline]
    fn alloc<T>(self, value: T) -> Result<Self::Alloc<T>, AllocError> {
        if size_of::<T>() == 0 {
            return Ok(SliceAlloc::ZST);
        }

        // SAFETY: We have exclusive access to the internal state, and it's only
        // held for the duration of this call.
        let region = unsafe {
            let i = &mut *self.internal.get();
            let region = i.alloc(size_of::<T>(), align_of::<T>());

            let Some(region) = region else {
                return Err(AllocError);
            };

            // Write the value into the region.
            region.range.start.cast::<T>().write(value);
            Some(region.id)
        };

        Ok(SliceAlloc {
            region,
            internal: Some(&self.internal),
            cap: 1,
            _marker: PhantomData,
        })
    }

    #[inline]
    fn alloc_empty<T>(self) -> Self::Alloc<T> {
        if size_of::<T>() == 0 {
            return SliceAlloc::ZST;
        }

        // SAFETY: We have exclusive access to the internal state, and it's only
        // held for the duration of this call.
        let region = unsafe {
            (*self.internal.get())
                .alloc(0, align_of::<T>())
                .map(|r| r.id)
        };

        SliceAlloc {
            region,
            internal: Some(&self.internal),
            cap: 0,
            _marker: PhantomData,
        }
    }
}

/// A slice allocated buffer.
///
/// See [`Slice`].
pub struct SliceAlloc<'a, T> {
    region: Option<HeaderId>,
    internal: Option<&'a UnsafeCell<Internal>>,
    // Locally known capacity in units of `T`. The real capacity *might* be
    // larger than this value.
    cap: usize,
    _marker: PhantomData<T>,
}

impl<T> SliceAlloc<'_, T> {
    const ZST: Self = Self {
        region: None,
        internal: None,
        cap: usize::MAX,
        _marker: PhantomData,
    };
}

impl<T> SliceAlloc<'_, T> {
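    /// Release the backing region, if any, back to the shared allocator state.
    /// The region id is taken out of `self`, so repeated calls (including the
    /// one from `Drop`) are no-ops after the first.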
    #[inline]
    fn free(&mut self) {
        let (Some(region), Some(internal)) = (self.region.take(), self.internal) else {
            return;
        };

        // SAFETY: We have exclusive access to the internal state.
        unsafe {
            (*internal.get()).free(region);
        }
    }
}

impl<T> Alloc<T> for SliceAlloc<'_, T> {
    #[inline]
    fn as_ptr(&self) -> *const T {
        let (Some(region), Some(internal)) = (self.region, self.internal) else {
            return ptr::NonNull::dangling().as_ptr();
        };

        unsafe {
            let i = &*internal.get();
            let this = i.header(region);
            this.range.start.cast_const().cast()
        }
    }

    #[inline]
    fn as_mut_ptr(&mut self) -> *mut T {
        let (Some(region), Some(internal)) = (self.region, self.internal) else {
            return ptr::NonNull::dangling().as_ptr();
        };

        unsafe {
            let i = &*internal.get();
            let this = i.header(region);
            this.range.start.cast()
        }
    }

    #[inline]
    fn capacity(&self) -> usize {
        let Some(internal) = self.internal else {
            return usize::MAX;
        };

        let Some(region_id) = self.region else {
            return 0;
        };

        // SAFETY: Due to invariants in the Buffer trait we know that these
        // cannot be used incorrectly.
        unsafe {
            let i = &mut *internal.get();
            i.region(region_id).capacity()
        }
    }

    #[inline]
    fn resize(&mut self, len: usize, additional: usize) -> Result<(), AllocError> {
        // Cheaply check the locally known capacity.
        //
        // This capacity is in units of `T`, and is only ever at risk of being
        // too small if the allocation has been grown.
        if len + additional <= self.cap {
            return Ok(());
        }

        let Some(internal) = self.internal else {
            debug_assert_eq!(
                size_of::<T>(),
                0,
                "Only ZSTs should lack an internal pointer"
            );
            // ZSTs don't need to do any work to be resized.
            return Ok(());
        };

        let Some(region_id) = self.region else {
            return Err(AllocError);
        };

        let Some(len) = len.checked_mul(size_of::<T>()) else {
            return Err(AllocError);
        };

        let Some(additional) = additional.checked_mul(size_of::<T>()) else {
            return Err(AllocError);
        };

        let Some(requested) = len.checked_add(additional) else {
            return Err(AllocError);
        };

        if requested > MAX_BYTES {
            return Err(AllocError);
        }

        // SAFETY: Due to invariants in the Buffer trait we know that these
        // cannot be used incorrectly.
        unsafe {
            let i = &mut *internal.get();

            let region = i.region(region_id);

            let actual = region.capacity();

            // Region can already fit in the requested bytes.
            if actual >= requested {
                self.cap = actual / size_of::<T>();
                return Ok(());
            };

            let Some(region) = i.realloc(region_id, len, requested, align_of::<T>()) else {
                return Err(AllocError);
            };

            self.region = Some(region.id);
            self.cap = region.capacity() / size_of::<T>();
            Ok(())
        }
    }

    #[inline]
    fn try_merge<B>(&mut self, this_len: usize, buf: B, other_len: usize) -> Result<(), B>
    where
        B: Alloc<T>,
    {
        let Some(internal) = self.internal else {
            // T is a ZST, so merging is trivial. We can just return immediately
            // here.
            return Ok(());
        };

        let Some(region) = self.region else {
            return Err(buf);
        };

        let this_len = this_len * size_of::<T>();
        let other_len = other_len * size_of::<T>();

        // NB: Placing this here to make miri happy, since accessing the
        // slice will mean mutably accessing the internal state.
        let other_ptr = buf.as_ptr().cast();

        unsafe {
            let i = &mut *internal.get();
            let mut this = i.region(region);

            debug_assert!(this.capacity() >= this_len);

            // If this region immediately precedes the other region, we can
            // optimize the write by simply growing the current region and
            // de-allocating the second, since the only conclusion is that
            // they share the same allocator.
            if !ptr::eq(this.range.end.cast_const(), other_ptr) {
                return Err(buf);
            }

            let Some(next) = this.next else {
                return Err(buf);
            };

            // Prevent the other buffer from being dropped, since we're
            // taking care of the allocation in here directly instead.
            forget(buf);

            let next = i.region(next);

            let to = this.range.start.wrapping_add(this_len);

            // Data needs to be shuffled back to the end of the initialized
            // region.
            if this.range.end != to {
                this.range.end.copy_to(to, other_len);
            }

            let old = i.free_region(next);
            this.range.end = old.range.end;
            Ok(())
        }
    }
}

impl<T> Drop for SliceAlloc<'_, T> {
    #[inline]
    fn drop(&mut self) {
        self.free()
    }
}

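/// A handle to an allocated region, pairing the region identifier with a raw
/// pointer to its header. Dereferences to [`Header`].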
struct Region {
    id: HeaderId,
    ptr: *mut Header,
}

impl Deref for Region {
    type Target = Header;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: Construction of the region is unsafe, so the caller must
        // ensure that it's used correctly after that.
        unsafe { &*self.ptr }
    }
}

impl DerefMut for Region {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: Construction of the region is unsafe, so the caller must
        // ensure that it's used correctly after that.
        unsafe { &mut *self.ptr }
    }
}

/// The identifier of a region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(test, derive(PartialOrd, Ord, Hash))]
#[repr(transparent)]
struct HeaderId(NonZeroU16);

impl HeaderId {
    #[cfg(test)]
    const unsafe fn new_unchecked(value: u16) -> Self {
        Self(unsafe { NonZeroU16::new_unchecked(value) })
    }

    /// Create a new region identifier.
    ///
    /// Returns `None` if the value is zero or does not fit in a `u16`.
    #[inline]
    fn new(value: isize) -> Option<Self> {
        Some(Self(NonZeroU16::new(u16::try_from(value).ok()?)?))
    }

    /// Get the value of the region identifier.
    #[inline]
    fn get(self) -> usize {
        self.0.get() as usize
    }
}

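/// A raw `start..end` pointer range into the underlying buffer.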
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Range {
    start: *mut MaybeUninit<u8>,
    end: *mut MaybeUninit<u8>,
}

impl Range {
    fn new(range: core::ops::Range<*mut MaybeUninit<u8>>) -> Self {
        Self {
            start: range.start,
            end: range.end,
        }
    }

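    /// Construct an empty range positioned at the start of this range.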
    fn head(self) -> Range {
        Self {
            start: self.start,
            end: self.start,
        }
    }
}

struct Internal {
    // The first free region.
    free_head: Option<HeaderId>,
    // Pointer to the tail region.
    tail: Option<HeaderId>,
    // The occupied header region.
    occupied: Option<HeaderId>,
    // The full range used by the allocator.
    full: Range,
    // The free range available to the allocator.
    free: Range,
}

impl Internal {
    // Return the number of allocated bytes.
    #[cfg(test)]
    #[inline]
    fn bytes(&self) -> usize {
        // SAFETY: It is guaranteed that `free.start >= full.start` inside of
        // the provided region.
        unsafe { self.free.start.byte_offset_from(self.full.start) as usize }
    }

    #[cfg(test)]
    #[inline]
    fn headers(&self) -> usize {
        unsafe {
            self.full
                .end
                .cast::<Header>()
                .offset_from(self.free.end.cast()) as usize
        }
    }

    // Return the number of remaining bytes.
    #[inline]
    fn remaining(&self) -> usize {
        // SAFETY: It is guaranteed that `free.end >= free.start` inside of the
        // provided region.
        unsafe { self.free.end.byte_offset_from(self.free.start) as usize }
    }

    /// Get the header pointer corresponding to the given id.
    #[inline]
    fn header(&self, at: HeaderId) -> &Header {
        // SAFETY: Once we've coerced to `&self`, then we guarantee that we can
        // get a header immutably.
        unsafe { &*self.full.end.cast::<Header>().wrapping_sub(at.get()) }
    }

    /// Get the mutable header pointer corresponding to the given id.
    #[inline]
    fn header_mut(&mut self, at: HeaderId) -> *mut Header {
        self.full.end.cast::<Header>().wrapping_sub(at.get())
    }

    /// Get the mutable region corresponding to the given id.
    #[inline]
    fn region(&mut self, id: HeaderId) -> Region {
        Region {
            id,
            ptr: self.header_mut(id),
        }
    }

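    /// Unlink the given header from the doubly linked list of regions, fixing
    /// up the neighbouring `prev`/`next` links and the tail pointer.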
    unsafe fn unlink(&mut self, header: &Header) {
        unsafe {
            if let Some(next) = header.next {
                (*self.header_mut(next)).prev = header.prev;
            } else {
                self.tail = header.prev;
            }

            if let Some(prev) = header.prev {
                (*self.header_mut(prev)).next = header.next;
            }
        }
    }

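    /// Unlink `region` from its current position in the list and push it to
    /// the back, making it the new tail.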
    unsafe fn replace_back(&mut self, region: &mut Region) {
        unsafe {
            let prev = region.prev.take();
            let next = region.next.take();

            if let Some(prev) = prev {
                (*self.header_mut(prev)).next = next;
            }

            if let Some(next) = next {
                (*self.header_mut(next)).prev = prev;
            }

            self.push_back(region);
        }
    }

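    /// Append `region` to the back of the linked list of regions, making it
    /// the new tail.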
    unsafe fn push_back(&mut self, region: &mut Region) {
        unsafe {
            if let Some(tail) = self.tail.replace(region.id) {
                region.prev = Some(tail);
                (*self.header_mut(tail)).next = Some(region.id);
            }
        }
    }

    /// Free a region.
    unsafe fn free_region(&mut self, region: Region) -> Header {
        unsafe {
            self.unlink(&region);

            region.ptr.replace(Header {
                range: self.full.head(),
                next: self.free_head.replace(region.id),
                prev: None,
            })
        }
    }

    /// Allocate a new header.
    ///
    /// # Safety
    ///
    /// The caller must ensure that there is enough space for the region and
    /// that the end pointer has been aligned to the requirements of `Header`.
    unsafe fn alloc_header(&mut self, end: *mut MaybeUninit<u8>) -> Option<Region> {
        if let Some(region) = self.free_head.take() {
            let mut region = self.region(region);

            region.range.start = self.free.start;
            region.range.end = end;

            return Some(region);
        }

        debug_assert_eq!(
            self.free.end.align_offset(align_of::<Header>()),
            0,
            "End pointer should be aligned to header"
        );

        let ptr = self.free.end.cast::<Header>().wrapping_sub(1);

        if ptr < self.free.start.cast() || ptr >= self.free.end.cast() {
            return None;
        }

        unsafe {
            let id = HeaderId::new(self.full.end.cast::<Header>().offset_from(ptr))?;

            ptr.write(Header {
                range: Range::new(self.free.start..end),
                prev: None,
                next: None,
            });

            self.free.end = ptr.cast();
            Some(Region { id, ptr })
        }
    }

    /// Allocate a region.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `this` is exclusively available.
    unsafe fn alloc(&mut self, requested: usize, align: usize) -> Option<Region> {
        if let Some(occupied) = self.occupied {
            let region = self.region(occupied);

            if region.capacity() >= requested && region.is_aligned(align) {
                self.occupied = None;
                return Some(region);
            }
        }

        unsafe {
            self.align(align)?;

            if self.remaining() < requested {
                return None;
            }

            let end = self.free.start.wrapping_add(requested);

            let mut region = self.alloc_header(end)?;

            self.free.start = end;
            debug_assert!(self.free.start <= self.free.end);
            self.push_back(&mut region);
            Some(region)
        }
    }

    /// Align the free region by the specified alignment.
    ///
    /// This might require either expanding the tail region, or introducing an
    /// occupied region which matches the number of bytes needed to fulfill the
    /// specified alignment.
    unsafe fn align(&mut self, align: usize) -> Option<()> {
        let align = self.free.start.align_offset(align);

        if align == 0 {
            return Some(());
        }

        if self.remaining() < align {
            return None;
        }

        let aligned_start = self.free.start.wrapping_add(align);

        if let Some(tail) = self.tail {
            // Simply expand the tail region to fill the gap created.
            self.region(tail).range.end = aligned_start;
        } else {
            unsafe {
                // We need to construct a new occupied header to fill in the gap
                // which we just aligned from since there is no previous region
                // to expand.
                let mut region = self.alloc_header(aligned_start)?;

                self.push_back(&mut region);
            }
        }

        self.free.start = aligned_start;
        Some(())
    }

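    /// Free the region identified by `region`, either releasing it back to the
    /// free list, marking it as the occupied region, or merging it into its
    /// predecessor.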
    unsafe fn free(&mut self, region: HeaderId) {
        let region = self.region(region);

        // Just free up the last region in the slab.
        if region.next.is_none() {
            unsafe {
                self.free_tail(region);
            }

            return;
        }

        // If there is no previous region, then mark this region as occupied.
        let Some(prev) = region.prev else {
            debug_assert!(
                self.occupied.is_none(),
                "There can only be one occupied region"
            );

            self.occupied = Some(region.id);
            return;
        };

        let mut prev = self.region(prev);

        // Move allocation to the previous region.
        let region = unsafe { self.free_region(region) };

        prev.range.end = region.range.end;

        // The current header being freed is the last in the list.
        if region.next.is_none() {
            self.free.start = region.range.start;
        }
    }

    /// Free the tail starting at the `current` region.
    unsafe fn free_tail(&mut self, current: Region) {
        debug_assert_eq!(self.tail, Some(current.id));

        unsafe {
            let current = self.free_region(current);
            debug_assert_eq!(current.next, None);

            self.free.start = match current.prev {
                // The prior region is occupied, so we can free that as well.
                Some(prev) if self.occupied == Some(prev) => {
                    self.occupied = None;
                    let prev = self.region(prev);
                    self.free_region(prev).range.start
                }
                _ => current.range.start,
            };
        }
    }

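    /// Ensure that `additional` bytes aligned to `align` are available in the
    /// free region, returning what the new start of the free region would be
    /// without committing the reservation.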
    unsafe fn reserve(&mut self, additional: usize, align: usize) -> Option<*mut MaybeUninit<u8>> {
        unsafe {
            self.align(align)?;
        }

        let free_start = self.free.start.wrapping_add(additional);

        if free_start > self.free.end || free_start < self.free.start {
            return None;
        }

        Some(free_start)
    }

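    /// Reallocate the region identified by `from` so that it can hold at least
    /// `requested` bytes aligned to `align`, copying the `len` initialized
    /// bytes if the allocation has to move. Returns the resulting region, or
    /// `None` if the allocator is out of space.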
    unsafe fn realloc(
        &mut self,
        from: HeaderId,
        len: usize,
        requested: usize,
        align: usize,
    ) -> Option<Region> {
        let mut from = self.region(from);

        // This is the last region in the slab, so we can just expand it.
        if from.next.is_none() {
            // Before we call realloc, we check the capacity of the current
            // region, so we know that it is less than `requested`.

            unsafe {
                let additional = requested - from.capacity();
                self.free.start = self.reserve(additional, align)?;
                from.range.end = from.range.end.add(additional);
            }

            return Some(from);
        }

        // There is no data allocated in the current region, so we can simply
        // re-link it to the end of the chain of allocations.
        if from.range.start == from.range.end {
            unsafe {
                let free_start = self.reserve(requested, align)?;
                from.range.start = replace(&mut self.free.start, free_start);
                from.range.end = free_start;
                self.replace_back(&mut from);
            }

            return Some(from);
        }

        // Try to merge with a preceding region, if the requested memory can
        // fit in it.
        'bail: {
            // Check if the immediate prior region can fit the requested allocation.
            let Some(prev) = from.prev else {
                break 'bail;
            };

            if self.occupied != Some(prev) {
                break 'bail;
            }

            let mut prev = self.region(prev);

            if prev.capacity() + from.capacity() < requested {
                break 'bail;
            }

            if !prev.is_aligned(align) {
                break 'bail;
            }

            unsafe {
                let from = self.free_region(from);

                from.range.start.copy_to(prev.range.start, len);
                prev.range.end = from.range.end;
                self.occupied = None;
                return Some(prev);
            }
        }

        unsafe {
            let to = self.alloc(requested, align)?;
            from.range.start.copy_to_nonoverlapping(to.range.start, len);
            self.free(from.id);
            Some(to)
        }
    }
}

/// The header of a region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Header {
    // The range of the allocated region.
    range: Range,
    // The previous region.
    prev: Option<HeaderId>,
    // The next region.
    next: Option<HeaderId>,
}

impl Header {
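    /// Return the size of the region in bytes.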
    #[inline]
    fn capacity(&self) -> usize {
        // SAFETY: Both pointers are defined within the region.
        unsafe { self.range.end.byte_offset_from(self.range.start) as usize }
    }

    /// Test if region is aligned to `align`.
    #[inline]
    fn is_aligned(&self, align: usize) -> bool {
        self.range.start.align_offset(align) == 0
    }
}