bump_stack/
lib.rs

#![doc = include_str!("../README.md")]
#![no_std]

mod iter;
mod util;

extern crate alloc;

use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use core::cell::Cell;
use core::marker::PhantomData;
use core::mem::{align_of, size_of};
use core::ptr::{self, NonNull};

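/// A last-in-first-out stack that stores its elements in a linked list of
/// bump-allocated chunks.
///
/// Elements are pushed through a shared reference (`&self`) and never move
/// once pushed; see [`Stack::push`] and [`Stack::iter`] for details.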
#[derive(Debug)]
pub struct Stack<T> {
    /// The current chunk we are bump allocating within.
    ///
    /// Its `next` link can point to the dead chunk, or to the cached chunk.
    ///
    /// Its `prev` link can point to the dead chunk or to the earlier allocated
    /// chunk.
    current_footer: Cell<NonNull<ChunkFooter>>,

    /// The first chunk we allocated, or the dead chunk if we haven't allocated
    /// anything yet.
    first_footer: Cell<NonNull<ChunkFooter>>,

    /// The capacity of the stack in elements.
    capacity: Cell<usize>,

    /// The number of elements currently in the stack.
    length: Cell<usize>,

    _phantom: PhantomData<T>,
}

// Public API
impl<T> Stack<T> {
    /// Constructs a new, empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
    ///
    /// # Examples
    ///
    /// ```
    /// use bump_stack::Stack;
    ///
    /// let mut stack: Stack<i32> = Stack::new();
    /// ```
    pub const fn new() -> Self {
        Self {
            current_footer: Cell::new(DEAD_CHUNK.get()),
            first_footer: Cell::new(DEAD_CHUNK.get()),
            capacity: Cell::new(0),
            length: Cell::new(0),
            _phantom: PhantomData,
        }
    }

    /// Constructs a new, empty `Stack<T>` with at least the specified capacity.
    ///
    /// The stack will be able to hold at least `capacity` elements without new
    /// allocations. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the stack will not allocate.
    ///
    /// If it is important to know the exact allocated capacity of a `Stack`,
    /// always use the [`capacity`] method after construction.
    ///
    /// For `Stack<T>` where `T` is a zero-sized type, there will be no
    /// allocation and the capacity will always be `usize::MAX`.
    ///
    /// [`capacity`]: Stack::capacity
    ///
    /// # Panics
    ///
    /// Panics if the required allocation size exceeds `isize::MAX` _bytes_.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::with_capacity(10);
    ///
    /// // The stack contains no items, even though it has capacity for more
    /// assert_eq!(stk.len(), 0);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // These are all done without additional allocations...
    /// for i in 0..10 {
    ///     stk.push(i);
    /// }
    /// assert_eq!(stk.len(), 10);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // ...but this may make the stack allocate a new chunk
    /// stk.push(11);
    /// assert_eq!(stk.len(), 11);
    /// assert!(stk.capacity() >= 11);
    ///
    /// // A stack of a zero-sized type will always over-allocate, since no
    /// // allocation is necessary
    /// let stk_units = Stack::<()>::with_capacity(10);
    /// assert_eq!(stk_units.capacity(), usize::MAX);
    /// ```
    pub fn with_capacity(capacity: usize) -> Self {
        let stack = Self::new();
        debug_assert!(unsafe { stack.current_footer.get().as_ref().is_dead() });
        if const { Self::ELEMENT_SIZE != 0 } && capacity != 0 {
            let chunk_size = Self::chunk_size_for(capacity);
            let footer = unsafe { stack.alloc_chunk(chunk_size) };
            stack.current_footer.set(footer);
            stack.first_footer.set(footer);
        }
        stack
    }

    /// Returns the total number of elements the stack can hold without
    /// additional allocations.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk: Stack<i32> = Stack::with_capacity(10);
    /// stk.push(42);
    /// assert!(stk.capacity() >= 10);
    /// ```
    ///
    /// A stack with zero-sized elements will always have a capacity of
    /// `usize::MAX`:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// #[derive(Clone)]
    /// struct ZeroSized;
    ///
    /// fn main() {
    ///     assert_eq!(std::mem::size_of::<ZeroSized>(), 0);
    ///     let stk = Stack::<ZeroSized>::with_capacity(0);
    ///     assert_eq!(stk.capacity(), usize::MAX);
    /// }
    /// ```
    #[inline]
    pub const fn capacity(&self) -> usize {
        if const { Self::ELEMENT_SIZE == 0 } {
            usize::MAX
        } else {
            self.capacity.get()
        }
    }

    /// Returns the number of elements in the stack, also referred to as its
    /// length.
    #[inline]
    pub const fn len(&self) -> usize {
        self.length.get()
    }

    /// Returns `true` if the stack contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert!(s.is_empty());
    ///
    /// s.push(1);
    /// assert!(!s.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a reference to the first element of the stack, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first());
    ///
    /// s.push(42);
    /// assert_eq!(Some(&42), s.first());
    /// ```
    #[inline]
    #[must_use]
    pub fn first(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe { Some(self.first_unchecked().as_ref()) }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the first element of the stack, or `None`
    /// if it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first_mut());
    ///
    /// s.push(1);
    /// if let Some(first) = s.first_mut() {
    ///     *first = 5;
    /// }
    /// assert_eq!(s.first(), Some(&5));
    /// ```
    #[inline]
    #[must_use]
    pub fn first_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe { Some(self.first_unchecked().as_mut()) }
        } else {
            None
        }
    }

    /// Returns a reference to the last element of the stack, or `None` if it is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last());
    ///
    /// stk.push(1);
    /// assert_eq!(Some(&1), stk.last());
    /// ```
    #[inline]
    #[must_use]
    pub fn last(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe { Some(self.last_unchecked().as_ref()) }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the last item in the stack, or `None` if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last_mut());
    ///
    /// stk.push(5);
    /// assert_eq!(Some(&mut 5), stk.last_mut());
    ///
    /// if let Some(last) = stk.last_mut() {
    ///     *last = 10;
    /// }
    /// assert_eq!(Some(&mut 10), stk.last_mut());
    /// ```
    #[inline]
    #[must_use]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe { Some(self.last_unchecked().as_mut()) }
        } else {
            None
        }
    }

    /// Appends an element to the stack, returning a reference to it.
    ///
    /// # Panics
    ///
    /// Panics if the global allocator cannot allocate a new chunk of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push(3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists, otherwise it
    /// allocates a new chunk.
    ///
    /// If the allocator cannot provide a chunk of the requested size, the size
    /// is halved and the allocation retried, until it either succeeds or falls
    /// below the minimum chunk size, in which case it panics.
    #[inline]
    pub fn push(&self, value: T) -> &T {
        self.push_with(|| value)
    }

    /// Pre-allocates space for an element in this stack, initializes it using
    /// the closure, and returns a reference to the new element.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push_with(|| 3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists, otherwise it
    /// allocates a new chunk.
    ///
    /// If the allocator cannot provide a chunk of the requested size, the size
    /// is halved and the allocation retried, until it either succeeds or falls
    /// below the minimum chunk size, in which case it panics.
    #[inline(always)]
    pub fn push_with<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        unsafe {
            let p = self.alloc_element();
            util::write_with(p.as_ptr(), f);
            self.length.update(|len| len + 1);
            p.as_ref()
        }
    }

    /// Removes the last element from the stack and returns it, or [`None`] if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.pop(), Some(3));
    /// assert_eq!(stk, [1, 2]);
    /// ```
    #[inline]
    pub fn pop(&mut self) -> Option<T> {
        unsafe {
            if let Some(element_ptr) = self.dealloc_element() {
                if const { Self::ELEMENT_SIZE == 0 } && self.length.get() == 0 {
                    None
                } else {
                    self.length.update(|len| len - 1);
                    Some(ptr::read(element_ptr.as_ptr()))
                }
            } else {
                None
            }
        }
    }

    /// Clears the stack, removing all elements.
    ///
    /// This method keeps the largest allocated chunk of memory for future
    /// allocations.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    ///
    /// stk.clear();
    ///
    /// assert!(stk.is_empty());
    /// assert!(stk.capacity() > 0);
    /// ```
    pub fn clear(&mut self) {
        // TODO: Reimplement the method running `drop_in_place`
        while let Some(elem) = self.pop() {
            drop(elem)
        }
    }

    /// Returns an iterator over the stack.
    ///
    /// The iterator yields all items from start to end.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    /// let mut iterator = stk.iter();
    ///
    /// assert_eq!(iterator.next(), Some(&1));
    /// assert_eq!(iterator.next(), Some(&2));
    /// assert_eq!(iterator.next(), Some(&4));
    /// assert_eq!(iterator.next(), None);
    /// ```
    ///
    /// Since `Stack` allows pushing new elements through an immutable
    /// reference to itself, you can push during iteration. However, the
    /// iterator only visits the elements that existed when it was created,
    /// which guarantees that you won't get an infinite loop.
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    ///
    /// for elem in stk.iter() {
    ///     stk.push(*elem);
    /// }
    /// assert_eq!(stk.len(), 6);
    /// assert_eq!(stk, [1, 2, 4, 1, 2, 4]);
    /// ```
    pub fn iter(&self) -> impl core::iter::Iterator<Item = &T> {
        crate::iter::Iter::new(self)
    }
}

impl<T> core::default::Default for Stack<T> {
    /// Creates an empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
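    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk: Stack<i32> = Stack::default();
    ///
    /// assert!(stk.is_empty());
    /// assert_eq!(stk.capacity(), 0);
    /// ```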
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T, const N: usize> core::convert::From<&[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain N items and
    /// fills it by cloning `slice`'s items.
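    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from(&[1, 2, 3]);
    ///
    /// assert_eq!(stk.len(), 3);
    /// assert_eq!(stk, [1, 2, 3]);
    /// ```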
    fn from(slice: &[T; N]) -> Self {
        let stk = Stack::with_capacity(N);
        for item in slice {
            stk.push(item.clone());
        }
        stk
    }
}

impl<T, const N: usize> core::convert::From<&mut [T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain N items and
    /// fills it by cloning `slice`'s items.
    #[inline]
    fn from(slice: &mut [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(slice)
    }
}

impl<T, const N: usize> core::convert::From<[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain N items and
    /// fills it by cloning `array`'s items.
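    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    ///
    /// assert_eq!(stk, [1, 2, 3]);
    /// ```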
    #[inline]
    fn from(array: [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(&array)
    }
}

impl<T> core::convert::From<&[T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain the slice's
    /// items and fills it by cloning them.
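    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let values = [1, 2, 3];
    /// let stk = Stack::from(&values[..]);
    ///
    /// assert_eq!(stk, [1, 2, 3]);
    /// ```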
    fn from(slice: &[T]) -> Self {
        let stk = Stack::with_capacity(slice.len());
        for item in slice {
            stk.push(item.clone());
        }
        stk
    }
}

impl<T> core::convert::From<&mut [T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain the slice's
    /// items and fills it by cloning them.
    fn from(slice: &mut [T]) -> Self {
        core::convert::From::<&[T]>::from(slice)
    }
}

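/// Dropping the stack drops all remaining elements and deallocates every
/// chunk the stack still owns.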
impl<T> core::ops::Drop for Stack<T> {
    fn drop(&mut self) {
        self.clear();
        unsafe {
            let current_footer = self.current_footer.get().as_ref();
            if !current_footer.is_dead() {
                debug_assert!(current_footer.prev.get().as_ref().is_dead());
                debug_assert!(current_footer.next.get().as_ref().is_dead());
                self.dealloc_chunk(self.current_footer.get());
            }
        }
    }
}

impl<'a, T> core::iter::IntoIterator for &'a Stack<T> {
    type Item = &'a T;
    type IntoIter = crate::iter::Iter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        crate::iter::Iter::new(self)
    }
}

impl<T, U> core::cmp::PartialEq<[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U]) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U> core::cmp::PartialEq<&[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U> core::cmp::PartialEq<&mut [U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U; N]) -> bool {
        self.len() == N && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&mut [U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &[T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &mut [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

/// This footer is always at the end of the chunk. So memory available for
/// allocation is `self.data..=self`.
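///
/// A rough sketch of a single chunk's layout; the bump finger `ptr` moves
/// from `data` towards the footer as elements are pushed:
///
/// ```text
/// data            ptr                        footer (self)
/// |               |                          |
/// v               v                          v
/// +---------------+--------------------------+-------------+
/// | pushed items  | free space for new items | ChunkFooter |
/// +---------------+--------------------------+-------------+
/// ```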
#[derive(Debug)]
struct ChunkFooter {
    /// Pointer to the start of this chunk allocation.
    data: NonNull<u8>,

    /// Bump allocation finger that is always in the range `self.data..=self`.
    ptr: Cell<NonNull<u8>>,

    /// The layout of this chunk's allocation.
    layout: Layout,

    /// Link to the previous chunk.
    ///
    /// Note that the last node in the `prev` linked list is the dead chunk,
    /// whose `prev` link points to itself.
    prev: Cell<NonNull<ChunkFooter>>,

    /// Link to the next chunk.
    ///
    /// Note that the last node in the `next` linked list is the dead chunk,
    /// whose `next` link points to itself.
    next: Cell<NonNull<ChunkFooter>>,
}

impl ChunkFooter {
    /// Returns a non-null pointer to the chunk footer.
    fn get(&self) -> NonNull<Self> {
        NonNull::from(self)
    }

    /// Returns `true` if this is the `DEAD_CHUNK` chunk.
    fn is_dead(&self) -> bool {
        ptr::eq(self, &DEAD_CHUNK.0)
    }

    /// Returns the number of bytes occupied by elements in the chunk.
    fn occupied(&self) -> usize {
        let start = self.data.as_ptr() as usize;
        let ptr = self.ptr.get().as_ptr() as usize;
        debug_assert!(start <= ptr);
        ptr - start
    }

    /// The capacity of the chunk in bytes.
    fn capacity(&self) -> usize {
        let end = self.get().as_ptr() as usize;
        let start = self.data.as_ptr() as usize;
        debug_assert!(start < end);
        end - start
    }

    /// Checks if the chunk is empty.
    fn is_empty(&self) -> bool {
        let start = self.data.as_ptr() as usize;
        let ptr = self.ptr.get().as_ptr() as usize;
        debug_assert!(start <= ptr);
        start == ptr
    }
}

#[repr(transparent)]
struct DeadChunkFooter(ChunkFooter);

unsafe impl Sync for DeadChunkFooter {}

impl DeadChunkFooter {
    const fn get(&'static self) -> NonNull<ChunkFooter> {
        unsafe { NonNull::new_unchecked(&DEAD_CHUNK as *const DeadChunkFooter as *mut ChunkFooter) }
    }
}

// Empty chunk that contains only its footer.
static DEAD_CHUNK: DeadChunkFooter = DeadChunkFooter(ChunkFooter {
    data: DEAD_CHUNK.get().cast(),
    ptr: Cell::new(DEAD_CHUNK.get().cast()),
    layout: Layout::new::<ChunkFooter>(),
    prev: Cell::new(DEAD_CHUNK.get()),
    next: Cell::new(DEAD_CHUNK.get()),
});

/// Maximum typical overhead per allocation imposed by allocators.
const ALLOC_OVERHEAD: usize = 16;

// Constants
impl<T> Stack<T> {
    /// Element alignment.
    const ELEMENT_ALIGN: usize = align_of::<T>();

    /// Element size.
    const ELEMENT_SIZE: usize = size_of::<T>();

    /// Footer alignment is the maximum of the element alignment and the
    /// footer's own alignment. This allows the footer's address to be used as
    /// the `end` of the element array within the chunk.
    const FOOTER_ALIGN: usize = util::max(align_of::<ChunkFooter>(), Self::ELEMENT_ALIGN);

    /// Footer size in bytes.
    const FOOTER_SIZE: usize = size_of::<ChunkFooter>();

    /// Chunk alignment is the maximum of element and footer alignments.
    const CHUNK_ALIGN: usize = util::max(Self::ELEMENT_ALIGN, Self::FOOTER_ALIGN);

    /// Chunk size sufficient for at least one element.
    const CHUNK_MIN_SIZE: usize = Self::chunk_size_for(1);

    /// Whether it is possible to use the footer's address as the `end` of the
    /// element array within the chunk.
    const FOOTER_IS_END: bool = {
        assert!(util::is_aligned_to(Self::CHUNK_ALIGN, Self::ELEMENT_ALIGN));
        assert!(util::is_aligned_to(Self::CHUNK_ALIGN, Self::FOOTER_ALIGN));
        Self::FOOTER_ALIGN.is_multiple_of(Self::ELEMENT_SIZE)
            || Self::ELEMENT_SIZE.is_multiple_of(Self::FOOTER_ALIGN)
    };

    /// Chunk size for the first chunk if the capacity was not specified with
    /// [`Stack::with_capacity`].
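    ///
    /// For example, on a typical 64-bit target (where the footer occupies 48
    /// bytes) this works out to 496 bytes for `T = i32`: 512 minus the assumed
    /// 16-byte allocator overhead.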
    const CHUNK_FIRST_SIZE: usize = {
        let chunk_512b = 0x200 - ALLOC_OVERHEAD;
        let size = if Self::ELEMENT_SIZE > 1024 {
            Self::chunk_size_for(2)
        } else {
            util::max(chunk_512b, Self::chunk_size_for(8))
        };
        assert!((size + ALLOC_OVERHEAD).is_power_of_two());
        size
    };

    /// Maximum possible chunk size. Equal to 4 GiB if the address space is
    /// not limited; otherwise, equal to (address space size / 8). The
    /// allocator overhead is subtracted in both cases.
    const CHUNK_MAX_SIZE: usize = {
        let part_of_entire_memory = util::round_down_to_pow2(isize::MAX as usize >> 2);
        let common_sensible_in_2025 = 4 << 30; // 4 GiB
        let size = util::min(part_of_entire_memory, common_sensible_in_2025);
        assert!(size.is_power_of_two());
        size - ALLOC_OVERHEAD
    };

    /// `true` if `T` is a zero-sized type.
    const ELEMENT_IS_ZST: bool = Self::ELEMENT_SIZE == 0;

    /// Calculates a chunk size big enough for the given number of elements.
    /// The resulting size is a power of two minus the allocator overhead.
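    ///
    /// For example, on a typical 64-bit target the footer occupies 48 bytes,
    /// so for `T = u64` and `elements_count = 8` the raw size is
    /// `8 * 8 + (16 + 48) = 128` bytes, and the returned size is
    /// `128_usize.next_power_of_two() - 16 = 112` bytes.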
    const fn chunk_size_for(mut elements_count: usize) -> usize {
        if elements_count < 2 {
            // I'm not sure that `alloc` always returns a pointer aligned as
            // requested, so it's possible that the memory allocated for one
            // element is not enough for that element. So the requested size is
            // increased here to hold at least two elements.
            elements_count = 2;
        }
        let mut chunk_size = elements_count * Self::ELEMENT_SIZE;
        assert!(util::is_aligned_to(chunk_size, Self::ELEMENT_ALIGN));

        let overhead = ALLOC_OVERHEAD + Self::FOOTER_SIZE;
        chunk_size += overhead.next_multiple_of(Self::FOOTER_ALIGN);

        chunk_size.next_power_of_two() - ALLOC_OVERHEAD
    }

    /// Checks if the chunk is full, i.e. it cannot hold any more elements.
    #[inline(always)]
    fn chunk_is_full(footer: &ChunkFooter) -> bool {
        if const { Self::ELEMENT_SIZE == 0 } {
            return false;
        }
        let end = footer.get().as_ptr() as usize;
        let ptr = footer.ptr.get().as_ptr() as usize;
        debug_assert!(ptr <= end);
        if const { Self::FOOTER_IS_END } {
            end == ptr
        } else {
            end - ptr < Self::ELEMENT_SIZE
        }
    }
}

// Private API
impl<T> Stack<T> {
    #[inline(always)]
    unsafe fn alloc_element(&self) -> NonNull<T> {
        if let Some(ptr) = self.alloc_element_fast() {
            ptr
        } else {
            debug_assert!(!Self::ELEMENT_IS_ZST, "slow alloc is impossible for ZST");
            unsafe { self.alloc_element_slow() }
        }
    }

    #[inline(always)]
    fn alloc_element_fast(&self) -> Option<NonNull<T>> {
        let current_footer = unsafe { self.current_footer.get().as_ref() };

        if Self::chunk_is_full(current_footer) {
            return None;
        }

        let ptr = current_footer.ptr.get().as_ptr();
        let new_ptr = ptr.wrapping_byte_add(Self::ELEMENT_SIZE);

        debug_assert!(util::ptr_is_aligned_to(new_ptr, Self::ELEMENT_ALIGN));

        let new_ptr = unsafe { NonNull::new_unchecked(new_ptr) };
        current_footer.ptr.set(new_ptr);

        let ptr = unsafe { NonNull::new_unchecked(ptr) };
        Some(ptr.cast())
    }

    // Should be run only if the current chunk is full.
    unsafe fn alloc_element_slow(&self) -> NonNull<T> {
        unsafe {
            let current_footer = self.current_footer.get().as_ref();

            debug_assert!(Self::chunk_is_full(current_footer));

            if current_footer.is_dead() {
                // this is the initial state, without any allocated chunks at all
                debug_assert!(current_footer.is_dead());
                debug_assert!(current_footer.prev.get().as_ref().is_dead());
                debug_assert!(current_footer.next.get().as_ref().is_dead());

                let new_footer_ptr = self.alloc_chunk(Self::CHUNK_FIRST_SIZE);
                self.current_footer.set(new_footer_ptr);
                self.first_footer.set(new_footer_ptr);
            } else {
                // at least the current chunk is not dead
                let next_footer_ptr = current_footer.next.get();
                let next_footer = next_footer_ptr.as_ref();

                if next_footer.is_dead() {
                    // the current chunk is the only one, so create a new one,
                    // and make it the current chunk.
                    let current_chunk_size = current_footer.layout.size();
                    let new_chunk_size = if current_chunk_size == Self::CHUNK_MAX_SIZE {
                        Self::CHUNK_MAX_SIZE
                    } else {
                        debug_assert!(current_chunk_size < Self::CHUNK_MAX_SIZE);
                        ((current_chunk_size + ALLOC_OVERHEAD) << 1) - ALLOC_OVERHEAD
                    };

                    debug_assert!((new_chunk_size + ALLOC_OVERHEAD).is_power_of_two());

                    let new_footer_ptr = self.alloc_chunk(new_chunk_size);
                    let new_footer = new_footer_ptr.as_ref();

                    current_footer.next.set(new_footer_ptr);
                    new_footer.prev.set(self.current_footer.get());

                    self.current_footer.set(new_footer_ptr);
                } else {
                    // there is a next empty chunk, so make it the current chunk
                    debug_assert!(next_footer.is_empty());
                    self.current_footer.set(next_footer_ptr);
                }
            }

            self.alloc_element_fast().unwrap_unchecked()
        }
    }

    unsafe fn alloc_chunk(&self, chunk_size: usize) -> NonNull<ChunkFooter> {
        debug_assert!(chunk_size <= Self::CHUNK_MAX_SIZE);

        let mut new_chunk_size = chunk_size;
        let new_chunk_align = Self::CHUNK_ALIGN;

        let (new_chunk_ptr, new_chunk_layout) = loop {
            // checks for `Layout::from_size_align_unchecked`
            debug_assert!(new_chunk_align != 0);
            debug_assert!(new_chunk_align.is_power_of_two());
            debug_assert!((new_chunk_size + ALLOC_OVERHEAD).is_power_of_two());
            debug_assert!(new_chunk_size <= isize::MAX as usize);

            let new_chunk_layout =
                unsafe { Layout::from_size_align_unchecked(new_chunk_size, new_chunk_align) };

            let new_chunk_ptr = unsafe { alloc(new_chunk_layout) };
            if !new_chunk_ptr.is_null() {
                assert!(util::ptr_is_aligned_to(new_chunk_ptr, Self::CHUNK_ALIGN));
                break (new_chunk_ptr, new_chunk_layout);
            }

            // if we couldn't get a new chunk, halve the chunk size and retry
            new_chunk_size = ((new_chunk_size + ALLOC_OVERHEAD) >> 1) - ALLOC_OVERHEAD;
            if new_chunk_size < Self::CHUNK_MIN_SIZE {
                handle_alloc_error(new_chunk_layout);
            }
        };

        let new_start = new_chunk_ptr;
        let new_end = new_start.wrapping_byte_add(new_chunk_layout.size());
        let mut new_footer_start = new_end.wrapping_byte_sub(Self::FOOTER_SIZE);
        new_footer_start = util::round_mut_ptr_down_to(new_footer_start, Self::FOOTER_ALIGN);
        let new_ptr = new_start;

        debug_assert!(new_start < new_footer_start);
        debug_assert!(new_footer_start < new_end);
        debug_assert!(util::ptr_is_aligned_to(new_ptr, Self::ELEMENT_ALIGN));

        let new_chunk_cap_in_bytes = new_footer_start as usize - new_ptr as usize;
        let new_chunk_capacity = new_chunk_cap_in_bytes / Self::ELEMENT_SIZE;
        self.capacity.update(|cap| cap + new_chunk_capacity);

        if const { Self::ELEMENT_SIZE.is_multiple_of(Self::FOOTER_ALIGN) } {
            // in this case, additionally move the footer down so that it starts
            // exactly at the end of the element array
            let buffer_size = Self::ELEMENT_SIZE * new_chunk_capacity;
            let corrected_footer_start = new_start.wrapping_byte_add(buffer_size);

            let delta = new_footer_start as usize - corrected_footer_start as usize;
            debug_assert!(delta < Self::ELEMENT_SIZE);

            new_footer_start = corrected_footer_start;
        }

        unsafe {
            let new_footer_ptr = new_footer_start as *mut ChunkFooter;

            util::write_with(new_footer_ptr, || ChunkFooter {
                data: NonNull::new_unchecked(new_start),
                ptr: Cell::new(NonNull::new_unchecked(new_ptr)),
                layout: new_chunk_layout,
                prev: Cell::new(DEAD_CHUNK.get()),
                next: Cell::new(DEAD_CHUNK.get()),
            });

            NonNull::new_unchecked(new_footer_ptr)
        }
    }

    #[inline(always)]
    unsafe fn dealloc_element(&mut self) -> Option<NonNull<T>> {
        unsafe {
            if let Some(ptr) = self.dealloc_element_fast() {
                Some(ptr)
            } else {
                self.dealloc_element_slow()
            }
        }
    }

    #[inline(always)]
    unsafe fn dealloc_element_fast(&mut self) -> Option<NonNull<T>> {
        let current_footer_ptr = self.current_footer.get();
        let current_footer = unsafe { current_footer_ptr.as_ref() };

        let start = current_footer.data.as_ptr();
        let ptr = current_footer.ptr.get().as_ptr();
        debug_assert!(start <= ptr);
        let occupied = ptr as usize - start as usize;

        if occupied < Self::ELEMENT_SIZE {
            return None;
        }

        let new_ptr = ptr.wrapping_byte_sub(Self::ELEMENT_SIZE);

        debug_assert!(start <= new_ptr);
        debug_assert!(util::ptr_is_aligned_to(new_ptr, Self::ELEMENT_ALIGN));

        let new_ptr = unsafe { NonNull::new_unchecked(new_ptr) };
        current_footer.ptr.set(new_ptr);

        Some(new_ptr.cast())
    }

    unsafe fn dealloc_element_slow(&mut self) -> Option<NonNull<T>> {
        unsafe {
            let current_footer_ptr = self.current_footer.get();
            let current_footer = current_footer_ptr.as_ref();

            let next_footer_ptr = current_footer.next.get();
            let next_footer = next_footer_ptr.as_ref();

            let prev_footer_ptr = current_footer.prev.get();
            let prev_footer = prev_footer_ptr.as_ref();

            if current_footer.is_dead() {
                debug_assert!(next_footer.is_dead());
                debug_assert!(prev_footer.is_dead());
                return None;
            }

            debug_assert!(current_footer.is_empty());
            debug_assert!(next_footer.is_empty());

            if !next_footer.is_dead() {
                if current_footer.layout.size() < next_footer.layout.size() {
                    debug_assert!(next_footer.next.get().as_ref().is_dead());

                    next_footer.prev.set(prev_footer_ptr);
                    self.current_footer.set(next_footer_ptr);

                    self.dealloc_chunk(current_footer_ptr);
                } else {
                    self.dealloc_chunk(next_footer_ptr);
                }
                self.current_footer
                    .get()
                    .as_ref()
                    .next
                    .set(DEAD_CHUNK.get());
            }

            if prev_footer.is_dead() {
                self.first_footer.set(self.current_footer.get());
                None
            } else {
                // check if prev_footer is full
                debug_assert!(Self::chunk_is_full(prev_footer));

                prev_footer.next.set(self.current_footer.get());
                self.current_footer.set(prev_footer_ptr);

                self.dealloc_element_fast()
            }
        }
    }

    unsafe fn dealloc_chunk(&mut self, mut footer_ptr: NonNull<ChunkFooter>) {
        unsafe {
            let footer = footer_ptr.as_mut();
            let chunk_capacity = footer.capacity() / Self::ELEMENT_SIZE;
            debug_assert!(chunk_capacity <= self.capacity());
            self.capacity.update(|cap| cap - chunk_capacity);
            debug_assert!(self.len() <= self.capacity());
            dealloc(footer.data.as_ptr(), footer.layout);
        }
    }

    /// Returns a pointer to the first element of the stack.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the stack is not empty.
    unsafe fn first_unchecked(&self) -> NonNull<T> {
        unsafe {
            let first_footer = self.first_footer.get().as_ref();
            assert!(first_footer.occupied() >= Self::ELEMENT_SIZE);
            first_footer.data.cast()
        }
    }

    /// Returns a pointer to the last element of the stack.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the stack is not empty.
    unsafe fn last_unchecked(&self) -> NonNull<T> {
        unsafe {
            let mut footer = self.current_footer.get().as_ref();
            if footer.is_empty() {
                footer = footer.prev.get().as_ref();
            }
            assert!(footer.occupied() >= Self::ELEMENT_SIZE);
            footer.ptr.get().cast().sub(1)
        }
    }
}

/// Collection of static tests for the inner constants of `Stack`. They are
/// enabled only on some platforms, where I could test them. Look inside to see
/// exactly which ones.
mod stest;

/// Unit tests.
#[cfg(test)]
mod utest;