bump_stack/lib.rs
#![doc = include_str!("../README.md")]
#![cfg_attr(not(test), no_std)]

mod chunk;
mod iter;
mod util;

extern crate alloc;

use crate::chunk::*;
use crate::util::TypeProps;
use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use alloc::vec::Vec;
use core::cell::Cell;
use core::iter::DoubleEndedIterator;
use core::marker::PhantomData;
use core::ptr::{self, NonNull};

pub struct Stack<T> {
    /// The current chunk we are bump allocating within.
    ///
    /// Its `next` link can point to the dead chunk, or to the cached chunk.
    ///
    /// Its `prev` link can point to the dead chunk or to the earlier allocated
    /// chunk.
    current_footer: Cell<NonNull<ChunkFooter>>,

    /// The first chunk we allocated, or the dead chunk if we haven't allocated
    /// anything yet.
    first_footer: Cell<NonNull<ChunkFooter>>,

    /// The capacity of the stack in elements.
    capacity: Cell<usize>,

    /// The number of elements currently in the stack.
    length: Cell<usize>,

    phantom: PhantomData<Cell<T>>, // invariant over `T`
}
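
// A sketch of the chunk list (`DEAD` is the shared sentinel chunk; the
// cached chunk is optional):
//
//   DEAD <-prev- [first] <-> ... <-> [current] -next-> [cached] -next-> DEAD
//
// `first_footer` tracks the oldest live chunk and `current_footer` the chunk
// currently being bump-allocated into.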

// Public API
impl<T> Stack<T> {
    /// Constructs a new, empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
    ///
    /// # Examples
    ///
    /// ```
    /// use bump_stack::Stack;
    ///
    /// let mut stack: Stack<i32> = Stack::new();
    /// ```
    pub const fn new() -> Self {
        Self {
            current_footer: Cell::new(DEAD_CHUNK.footer()),
            first_footer: Cell::new(DEAD_CHUNK.footer()),
            capacity: Cell::new(0),
            length: Cell::new(0),
            phantom: PhantomData,
        }
    }

    /// Constructs a new, empty `Stack<T>` with at least the specified capacity.
    ///
    /// The stack will be able to hold at least `capacity` elements without new
    /// allocations. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the stack will not allocate.
    ///
    /// If it is important to know the exact allocated capacity of a `Stack`,
    /// always use the [`capacity`] method after construction.
    ///
    /// For `Stack<T>` where `T` is a zero-sized type, there will be no
    /// allocation and the capacity will always be `usize::MAX`.
    ///
    /// [`capacity`]: Stack::capacity
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::with_capacity(10);
    ///
    /// // The stack contains no items, even though it has capacity for more
    /// assert_eq!(stk.len(), 0);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // These are all done without additional allocations...
    /// for i in 0..10 {
    ///     stk.push(i);
    /// }
    /// assert_eq!(stk.len(), 10);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // ...but this may make the stack allocate a new chunk
    /// stk.push(11);
    /// assert_eq!(stk.len(), 11);
    /// assert!(stk.capacity() >= 11);
    ///
    /// // A stack of a zero-sized type will always over-allocate, since no
    /// // allocation is necessary
    /// let stk_units = Stack::<()>::with_capacity(10);
    /// assert_eq!(stk_units.capacity(), usize::MAX);
    /// ```
    pub fn with_capacity(capacity: usize) -> Self {
        let stack = Self::new();
        if const { !T::IS_ZST } && capacity != 0 {
            let chunk_size = Chunk::<T>::chunk_size_for(capacity);
            let footer = unsafe { stack.alloc_chunk(chunk_size) };
            stack.current_footer.set(footer);
            stack.first_footer.set(footer);
        }
        stack
    }

    /// Returns the total number of elements the stack can hold without
    /// additional allocations.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::with_capacity(10);
    /// stk.push(42);
    /// assert!(stk.capacity() >= 10);
    /// ```
    ///
    /// A stack with zero-sized elements will always have a capacity of
    /// `usize::MAX`:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// #[derive(Clone)]
    /// struct ZeroSized;
    ///
    /// assert_eq!(std::mem::size_of::<ZeroSized>(), 0);
    /// let stk = Stack::<ZeroSized>::with_capacity(0);
    /// assert_eq!(stk.capacity(), usize::MAX);
    /// ```
    #[inline]
    pub const fn capacity(&self) -> usize {
        if const { T::IS_ZST } {
            usize::MAX
        } else {
            self.capacity.get()
        }
    }

    /// Returns the number of elements in the stack.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.len(), 3);
    /// ```
    #[inline]
    pub const fn len(&self) -> usize {
        self.length.get()
    }

    /// Returns `true` if the stack contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert!(s.is_empty());
    ///
    /// s.push(1);
    /// assert!(!s.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a reference to the first element of the stack, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first());
    ///
    /// s.push(42);
    /// assert_eq!(Some(&42), s.first());
    /// ```
    #[inline]
    #[must_use]
    pub fn first(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe {
                let first_chunk = wrap::<T>(self.first_footer.get());
                Some(first_chunk.first_unchecked().as_ref())
            }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the first element of the stack, or `None`
    /// if it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first_mut());
    ///
    /// s.push(1);
    /// if let Some(first) = s.first_mut() {
    ///     *first = 5;
    /// }
    /// assert_eq!(s.first(), Some(&5));
    /// ```
    #[inline]
    #[must_use]
    pub fn first_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe {
                let first_chunk = wrap::<T>(self.first_footer.get());
                Some(first_chunk.first_unchecked().as_mut())
            }
        } else {
            None
        }
    }

    /// Returns a reference to the last element of the stack, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last());
    ///
    /// stk.push(1);
    /// assert_eq!(Some(&1), stk.last());
    /// ```
    #[inline]
    #[must_use]
    pub fn last(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe {
                let mut chunk = wrap::<T>(self.current_footer.get());
                if chunk.is_empty() {
                    chunk = wrap::<T>(chunk.prev());
                }
                Some(chunk.last_unchecked().as_ref())
            }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the last item in the stack, or `None` if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last_mut());
    ///
    /// stk.push(5);
    /// assert_eq!(Some(&mut 5), stk.last_mut());
    ///
    /// if let Some(last) = stk.last_mut() {
    ///     *last = 10;
    /// }
    /// assert_eq!(Some(&mut 10), stk.last_mut());
    /// ```
    #[inline]
    #[must_use]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe {
                let mut chunk = wrap::<T>(self.current_footer.get());
                if chunk.is_empty() {
                    chunk = wrap::<T>(chunk.prev());
                }
                Some(chunk.last_unchecked().as_mut())
            }
        } else {
            None
        }
    }

    /// Appends an element to the stack, returning a reference to it.
    ///
    /// # Panics
    ///
    /// Panics if the global allocator cannot allocate a new chunk of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push(3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation retried; once the size falls below the minimum chunk size,
    /// the method panics.
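    ///
    /// A sketch of the growth behavior (the exact capacities depend on the
    /// chunk sizing policy):
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// for i in 0..100 {
    ///     stk.push(i);
    ///     // capacity grows chunk by chunk, but always covers the length
    ///     assert!(stk.capacity() >= stk.len());
    /// }
    /// ```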
    #[inline]
    pub fn push(&self, value: T) -> &T {
        self.push_with(|| value)
    }

    /// Appends an element to the stack, returning a mutable reference to it.
    ///
    /// # Panics
    ///
    /// Panics if the global allocator cannot allocate a new chunk of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2]);
    /// let last = stk.push_mut(3);
    /// assert_eq!(*last, 3);
    /// assert_eq!(stk, [3, 2, 1]);
    ///
    /// let last = stk.push_mut(3);
    /// *last += 1;
    /// assert_eq!(stk, [4, 3, 2, 1]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation retried; once the size falls below the minimum chunk size,
    /// the method panics.
    #[inline]
    #[must_use = "if you don't need the returned value, consider using `push` instead"]
    pub fn push_mut(&mut self, value: T) -> &mut T {
        self.push_mut_with(|| value)
    }

    /// Pre-allocates space for an element in this stack, initializes it using
    /// the closure, and returns a reference to the new element.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push_with(|| 3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation retried; once the size falls below the minimum chunk size,
    /// the method panics.
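    ///
    /// The closure runs only after space for the element has been reserved,
    /// which can avoid constructing a large value before it has a home (a
    /// small sketch):
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let big = stk.push_with(|| [0u8; 1024]);
    /// assert_eq!(big.len(), 1024);
    /// ```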
    #[inline(always)]
    pub fn push_with<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        if const { T::IS_ZST } {
            self.length.update(|len| len + 1);
            unsafe { util::zst_ptr::<T>().as_ref() }
        } else {
            unsafe {
                let p = self.alloc_element();
                util::write_with(p.as_ptr(), f);
                self.length.update(|len| len + 1);
                p.as_ref()
            }
        }
    }

    /// Pre-allocates space for an element in this stack, initializes it using
    /// the closure, and returns a mutable reference to the new element.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2]);
    /// let last = stk.push_mut_with(|| 3);
    /// assert_eq!(*last, 3);
    /// assert_eq!(stk, [3, 2, 1]);
    ///
    /// let last = stk.push_mut_with(|| 3);
    /// *last += 1;
    /// assert_eq!(stk, [4, 3, 2, 1]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation retried; once the size falls below the minimum chunk size,
    /// the method panics.
    #[inline(always)]
    pub fn push_mut_with<F>(&mut self, f: F) -> &mut T
    where
        F: FnOnce() -> T,
    {
        if const { T::IS_ZST } {
            self.length.update(|len| len + 1);
            unsafe { util::zst_ptr::<T>().as_mut() }
        } else {
            unsafe {
                let mut p = self.alloc_element();
                util::write_with(p.as_ptr(), f);
                self.length.update(|len| len + 1);
                p.as_mut()
            }
        }
    }

    /// Removes the last element from the stack and returns it, or [`None`] if
    /// the stack is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.pop(), Some(3));
    /// assert_eq!(stk, [2, 1]);
    /// ```
    #[inline]
    pub fn pop(&mut self) -> Option<T> {
        if const { T::IS_ZST } {
            if self.length.get() > 0 {
                self.length.update(|len| len - 1);
                unsafe { Some(ptr::read(util::zst_ptr::<T>().as_ptr())) }
            } else {
                None
            }
        } else {
            // `T` is not a ZST
            if let Some(element_ptr) = unsafe { self.dealloc_element() } {
                self.length.update(|len| len - 1);
                Some(unsafe { ptr::read(element_ptr.as_ptr()) })
            } else {
                None
            }
        }
    }

    /// Removes and returns the last element from the stack if the predicate
    /// returns `true`, or [`None`] if the predicate returns `false` or the
    /// stack is empty (in which case the predicate is not called).
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3, 4]);
    /// let pred = |x: &mut i32| *x % 2 == 0;
    ///
    /// assert_eq!(stk.pop_if(pred), Some(4));
    /// assert_eq!(stk, [3, 2, 1]);
    /// assert_eq!(stk.pop_if(pred), None);
    /// ```
    pub fn pop_if(&mut self, predicate: impl FnOnce(&mut T) -> bool) -> Option<T> {
        let last = self.last_mut()?;
        if predicate(last) { self.pop() } else { None }
    }

    /// Clears the stack, dropping all elements.
    ///
    /// This method keeps the biggest chunk of memory around for future
    /// allocations.
    ///
    /// The order in which elements are dropped is not defined.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    ///
    /// stk.clear();
    ///
    /// assert!(stk.is_empty());
    /// assert!(stk.capacity() > 0);
    /// ```
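    ///
    /// Since this call only deallocates chunks and never allocates, the
    /// capacity never increases as a result of it:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    /// let cap_before = stk.capacity();
    /// stk.clear();
    /// assert!(stk.capacity() <= cap_before);
    /// ```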
    pub fn clear(&mut self) {
        let mut first_footer = self.first_footer.get();
        let mut first_chunk = unsafe { wrap::<T>(first_footer) };
        if first_chunk.is_dead() {
            return;
        }
        loop {
            let next_footer = first_chunk.next();
            let next_chunk = unsafe { wrap(next_footer) };

            if next_chunk.is_dead() {
                break;
            }

            unsafe { self.dealloc_chunk(first_footer) };

            first_footer = next_footer;
            first_chunk = next_chunk;
        }
        let dropped = unsafe { wrap::<T>(first_footer).drop() };
        debug_assert!(self.len() >= dropped);
        self.length.update(|length| length - dropped);

        first_chunk.set_prev(DEAD_CHUNK.footer());
        self.first_footer.set(first_footer);
        self.current_footer.set(first_footer);
    }

    /// Returns `true` if the stack contains an element with the given value.
    ///
    /// This operation is *O*(*n*).
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([3, 8, 12]);
    /// assert!(stk.contains(&8));
    /// assert!(!stk.contains(&20));
    /// ```
    #[must_use]
    pub fn contains(&self, value: &T) -> bool
    where
        T: PartialEq,
    {
        self.iter().any(|elem| elem == value)
    }

    /// Copies `self` into a `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let s = Stack::from([10, 40, 30]);
    /// let v = s.to_vec();
    /// assert_eq!(v, &[30, 40, 10]);
    /// ```
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        let mut vec = Vec::with_capacity(self.len());
        for chunk in self.chunks() {
            vec.extend_from_slice(chunk);
        }
        vec
    }

    /// Returns an iterator over the stack.
    ///
    /// The iterator yields references to all items in the reverse order of
    /// their insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    /// let mut iterator = stk.iter();
    ///
    /// assert_eq!(iterator.next(), Some(&4));
    /// assert_eq!(iterator.next(), Some(&2));
    /// assert_eq!(iterator.next(), Some(&1));
    /// assert_eq!(iterator.next(), None);
    /// ```
    ///
    /// Since `Stack` allows pushing new elements through an immutable
    /// reference, you can push while iterating. The iterator, however, only
    /// visits the elements that existed when it was created, which guarantees
    /// that such a loop terminates.
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    ///
    /// for elem in stk.iter() {
    ///     stk.push(*elem);
    /// }
    /// assert_eq!(stk.len(), 6);
    /// assert_eq!(stk, [1, 2, 4, 4, 2, 1]);
    /// ```
    #[inline]
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &T> {
        crate::iter::Iter::new(self)
    }

    /// Returns a mutable iterator over the stack.
    ///
    /// The iterator yields references to all items in the reverse order of
    /// their insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 4]);
    ///
    /// for elem in stk.iter_mut() {
    ///     *elem *= 2;
    /// }
    ///
    /// assert_eq!(&stk, &[8, 4, 2]);
    /// ```
    #[inline]
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut T> {
        crate::iter::IterMut::new(self)
    }

    /// Returns an iterator over slices corresponding to the stack's memory
    /// chunks.
    ///
    /// Taken together, the yielded slices cover all items in reverse insertion
    /// order, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([0, 1, 2]);
    /// assert_eq!(stk.chunks().collect::<Vec<_>>(), [&[2, 1, 0]]);
    ///
    /// // fill up the first chunk
    /// for i in 3..stk.capacity() {
    ///     stk.push(i);
    /// }
    /// assert_eq!(stk.chunks().count(), 1);
    ///
    /// // create a new chunk
    /// stk.push(42);
    /// assert_eq!(stk.chunks().count(), 2);
    /// ```
    #[inline]
    pub fn chunks(&self) -> impl DoubleEndedIterator<Item = &[T]> {
        crate::iter::ChunkIter::new(self)
    }

    /// Returns an iterator over mutable slices corresponding to the stack's
    /// memory chunks.
    ///
    /// Taken together, the yielded slices cover all items in reverse insertion
    /// order, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([0, 1, 2]);
    ///
    /// let chunk = stk.chunks_mut().next().unwrap();
    /// assert_eq!(chunk, [2, 1, 0]);
    /// chunk[0] = 42;
    /// assert_eq!(chunk, [42, 1, 0]);
    /// ```
    #[inline]
    pub fn chunks_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut [T]> {
        crate::iter::ChunkIterMut::new(self)
    }
}

impl<T> core::default::Default for Stack<T> {
    /// Creates an empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T> core::clone::Clone for Stack<T>
where
    T: Clone,
{
    fn clone(&self) -> Self {
        // `iter` yields elements in LIFO order; walk it in reverse so that
        // the clone is pushed in the original insertion order and therefore
        // iterates identically to `self`.
        Stack::from_iter(self.iter().rev().cloned())
    }
}

impl<T, const N: usize> core::convert::From<&[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `slice`'s items.
    fn from(slice: &[T; N]) -> Self {
        let stk = Stack::with_capacity(N);
        for item in slice {
            stk.push_with(|| item.clone());
        }
        stk
    }
}

impl<T, const N: usize> core::convert::From<&mut [T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `slice`'s items.
    #[inline]
    fn from(slice: &mut [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(slice)
    }
}

impl<T, const N: usize> core::convert::From<[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `array`'s items.
    #[inline]
    fn from(array: [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(&array)
    }
}

impl<T> core::convert::From<&[T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `slice.len()`
    /// items and fills it by cloning `slice`'s items.
    fn from(slice: &[T]) -> Self {
        let stk = Stack::with_capacity(slice.len());
        for item in slice {
            stk.push_with(|| item.clone());
        }
        stk
    }
}

impl<T> core::convert::From<&mut [T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `slice.len()`
    /// items and fills it by cloning `slice`'s items.
    #[inline]
    fn from(slice: &mut [T]) -> Self {
        core::convert::From::<&[T]>::from(slice)
    }
}
781
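// On drop, `clear` drops every element and frees all but one chunk; the
// remaining chunk is released here.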
impl<T> core::ops::Drop for Stack<T> {
    #[inline]
    fn drop(&mut self) {
        self.clear();
        unsafe {
            let current_chunk = wrap::<T>(self.current_footer.get());
            if !current_chunk.is_dead() {
                debug_assert!(wrap::<T>(current_chunk.prev()).is_dead());
                debug_assert!(wrap::<T>(current_chunk.next()).is_dead());
                self.dealloc_chunk(self.current_footer.get());
            }
        }
    }
}

impl<T> core::iter::FromIterator<T> for Stack<T>
where
    T: Clone,
{
    #[inline]
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let iter = iter.into_iter();
        // try to preallocate a chunk big enough to contain the iterator's items
        let stk = Stack::with_capacity(iter.size_hint().0);
        for item in iter {
            stk.push_with(|| item);
        }
        stk
    }
}

impl<'a, T> core::iter::IntoIterator for &'a Stack<T> {
    type Item = &'a T;
    type IntoIter = crate::iter::Iter<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::Iter::new(self)
    }
}

impl<'a, T> core::iter::IntoIterator for &'a mut Stack<T> {
    type Item = &'a mut T;
    type IntoIter = crate::iter::IterMut<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::IterMut::new(self)
    }
}

impl<T> core::iter::IntoIterator for Stack<T> {
    type Item = T;
    type IntoIter = crate::iter::IntoIter<T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::IntoIter::new(self)
    }
}

impl<T, U> core::cmp::PartialEq<[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U]) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U> core::cmp::PartialEq<&[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U> core::cmp::PartialEq<&mut [U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U; N]) -> bool {
        self.len() == N && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&mut [U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &[T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &mut [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

impl<T> core::fmt::Debug for Stack<T>
where
    T: core::fmt::Debug,
{
    #[inline]
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}
937
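// Moving a `Stack<T>` to another thread moves ownership of its elements and
// chunk list, so `Send` is sound whenever `T: Send`. Note that the
// `Cell`-based bookkeeping keeps `Stack<T>` from being `Sync`.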
unsafe impl<T> Send for Stack<T> where T: Send {}

/// Maximum typical overhead per allocation imposed by allocators.
const ALLOC_OVERHEAD: usize = 16;
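
// Chunk sizes are chosen so that `chunk_size + ALLOC_OVERHEAD` is a power of
// two; the idea being that, together with typical allocator bookkeeping, the
// whole underlying allocation keeps a power-of-two footprint (see the asserts
// in `alloc_chunk` and `alloc_element_slow`).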

// Private API
impl<T> Stack<T> {
    /// Allocates memory for a new element and returns a pointer to it.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the method is called only for non-zero-sized
    /// types.
    #[inline(always)]
    unsafe fn alloc_element(&self) -> NonNull<T> {
        debug_assert!(!T::IS_ZST);

        let curr_chunk = unsafe { wrap::<T>(self.current_footer.get()) };
        if let Some(ptr) = curr_chunk.alloc_element() {
            ptr
        } else {
            unsafe { self.alloc_element_slow() }
        }
    }

    // Should be called only if the current chunk is full.
    unsafe fn alloc_element_slow(&self) -> NonNull<T> {
        debug_assert!(!T::IS_ZST);
        unsafe {
            let current_chunk = wrap::<T>(self.current_footer.get());
            let next_footer = current_chunk.next();
            let next_chunk = wrap::<T>(next_footer);
            let prev_chunk = wrap::<T>(current_chunk.prev());

            debug_assert!(current_chunk.is_full());

            if current_chunk.is_dead() {
                // this is the initial state, without any allocated chunks
                debug_assert!(prev_chunk.is_dead());
                debug_assert!(next_chunk.is_dead());

                let new_footer_ptr = self.alloc_chunk(Chunk::<T>::CHUNK_FIRST_SIZE);
                self.current_footer.set(new_footer_ptr);
                self.first_footer.set(new_footer_ptr);
            } else {
                // at least the current chunk is not dead
                if next_chunk.is_dead() {
                    // there is no cached chunk, so create a new one and make
                    // it the current chunk.
                    let current_chunk_size = current_chunk.size();
                    let new_chunk_size = if current_chunk_size == Chunk::<T>::CHUNK_MAX_SIZE {
                        Chunk::<T>::CHUNK_MAX_SIZE
                    } else {
                        debug_assert!(current_chunk_size < Chunk::<T>::CHUNK_MAX_SIZE);
                        ((current_chunk_size + ALLOC_OVERHEAD) << 1) - ALLOC_OVERHEAD
                    };
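
                    // e.g. with `ALLOC_OVERHEAD == 16`: 48 -> 112 -> 240 -> ...
                    // (each size stays a power of two minus the overhead)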

                    debug_assert!((new_chunk_size + ALLOC_OVERHEAD).is_power_of_two());

                    let new_footer = self.alloc_chunk(new_chunk_size);
                    let new_chunk = wrap::<T>(new_footer);

                    current_chunk.set_next(new_footer);
                    new_chunk.set_prev(self.current_footer.get());

                    self.current_footer.set(new_footer);
                } else {
                    // there is a next empty chunk, so make it the current chunk
                    debug_assert!(next_chunk.is_empty());
                    self.current_footer.set(next_footer);
                }
            }

            let curr_chunk = wrap::<T>(self.current_footer.get());
            curr_chunk.alloc_element().unwrap_unchecked()
        }
    }

    /// Creates a new chunk with the given size. If allocation fails, it
    /// retries with half the size, repeating until either an allocation
    /// succeeds or the size falls below the minimum chunk size, in which case
    /// it calls `handle_alloc_error`.
    ///
    /// The properties `data`, `ptr`, and `layout` are initialized to the
    /// values of the newly allocated chunk.
    ///
    /// The properties `prev` and `next` point to the `DEAD_CHUNK`, so you
    /// should reinitialize them as needed if other chunks exist in the list.
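    ///
    /// For example, a failed 1008-byte request is retried at 496 bytes, then
    /// 240 bytes, and so on (each attempt keeps `size + ALLOC_OVERHEAD` a
    /// power of two).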
    unsafe fn alloc_chunk(&self, chunk_size: usize) -> NonNull<ChunkFooter> {
        debug_assert!(chunk_size <= Chunk::<T>::CHUNK_MAX_SIZE);

        let mut chunk_size = chunk_size;
        let chunk_align = Chunk::<T>::CHUNK_ALIGN;

        let (chunk_ptr, chunk_layout) = loop {
            // checks for `Layout::from_size_align_unchecked`
            debug_assert!(chunk_align != 0);
            debug_assert!(chunk_align.is_power_of_two());
            debug_assert!((chunk_size + ALLOC_OVERHEAD).is_power_of_two());
            debug_assert!(chunk_size <= isize::MAX as usize);

            let chunk_layout =
                unsafe { Layout::from_size_align_unchecked(chunk_size, chunk_align) };

            let chunk_ptr = unsafe { alloc(chunk_layout) };
            if !chunk_ptr.is_null() {
                assert!(util::ptr_is_aligned_to(chunk_ptr, Chunk::<T>::CHUNK_ALIGN));
                break (chunk_ptr, chunk_layout);
            }

            // if we couldn't get a new chunk, halve the chunk size and retry
            chunk_size = ((chunk_size + ALLOC_OVERHEAD) >> 1) - ALLOC_OVERHEAD;
            if chunk_size < Chunk::<T>::CHUNK_MIN_SIZE {
                handle_alloc_error(chunk_layout);
            }
        };

        let chunk_ptr = unsafe { NonNull::new_unchecked(chunk_ptr) };
        let (footer_ptr, chunk_capacity) = unsafe { Chunk::<T>::init(chunk_ptr, chunk_layout) };

        self.capacity.update(|cap| cap + chunk_capacity);

        footer_ptr
    }

    #[inline(always)]
    unsafe fn dealloc_element(&mut self) -> Option<NonNull<T>> {
        debug_assert!(!T::IS_ZST);

        unsafe {
            let curr_chunk = wrap::<T>(self.current_footer.get());
            if let Some(ptr) = curr_chunk.dealloc_element() {
                Some(ptr)
            } else {
                self.dealloc_element_slow()
            }
        }
    }

    unsafe fn dealloc_element_slow(&mut self) -> Option<NonNull<T>> {
        unsafe {
            let current_footer = self.current_footer.get();
            let current_chunk = wrap::<T>(current_footer);

            let next_footer = current_chunk.next();
            let next_chunk = wrap::<T>(next_footer);

            let prev_footer = current_chunk.prev();
            let prev_chunk = wrap::<T>(prev_footer);

            if current_chunk.is_dead() {
                debug_assert!(next_chunk.is_dead());
                debug_assert!(prev_chunk.is_dead());
                return None;
            }

            debug_assert!(current_chunk.is_empty());
            debug_assert!(next_chunk.is_empty());

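            // Both the current chunk and the cached one (if any) are empty:
            // keep the larger of the two as the cache and free the other.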
            if !next_chunk.is_dead() {
                if current_chunk.size() < next_chunk.size() {
                    debug_assert!(wrap::<T>(next_chunk.next()).is_dead());

                    next_chunk.set_prev(prev_footer);
                    self.current_footer.set(next_footer);

                    self.dealloc_chunk(current_footer);
                } else {
                    self.dealloc_chunk(next_footer);
                }
                let current_chunk = wrap::<T>(self.current_footer.get());
                current_chunk.set_next(DEAD_CHUNK.footer());
            }

            if prev_chunk.is_dead() {
                self.first_footer.set(self.current_footer.get());
                None
            } else {
                // the previous chunk must be full
                debug_assert!(prev_chunk.is_full());

                prev_chunk.set_next(self.current_footer.get());
                self.current_footer.set(prev_footer);

                let curr_chunk = wrap::<T>(self.current_footer.get());
                curr_chunk.dealloc_element()
            }
        }
    }

    unsafe fn dealloc_chunk(&mut self, footer: NonNull<ChunkFooter>) {
        unsafe {
            let chunk = wrap::<T>(footer);
            let dropped = chunk.drop();
            debug_assert!(self.len() >= dropped);
            self.length.update(|length| length - dropped);

            let chunk_capacity = chunk.capacity();
            debug_assert!(chunk_capacity <= self.capacity());
            self.capacity.update(|cap| cap - chunk_capacity);
            debug_assert!(self.len() <= self.capacity());
            dealloc(chunk.start().cast().as_ptr(), chunk.layout());
        }
    }
}

/// Unit tests.
#[cfg(test)]
mod utest;