bump_stack/lib.rs
#![doc = include_str!("../README.md")]
#![cfg_attr(not(test), no_std)]

mod chunk;
mod iter;
mod util;

extern crate alloc;

use crate::chunk::*;
use crate::util::TypeProps;
use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use alloc::vec::Vec;
use core::cell::Cell;
use core::iter::DoubleEndedIterator;
use core::marker::PhantomData;
use core::ptr::{self, NonNull};
pub struct Stack<T> {
    /// The current chunk we are bump allocating within.
    ///
    /// Its `next` link can point to the dead chunk or to the cached chunk.
    ///
    /// Its `prev` link can point to the dead chunk or to the previously
    /// allocated chunk.
    current_footer: Cell<NonNull<ChunkFooter>>,

    /// The first chunk we allocated, or the dead chunk if we haven't allocated
    /// anything yet.
    first_footer: Cell<NonNull<ChunkFooter>>,

    /// The capacity of the stack in elements.
    capacity: Cell<usize>,

    /// The number of elements currently in the stack.
    length: Cell<usize>,

    phantom: PhantomData<T>,
}

// Public API
impl<T> Stack<T> {
    /// Constructs a new, empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
    ///
    /// # Examples
    ///
    /// ```
    /// use bump_stack::Stack;
    ///
    /// let mut stack: Stack<i32> = Stack::new();
    /// ```
    pub const fn new() -> Self {
        Self {
            current_footer: Cell::new(DEAD_CHUNK.footer()),
            first_footer: Cell::new(DEAD_CHUNK.footer()),
            capacity: Cell::new(0),
            length: Cell::new(0),
            phantom: PhantomData,
        }
    }

    /// Constructs a new, empty `Stack<T>` with at least the specified capacity.
    ///
    /// The stack will be able to hold at least `capacity` elements without new
    /// allocations. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the stack will not allocate.
    ///
    /// If it is important to know the exact allocated capacity of a `Stack`,
    /// always use the [`capacity`] method after construction.
    ///
    /// For `Stack<T>` where `T` is a zero-sized type, there will be no
    /// allocation and the capacity will always be `usize::MAX`.
    ///
    /// [`capacity`]: Stack::capacity
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::with_capacity(10);
    ///
    /// // The stack contains no items, even though it has capacity for more
    /// assert_eq!(stk.len(), 0);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // These are all done without additional allocations...
    /// for i in 0..10 {
    ///     stk.push(i);
    /// }
    /// assert_eq!(stk.len(), 10);
    /// assert!(stk.capacity() >= 10);
    ///
    /// // ...but this may make the stack allocate a new chunk
    /// stk.push(11);
    /// assert_eq!(stk.len(), 11);
    /// assert!(stk.capacity() >= 11);
    ///
    /// // A stack of a zero-sized type will always over-allocate, since no
    /// // allocation is necessary
    /// let stk_units = Stack::<()>::with_capacity(10);
    /// assert_eq!(stk_units.capacity(), usize::MAX);
    /// ```
    pub fn with_capacity(capacity: usize) -> Self {
        let stack = Self::new();
        if const { !T::IS_ZST } && capacity != 0 {
            let chunk_size = Chunk::<T>::chunk_size_for(capacity);
            let footer = unsafe { stack.alloc_chunk(chunk_size) };
            stack.current_footer.set(footer);
            stack.first_footer.set(footer);
        }
        stack
    }

    /// Returns the total number of elements the stack can hold without
    /// additional allocations.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::with_capacity(10);
    /// stk.push(42);
    /// assert!(stk.capacity() >= 10);
    /// ```
    ///
    /// A stack with zero-sized elements will always have a capacity of
    /// `usize::MAX`:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// #[derive(Clone)]
    /// struct ZeroSized;
    ///
    /// assert_eq!(std::mem::size_of::<ZeroSized>(), 0);
    /// let stk = Stack::<ZeroSized>::with_capacity(0);
    /// assert_eq!(stk.capacity(), usize::MAX);
    /// ```
    #[inline]
    pub const fn capacity(&self) -> usize {
        if const { T::IS_ZST } {
            usize::MAX
        } else {
            self.capacity.get()
        }
    }

    /// Returns the number of elements in the stack.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.len(), 3);
    /// ```
    #[inline]
    pub const fn len(&self) -> usize {
        self.length.get()
    }

    /// Returns `true` if the stack contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert!(s.is_empty());
    ///
    /// s.push(1);
    /// assert!(!s.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a reference to the first element of the stack, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first());
    ///
    /// s.push(42);
    /// assert_eq!(Some(&42), s.first());
    /// ```
    #[inline]
    #[must_use]
    pub fn first(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe {
                let first_chunk = wrap::<T>(self.first_footer.get());
                Some(first_chunk.first_unchecked().as_ref())
            }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the first element of the stack, or
    /// `None` if it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut s = Stack::new();
    /// assert_eq!(None, s.first_mut());
    ///
    /// s.push(1);
    /// if let Some(first) = s.first_mut() {
    ///     *first = 5;
    /// }
    /// assert_eq!(s.first(), Some(&5));
    /// ```
    #[inline]
    #[must_use]
    pub fn first_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe {
                let first_chunk = wrap::<T>(self.first_footer.get());
                Some(first_chunk.first_unchecked().as_mut())
            }
        } else {
            None
        }
    }

    /// Returns a reference to the last element of the stack, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last());
    ///
    /// stk.push(1);
    /// assert_eq!(Some(&1), stk.last());
    /// ```
    #[inline]
    #[must_use]
    pub fn last(&self) -> Option<&T> {
        if !self.is_empty() {
            unsafe {
                let mut chunk = wrap::<T>(self.current_footer.get());
                if chunk.is_empty() {
                    chunk = wrap::<T>(chunk.prev());
                }
                Some(chunk.last_unchecked().as_ref())
            }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the last item in the stack, or `None` if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::new();
    /// assert_eq!(None, stk.last_mut());
    ///
    /// stk.push(5);
    /// assert_eq!(Some(&mut 5), stk.last_mut());
    ///
    /// if let Some(last) = stk.last_mut() {
    ///     *last = 10;
    /// }
    /// assert_eq!(Some(&mut 10), stk.last_mut());
    /// ```
    #[inline]
    #[must_use]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            unsafe {
                let mut chunk = wrap::<T>(self.current_footer.get());
                if chunk.is_empty() {
                    chunk = wrap::<T>(chunk.prev());
                }
                Some(chunk.last_unchecked().as_mut())
            }
        } else {
            None
        }
    }

    /// Appends an element to the stack, returning a reference to it.
    ///
    /// # Panics
    ///
    /// Panics if the global allocator cannot allocate a new chunk of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push(3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation is retried, down to the minimum chunk size. If allocation
    /// still fails at the minimum size, it panics.
    #[inline]
    pub fn push(&self, value: T) -> &T {
        self.push_with(|| value)
    }

    /// Appends an element to the stack, returning a mutable reference to it.
    ///
    /// # Panics
    ///
    /// Panics if the global allocator cannot allocate a new chunk of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2]);
    /// let last = stk.push_mut(3);
    /// assert_eq!(*last, 3);
    /// assert_eq!(stk, [3, 2, 1]);
    ///
    /// let last = stk.push_mut(3);
    /// *last += 1;
    /// assert_eq!(stk, [4, 3, 2, 1]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation is retried, down to the minimum chunk size. If allocation
    /// still fails at the minimum size, it panics.
    #[inline]
    #[must_use = "if you don't need the returned value, consider using `push` instead"]
    pub fn push_mut(&mut self, value: T) -> &mut T {
        self.push_mut_with(|| value)
    }

    /// Pre-allocates space for an element in this stack, initializes it with
    /// the closure, and returns a reference to the new element.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::new();
    /// let new_element = stk.push_with(|| 3);
    ///
    /// assert_eq!(new_element, &3);
    /// assert_eq!(stk, [3]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation is retried, down to the minimum chunk size. If allocation
    /// still fails at the minimum size, it panics.
    #[inline(always)]
    pub fn push_with<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        if const { T::IS_ZST } {
            // A ZST needs no storage, but the value must still be created and
            // logically handed over to the stack: `pop` materializes it back
            // with `ptr::read`, so dropping it here would be a double drop.
            core::mem::forget(f());
            self.length.update(|len| len + 1);
            unsafe { util::zst_ptr::<T>().as_ref() }
        } else {
            unsafe {
                let p = self.alloc_element();
                util::write_with(p.as_ptr(), f);
                self.length.update(|len| len + 1);
                p.as_ref()
            }
        }
    }

    /// Pre-allocates space for an element in this stack, initializes it with
    /// the closure, and returns a mutable reference to the new element.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2]);
    /// let last = stk.push_mut_with(|| 3);
    /// assert_eq!(*last, 3);
    /// assert_eq!(stk, [3, 2, 1]);
    ///
    /// let last = stk.push_mut_with(|| 3);
    /// *last += 1;
    /// assert_eq!(stk, [4, 3, 2, 1]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Takes amortized *O*(1) time. If the stack's current chunk of memory is
    /// exhausted, it uses the cached chunk if one exists; otherwise it
    /// allocates a new chunk.
    ///
    /// If allocating the new chunk fails, the requested size is halved and the
    /// allocation is retried, down to the minimum chunk size. If allocation
    /// still fails at the minimum size, it panics.
    #[inline(always)]
    pub fn push_mut_with<F>(&mut self, f: F) -> &mut T
    where
        F: FnOnce() -> T,
    {
        if const { T::IS_ZST } {
            // See `push_with` for why the value is created and forgotten.
            core::mem::forget(f());
            self.length.update(|len| len + 1);
            unsafe { util::zst_ptr::<T>().as_mut() }
        } else {
            unsafe {
                let mut p = self.alloc_element();
                util::write_with(p.as_ptr(), f);
                self.length.update(|len| len + 1);
                p.as_mut()
            }
        }
    }

    /// Removes the last element from the stack and returns it, or [`None`] if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.pop(), Some(3));
    /// assert_eq!(stk, [2, 1]);
    /// ```
    #[inline]
    pub fn pop(&mut self) -> Option<T> {
        if const { T::IS_ZST } {
            if self.length.get() > 0 {
                self.length.update(|len| len - 1);
                unsafe { Some(ptr::read(util::zst_ptr::<T>().as_ptr())) }
            } else {
                None
            }
        } else {
            // `T` is not a ZST
            if let Some(element_ptr) = unsafe { self.dealloc_element() } {
                self.length.update(|len| len - 1);
                Some(unsafe { ptr::read(element_ptr.as_ptr()) })
            } else {
                None
            }
        }
    }

    /// Removes and returns the last element from the stack if the predicate
    /// returns `true`, or [`None`] if the predicate returns `false` or the
    /// stack is empty (in which case the predicate is not called).
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3, 4]);
    /// let pred = |x: &mut i32| *x % 2 == 0;
    ///
    /// assert_eq!(stk.pop_if(pred), Some(4));
    /// assert_eq!(stk, [3, 2, 1]);
    /// assert_eq!(stk.pop_if(pred), None);
    /// ```
    pub fn pop_if(&mut self, predicate: impl FnOnce(&mut T) -> bool) -> Option<T> {
        let last = self.last_mut()?;
        if predicate(last) { self.pop() } else { None }
    }

    /// Clears the stack, dropping all elements.
    ///
    /// This method keeps the largest chunk of memory around for future
    /// allocations.
    ///
    /// The order in which elements are dropped is unspecified.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 3]);
    ///
    /// stk.clear();
    ///
    /// assert!(stk.is_empty());
    /// assert!(stk.capacity() > 0);
    /// ```
    pub fn clear(&mut self) {
        if const { T::IS_ZST } {
            // ZST elements own no storage and no chunks: materialize and drop
            // each one (see `push_with` and `pop`).
            while self.length.get() > 0 {
                self.length.update(|len| len - 1);
                unsafe { drop(ptr::read(util::zst_ptr::<T>().as_ptr())) };
            }
            return;
        }
        let mut first_footer = self.first_footer.get();
        let mut first_chunk = unsafe { wrap::<T>(first_footer) };
        if first_chunk.is_dead() {
            return;
        }
        loop {
            let next_footer = first_chunk.next();
            let next_chunk = unsafe { wrap(next_footer) };

            if next_chunk.is_dead() {
                break;
            }

            unsafe { self.dealloc_chunk(first_footer) };

            first_footer = next_footer;
            first_chunk = next_chunk;
        }
        let dropped = unsafe { wrap::<T>(first_footer).drop() };
        debug_assert!(self.len() >= dropped);
        self.length.update(|length| length - dropped);

        first_chunk.set_prev(DEAD_CHUNK.footer());
        self.first_footer.set(first_footer);
        self.current_footer.set(first_footer);
    }

    /// Returns `true` if the stack contains an element with the given value.
    ///
    /// This operation is *O*(*n*).
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([3, 8, 12]);
    /// assert!(stk.contains(&8));
    /// assert!(!stk.contains(&20));
    /// ```
    #[must_use]
    pub fn contains(&self, value: &T) -> bool
    where
        T: PartialEq,
    {
        self.iter().any(|elem| elem == value)
    }

    /// Copies `self` into a `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let s = Stack::from([10, 40, 30]);
    /// let v = s.to_vec();
    /// assert_eq!(v, &[30, 40, 10]);
    /// ```
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        let mut vec = Vec::with_capacity(self.len());
        for chunk in self.chunks() {
            vec.extend_from_slice(chunk);
        }
        vec
    }

    /// Returns an iterator over the stack.
    ///
    /// The iterator yields references to all items in the reverse order of
    /// their insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    /// let mut iterator = stk.iter();
    ///
    /// assert_eq!(iterator.next(), Some(&4));
    /// assert_eq!(iterator.next(), Some(&2));
    /// assert_eq!(iterator.next(), Some(&1));
    /// assert_eq!(iterator.next(), None);
    /// ```
    ///
    /// Since `Stack` allows pushing new elements through an immutable
    /// reference, you can push during iteration. However, the iterator only
    /// visits elements that existed when it was created, which guarantees
    /// that the loop below terminates.
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 4]);
    ///
    /// for elem in stk.iter() {
    ///     stk.push(*elem);
    /// }
    /// assert_eq!(stk.len(), 6);
    /// assert_eq!(stk, [1, 2, 4, 4, 2, 1]);
    /// ```
    #[inline]
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &T> {
        crate::iter::Iter::new(self)
    }

    /// Returns a mutable iterator over the stack.
    ///
    /// The iterator yields references to all items in the reverse order of
    /// their insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([1, 2, 4]);
    ///
    /// for elem in stk.iter_mut() {
    ///     *elem *= 2;
    /// }
    ///
    /// assert_eq!(&stk, &[8, 4, 2]);
    /// ```
    #[inline]
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut T> {
        crate::iter::IterMut::new(self)
    }

    /// Returns an iterator over slices corresponding to the stack's memory
    /// chunks.
    ///
    /// The iterator yields the chunks' slices with their elements in the
    /// reverse order of insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([0, 1, 2]);
    /// assert_eq!(stk.chunks().collect::<Vec<_>>(), [&[2, 1, 0]]);
    ///
    /// // fill up the first chunk
    /// for i in 3..stk.capacity() {
    ///     stk.push(i);
    /// }
    /// assert_eq!(stk.chunks().count(), 1);
    ///
    /// // create a new chunk
    /// stk.push(42);
    /// assert_eq!(stk.chunks().count(), 2);
    /// ```
    #[inline]
    pub fn chunks(&self) -> impl DoubleEndedIterator<Item = &[T]> {
        crate::iter::ChunkIter::new(self)
    }

    /// Returns an iterator over mutable slices corresponding to the stack's
    /// memory chunks.
    ///
    /// The iterator yields the chunks' slices with their elements in the
    /// reverse order of insertion, matching the stack's LIFO behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let mut stk = Stack::from([0, 1, 2]);
    ///
    /// let chunk = stk.chunks_mut().next().unwrap();
    /// assert_eq!(chunk, [2, 1, 0]);
    /// chunk[0] = 42;
    /// assert_eq!(chunk, [42, 1, 0]);
    /// ```
    #[inline]
    pub fn chunks_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut [T]> {
        crate::iter::ChunkIterMut::new(self)
    }
}

impl<T> core::default::Default for Stack<T> {
    /// Creates an empty `Stack<T>`.
    ///
    /// The stack will not allocate until elements are pushed onto it.
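    ///
    /// # Examples
    ///
    /// A minimal check that the default stack starts out empty and unallocated:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk: Stack<i32> = Stack::default();
    /// assert!(stk.is_empty());
    /// assert_eq!(stk.capacity(), 0);
    /// ```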
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T, const N: usize> core::convert::From<&[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `slice`'s items.
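    ///
    /// # Examples
    ///
    /// A short sketch of the resulting order: items are pushed front to back,
    /// so iteration (and comparison) sees them reversed:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from(&[1, 2, 3]);
    /// assert_eq!(stk, [3, 2, 1]);
    /// ```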
    fn from(slice: &[T; N]) -> Self {
        let stk = Stack::with_capacity(N);
        for item in slice {
            stk.push(item.clone());
        }
        stk
    }
}

impl<T, const N: usize> core::convert::From<&mut [T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `slice`'s items.
    #[inline]
    fn from(slice: &mut [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(slice)
    }
}

impl<T, const N: usize> core::convert::From<[T; N]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `N` items and
    /// fills it by cloning `array`'s items.
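    ///
    /// # Examples
    ///
    /// This is the form used throughout these docs; a small sanity check:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    /// assert_eq!(stk.len(), 3);
    /// assert_eq!(stk.last(), Some(&3));
    /// ```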
    #[inline]
    fn from(array: [T; N]) -> Self {
        core::convert::From::<&[T; N]>::from(&array)
    }
}

impl<T> core::convert::From<&[T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `slice.len()`
    /// items and fills it by cloning `slice`'s items.
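    ///
    /// # Examples
    ///
    /// A brief sketch with a borrowed slice; as with the array conversions,
    /// iteration order is the reverse of the slice order:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let v = [10, 20, 30];
    /// let stk = Stack::from(&v[..]);
    /// assert_eq!(stk, [30, 20, 10]);
    /// ```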
    fn from(slice: &[T]) -> Self {
        let stk = Stack::with_capacity(slice.len());
        for item in slice {
            stk.push(item.clone());
        }
        stk
    }
}

impl<T> core::convert::From<&mut [T]> for Stack<T>
where
    T: Clone,
{
    /// Creates a `Stack<T>` with a chunk big enough to contain `slice.len()`
    /// items and fills it by cloning `slice`'s items.
    #[inline]
    fn from(slice: &mut [T]) -> Self {
        core::convert::From::<&[T]>::from(slice)
    }
}

impl<T> core::ops::Drop for Stack<T> {
    #[inline]
    fn drop(&mut self) {
        self.clear();
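        // `clear` leaves at most one live chunk (the current one), with both
        // of its links pointing at the dead chunk, as the asserts below check.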
        unsafe {
            let current_chunk = wrap::<T>(self.current_footer.get());
            if !current_chunk.is_dead() {
                debug_assert!(wrap::<T>(current_chunk.prev()).is_dead());
                debug_assert!(wrap::<T>(current_chunk.next()).is_dead());
                self.dealloc_chunk(self.current_footer.get());
            }
        }
    }
}

impl<T> core::iter::FromIterator<T> for Stack<T>
where
    T: Clone,
{
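    /// Creates a `Stack<T>` from an iterator, preallocating a chunk based on
    /// the iterator's size hint.
    ///
    /// # Examples
    ///
    /// A small sketch using `collect`; as elsewhere, iteration order is the
    /// reverse of insertion order:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk: Stack<i32> = (1..=3).collect();
    /// assert_eq!(stk, [3, 2, 1]);
    /// ```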
    #[inline]
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let iter = iter.into_iter();
        // try to preallocate a chunk big enough to contain iter's items
        let stk = Stack::with_capacity(iter.size_hint().0);
        for item in iter {
            stk.push(item);
        }
        stk
    }
}

impl<'a, T> core::iter::IntoIterator for &'a Stack<T> {
    type Item = &'a T;
    type IntoIter = crate::iter::Iter<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::Iter::new(self)
    }
}

impl<'a, T> core::iter::IntoIterator for &'a mut Stack<T> {
    type Item = &'a mut T;
    type IntoIter = crate::iter::IterMut<'a, T>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::IterMut::new(self)
    }
}

impl<T> core::iter::IntoIterator for Stack<T> {
    type Item = T;
    type IntoIter = crate::iter::IntoIter<T>;

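    /// Consumes the stack and returns an iterator over its elements by value.
    ///
    /// # Examples
    ///
    /// A minimal sketch; this assumes `IntoIter` yields elements in the same
    /// LIFO order as [`Stack::iter`]:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    /// let collected: Vec<i32> = stk.into_iter().collect();
    /// assert_eq!(collected, [3, 2, 1]);
    /// ```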
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        crate::iter::IntoIter::new(self)
    }
}

impl<T, U> core::cmp::PartialEq<[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U]) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U> core::cmp::PartialEq<&[U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U> core::cmp::PartialEq<&mut [U]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U]) -> bool {
        core::cmp::PartialEq::<[U]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &[U; N]) -> bool {
        self.len() == N && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&[U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&[U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<&mut [U; N]> for Stack<T>
where
    T: core::cmp::PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &&mut [U; N]) -> bool {
        core::cmp::PartialEq::<[U; N]>::eq(self, other)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        self.len() == other.len() && self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &[T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

impl<T, U, const N: usize> core::cmp::PartialEq<Stack<U>> for &mut [T; N]
where
    T: core::cmp::PartialEq<U>,
{
    fn eq(&self, other: &Stack<U>) -> bool {
        *self == other
    }
}

impl<T> core::fmt::Debug for Stack<T>
where
    T: core::fmt::Debug,
{
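    /// Formats the stack as a list in iteration (LIFO) order.
    ///
    /// A small illustrative check:
    ///
    /// ```
    /// # use bump_stack::Stack;
    /// let stk = Stack::from([1, 2, 3]);
    /// assert_eq!(format!("{stk:?}"), "[3, 2, 1]");
    /// ```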
    #[inline]
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

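// SAFETY: `Stack` owns all of its chunks, so moving it to another thread is
// sound when `T: Send`. Note that `Stack` is not `Sync`: it relies on
// `Cell`-based interior mutability (e.g. `push` takes `&self`).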
unsafe impl<T> Send for Stack<T> where T: Send {}

/// Maximum typical overhead per allocation imposed by allocators.
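///
/// Chunk sizes are chosen so that `size + ALLOC_OVERHEAD` is a power of two
/// (see the `debug_assert!`s below), presumably so the full request,
/// including the allocator's own bookkeeping, lands on a power-of-two bucket.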
const ALLOC_OVERHEAD: usize = 16;

// Private API
impl<T> Stack<T> {
    /// Allocates memory for a new element and returns a pointer to it.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the method is called only for
    /// non-zero-sized types.
    #[inline(always)]
    unsafe fn alloc_element(&self) -> NonNull<T> {
        debug_assert!(!T::IS_ZST);

        let curr_chunk = unsafe { wrap::<T>(self.current_footer.get()) };
        if let Some(ptr) = curr_chunk.alloc_element() {
            ptr
        } else {
            unsafe { self.alloc_element_slow() }
        }
    }

    // Must only be called when the current chunk is full (or dead).
    unsafe fn alloc_element_slow(&self) -> NonNull<T> {
        debug_assert!(!T::IS_ZST);
        unsafe {
            let current_chunk = wrap::<T>(self.current_footer.get());
            let next_footer = current_chunk.next();
            let next_chunk = wrap::<T>(next_footer);
            let prev_chunk = wrap::<T>(current_chunk.prev());

            debug_assert!(current_chunk.is_full());

            if current_chunk.is_dead() {
                // this is the initial state: no chunk has been allocated yet
                debug_assert!(prev_chunk.is_dead());
                debug_assert!(next_chunk.is_dead());

                let new_footer_ptr = self.alloc_chunk(Chunk::<T>::CHUNK_FIRST_SIZE);
                self.current_footer.set(new_footer_ptr);
                self.first_footer.set(new_footer_ptr);
            } else {
                // at least the current chunk is not dead
                if next_chunk.is_dead() {
                    // there is no cached chunk, so create a new one and make
                    // it the current chunk
                    let current_chunk_size = current_chunk.size();
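                    // Grow geometrically: double the usable size while keeping
                    // `size + ALLOC_OVERHEAD` a power of two, capping at
                    // `CHUNK_MAX_SIZE`.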
                    let new_chunk_size = if current_chunk_size == Chunk::<T>::CHUNK_MAX_SIZE {
                        Chunk::<T>::CHUNK_MAX_SIZE
                    } else {
                        debug_assert!(current_chunk_size < Chunk::<T>::CHUNK_MAX_SIZE);
                        ((current_chunk_size + ALLOC_OVERHEAD) << 1) - ALLOC_OVERHEAD
                    };

                    debug_assert!((new_chunk_size + ALLOC_OVERHEAD).is_power_of_two());

                    let new_footer = self.alloc_chunk(new_chunk_size);
                    let new_chunk = wrap::<T>(new_footer);

                    current_chunk.set_next(new_footer);
                    new_chunk.set_prev(self.current_footer.get());

                    self.current_footer.set(new_footer);
                } else {
                    // there is a next empty chunk, so make it the current chunk
                    debug_assert!(next_chunk.is_empty());
                    self.current_footer.set(next_footer);
                }
            }

            let curr_chunk = wrap::<T>(self.current_footer.get());
            curr_chunk.alloc_element().unwrap_unchecked()
        }
    }

    /// Creates a new chunk of the given size. If allocation fails, it halves
    /// the size and retries; once the size would drop below the minimum chunk
    /// size, it panics via `handle_alloc_error`.
    ///
    /// The `data`, `ptr`, and `layout` fields are initialized to the values of
    /// the newly allocated chunk.
    ///
    /// The `prev` and `next` fields point to the `DEAD_CHUNK`, so you should
    /// reinitialize them as needed if other chunks exist in the list.
    unsafe fn alloc_chunk(&self, chunk_size: usize) -> NonNull<ChunkFooter> {
        debug_assert!(chunk_size <= Chunk::<T>::CHUNK_MAX_SIZE);

        let mut chunk_size = chunk_size;
        let chunk_align = Chunk::<T>::CHUNK_ALIGN;

        let (chunk_ptr, chunk_layout) = loop {
            // checks for `Layout::from_size_align_unchecked`
            debug_assert!(chunk_align != 0);
            debug_assert!(chunk_align.is_power_of_two());
            debug_assert!((chunk_size + ALLOC_OVERHEAD).is_power_of_two());
            debug_assert!(chunk_size <= isize::MAX as usize);

            let chunk_layout =
                unsafe { Layout::from_size_align_unchecked(chunk_size, chunk_align) };

            let chunk_ptr = unsafe { alloc(chunk_layout) };
            if !chunk_ptr.is_null() {
                assert!(util::ptr_is_aligned_to(chunk_ptr, Chunk::<T>::CHUNK_ALIGN));
                break (chunk_ptr, chunk_layout);
            }

            // allocation failed: halve the chunk size and retry
            chunk_size = ((chunk_size + ALLOC_OVERHEAD) >> 1) - ALLOC_OVERHEAD;
            if chunk_size < Chunk::<T>::CHUNK_MIN_SIZE {
                handle_alloc_error(chunk_layout);
            }
        };

        let chunk_ptr = unsafe { NonNull::new_unchecked(chunk_ptr) };
        let (footer_ptr, chunk_capacity) = unsafe { Chunk::<T>::init(chunk_ptr, chunk_layout) };

        self.capacity.update(|cap| cap + chunk_capacity);

        footer_ptr
    }

    #[inline(always)]
    unsafe fn dealloc_element(&mut self) -> Option<NonNull<T>> {
        debug_assert!(!T::IS_ZST);

        unsafe {
            let curr_chunk = wrap::<T>(self.current_footer.get());
            if let Some(ptr) = curr_chunk.dealloc_element() {
                Some(ptr)
            } else {
                self.dealloc_element_slow()
            }
        }
    }

    unsafe fn dealloc_element_slow(&mut self) -> Option<NonNull<T>> {
        unsafe {
            let current_footer = self.current_footer.get();
            let current_chunk = wrap::<T>(current_footer);

            let next_footer = current_chunk.next();
            let next_chunk = wrap::<T>(next_footer);

            let prev_footer = current_chunk.prev();
            let prev_chunk = wrap::<T>(prev_footer);

            if current_chunk.is_dead() {
                debug_assert!(next_chunk.is_dead());
                debug_assert!(prev_chunk.is_dead());
                return None;
            }

            debug_assert!(current_chunk.is_empty());
            debug_assert!(next_chunk.is_empty());

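            // Cache at most one empty chunk: keep the larger of the current
            // chunk and the cached (next) one, and free the smaller.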
            if !next_chunk.is_dead() {
                if current_chunk.size() < next_chunk.size() {
                    debug_assert!(wrap::<T>(next_chunk.next()).is_dead());

                    next_chunk.set_prev(prev_footer);
                    self.current_footer.set(next_footer);

                    self.dealloc_chunk(current_footer);
                } else {
                    self.dealloc_chunk(next_footer);
                }
                let current_chunk = wrap::<T>(self.current_footer.get());
                current_chunk.set_next(DEAD_CHUNK.footer());
            }

            if prev_chunk.is_dead() {
                self.first_footer.set(self.current_footer.get());
                None
            } else {
                // the previous chunk must be full
                debug_assert!(prev_chunk.is_full());

                prev_chunk.set_next(self.current_footer.get());
                self.current_footer.set(prev_footer);

                let curr_chunk = wrap::<T>(self.current_footer.get());
                curr_chunk.dealloc_element()
            }
        }
    }

    unsafe fn dealloc_chunk(&mut self, footer: NonNull<ChunkFooter>) {
        unsafe {
            let chunk = wrap::<T>(footer);
            let dropped = chunk.drop();
            debug_assert!(self.len() >= dropped);
            self.length.update(|length| length - dropped);

            let chunk_capacity = chunk.capacity();
            debug_assert!(chunk_capacity <= self.capacity());
            self.capacity.update(|cap| cap - chunk_capacity);
            debug_assert!(self.len() <= self.capacity());
            dealloc(chunk.start().cast().as_ptr(), chunk.layout());
        }
    }
}

/// Unit tests.
#[cfg(test)]
mod utest;