// small_collections/vecs/deque.rs
1//! Stack-allocated double-ended queue that spills to the heap when full.
2//!
3//! # Why not `heapless::Deque`?
4//! `heapless::Deque` exists but was intentionally **not** used as the stack-side storage
5//! for [`SmallDeque`].  The key reasons are:
6//!
7//! 1. **Forced power-of-two capacity**: `heapless::Deque<T, N>` internally requires
8//!    `N` to be a power of two to support its bitmask-based wrap-around arithmetic.
9//!    `SmallDeque` imposes the same constraint, yet needs to manage the ring-buffer
10//!    indices (head/len) *outside* the stored data so they can be preserved independent
11//!    of the storage backend.  Embedding them inside a `heapless::Deque` value stored
12//!    in a union field would require reading from the (possibly heap-active) union branch,
13//!    which is unsound without unsafe indirection that yields no benefit over the raw
14//!    `[MaybeUninit<T>; N]` approach used here.
15//!
16//! 2. **No first-class drain/into-iter that preserves ring order**: when spilling to the
17//!    heap we need to iterate in logical order (head … tail) to fill `VecDeque`.  The
18//!    raw-array approach gives us direct index arithmetic (`wrap_add`) to do this;
19//!    `heapless::Deque` would require an additional copy through its iterator which
20//!    offers the same cost but less control.
21//!
22//! 3. **Union soundness**: the `DequeData` union holds either a raw array or a
23//!    heap-allocated `VecDeque`.  `heapless::Deque` contains internal state (`read`,
24//!    `write` cursors) that would be overwritten if treated as an uninitialised `VecDeque`
25//!    and vice versa.  Using `[MaybeUninit<T>; N]` makes the union semantics trivial:
26//!    the stack side is just memory, no drop glue.
27
28use core::fmt;
29use core::mem::{ManuallyDrop, MaybeUninit};
30use core::ptr;
31use std::collections::VecDeque;
32
33// ─── AnyDeque ─────────────────────────────────────────────────────────────────
34
35/// An object-safe abstraction over double-ended queue types.
36///
37/// Implemented by both `VecDeque<T>` (heap) and `SmallDeque<T, N>` (small/stack)
38/// so that code can operate on a deque without knowing which backend is active.
/// An object-safe abstraction over double-ended queue types.
///
/// Implemented by both `VecDeque<T>` (heap) and `SmallDeque<T, N>` (small/stack)
/// so that code can operate on a deque without knowing which backend is active.
pub trait AnyDeque<T> {
    /// Returns the number of elements in the deque.
    fn len(&self) -> usize;
    /// Returns `true` if the deque contains no elements.
    ///
    /// Default implementation is defined in terms of [`len`](AnyDeque::len).
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Appends an element to the back.
    fn push_back(&mut self, item: T);
    /// Prepends an element to the front.
    fn push_front(&mut self, item: T);
    /// Removes and returns the element from the back, or `None` if empty.
    fn pop_back(&mut self) -> Option<T>;
    /// Removes and returns the element from the front, or `None` if empty.
    fn pop_front(&mut self) -> Option<T>;
    /// Removes and returns the element at `index`, or `None` if out of bounds.
    fn remove(&mut self, index: usize) -> Option<T>;
    /// Removes all elements.
    fn clear(&mut self);
    /// Returns a shared reference to the front element, or `None` if empty.
    fn front(&self) -> Option<&T>;
    /// Returns a shared reference to the back element, or `None` if empty.
    fn back(&self) -> Option<&T>;
    /// Returns an exclusive reference to the front element, or `None` if empty.
    fn front_mut(&mut self) -> Option<&mut T>;
    /// Returns an exclusive reference to the back element, or `None` if empty.
    fn back_mut(&mut self) -> Option<&mut T>;
}
67
68impl<T> AnyDeque<T> for VecDeque<T> {
69    fn len(&self) -> usize {
70        self.len()
71    }
72    fn push_back(&mut self, item: T) {
73        self.push_back(item);
74    }
75    fn push_front(&mut self, item: T) {
76        self.push_front(item);
77    }
78    fn pop_back(&mut self) -> Option<T> {
79        self.pop_back()
80    }
81    fn pop_front(&mut self) -> Option<T> {
82        self.pop_front()
83    }
84    fn remove(&mut self, index: usize) -> Option<T> {
85        self.remove(index)
86    }
87    fn clear(&mut self) {
88        self.clear();
89    }
90    fn front(&self) -> Option<&T> {
91        self.front()
92    }
93    fn back(&self) -> Option<&T> {
94        self.back()
95    }
96    fn front_mut(&mut self) -> Option<&mut T> {
97        self.front_mut()
98    }
99    fn back_mut(&mut self) -> Option<&mut T> {
100        self.back_mut()
101    }
102}
103
/// Tagged union holding either the stack ring-buffer or the heap `VecDeque`.
///
/// # Safety
/// The active variant is tracked by `SmallDeque::on_stack`.  Only the active
/// variant must be accessed at any time.  The stack variant is
/// `[MaybeUninit<T>; N]`, so it has no drop glue — `ManuallyDrop` around the
/// stack side is therefore redundant but kept for symmetry with the heap side.
pub union DequeData<T, const N: usize> {
    /// Inline ring-buffer storage; active while `SmallDeque::on_stack` is `true`.
    /// Only the slots covering `head..head+len` (mod `N`) hold initialized `T`s;
    /// every other slot is uninitialized memory.
    pub stack: ManuallyDrop<[MaybeUninit<T>; N]>,
    /// Heap-backed storage; active after a spill (`on_stack == false`).
    /// Must be dropped exactly once via `ManuallyDrop::drop` in `Drop`.
    pub heap: ManuallyDrop<VecDeque<T>>,
}
117
/// A double-ended queue that lives on the stack for up to `N` items, then spills to
/// a heap-allocated `VecDeque` transparently.
///
/// # Stack representation
/// Items are stored in a ring buffer of `[MaybeUninit<T>; N]` with a `head` cursor and
/// a `len` counter.  Two helpers, `wrap_add` and `wrap_sub`, implement the modular
/// arithmetic using a bitmask (requires `N` to be a power of two — enforced by a
/// `const` assertion in [`new`](SmallDeque::new)).
///
/// # Spill behaviour
/// When `push_back` or `push_front` causes `len > capacity` on the stack, the ring
/// buffer is drained in logical order into a freshly heap-allocated `VecDeque` via
/// `spill_to_heap`.  After a spill, all subsequent mutations go directly to the
/// `VecDeque`; the struct never returns to stack storage.
///
/// # Generic parameters
/// | Parameter | Meaning |
/// |-----------|--------|
/// | `T` | Element type |
/// | `N` | Stack capacity; **must be a power of two** |
///
/// # Compile-time assertions
/// `new()` uses `const { assert!(...) }` to verify:
/// - `size_of::<Self>() <= 16 KiB` — prevents accidental blowing of the stack frame.
/// - `N.is_power_of_two()` — required for bitmask ring-buffer arithmetic.
pub struct SmallDeque<T, const N: usize> {
    // Number of logically live elements; valid for both backends.
    len: usize,
    // `N` while on the stack; the `VecDeque`'s capacity after a spill.
    capacity: usize,
    // Physical slot index of the front element (meaningful only on the stack).
    head: usize,
    // Discriminant for `data`: `true` ⇒ `data.stack` active, else `data.heap`.
    on_stack: bool,
    // Untagged storage; see `DequeData` for the activity invariant.
    data: DequeData<T, N>,
}
150
151impl<T, const N: usize> AnyDeque<T> for SmallDeque<T, N> {
152    fn len(&self) -> usize {
153        self.len
154    }
155    fn push_back(&mut self, item: T) {
156        self.push_back(item);
157    }
158    fn push_front(&mut self, item: T) {
159        self.push_front(item);
160    }
161    fn pop_back(&mut self) -> Option<T> {
162        self.pop_back()
163    }
164    fn pop_front(&mut self) -> Option<T> {
165        self.pop_front()
166    }
167    fn remove(&mut self, index: usize) -> Option<T> {
168        self.remove(index)
169    }
170    fn clear(&mut self) {
171        self.clear();
172    }
173    fn front(&self) -> Option<&T> {
174        self.front()
175    }
176    fn back(&self) -> Option<&T> {
177        self.back()
178    }
179    fn front_mut(&mut self) -> Option<&mut T> {
180        self.front_mut()
181    }
182    fn back_mut(&mut self) -> Option<&mut T> {
183        self.back_mut()
184    }
185}
186
impl<T, const N: usize> SmallDeque<T, N> {
    /// Maximum allowed struct size in bytes.  Prevents accidentally allocating huge
    /// arrays on the call stack.
    const MAX_STACK_SIZE: usize = 16 * 1024;

    /// Creates a new empty deque backed by stack storage.
    ///
    /// # Panics (compile-time)
    /// Asserts that `size_of::<Self>() <= 16 KiB` and that `N` is a power of two.
    pub fn new() -> Self {
        // Inline-const assertions evaluate at monomorphization time, so an
        // invalid `N` fails the build rather than panicking at runtime.
        const {
            assert!(
                std::mem::size_of::<Self>() <= SmallDeque::<T, N>::MAX_STACK_SIZE,
                "SmallDeque is too large! Reduce N."
            );
            assert!(N.is_power_of_two(), "SmallDeque N must be a power of two");
        }
        Self {
            len: 0,
            capacity: N,
            head: 0,
            on_stack: true,
            data: DequeData {
                // SAFETY: an array of `MaybeUninit<T>` is itself allowed to be
                // uninitialized, so `assume_init` on the *array* (not its
                // elements) is sound.
                stack: ManuallyDrop::new(unsafe { MaybeUninit::uninit().assume_init() }),
            },
        }
    }

    /// Creates a deque that starts on the heap if `capacity > N`, otherwise on the stack.
    ///
    /// Useful when the caller already knows the required size will exceed `N`.
    pub fn with_capacity(capacity: usize) -> Self {
        if capacity <= N {
            Self::new()
        } else {
            let heap_deque = VecDeque::with_capacity(capacity);
            Self {
                len: 0,
                // Record the actual capacity granted (may exceed the request).
                capacity: heap_deque.capacity(),
                head: 0,
                on_stack: false,
                data: DequeData {
                    heap: ManuallyDrop::new(heap_deque),
                },
            }
        }
    }

    /// Returns `true` if the deque is currently using stack storage.
    #[inline(always)]
    pub fn is_on_stack(&self) -> bool {
        self.on_stack
    }

    /// Returns the number of elements currently in the deque.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the deque contains no elements.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the current capacity (not necessarily `N` after a spill).
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Maps a logical index into the ring buffer to a physical slot index.
    /// Uses bitmask `(capacity - 1)` — valid because `N` is a power of two.
    /// Only meaningful while on the stack, where `capacity == N`.
    #[inline(always)]
    fn wrap_add(&self, idx: usize, add: usize) -> usize {
        (idx + add) & (self.capacity - 1)
    }

    /// Maps a logical index into the ring buffer, wrapping backwards.
    /// `wrapping_sub` keeps the subtraction defined when `sub > idx`; the
    /// bitmask then folds the result back into `0..capacity`.
    #[inline(always)]
    fn wrap_sub(&self, idx: usize, sub: usize) -> usize {
        (idx.wrapping_sub(sub)) & (self.capacity - 1)
    }

    /// Returns a shared reference to the element at logical `index`, or `None`.
    ///
    /// Logical index 0 is the front (oldest element).
    #[inline(always)]
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len {
            // SAFETY: `index < len` guarantees the resolved slot holds an
            // initialized `T`; the branch matches the active union variant.
            unsafe {
                if self.on_stack {
                    let real_idx = self.wrap_add(self.head, index);
                    let ptr = (*self.data.stack).as_ptr() as *const T;
                    Some(&*ptr.add(real_idx))
                } else {
                    (*self.data.heap).get(index)
                }
            }
        } else {
            None
        }
    }

    /// Returns an exclusive reference to the element at logical `index`, or `None`.
    #[inline(always)]
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index < self.len {
            // SAFETY: same invariants as `get`; `&mut self` guarantees
            // exclusive access to the slot.
            unsafe {
                if self.on_stack {
                    let real_idx = self.wrap_add(self.head, index);
                    let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                    Some(&mut *ptr.add(real_idx))
                } else {
                    (*self.data.heap).get_mut(index)
                }
            }
        } else {
            None
        }
    }

    /// Reserves capacity for at least `additional` more elements, spilling to the heap
    /// if necessary.
    pub fn reserve(&mut self, additional: usize) {
        if self.len + additional > self.capacity {
            // SAFETY: `spill_to_heap` is only invoked while `on_stack` is
            // true; after it (or if already spilled) the heap variant is the
            // active union field.
            unsafe {
                if self.on_stack {
                    self.spill_to_heap();
                }
                (*self.data.heap).reserve(additional);
                self.capacity = (*self.data.heap).capacity();
            }
        }
    }

    /// Appends `item` to the back of the deque.  Spills to heap if the stack is full.
    #[inline(always)]
    pub fn push_back(&mut self, item: T) {
        if self.len < self.capacity && self.on_stack {
            // SAFETY: stack variant active and `len < capacity`, so the tail
            // slot is unoccupied; `ptr::write` avoids dropping the
            // uninitialized destination.
            unsafe {
                let tail = self.wrap_add(self.head, self.len);
                let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                ptr::write(ptr.add(tail), item);
                self.len += 1;
            }
        } else {
            self.grow_and_push_back(item);
        }
    }

    /// Cold path: spills to heap then delegates `push_back`.
    #[inline(never)]
    fn grow_and_push_back(&mut self, item: T) {
        // SAFETY: heap variant is active after the conditional spill.
        unsafe {
            if self.on_stack {
                self.spill_to_heap();
            }
            (*self.data.heap).push_back(item);
            self.len = (*self.data.heap).len();
            self.capacity = (*self.data.heap).capacity();
        }
    }

    /// Prepends `item` to the front of the deque.  Spills to heap if the stack is full.
    #[inline(always)]
    pub fn push_front(&mut self, item: T) {
        if self.len < self.capacity && self.on_stack {
            // SAFETY: stack variant active with a free slot; the new head slot
            // (one step backwards, wrapped) is unoccupied.
            unsafe {
                self.head = self.wrap_sub(self.head, 1);
                let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                ptr::write(ptr.add(self.head), item);
                self.len += 1;
            }
        } else {
            self.grow_and_push_front(item);
        }
    }

    /// Cold path: spills to heap then delegates `push_front`.
    #[inline(never)]
    fn grow_and_push_front(&mut self, item: T) {
        // SAFETY: heap variant is active after the conditional spill.
        unsafe {
            if self.on_stack {
                self.spill_to_heap();
            }
            (*self.data.heap).push_front(item);
            self.len = (*self.data.heap).len();
            self.capacity = (*self.data.heap).capacity();
        }
    }

    /// Removes and returns the last element, or `None` if empty.
    #[inline(always)]
    pub fn pop_back(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            // Decrement first: the old tail is at logical index `len - 1`.
            self.len -= 1;
            // SAFETY: the (pre-decrement) tail slot held an initialized `T`;
            // `ptr::read` moves it out, and the shrunken `len` ensures it is
            // never read or dropped again.
            unsafe {
                if self.on_stack {
                    let tail = self.wrap_add(self.head, self.len);
                    let ptr = (*self.data.stack).as_ptr() as *const T;
                    Some(ptr::read(ptr.add(tail)))
                } else {
                    (*self.data.heap).pop_back()
                }
            }
        }
    }

    /// Removes and returns the first element, or `None` if empty.
    #[inline(always)]
    pub fn pop_front(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            // SAFETY: `len > 0` means the head slot holds an initialized `T`;
            // advancing `head` and shrinking `len` retires the slot so the
            // moved-out value is never touched again.
            unsafe {
                let val = if self.on_stack {
                    let ptr = (*self.data.stack).as_ptr() as *const T;
                    let v = ptr::read(ptr.add(self.head));
                    self.head = self.wrap_add(self.head, 1);
                    v
                } else {
                    // `len > 0` ⇒ the heap deque is non-empty.
                    (*self.data.heap).pop_front().unwrap()
                };
                self.len -= 1;
                Some(val)
            }
        }
    }

    /// Removes the element at logical `index` and returns it, or `None` if out of bounds.
    ///
    /// Chooses whether to shift from the front or back depending on which is cheaper
    /// (shift the shorter half).
    pub fn remove(&mut self, index: usize) -> Option<T> {
        if index >= self.len {
            return None;
        }

        // Fast paths: the ends need no shifting at all.
        if index == 0 {
            return self.pop_front();
        }
        if index == self.len - 1 {
            return self.pop_back();
        }

        // SAFETY: `0 < index < len - 1` here; the target slot is initialized
        // and each copy below moves one initialized element into the hole
        // left by the previous step (distinct slots, 1 element ⇒ truly
        // non-overlapping).
        unsafe {
            if self.on_stack {
                let real_idx = self.wrap_add(self.head, index);
                let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                let val = ptr::read(ptr.add(real_idx));

                // Shift elements
                if index < self.len / 2 {
                    // Shift head forward: move elements 0..index up by one,
                    // back-to-front, then advance `head` past the vacated slot.
                    for i in (0..index).rev() {
                        let from = self.wrap_add(self.head, i);
                        let to = self.wrap_add(from, 1);
                        ptr::copy_nonoverlapping(ptr.add(from), ptr.add(to), 1);
                    }
                    self.head = self.wrap_add(self.head, 1);
                } else {
                    // Shift tail backward: move elements index+1..len down by
                    // one, front-to-back; the tail slot becomes dead.
                    for i in (index + 1)..self.len {
                        let from = self.wrap_add(self.head, i);
                        let to = self.wrap_sub(from, 1);
                        ptr::copy_nonoverlapping(ptr.add(from), ptr.add(to), 1);
                    }
                }
                self.len -= 1;
                Some(val)
            } else {
                let val = (*self.data.heap).remove(index);
                self.len = (*self.data.heap).len();
                val
            }
        }
    }

    /// Removes all elements, dropping them in place.
    ///
    /// The storage backend is retained: a deque that has spilled stays on the
    /// heap.  Equivalent to `truncate(0)`.
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Shortens the deque to at most `len` elements, dropping those beyond.
    ///
    /// If `len >= self.len()`, this is a no-op.
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // SAFETY: logical indices `len..self.len` resolve to initialized
            // slots; each is dropped exactly once before `self.len` is cut.
            unsafe {
                if self.on_stack {
                    let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                    for i in len..self.len {
                        let real_idx = self.wrap_add(self.head, i);
                        ptr::drop_in_place(ptr.add(real_idx));
                    }
                } else {
                    (*self.data.heap).truncate(len);
                }
            }
            self.len = len;
        }
    }

    /// Returns a shared reference to the front element, or `None` if empty.
    #[inline(always)]
    pub fn front(&self) -> Option<&T> {
        self.get(0)
    }

    /// Returns a shared reference to the back element, or `None` if empty.
    #[inline(always)]
    pub fn back(&self) -> Option<&T> {
        if self.len == 0 {
            None
        } else {
            self.get(self.len - 1)
        }
    }

    /// Returns an exclusive reference to the front element, or `None` if empty.
    #[inline(always)]
    pub fn front_mut(&mut self) -> Option<&mut T> {
        self.get_mut(0)
    }

    /// Returns an exclusive reference to the back element, or `None` if empty.
    #[inline(always)]
    pub fn back_mut(&mut self) -> Option<&mut T> {
        if self.len == 0 {
            None
        } else {
            self.get_mut(self.len - 1)
        }
    }

    /// Returns up to two contiguous slices covering the logical range `[0, len)`.
    ///
    /// Returns `(head_slice, &[])` when the ring buffer hasn't wrapped, or
    /// `(head_slice, tail_slice)` when it has.  On the heap path delegates to
    /// `VecDeque::as_slices`.
    #[inline(always)]
    pub fn as_slices(&self) -> (&[T], &[T]) {
        // SAFETY: on the stack path, `head..head+len` (possibly wrapped at
        // `capacity`) covers exactly the initialized slots; both constructed
        // slices stay within that range and within the backing array.
        unsafe {
            if self.on_stack {
                let ptr = (*self.data.stack).as_ptr() as *const T;
                if self.head + self.len <= self.capacity {
                    // No wrap: one contiguous run starting at `head`.
                    (
                        core::slice::from_raw_parts(ptr.add(self.head), self.len),
                        &[],
                    )
                } else {
                    // Wrapped: `head..capacity`, then `0..tail_len`.
                    let head_len = self.capacity - self.head;
                    let tail_len = self.len - head_len;
                    (
                        core::slice::from_raw_parts(ptr.add(self.head), head_len),
                        core::slice::from_raw_parts(ptr, tail_len),
                    )
                }
            } else {
                (*self.data.heap).as_slices()
            }
        }
    }

    /// Mutable counterpart of [`as_slices`](SmallDeque::as_slices).
    #[inline(always)]
    pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
        // SAFETY: same bounds reasoning as `as_slices`; the two mutable
        // slices cover disjoint physical ranges, so no aliasing.
        unsafe {
            if self.on_stack {
                let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                if self.head + self.len <= self.capacity {
                    (
                        core::slice::from_raw_parts_mut(ptr.add(self.head), self.len),
                        &mut [],
                    )
                } else {
                    let head_len = self.capacity - self.head;
                    let tail_len = self.len - head_len;
                    let (s1, s2) = (
                        core::slice::from_raw_parts_mut(ptr.add(self.head), head_len),
                        core::slice::from_raw_parts_mut(ptr, tail_len),
                    );
                    (s1, s2)
                }
            } else {
                (*self.data.heap).as_mut_slices()
            }
        }
    }

    /// Migrates all elements from the stack ring buffer to a heap `VecDeque`.
    ///
    /// Elements are written in logical order (front → back) so `VecDeque` indices
    /// match the caller's expectations.  After this call, `on_stack == false`.
    ///
    /// # Safety
    /// Must only be called when `on_stack == true`.  The stack variant of `data` must
    /// contain `len` initialized elements starting at `head`.
    #[inline(never)]
    unsafe fn spill_to_heap(&mut self) {
        unsafe {
            // Double the capacity up front so the push that triggered the
            // spill (and a few more) won't immediately reallocate.
            let mut heap_deque = VecDeque::with_capacity(self.capacity * 2);
            let ptr = (*self.data.stack).as_ptr() as *const T;
            for i in 0..self.len {
                let real_idx = self.wrap_add(self.head, i);
                // Move each element out; the stack slots are never dropped.
                heap_deque.push_back(ptr::read(ptr.add(real_idx)));
            }
            // Overwrite the union with the heap variant.  `ptr::write` skips
            // dropping the old contents — correct, since the stack array has
            // no drop glue and its elements were just moved out.
            ptr::write(&mut self.data.heap, ManuallyDrop::new(heap_deque));
            self.on_stack = false;
            self.capacity = (*self.data.heap).capacity();
        }
    }
}
604
impl<T, const N: usize> Drop for SmallDeque<T, N> {
    fn drop(&mut self) {
        if self.on_stack {
            // SAFETY: stack variant is active; logical indices `0..len`
            // resolve to initialized slots, each dropped exactly once.  The
            // backing `[MaybeUninit<T>; N]` itself has no drop glue.
            unsafe {
                let ptr = (*self.data.stack).as_mut_ptr() as *mut T;
                for i in 0..self.len {
                    let real_idx = self.wrap_add(self.head, i);
                    ptr::drop_in_place(ptr.add(real_idx));
                }
            }
        } else {
            // SAFETY: heap variant is active; drop the `VecDeque` (and all of
            // its elements) exactly once.
            unsafe {
                ManuallyDrop::drop(&mut self.data.heap);
            }
        }
    }
}
622
623impl<T: Clone, const N: usize> Clone for SmallDeque<T, N> {
624    fn clone(&self) -> Self {
625        if self.on_stack {
626            let mut stack_arr: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
627            let (s1, s2) = self.as_slices();
628            let mut idx = 0;
629            for item in s1 {
630                stack_arr[idx] = MaybeUninit::new(item.clone());
631                idx += 1;
632            }
633            for item in s2 {
634                stack_arr[idx] = MaybeUninit::new(item.clone());
635                idx += 1;
636            }
637            Self {
638                len: self.len,
639                capacity: N,
640                head: 0,
641                on_stack: true,
642                data: DequeData {
643                    stack: ManuallyDrop::new(stack_arr),
644                },
645            }
646        } else {
647            let heap_deque = unsafe { (*self.data.heap).clone() };
648            Self {
649                len: self.len,
650                capacity: heap_deque.capacity(),
651                head: 0,
652                on_stack: false,
653                data: DequeData {
654                    heap: ManuallyDrop::new(heap_deque),
655                },
656            }
657        }
658    }
659}
660
661impl<T: fmt::Debug, const N: usize> fmt::Debug for SmallDeque<T, N> {
662    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
663        let (s1, s2) = self.as_slices();
664        f.debug_list().entries(s1.iter().chain(s2.iter())).finish()
665    }
666}
667
668impl<T, const N: usize> Default for SmallDeque<T, N> {
669    fn default() -> Self {
670        Self::new()
671    }
672}
673
674impl<T: PartialEq, const N: usize> PartialEq for SmallDeque<T, N> {
675    fn eq(&self, other: &Self) -> bool {
676        if self.len != other.len {
677            return false;
678        }
679        let (s1_a, s2_a) = self.as_slices();
680        let (s1_b, s2_b) = other.as_slices();
681        s1_a.iter()
682            .chain(s2_a.iter())
683            .zip(s1_b.iter().chain(s2_b.iter()))
684            .all(|(a, b)| a == b)
685    }
686}
/// Equality is a total equivalence relation whenever `T: Eq`.
impl<T: Eq, const N: usize> Eq for SmallDeque<T, N> {}
688
689impl<T: PartialOrd, const N: usize> PartialOrd for SmallDeque<T, N> {
690    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
691        let (s1_a, s2_a) = self.as_slices();
692        let (s1_b, s2_b) = other.as_slices();
693        s1_a.iter()
694            .chain(s2_a.iter())
695            .partial_cmp(s1_b.iter().chain(s2_b.iter()))
696    }
697}
698
699impl<T: Ord, const N: usize> Ord for SmallDeque<T, N> {
700    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
701        let (s1_a, s2_a) = self.as_slices();
702        let (s1_b, s2_b) = other.as_slices();
703        s1_a.iter()
704            .chain(s2_a.iter())
705            .cmp(s1_b.iter().chain(s2_b.iter()))
706    }
707}
708
709impl<T, const N: usize> Extend<T> for SmallDeque<T, N> {
710    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
711        for i in iter {
712            self.push_back(i);
713        }
714    }
715}
716
717impl<T, const N: usize> FromIterator<T> for SmallDeque<T, N> {
718    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
719        let mut deque = Self::new();
720        deque.extend(iter);
721        deque
722    }
723}
724
725#[cfg(test)]
726mod deque_basic_tests {
727    use super::*;
728
    // ─── basic stack ops ──────────────────────────────────────────────────────
    // push/pop/front/back round-trip entirely within stack storage (N = 4).
    #[test]
    fn test_deque_stack_ops_basic() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        assert!(d.is_empty());
        assert!(d.is_on_stack());
        d.push_back(1);
        d.push_back(2);
        d.push_front(0);
        assert_eq!(d.len(), 3);
        assert_eq!(d.front(), Some(&0));
        assert_eq!(d.back(), Some(&2));
        assert_eq!(d.pop_front(), Some(0));
        assert_eq!(d.pop_back(), Some(2));
        assert_eq!(d.len(), 1);
        // Never exceeded N, so no spill should have occurred.
        assert!(d.is_on_stack());
    }

    // All accessors must return `None` on an empty deque instead of panicking.
    #[test]
    fn test_deque_stack_ops_pop_empty() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        assert_eq!(d.pop_front(), None);
        assert_eq!(d.pop_back(), None);
        assert_eq!(d.front(), None);
        assert_eq!(d.back(), None);
    }
755
    // ─── wrap-around (ring buffer) ────────────────────────────────────────────
    // Advances `head` via pops, then pushes enough to wrap the physical index
    // past N; logical FIFO order must be preserved with no spill.
    #[test]
    fn test_deque_stack_wrap_ring_buffer() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        d.pop_front(); // head advances
        d.pop_front();
        // head is now at index 2
        d.push_back(3);
        d.push_back(4);
        d.push_back(5);
        d.push_back(6); // wrapped: [3,4,5,6]
        assert!(d.is_on_stack());
        assert_eq!(d.pop_front(), Some(3));
        assert_eq!(d.pop_front(), Some(4));
        assert_eq!(d.pop_front(), Some(5));
        assert_eq!(d.pop_front(), Some(6));
    }
775
    // ─── spill ────────────────────────────────────────────────────────────────
    // Pushing the (N+1)-th element must move storage to the heap exactly once.
    #[test]
    fn test_deque_spill_trigger() {
        let mut d: SmallDeque<i32, 2> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        assert!(d.is_on_stack());
        d.push_back(3); // spill
        assert!(!d.is_on_stack());
        assert_eq!(d.len(), 3);
    }

    // Elements must survive the spill in logical (front → back) order.
    #[test]
    fn test_deque_spill_data_integrity() {
        let mut d: SmallDeque<i32, 2> = SmallDeque::new();
        d.push_back(10);
        d.push_back(20);
        d.push_back(30); // spill
        assert_eq!(d.pop_front(), Some(10));
        assert_eq!(d.pop_front(), Some(20));
        assert_eq!(d.pop_front(), Some(30));
        assert_eq!(d.pop_front(), None);
    }

    // front/back/len remain consistent well past the spill point.
    #[test]
    fn test_deque_spill_front_back_after_spill() {
        let mut d: SmallDeque<i32, 2> = SmallDeque::new();
        for i in 0..10 {
            d.push_back(i);
        }
        assert!(!d.is_on_stack());
        assert_eq!(d.front(), Some(&0));
        assert_eq!(d.back(), Some(&9));
        assert_eq!(d.len(), 10);
    }
811
    // ─── clear ────────────────────────────────────────────────────────────────
    // `clear` empties the deque but keeps the stack backend usable afterwards.
    #[test]
    fn test_deque_stack_ops_clear() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        d.clear();
        assert!(d.is_empty());
        assert!(d.is_on_stack());
        // Reusable after clear
        d.push_back(3);
        assert_eq!(d.pop_front(), Some(3));
    }
825
    // ─── get / as_slices ─────────────────────────────────────────────────────
    // Logical indexing: 0 is the front; out-of-range returns `None`.
    #[test]
    fn test_deque_stack_ops_get() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(10);
        d.push_back(20);
        d.push_back(30);
        assert_eq!(d.get(0), Some(&10));
        assert_eq!(d.get(2), Some(&30));
        assert_eq!(d.get(99), None);
    }

    // Unwrapped ring buffer: the first slice holds everything, second is empty.
    #[test]
    fn test_deque_stack_ops_as_slices_contiguous() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        let (s1, s2) = d.as_slices();
        assert_eq!(s1, &[1, 2]);
        assert!(s2.is_empty());
    }

    // Eq / Ord are lexicographic over logical order (shorter prefix is Less).
    #[test]
    fn test_deque_traits_comparison() {
        let d1: SmallDeque<i32, 4> = vec![1, 2, 3].into_iter().collect();
        let d2: SmallDeque<i32, 4> = vec![1, 2, 3].into_iter().collect();
        let d3: SmallDeque<i32, 4> = vec![1, 2, 4].into_iter().collect();
        let d4: SmallDeque<i32, 4> = vec![1, 2].into_iter().collect();

        assert_eq!(d1, d2);
        assert!(d1 < d3);
        assert!(d1 > d4);
        assert!(d3 > d1);
    }
860
    // ─── iter ────────────────────────────────────────────────────────────────
    // Logical order is observable through `as_slices` on the stack backend.
    #[test]
    fn test_deque_traits_iter_stack() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        d.push_back(3);
        // SmallDeque has no .iter(); use as_slices to verify logical order
        let (s1, s2) = d.as_slices();
        let v: Vec<_> = s1.iter().chain(s2.iter()).cloned().collect();
        assert_eq!(v, vec![1, 2, 3]);
    }

    // Draining via pop_front yields FIFO order on the stack backend.
    #[test]
    fn test_deque_traits_into_iter_stack() {
        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
        d.push_back(1);
        d.push_back(2);
        d.push_back(3);
        // SmallDeque has no IntoIterator; drain via pop_front
        let mut v = Vec::new();
        while let Some(x) = d.pop_front() {
            v.push(x);
        }
        assert_eq!(v, vec![1, 2, 3]);
    }

    // Same drain behaviour after the deque has spilled to the heap.
    #[test]
    fn test_deque_traits_into_iter_heap() {
        let mut d: SmallDeque<i32, 2> = SmallDeque::new();
        d.extend([1, 2, 3, 4]);
        assert!(!d.is_on_stack());
        let mut v = Vec::new();
        while let Some(x) = d.pop_front() {
            v.push(x);
        }
        assert_eq!(v, vec![1, 2, 3, 4]);
    }
899
900    // ─── FromIterator / Extend ────────────────────────────────────────────────
901    #[test]
902    fn test_deque_traits_from_iter_and_extend() {
903        let d: SmallDeque<i32, 4> = vec![1, 2].into_iter().collect();
904        assert_eq!(d.len(), 2);
905
906        let mut d2: SmallDeque<i32, 4> = SmallDeque::new();
907        d2.extend(vec![10, 20, 30]);
908        assert_eq!(d2.len(), 3);
909    }
910
911    // ─── clone ────────────────────────────────────────────────────────────────
912    #[test]
913    fn test_deque_traits_clone_stack() {
914        let mut d: SmallDeque<i32, 4> = SmallDeque::from_iter(vec![1, 2, 3]);
915        let mut cloned = d.clone();
916        d.push_back(4);
917        assert_eq!(d.len(), 4);
918        assert_eq!(cloned.len(), 3);
919        assert_eq!(cloned.pop_front(), Some(1));
920    }
921
922    #[test]
923    fn test_deque_traits_clone_heap() {
924        let d: SmallDeque<i32, 2> = vec![1, 2, 3, 4].into_iter().collect();
925        let cloned = d.clone();
926        assert!(!cloned.is_on_stack());
927        assert_eq!(cloned.len(), 4);
928    }
929
930    // ─── Debug / PartialEq ────────────────────────────────────────────────────
931    #[test]
932    fn test_deque_traits_debug_and_eq() {
933        let d: SmallDeque<i32, 4> = vec![1, 2, 3].into_iter().collect();
934        let d2: SmallDeque<i32, 4> = vec![1, 2, 3].into_iter().collect();
935        assert_eq!(d, d2);
936        let debug = format!("{:?}", d);
937        assert!(debug.contains('1'));
938    }
939
940    // ─── AnyDeque trait dispatch ──────────────────────────────────────────────
941    #[test]
942    fn test_deque_any_deque_trait() {
943        let mut d: SmallDeque<i32, 4> = SmallDeque::new();
944        let any: &mut dyn AnyDeque<i32> = &mut d;
945        any.push_back(10);
946        any.push_front(5);
947        assert_eq!(any.len(), 2);
948        assert!(!any.is_empty());
949        assert_eq!(any.front(), Some(&5));
950        assert_eq!(any.back(), Some(&10));
951        assert_eq!(any.pop_front(), Some(5));
952        any.clear();
953        assert!(any.is_empty());
954    }
955}
956
#[cfg(test)]
mod deque_coverage_tests {
    use super::*;
    use std::collections::VecDeque;

    /// The AnyDeque impl for std's VecDeque forwards every trait method.
    #[test]
    fn test_vec_deque_any_deque_trait_implementation() {
        let mut backing = VecDeque::new();
        let dynamic: &mut dyn AnyDeque<i32> = &mut backing;
        dynamic.push_back(10);
        dynamic.push_front(5);
        assert_eq!(dynamic.len(), 2);
        assert_eq!(dynamic.front(), Some(&5));
        assert_eq!(dynamic.back(), Some(&10));
        assert_eq!(dynamic.front_mut(), Some(&mut 5));
        assert_eq!(dynamic.back_mut(), Some(&mut 10));
        assert_eq!(dynamic.pop_front(), Some(5));
        assert_eq!(dynamic.pop_back(), Some(10));

        dynamic.push_back(1);
        dynamic.push_back(2);
        dynamic.remove(0);
        dynamic.clear();
        assert!(dynamic.is_empty());
    }

    /// `with_capacity` stays inline while the request fits in N, and goes
    /// straight to the heap otherwise.
    #[test]
    fn test_small_deque_constructor_heap_spill() {
        let inline: SmallDeque<i32, 4> = SmallDeque::with_capacity(2);
        assert!(inline.is_on_stack());

        let heap: SmallDeque<i32, 4> = SmallDeque::with_capacity(10);
        assert!(!heap.is_on_stack());
        assert!(heap.capacity() >= 10);
    }

    /// `get_mut` yields writable references both before and after spilling.
    #[test]
    fn test_small_deque_mutable_indexing() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();
        deque.push_back(10);
        if let Some(slot) = deque.get_mut(0) {
            *slot = 20;
        }
        assert_eq!(deque.get(0), Some(&20));

        deque.extend([30, 40, 50, 60]); // five elements exceed N = 4: spills
        if let Some(slot) = deque.get_mut(1) {
            *slot = 45;
        }
        assert_eq!(deque.get(1), Some(&45));
    }

    /// Reserving beyond the inline capacity moves storage to the heap and
    /// grows the capacity accordingly.
    #[test]
    fn test_small_deque_capacity_reservation() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();
        deque.reserve(10);
        assert!(!deque.is_on_stack());
        deque.reserve(20);
        assert!(deque.capacity() >= 20);
    }

    /// `remove` shifts the shorter half, on both storage backends, and
    /// handles out-of-range, front, and tail indices.
    #[test]
    fn test_small_deque_removal_logic_and_shifting() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();
        for value in [1, 2, 3] {
            deque.push_back(value);
        }

        // Middle removal where the front half is the shorter one.
        assert_eq!(deque.remove(1), Some(2));
        assert_eq!(deque.len(), 2);
        assert_eq!(deque.pop_front(), Some(1));
        assert_eq!(deque.pop_front(), Some(3));

        // Middle removal where the back half is the shorter one.
        deque.clear();
        deque.extend([1, 2, 3, 4]);
        assert_eq!(deque.remove(2), Some(3));
        assert_eq!(deque.len(), 3);

        // Removal after spilling to the heap.
        deque.extend([5, 6, 7]); // six elements total: spills
        assert_eq!(deque.remove(2), Some(4));

        // Edge cases.
        assert_eq!(deque.remove(99), None);
        assert_eq!(deque.remove(0), Some(1)); // logical state becomes [2, 5, 6, 7]
        assert_eq!(deque.pop_front(), Some(2)); // logical state becomes [5, 6, 7]
        assert_eq!(deque.remove(deque.len() - 1), Some(7));
    }

    /// `front_mut` / `back_mut` yield writable references on stack and heap.
    #[test]
    fn test_small_deque_front_back_mut_access() {
        let mut deque: SmallDeque<i32, 2> = SmallDeque::new();
        deque.push_back(10);
        *deque.front_mut().unwrap() = 11;
        *deque.back_mut().unwrap() = 12;
        // With a single element, front and back alias the same slot.
        assert_eq!(deque.front(), Some(&12));

        deque.push_back(20);
        deque.push_back(30); // third element exceeds N = 2: spills
        *deque.front_mut().unwrap() = 13;
        *deque.back_mut().unwrap() = 31;
        assert_eq!(deque.front(), Some(&13));
        assert_eq!(deque.back(), Some(&31));
    }

    /// `as_mut_slices` covers contiguous, wrapped, and heap layouts.
    #[test]
    fn test_small_deque_mutable_slices_wrapped_and_heap() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();
        deque.push_back(1);
        deque.push_back(2);
        {
            let (front, _back) = deque.as_mut_slices();
            front[0] = 10;
        }
        assert_eq!(deque.front(), Some(&10));

        // Advance the head past the start, then refill to wrap the ring.
        deque.pop_front();
        deque.pop_front();
        for value in [3, 4, 5, 6] {
            deque.push_back(value);
        }
        // Logical contents [3, 4, 5, 6] with the head at a wrapped index.
        {
            let (front, back) = deque.as_mut_slices();
            assert!(!front.is_empty());
            assert!(!back.is_empty());
        }

        // A fifth element spills to the heap.
        deque.push_back(7);
        {
            let (front, _back) = deque.as_mut_slices();
            assert!(!front.is_empty());
        }
    }

    /// Clone and FromIterator both work for heap-resident deques.
    #[test]
    fn test_small_deque_heap_storage_clone_and_from_iter() {
        let mut original: SmallDeque<i32, 2> = SmallDeque::new();
        original.extend([1, 2, 3]); // exceeds N = 2: heap storage
        let copy = original.clone();
        assert_eq!(copy, original);

        let collected = SmallDeque::<i32, 2>::from_iter([1, 2, 3]);
        assert_eq!(collected.len(), 3);
    }
}
1108
#[cfg(test)]
mod deque_final_coverage_tests {
    use super::*;

    /// Sweep of remaining paths: wrap_sub on push_front, trait-object
    /// delegation, front-half removal shifting, heap truncate, and the
    /// Ord / PartialOrd implementations.
    #[test]
    fn test_small_deque_truncation_iteration_and_ord_traits() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();

        // push_front on an empty deque exercises the wrap_sub path.
        deque.push_front(1);

        // Drive the AnyDeque delegators explicitly through a trait object.
        let dynamic: &mut dyn AnyDeque<i32> = &mut deque;
        dynamic.push_back(2);
        dynamic.push_front(0);
        assert_eq!(dynamic.pop_back(), Some(2));
        assert_eq!(dynamic.pop_front(), Some(0));
        dynamic.push_back(10);
        assert_eq!(dynamic.remove(0), Some(1));
        assert_eq!(dynamic.remove(0), Some(10));

        // Removal in the front half shifts the head (index < len / 2).
        deque.clear();
        deque.extend([1, 2, 3, 4]); // [1, 2, 3, 4]
        assert_eq!(deque.remove(1), Some(2)); // index 1 < 4 / 2 = 2

        // truncate after spilling to the heap.
        deque.extend([5, 6, 7, 8]); // seven elements total: spills
        deque.truncate(2);
        assert_eq!(deque.len(), 2);
        deque.clear();
        assert!(deque.is_empty());

        // Ord / PartialOrd follow lexicographic element order.
        let smaller: SmallDeque<i32, 4> = SmallDeque::from_iter([1, 2]);
        let larger: SmallDeque<i32, 4> = SmallDeque::from_iter([1, 3]);
        assert!(smaller < larger);
        assert_eq!(smaller.cmp(&larger), std::cmp::Ordering::Less);

        // as_slices and front on the heap backend.
        deque.extend([10, 20, 30, 40, 50]);
        let (front, _back) = deque.as_slices();
        assert!(!front.is_empty());
        assert_eq!(deque.front(), Some(&10));
    }
}
1155
#[cfg(test)]
mod deque_ultra_final_coverage_tests {
    use super::*;

    /// Fill, drain, and refill to wrap the stack ring buffer, then push one
    /// more element to spill and exercise the heap-side delegators.
    #[test]
    fn test_small_deque_wrapped_stack_slices_and_heap_delegation() {
        let mut deque: SmallDeque<i32, 4> = SmallDeque::new();
        for value in [1, 2, 3] {
            deque.push_back(value);
        }
        deque.pop_front();
        deque.pop_front();
        for value in [4, 5, 6] {
            deque.push_back(value);
        }
        // The ring is now full ([3, 4, 5, 6]) and wrapped, still on the stack.

        let (front, back) = deque.as_slices();
        assert!(!front.is_empty() && !back.is_empty());

        let (front_mut, back_mut) = deque.as_mut_slices();
        assert!(!front_mut.is_empty() && !back_mut.is_empty());

        // A fifth element spills to the heap; check the heap delegators.
        deque.push_back(7);
        assert_eq!(deque.front(), Some(&3));
        assert_eq!(deque.back(), Some(&7));
        *deque.front_mut().unwrap() = 30;
        *deque.back_mut().unwrap() = 70;
        assert_eq!(deque.pop_front(), Some(30));
        assert_eq!(deque.pop_back(), Some(70));
    }
}