// zigzag_alloc/collections/priority_queue.rs

//! Heap-based priority queue with an explicit allocator.
//!
//! [`ExPriorityQueue`] implements a binary heap whose ordering is determined
//! by a caller-supplied [`OrdContext<T>`].  Depending on whether the context
//! implements a min-ordering or a max-ordering, the queue acts as a min-heap
//! or a max-heap respectively.
//!
//! ## Complexity
//!
//! | Operation | Complexity |
//! |-----------|------------|
//! | [`push`](ExPriorityQueue::push) | O(log n) amortised |
//! | [`pop`](ExPriorityQueue::pop) | O(log n) |
//! | [`peek`](ExPriorityQueue::peek) | O(1) |
//! | [`push_slice`](ExPriorityQueue::push_slice) | O(n) via `rebuild` |
//! | [`rebuild`](ExPriorityQueue::rebuild) | O(n) |
//!
//! ## Growth Policy
//!
//! The initial capacity is **4**; subsequent growth doubles the capacity.
22use core::{alloc::Layout, marker::PhantomData, ptr::NonNull};
23
24use crate::alloc::allocator::Allocator;
25use crate::simd;
26use super::OrdContext;
27
/// A binary-heap priority queue backed by an explicit allocator.
///
/// The "top" element is always the one for which `ctx.less(top, other)` is
/// `true` for every other element — i.e. the element that the context
/// considers *smallest*.  Use a min-context for a min-heap, a max-context for
/// a max-heap.
///
/// # Invariants
///
/// * `ptr` is a valid allocation of `cap * size_of::<T>()` bytes when `cap > 0`.
/// * Elements at indices `0..len` are fully initialised and satisfy the heap
///   property: for every parent `i` and each existing child `c`
///   (`c = 2*i+1` or `c = 2*i+2`, `c < len`), `ctx.less(data[c], data[i])`
///   is *false* — no child orders strictly before its parent.  This is the
///   condition `sift_up`/`sift_down` establish.
///
/// # Lifetime
///
/// The allocator reference `'a` must outlive the `ExPriorityQueue`.
pub struct ExPriorityQueue<'a, T, C: OrdContext<T>> {
    /// Pointer to the heap array.
    ptr: NonNull<T>,
    /// Number of elements currently in the heap.
    len: usize,
    /// Buffer capacity in elements.
    cap: usize,
    /// Allocator used for buffer management.
    alloc: &'a dyn Allocator,
    /// Ordering context.
    ctx: C,
    // Marks logical ownership of `T` values (drop-check correctness).
    _marker: PhantomData<T>,
}
58
59impl<'a, T, C: OrdContext<T>> ExPriorityQueue<'a, T, C> {
60    /// Creates a new, empty priority queue.
61    ///
62    /// No allocation is performed until the first element is pushed.
63    pub fn new(alloc: &'a dyn Allocator, ctx: C) -> Self {
64        Self { ptr: NonNull::dangling(), len: 0, cap: 0, alloc, ctx, _marker: PhantomData }
65    }
66
67    /// Returns the number of elements in the queue.
68    #[inline]
69    pub fn len(&self) -> usize { self.len }
70
71    /// Returns `true` if the queue is empty.
72    #[inline]
73    pub fn is_empty(&self) -> bool { self.len == 0 }
74
75    /// Returns the current buffer capacity in elements.
76    #[inline]
77    pub fn capacity(&self) -> usize { self.cap }
78
79    /// Returns a reference to the top (smallest/largest) element, or `None`
80    /// if the queue is empty.
81    ///
82    /// The top element is always at index 0 in the heap array.
83    pub fn peek(&self) -> Option<&T> {
84        if self.len == 0 { return None; }
85        // SAFETY: `len > 0` implies at least one initialised element at index 0.
86        Some(unsafe { &*self.ptr.as_ptr() })
87    }
88
89    /// Pushes `value` onto the queue.
90    ///
91    /// # Panics
92    ///
93    /// Panics if the backing allocator cannot grow the buffer.
94    pub fn push(&mut self, value: T) {
95        assert!(self.try_push(value).is_ok(), "ExPriorityQueue: OOM")
96    }
97
98    /// Attempts to push `value`, returning `Err(value)` if OOM.
99    pub fn try_push(&mut self, value: T) -> Result<(), T> {
100        if self.len == self.cap && !self.try_grow() { return Err(value); }
101
102        // SAFETY: After `try_grow`, `cap > len`, so `ptr + len` is within the
103        // buffer and currently uninitialised — safe to write.
104        unsafe { self.ptr.as_ptr().add(self.len).write(value) };
105        self.len += 1;
106        self.sift_up(self.len - 1);
107        Ok(())
108    }
109
110    /// Pushes all elements in `items` and rebuilds the heap in O(n) time.
111    ///
112    /// More efficient than calling [`push`](Self::push) in a loop when many
113    /// elements are added at once.
114    ///
115    /// # Panics
116    ///
117    /// Panics if the backing allocator fails during growth.
118    pub fn push_slice(&mut self, items: &[T]) where T: Copy {
119        let n = items.len();
120        if n == 0 { return; }
121
122        while self.cap < self.len + n {
123            assert!(self.try_grow(), "ExPriorityQueue: OOM")
124        }
125
126        let dst = unsafe { self.ptr.as_ptr().add(self.len) } as *mut u8;
127        let src = items.as_ptr() as *const u8;
128        // SAFETY: `dst` points to `n` uninitialised slots; `src` is a valid
129        // slice of `n` elements.  `T: Copy` ensures no destructor is skipped.
130        unsafe { simd::copy_bytes(dst, src, n * core::mem::size_of::<T>()); }
131        self.len += n;
132
133        // Re-establish the heap property over all elements in O(n).
134        self.rebuild();
135    }
136
137    /// Removes and returns the top element, or `None` if empty.
138    ///
139    /// Swaps the root with the last element, decrements `len`, and sifts the
140    /// new root down to restore the heap property.
141    pub fn pop(&mut self) -> Option<T> {
142        if self.len == 0 { return None; }
143        self.swap(0, self.len - 1);
144        self.len -= 1;
145
146        // SAFETY: The element we swapped to the end is now outside `0..len`,
147        // so it is "logically removed".  Reading it transfers ownership.
148        let val = unsafe { self.ptr.as_ptr().add(self.len).read() };
149        if self.len > 0 { self.sift_down(0); }
150        Some(val)
151    }
152
153    /// Removes the element at `idx` and returns it.
154    ///
155    /// Sifts up *and* down to restore the heap property regardless of whether
156    /// the replacement element is smaller or larger than its neighbours.
157    ///
158    /// # Panics
159    ///
160    /// Panics if `idx >= self.len`.
161    pub fn remove_at(&mut self, idx: usize) -> T {
162        assert!(idx < self.len, "PriorityQueue::remove_at: out of bounds");
163        self.swap(idx, self.len - 1);
164        self.len -= 1;
165
166        // SAFETY: Same as `pop`; the removed element is now past the live range.
167        let val = unsafe { self.ptr.as_ptr().add(self.len).read() };
168        if idx < self.len {
169            self.sift_down(idx);
170            self.sift_up(idx);
171        }
172        val
173    }
174
175    /// Rebuilds the heap from scratch in O(n) via bottom-up heapification.
176    ///
177    /// Called automatically by [`push_slice`](Self::push_slice); can also be
178    /// called manually after bulk insertion via unsafe direct writes.
179    pub fn rebuild(&mut self) {
180        if self.len <= 1 { return; }
181        // Start at the last internal node and sift each one down.
182        let mut i = (self.len / 2).wrapping_sub(1);
183        loop {
184            self.sift_down(i);
185            if i == 0 { break; }
186            i -= 1;
187        }
188    }
189
190    /// Drains the queue in sorted order into `out`.
191    ///
192    /// Repeatedly pops the top element and copies it into successive positions
193    /// of `out`.  After this call the queue is empty.
194    ///
195    /// # Panics
196    ///
197    /// Panics if `out.len() < self.len()`.
198    pub fn drain_sorted(&mut self, out: &mut [T]) where T: Copy {
199        assert!(out.len() >= self.len, "drain_sorted: out slice too short");
200        let mut i = 0;
201        while let Some(val) = self.pop() {
202            let dst = unsafe { out.as_mut_ptr().add(i) } as *mut u8;
203            let src = &val as *const T as *const u8;
204            // SAFETY: `dst` points to an initialised or uninitialised slot of
205            // type `T`; `src` is the address of a local `T` value.  `T: Copy`
206            // means the copy is bitwise-valid.
207            unsafe { simd::copy_bytes(dst, src, core::mem::size_of::<T>()) };
208            i += 1;
209        }
210    }
211
212    /// Calls `f` with a reference to each element in **unspecified** order.
213    ///
214    /// Elements are visited in heap-array order (not sorted order).
215    pub fn for_each<F: FnMut(&T)>(&self, mut f: F) {
216        for i in 0..self.len {
217            // SAFETY: Indices `0..len` are fully initialised.
218            f(unsafe { &*self.ptr.as_ptr().add(i) });
219        }
220    }
221
222    /// Moves the element at `idx` upward until the heap property is restored.
223    ///
224    /// Called after inserting a new element at the end of the array.
225    fn sift_up(&mut self, mut idx: usize) {
226        while idx > 0 {
227            let parent = (idx - 1) / 2;
228            // SAFETY: `idx` and `parent` are both within `0..len`.
229            unsafe {
230                if self.ctx.less(
231                    &*self.ptr.as_ptr().add(idx),
232                    &*self.ptr.as_ptr().add(parent),
233                ) {
234                    self.swap(idx, parent);
235                    idx = parent;
236                } else {
237                    break;
238                }
239            }
240        }
241    }
242
243    /// Moves the element at `idx` downward until the heap property is restored.
244    ///
245    /// Called after removing the root or after bulk insertion via `rebuild`.
246    fn sift_down(&mut self, mut idx: usize) {
247        loop {
248            let left  = 2 * idx + 1;
249            let right = 2 * idx + 2;
250            let mut best = idx;
251
252            // SAFETY: All index arithmetic is bounds-checked against `self.len`.
253            unsafe {
254                if left < self.len && self.ctx.less(
255                    &*self.ptr.as_ptr().add(left),
256                    &*self.ptr.as_ptr().add(best),
257                ) { best = left; }
258                if right < self.len && self.ctx.less(
259                    &*self.ptr.as_ptr().add(right),
260                    &*self.ptr.as_ptr().add(best),
261                ) { best = right; }
262            }
263
264            if best == idx { break; }
265            self.swap(idx, best);
266            idx = best;
267        }
268    }
269
    /// Swaps the elements at indices `a` and `b`.
    ///
    /// # Safety
    ///
    /// Both `a` and `b` must be within `0..len`; this is guaranteed by all
    /// internal callers.
    #[inline]
    fn swap(&mut self, a: usize, b: usize) {
        // SAFETY: Both pointers are in-bounds and initialised.  Note that
        // `a == b` does occur (`pop` on a one-element heap, `remove_at` of
        // the last index, `swap(0, 0)`); `core::ptr::swap` explicitly
        // permits overlapping — even identical — pointers, so that case is
        // sound and a no-op.
        unsafe { core::ptr::swap(self.ptr.as_ptr().add(a), self.ptr.as_ptr().add(b)); }
    }
282
283    /// Doubles the buffer capacity, migrating existing elements.
284    ///
285    /// Returns `false` if the allocator fails.
286    #[cold]
287    fn try_grow(&mut self) -> bool {
288        let new_cap = if self.cap == 0 { 4 } else { self.cap * 2 };
289        let layout  = match Layout::array::<T>(new_cap) {
290            Ok(l) => l, Err(_) => return false,
291        };
292        let new_ptr = match unsafe { self.alloc.alloc(layout) } {
293            Some(p) => p.cast::<T>(),
294            None    => return false,
295        };
296
297        if self.cap > 0 {
298            // SAFETY: `new_ptr` is valid for `new_cap` elements; we copy only
299            // `len` initialised elements.  The old buffer is then released.
300            unsafe {
301                simd::copy_bytes(
302                    new_ptr.as_ptr() as *mut u8,
303                    self.ptr.as_ptr() as *const u8,
304                    self.len * core::mem::size_of::<T>(),
305                );
306                self.alloc.dealloc(self.ptr.cast(), Layout::array::<T>(self.cap).unwrap());
307            }
308        }
309        self.ptr = new_ptr;
310        self.cap = new_cap;
311        true
312    }
313}
314
315impl<T, C: OrdContext<T>> Drop for ExPriorityQueue<'_, T, C> {
316    /// Drops all elements and releases the backing buffer.
317    fn drop(&mut self) {
318        if self.cap == 0 { return; }
319        for i in 0..self.len {
320            // SAFETY: Indices `0..len` are fully initialised.
321            unsafe { core::ptr::drop_in_place(self.ptr.as_ptr().add(i)) };
322        }
323        // SAFETY: `ptr` was obtained from `self.alloc` with this exact layout.
324        unsafe { self.alloc.dealloc(self.ptr.cast(), Layout::array::<T>(self.cap).unwrap()) };
325    }
326}