orengine_utils/
vec_queue.rs1use crate::hints::unlikely;
4use core::ptr::slice_from_raw_parts;
5use core::{mem, ptr};
6
pub struct VecQueue<T> {
    // Pointer to the ring buffer. Null for a const-constructed queue
    // (see `new_const`) until the first allocation.
    ptr: *mut T,
    // Free-running logical index of the front element; wraps on overflow.
    head: usize,
    // Free-running logical index one past the back element;
    // `tail.wrapping_sub(head)` is the current length.
    tail: usize,
    // Number of slots in the buffer; zero (unallocated) or a power of two.
    capacity: usize,
    // `capacity - 1`; maps logical indices to physical slots via `&`.
    mask: usize,
}
20
21impl<T> VecQueue<T> {
22 #[cold]
24 fn allocate(capacity: usize) -> *mut T {
25 debug_assert!(capacity > 0 && capacity.is_power_of_two());
26
27 unsafe {
28 alloc::alloc::alloc(alloc::alloc::Layout::array::<T>(capacity).unwrap_unchecked())
29 .cast()
30 }
31 }
32
33 #[cold]
35 fn deallocate(ptr: *mut T, capacity: usize) {
36 unsafe {
37 alloc::alloc::dealloc(
38 ptr.cast(),
39 alloc::alloc::Layout::array::<T>(capacity).unwrap_unchecked(),
40 );
41 }
42 }
43
44 const fn get_mask_for_capacity(capacity: usize) -> usize {
46 debug_assert!(capacity.is_power_of_two());
47
48 capacity - 1
49 }
50
51 #[inline(always)]
53 fn get_physical_index(&self, index: usize) -> usize {
54 debug_assert!(self.capacity.is_power_of_two());
55
56 index & self.mask
57 }
58
59 pub const fn new_const() -> Self {
61 Self {
62 ptr: ptr::null_mut(),
63 head: 0,
64 tail: 0,
65 capacity: 0,
66 mask: 0,
67 }
68 }
69
70 pub fn new() -> Self {
72 const DEFAULT_CAPACITY: usize = 16;
73
74 Self {
75 ptr: Self::allocate(DEFAULT_CAPACITY),
76 head: 0,
77 tail: 0,
78 capacity: DEFAULT_CAPACITY,
79 mask: Self::get_mask_for_capacity(DEFAULT_CAPACITY),
80 }
81 }
82
83 pub fn len(&self) -> usize {
85 self.tail.wrapping_sub(self.head)
86 }
87
88 pub fn is_empty(&self) -> bool {
90 self.head == self.tail
91 }
92
93 pub fn reserve(&mut self, additional: usize) {
100 let needed = self.len() + additional;
101 if needed <= self.capacity {
102 return;
103 }
104
105 let mut new_capacity = self.capacity * 2;
106
107 while unlikely(needed > new_capacity) {
108 new_capacity *= 2;
109 }
110
111 self.extend_to(new_capacity);
112 }
113
114 #[inline(never)]
120 #[cold]
121 #[track_caller]
122 pub fn extend_to(&mut self, capacity: usize) {
123 #[inline(never)]
124 #[cold]
125 fn extend_from_zero<T>(queue: &mut VecQueue<T>, capacity: usize) {
126 queue.mask = VecQueue::<T>::get_mask_for_capacity(capacity);
127 queue.ptr = VecQueue::<T>::allocate(capacity);
128 queue.capacity = capacity;
129 }
130
131 if unlikely(self.capacity == 0 && capacity == 0) {
132 extend_from_zero(self, 4);
133
134 return;
135 }
136
137 assert!(
138 capacity.is_power_of_two(),
139 "Capacity must be a power of two, provided {capacity}"
140 );
141 assert!(capacity > self.capacity);
142
143 let new_ptr = Self::allocate(capacity);
144 let len = self.len();
145
146 unsafe {
147 let phys_head = self.get_physical_index(self.head);
148 let phys_tail = self.get_physical_index(self.tail);
149 let src = self.ptr.add(phys_head);
150 let dst = new_ptr;
151
152 if phys_head < phys_tail {
153 ptr::copy(src, dst, len);
154 } else {
155 ptr::copy(src, dst, self.capacity - phys_head);
156
157 let src = self.ptr;
158 let dst = new_ptr.add(self.capacity - phys_head);
159
160 ptr::copy(src, dst, phys_tail);
161 }
162 }
163
164 Self::deallocate(self.ptr, self.capacity);
165
166 self.head = 0;
167 self.tail = len;
168 self.ptr = new_ptr;
169 self.capacity = capacity;
170 self.mask = Self::get_mask_for_capacity(capacity);
171 }
172
173 #[inline]
175 pub fn push(&mut self, value: T) {
176 if unlikely(self.len() == self.capacity) {
177 self.extend_to(self.capacity * 2);
178 }
179
180 unsafe {
181 let index = self.get_physical_index(self.tail);
182
183 self.ptr.add(index).write(value);
184 }
185
186 self.tail = self.tail.wrapping_add(1);
187 }
188
189 pub fn push_priority_value(&mut self, value: T) {
207 if unlikely(self.len() == self.capacity) {
208 self.extend_to(self.capacity * 2);
209 }
210
211 self.head = self.head.wrapping_sub(1);
212
213 unsafe {
214 let index = self.get_physical_index(self.head);
215
216 self.ptr.add(index).write(value);
217 }
218 }
219
220 #[inline]
222 pub fn pop(&mut self) -> Option<T> {
223 if self.is_empty() {
224 return None;
225 }
226
227 let index = self.get_physical_index(self.head);
228 let value = unsafe { self.ptr.add(index).read() };
229
230 self.head = self.head.wrapping_add(1);
231
232 Some(value)
233 }
234
235 #[inline]
253 pub fn pop_less_priority_value(&mut self) -> Option<T> {
254 if self.is_empty() {
255 return None;
256 }
257
258 self.tail = self.tail.wrapping_sub(1);
259
260 let index = self.get_physical_index(self.tail);
261 let value = unsafe { self.ptr.add(index).read() };
262
263 Some(value)
264 }
265
266 #[inline]
272 pub unsafe fn extend_from_slice(&mut self, slice: &[T]) {
273 self.reserve(slice.len());
274
275 let phys_tail = self.get_physical_index(self.tail);
276 let right_space = self.capacity - phys_tail;
277
278 unsafe {
279 if slice.len() <= right_space {
280 ptr::copy_nonoverlapping(slice.as_ptr(), self.ptr.add(phys_tail), slice.len());
282 } else {
283 ptr::copy_nonoverlapping(slice.as_ptr(), self.ptr.add(phys_tail), right_space);
285
286 ptr::copy_nonoverlapping(
287 slice.as_ptr().add(right_space),
288 self.ptr,
289 slice.len() - right_space,
290 );
291 }
292 }
293
294 self.tail = self.tail.wrapping_add(slice.len());
295 }
296
297 pub unsafe fn take_batch<R, F: FnOnce(&[T], &[T]) -> R>(
333 &mut self,
334 f: F,
335 mut limit: usize,
336 ) -> R {
337 limit = self.len().min(limit);
338
339 let phys_head = self.get_physical_index(self.head);
340 let right_occupied = self.capacity - phys_head;
341
342 self.head = self.head.wrapping_add(limit);
343
344 if limit <= right_occupied {
345 return f(
348 unsafe { &*slice_from_raw_parts(self.ptr.add(phys_head), limit) },
349 &[],
350 );
351 }
352
353 let slice1 = unsafe { &*slice_from_raw_parts(self.ptr.add(phys_head), right_occupied) };
354 let slice2 = unsafe { &*slice_from_raw_parts(self.ptr, limit - right_occupied) };
355
356 f(slice1, slice2)
357
358 }
360
361 pub fn clear_with<F: Fn(T)>(&mut self, f: F) {
363 for i in 0..self.len() {
364 let elem = unsafe { self.ptr.add(self.get_physical_index(self.head + i)).read() };
365
366 f(elem);
367 }
368
369 self.head = 0;
370 self.tail = 0;
371 }
372
373 pub fn clear(&mut self) {
375 if mem::needs_drop::<T>() {
376 self.clear_with(drop);
377
378 return;
379 }
380
381 self.head = 0;
382 self.tail = 0;
383 }
384
385 pub fn iter(&self) -> impl Iterator<Item = &T> {
387 struct Iter<'queue, T> {
388 queue: &'queue VecQueue<T>,
389 current_head: usize,
390 }
391
392 impl<'queue, T> Iterator for Iter<'queue, T> {
393 type Item = &'queue T;
394
395 fn next(&mut self) -> Option<Self::Item> {
396 if unlikely(self.current_head == self.queue.tail) {
397 return None;
398 }
399
400 let index = self.queue.get_physical_index(self.current_head);
401
402 self.current_head = self.current_head.wrapping_add(1);
403
404 Some(unsafe { &*self.queue.ptr.add(index) })
405 }
406 }
407
408 Iter {
409 queue: self,
410 current_head: self.head,
411 }
412 }
413
414 pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> {
416 struct Iter<'queue, T> {
417 queue: &'queue mut VecQueue<T>,
418 current_head: usize,
419 }
420
421 impl<'queue, T> Iterator for Iter<'queue, T> {
422 type Item = &'queue mut T;
423
424 fn next(&mut self) -> Option<Self::Item> {
425 if unlikely(self.current_head == self.queue.tail) {
426 return None;
427 }
428
429 let index = self.queue.get_physical_index(self.current_head);
430
431 self.current_head = self.current_head.wrapping_add(1);
432
433 Some(unsafe { &mut *self.queue.ptr.add(index) })
434 }
435 }
436
437 let head = self.head;
438
439 Iter {
440 queue: self,
441 current_head: head,
442 }
443 }
444}
445
446impl<T: Clone> Clone for VecQueue<T> {
447 fn clone(&self) -> Self {
448 let mut new = Self::new();
449
450 new.extend_to(new.capacity);
451
452 for i in 0..self.len() {
453 let elem = unsafe { &*self.ptr.add(self.get_physical_index(self.head + i)) };
454
455 new.push(elem.clone());
456 }
457
458 new
459 }
460}
461
462impl<T> Default for VecQueue<T> {
463 fn default() -> Self {
464 Self::new()
465 }
466}
467
468impl<T> Drop for VecQueue<T> {
469 fn drop(&mut self) {
470 if mem::needs_drop::<T>() {
471 while let Some(val) = self.pop() {
472 drop(val);
473 }
474 }
475
476 Self::deallocate(self.ptr, self.capacity);
477 }
478}