iceoryx2_bb_container/queue.rs
1// Copyright (c) 2023 Contributors to the Eclipse Foundation
2//
3// See the NOTICE file(s) distributed with this work for additional
4// information regarding copyright ownership.
5//
6// This program and the accompanying materials are made available under the
7// terms of the Apache Software License 2.0 which is available at
8// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
9// which is available at https://opensource.org/licenses/MIT.
10//
11// SPDX-License-Identifier: Apache-2.0 OR MIT
12
13//! Three queue variations that are similar to [`std::collections::VecDeque`].
14//!
15//! * [`FixedSizeQueue`](crate::queue::FixedSizeQueue), compile-time fixed size queue that
16//! is self-contained.
17//! * [`RelocatableQueue`](crate::queue::RelocatableQueue), run-time fixed size queue that
18//! acquires the required memory from a custom user-provided allocator.
//! * [`Queue`](crate::queue::Queue), run-time fixed size queue that by default
//!     uses heap memory.
21//!
22//! # Basic Examples
23//!
24//! ## Use the [`FixedSizeQueue`](crate::queue::FixedSizeQueue)
25//!
26//! ```
27//! use iceoryx2_bb_container::queue::FixedSizeQueue;
28//!
29//! const QUEUE_CAPACITY: usize = 1;
30//! let mut queue = FixedSizeQueue::<u64, QUEUE_CAPACITY>::new();
31//!
32//! queue.push(123);
33//!
34//! // queue is full, we override the oldest element (123) with the new number (456)
35//! queue.push_with_overflow(456);
36//!
37//! println!("pop from queue {}", queue.pop().unwrap());
38//! ```
39//!
40//! ## Use the [`Queue`](crate::queue::Queue)
41//!
42//! ```
43//! use iceoryx2_bb_container::queue::Queue;
44//!
45//! let queue_capacity = 1234;
46//! let mut queue = Queue::<u64>::new(queue_capacity);
47//!
48//! queue.push(123);
49//!
50//! println!("pop from queue {}", queue.pop().unwrap());
51//! ```
52//!
53//! # Advanced Examples
54//!
//! ## Create [`RelocatableQueue`](crate::queue::RelocatableQueue) inside constructs which provide memory
56//!
57//! ```
58//! use iceoryx2_bb_container::queue::RelocatableQueue;
59//! use iceoryx2_bb_elementary::math::align_to;
60//! use iceoryx2_bb_elementary::bump_allocator::BumpAllocator;
61//! use iceoryx2_bb_elementary::relocatable_container::RelocatableContainer;
62//! use core::mem::MaybeUninit;
63//!
64//! const QUEUE_CAPACITY:usize = 12;
65//! struct MyConstruct {
66//! queue: RelocatableQueue<u128>,
67//! queue_memory: [MaybeUninit<u128>; QUEUE_CAPACITY],
68//! }
69//!
70//! impl MyConstruct {
71//! pub fn new() -> Self {
72//! let mut new_self = Self {
73//! queue: unsafe { RelocatableQueue::new_uninit(QUEUE_CAPACITY) },
74//! queue_memory: core::array::from_fn(|_| MaybeUninit::uninit()),
75//! };
76//!
77//! let allocator = BumpAllocator::new(core::ptr::addr_of!(new_self.queue_memory) as usize);
78//! unsafe {
79//! new_self.queue.init(&allocator).expect("Enough memory provided.")
80//! };
81//! new_self
82//! }
83//! }
84//! ```
85//!
86//! ## Create [`RelocatableQueue`](crate::queue::RelocatableQueue) with allocator
87//!
88//! ```
89//! use iceoryx2_bb_container::queue::RelocatableQueue;
90//! use iceoryx2_bb_elementary::bump_allocator::BumpAllocator;
91//! use iceoryx2_bb_elementary::relocatable_container::RelocatableContainer;
92//! use core::ptr::NonNull;
93//!
94//! const QUEUE_CAPACITY:usize = 12;
95//! const MEM_SIZE: usize = RelocatableQueue::<u128>::const_memory_size(QUEUE_CAPACITY);
96//! let mut memory = [0u8; MEM_SIZE];
97//!
98//! let bump_allocator = BumpAllocator::new(memory.as_mut_ptr() as usize);
99//!
100//! let mut queue = unsafe { RelocatableQueue::<u128>::new_uninit(QUEUE_CAPACITY) };
101//! unsafe { queue.init(&bump_allocator).expect("queue init failed") };
102//! ```
103//!
104use core::marker::PhantomData;
105use core::{alloc::Layout, fmt::Debug, mem::MaybeUninit};
106use iceoryx2_bb_elementary::allocator::{AllocationError, BaseAllocator};
107use iceoryx2_bb_elementary::bump_allocator::BumpAllocator;
108use iceoryx2_bb_elementary::math::unaligned_mem_size;
109use iceoryx2_bb_elementary::owning_pointer::{GenericOwningPointer, OwningPointer};
110use iceoryx2_bb_elementary::placement_default::PlacementDefault;
111use iceoryx2_bb_elementary::pointer_trait::PointerTrait;
112pub use iceoryx2_bb_elementary::relocatable_container::RelocatableContainer;
113use iceoryx2_bb_elementary::relocatable_ptr::{GenericRelocatablePointer, RelocatablePointer};
114use iceoryx2_bb_elementary::zero_copy_send::ZeroCopySend;
115use iceoryx2_bb_log::{fail, fatal_panic};
116use iceoryx2_pal_concurrency_sync::iox_atomic::IoxAtomicBool;
117
/// Queue with run-time fixed size capacity. In contrast to its counterpart the
/// [`RelocatableQueue`] it is movable but is not shared memory compatible.
pub type Queue<T> = details::MetaQueue<T, GenericOwningPointer>;
/// **Non-movable** relocatable queue with runtime fixed size capacity. It must be
/// initialized with [`RelocatableContainer::init()`] before use.
pub type RelocatableQueue<T> = details::MetaQueue<T, GenericRelocatablePointer>;
123
124#[doc(hidden)]
125pub mod details {
126 use iceoryx2_bb_elementary::generic_pointer::GenericPointer;
127
128 use super::*;
    /// **Non-movable** relocatable queue with runtime fixed size capacity.
    #[repr(C)]
    #[derive(Debug)]
    pub struct MetaQueue<T, Ptr: GenericPointer> {
        /// Pointer to the element storage; every slot is a `MaybeUninit<T>`.
        data_ptr: Ptr::Type<MaybeUninit<T>>,
        /// Monotonically increasing write cursor; the next push writes to slot
        /// `start % capacity` (see `unchecked_push`).
        start: usize,
        /// Number of elements currently stored; the oldest element lives at
        /// slot `(start - len) % capacity` (see `pop_impl`/`peek_impl`).
        len: usize,
        /// Maximum number of elements the queue can hold.
        capacity: usize,
        /// True once the backing memory is usable; checked via debug assertion
        /// before every access (see `verify_init`).
        is_initialized: IoxAtomicBool,
        _phantom_data: PhantomData<T>,
    }
140
141 unsafe impl<T: Send, Ptr: GenericPointer> Send for MetaQueue<T, Ptr> {}
142
143 impl<T> MetaQueue<T, GenericOwningPointer> {
144 /// Creates a new [`Queue`] with the provided capacity
145 pub fn new(capacity: usize) -> Self {
146 Self {
147 data_ptr: OwningPointer::<MaybeUninit<T>>::new_with_alloc(capacity),
148 start: 0,
149 len: 0,
150 capacity,
151 is_initialized: IoxAtomicBool::new(true),
152 _phantom_data: PhantomData,
153 }
154 }
155
156 /// Removes all elements from the queue
157 pub fn clear(&mut self) {
158 unsafe { self.clear_impl() }
159 }
160
161 /// Returns a reference to the element from the beginning of the queue without removing it.
162 /// If the queue is empty it returns [`None`].
163 pub fn peek(&self) -> Option<&T> {
164 unsafe { self.peek_impl() }
165 }
166
167 /// Returns a mutable reference to the element from the beginning of the queue without removing it.
168 /// If the queue is empty it returns [`None`].
169 pub fn peek_mut(&mut self) -> Option<&mut T> {
170 unsafe { self.peek_mut_impl() }
171 }
172
173 /// Removes the element from the beginning of the queue. If the queue is empty it returns [`None`].
174 pub fn pop(&mut self) -> Option<T> {
175 unsafe { self.pop_impl() }
176 }
177
178 /// Adds an element at the end of the queue. If the queue is full it returns false, otherwise true.
179 pub fn push(&mut self, value: T) -> bool {
180 unsafe { self.push_impl(value) }
181 }
182
183 /// Adds an element at the end of the queue. If the queue is full it returns the oldest element,
184 /// otherwise [`None`].
185 pub fn push_with_overflow(&mut self, value: T) -> Option<T> {
186 unsafe { self.push_with_overflow_impl(value) }
187 }
188 }
189
    impl<T: Copy + Debug, Ptr: GenericPointer + Debug> MetaQueue<T, Ptr> {
        /// Returns a copy of the element stored at `index`. The index starts at
        /// 0 for the oldest element; valid indices are `0..`[`Queue::len()`].
        ///
        /// # Safety
        ///
        /// * Must satisfy `index` < [`Queue::len()`]
        pub unsafe fn get_unchecked(&self, index: usize) -> T {
            unsafe {
                // The oldest element lives at (start - len) % capacity;
                // `index` offsets from there around the ring.
                (*self
                    .data_ptr
                    .as_ptr()
                    .add((self.start - self.len + index) % self.capacity))
                .assume_init()
            }
        }

        /// Returns a copy of the element stored at `index`. The index starts at
        /// 0 for the oldest element; valid indices are `0..`[`Queue::len()`].
        ///
        /// Terminates the process (via `fatal_panic`) when `index` is out of
        /// range.
        pub fn get(&self, index: usize) -> T {
            if self.len() <= index {
                fatal_panic!(from self, "Unable to copy content since the index {} is out of range.", index);
            }

            unsafe { self.get_unchecked(index) }
        }
    }
217
    impl<T> RelocatableContainer for MetaQueue<T, GenericRelocatablePointer> {
        /// Creates the queue header without backing memory; `init()` must be
        /// called before the queue can be used.
        unsafe fn new_uninit(capacity: usize) -> Self {
            Self {
                data_ptr: RelocatablePointer::new_uninit(),
                start: 0,
                len: 0,
                capacity,
                is_initialized: IoxAtomicBool::new(false),
                _phantom_data: PhantomData,
            }
        }

        /// Allocates the element storage from `allocator` and attaches it to
        /// the queue. Must be called exactly once.
        unsafe fn init<Allocator: BaseAllocator>(
            &mut self,
            allocator: &Allocator,
        ) -> Result<(), AllocationError> {
            // A second `init()` would re-point the queue at fresh memory and
            // orphan the existing allocation — treated as a fatal programming
            // error.
            if self
                .is_initialized
                .load(core::sync::atomic::Ordering::Relaxed)
            {
                fatal_panic!(
                    from "Queue::init()",
                    "Memory already initialized. Initializing it twice may lead to undefined behavior."
                );
            }

            // Request `capacity * size_of::<T>()` bytes with `T`'s alignment.
            // NOTE(review): the size multiplication is unchecked — presumably
            // callers keep capacity in a sane range; confirm.
            self.data_ptr.init(fail!(from "Queue::init", when allocator
                .allocate(Layout::from_size_align_unchecked(
                    core::mem::size_of::<T>() * self.capacity,
                    core::mem::align_of::<T>(),
                )), "Failed to initialize queue since the allocation of the data memory failed."
            ));
            // Publish that the memory is ready for use.
            self.is_initialized
                .store(true, core::sync::atomic::Ordering::Relaxed);

            Ok(())
        }

        fn memory_size(capacity: usize) -> usize {
            Self::const_memory_size(capacity)
        }
    }
260
261 unsafe impl<T: ZeroCopySend> ZeroCopySend for MetaQueue<T, GenericRelocatablePointer> {}
262
    impl<T> MetaQueue<T, GenericRelocatablePointer> {
        /// Returns the required memory size for a queue with a specified capacity
        pub const fn const_memory_size(capacity: usize) -> usize {
            unaligned_mem_size::<T>(capacity)
        }

        /// Removes all elements from the queue
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        pub unsafe fn clear(&mut self) {
            self.clear_impl()
        }

        /// Returns a reference to the element from the beginning of the queue without removing it.
        /// If the queue is empty it returns [`None`].
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        // NOTE(review): this function documents a `# Safety` contract but is
        // not declared `unsafe fn`, unlike the sibling `pop()`/`push()`/
        // `clear()`. Marking it `unsafe` would be a breaking change — confirm
        // the intended contract.
        pub fn peek(&self) -> Option<&T> {
            unsafe { self.peek_impl() }
        }

        /// Returns a mutable reference to the element from the beginning of the queue without removing it.
        /// If the queue is empty it returns [`None`].
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        // NOTE(review): same inconsistency as `peek()` — `# Safety` section on
        // a safe fn; confirm intended contract.
        pub fn peek_mut(&mut self) -> Option<&mut T> {
            unsafe { self.peek_mut_impl() }
        }

        /// Removes the element from the beginning of the queue. If the queue is empty it returns [`None`].
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        pub unsafe fn pop(&mut self) -> Option<T> {
            self.pop_impl()
        }

        /// Adds an element at the end of the queue. If the queue is full it returns false, otherwise true.
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        pub unsafe fn push(&mut self, value: T) -> bool {
            self.push_impl(value)
        }

        /// Adds an element at the end of the queue. If the queue is full it returns the oldest element,
        /// otherwise [`None`].
        ///
        /// # Safety
        ///
        /// * [`Queue::init()`] must have been called once before
        ///
        pub unsafe fn push_with_overflow(&mut self, value: T) -> Option<T> {
            self.push_with_overflow_impl(value)
        }
    }
332
    impl<T, Ptr: GenericPointer> MetaQueue<T, Ptr> {
        /// Debug-build guard that fires when the queue is accessed before its
        /// backing memory was initialized; a no-op in release builds.
        #[inline(always)]
        fn verify_init(&self, source: &str) {
            debug_assert!(
                self.is_initialized
                    .load(core::sync::atomic::Ordering::Relaxed),
                "From: MetaQueue<{}>::{}, Undefined behavior - the object was not initialized with 'init' before.",
                core::any::type_name::<T>(), source
            );
        }

        /// Returns true if the queue is empty, otherwise false
        pub fn is_empty(&self) -> bool {
            self.len == 0
        }

        /// Returns the capacity of the queue
        pub fn capacity(&self) -> usize {
            self.capacity
        }

        /// Returns the number of elements inside the queue
        pub fn len(&self) -> usize {
            self.len
        }

        /// Returns true if the queue is full, otherwise false
        pub fn is_full(&self) -> bool {
            self.len() == self.capacity()
        }

        // Pops until empty so every remaining element's destructor runs.
        pub(crate) unsafe fn clear_impl(&mut self) {
            while self.pop_impl().is_some() {}
        }

        pub(crate) unsafe fn peek_mut_impl(&mut self) -> Option<&mut T> {
            self.verify_init("peek_mut()");

            if self.is_empty() {
                return None;
            }

            // `start` counts total pushes, so the oldest element sits `len`
            // slots behind it, wrapped around the ring.
            let index = (self.start - self.len) % self.capacity;

            Some((*self.data_ptr.as_mut_ptr().add(index)).assume_init_mut())
        }

        pub(crate) unsafe fn peek_impl(&self) -> Option<&T> {
            self.verify_init("peek()");

            if self.is_empty() {
                return None;
            }

            // Same front-index computation as `peek_mut_impl`.
            let index = (self.start - self.len) % self.capacity;

            Some((*self.data_ptr.as_ptr().add(index)).assume_init_ref())
        }

        pub(crate) unsafe fn pop_impl(&mut self) -> Option<T> {
            self.verify_init("pop()");

            if self.is_empty() {
                return None;
            }

            // Locate the oldest element, shrink the queue, then move the value
            // out by swapping an uninitialized slot into its place.
            let index = (self.start - self.len) % self.capacity;
            self.len -= 1;
            let value = core::mem::replace(
                &mut *self.data_ptr.as_mut_ptr().add(index),
                MaybeUninit::uninit(),
            );
            Some(value.assume_init())
        }

        pub(crate) unsafe fn push_impl(&mut self, value: T) -> bool {
            self.verify_init("push()");

            // Reject the push when no free slot is left.
            if self.len == self.capacity {
                return false;
            }

            self.unchecked_push(value);
            true
        }

        pub(crate) unsafe fn push_with_overflow_impl(&mut self, value: T) -> Option<T> {
            self.verify_init("push_with_overflow()");

            // When full, evict the oldest element first to make room.
            let overridden_value = if self.len() == self.capacity() {
                self.pop_impl()
            } else {
                None
            };

            self.unchecked_push(value);
            overridden_value
        }

        // Writes `value` into the next write slot. Callers must have ensured a
        // free slot exists (checked in `push_impl`, or created via `pop_impl`
        // in `push_with_overflow_impl`).
        // NOTE(review): with `capacity == 0` the modulo below divides by zero
        // and panics — presumably zero-capacity queues are never constructed;
        // confirm at the call sites.
        unsafe fn unchecked_push(&mut self, value: T) {
            let index = (self.start) % self.capacity;
            self.data_ptr
                .as_mut_ptr()
                .add(index)
                .write(MaybeUninit::new(value));
            self.start += 1;
            self.len += 1;
        }
    }
442
443 impl<T, Ptr: GenericPointer> Drop for MetaQueue<T, Ptr> {
444 fn drop(&mut self) {
445 if self
446 .is_initialized
447 .load(core::sync::atomic::Ordering::Relaxed)
448 {
449 unsafe { self.clear_impl() }
450 }
451 }
452 }
453}
454
/// Relocatable queue with compile time fixed size capacity. In contrast to its counterpart the
/// [`Queue`] it is movable.
#[repr(C)]
#[derive(Debug)]
pub struct FixedSizeQueue<T, const CAPACITY: usize> {
    // Queue bookkeeping; its relocatable data pointer refers to `_data` below
    // (wired up in `Default`/`PlacementDefault`).
    state: RelocatableQueue<T>,
    // In-line element storage handed to `state` during construction.
    _data: [MaybeUninit<T>; CAPACITY],
}
463
464unsafe impl<T: ZeroCopySend, const CAPACITY: usize> ZeroCopySend for FixedSizeQueue<T, CAPACITY> {}
465
impl<T, const CAPACITY: usize> PlacementDefault for FixedSizeQueue<T, CAPACITY> {
    /// Initializes `Self` directly in the memory at `ptr`, avoiding a
    /// temporary on the stack.
    unsafe fn placement_default(ptr: *mut Self) {
        // Construct the queue header in place inside the target memory.
        let state_ptr = core::ptr::addr_of_mut!((*ptr).state);
        state_ptr.write(RelocatableQueue::new_uninit(CAPACITY));

        // Hand the in-place `_data` array to the queue as element storage;
        // with exactly `CAPACITY` slots available, `init()` cannot fail.
        let allocator = BumpAllocator::new(core::ptr::addr_of!((*ptr)._data) as usize);
        (*ptr)
            .state
            .init(&allocator)
            .expect("All required memory is preallocated.");
    }
}
478
479impl<T, const CAPACITY: usize> Default for FixedSizeQueue<T, CAPACITY> {
480 fn default() -> Self {
481 let mut new_self = Self {
482 state: unsafe { RelocatableQueue::new_uninit(CAPACITY) },
483 _data: unsafe { MaybeUninit::uninit().assume_init() },
484 };
485
486 let allocator = BumpAllocator::new(core::ptr::addr_of!(new_self._data) as usize);
487 unsafe {
488 new_self
489 .state
490 .init(&allocator)
491 .expect("All required memory is preallocated.")
492 };
493
494 new_self
495 }
496}
497
498unsafe impl<T: Send, const CAPACITY: usize> Send for FixedSizeQueue<T, CAPACITY> {}
499
500impl<T, const CAPACITY: usize> FixedSizeQueue<T, CAPACITY> {
501 /// Creates a new queue.
502 pub fn new() -> Self {
503 Self::default()
504 }
505
506 /// Returns true if the queue is empty, otherwise false
507 pub fn is_empty(&self) -> bool {
508 self.state.is_empty()
509 }
510
511 /// Returns the capacity of the queue
512 pub fn capacity(&self) -> usize {
513 self.state.capacity()
514 }
515
516 /// Returns the number of elements inside the queue
517 pub fn len(&self) -> usize {
518 self.state.len()
519 }
520
521 /// Returns true if the queue is full, otherwise false
522 pub fn is_full(&self) -> bool {
523 self.state.is_full()
524 }
525
526 /// Removes all elements from the queue
527 pub fn clear(&mut self) {
528 unsafe { self.state.clear_impl() }
529 }
530
531 /// Returns a reference to the element from the beginning of the queue without removing it.
532 /// If the queue is empty it returns [`None`].
533 pub fn peek(&self) -> Option<&T> {
534 unsafe { self.state.peek_impl() }
535 }
536
537 /// Returns a mutable reference to the element from the beginning of the queue without removing it.
538 /// If the queue is empty it returns [`None`].
539 pub fn peek_mut(&mut self) -> Option<&mut T> {
540 unsafe { self.state.peek_mut_impl() }
541 }
542
543 /// Removes the element from the beginning of the queue. If the queue is empty it returns [`None`].
544 pub fn pop(&mut self) -> Option<T> {
545 unsafe { self.state.pop_impl() }
546 }
547
548 /// Adds an element at the end of the queue. If the queue is full it returns false, otherwise true.
549 pub fn push(&mut self, value: T) -> bool {
550 unsafe { self.state.push_impl(value) }
551 }
552
553 /// Adds an element at the end of the queue. If the queue is full it returns the oldest element,
554 /// otherwise [`None`].
555 pub fn push_with_overflow(&mut self, value: T) -> Option<T> {
556 unsafe { self.state.push_with_overflow_impl(value) }
557 }
558}
559
560impl<T: Copy + Debug, const CAPACITY: usize> FixedSizeQueue<T, CAPACITY> {
561 /// Returns a copy of the element stored at index. The index is starting by 0 for the first
562 /// element until [`FixedSizeQueue::len()`].
563 ///
564 /// # Safety
565 ///
566 /// * The index must be not out of bounds
567 ///
568 pub unsafe fn get_unchecked(&self, index: usize) -> T {
569 self.state.get_unchecked(index)
570 }
571
572 /// Returns a copy of the element stored at index. The index is starting by 0 for the first
573 /// element until [`FixedSizeQueue::len()`].
574 pub fn get(&self, index: usize) -> T {
575 self.state.get(index)
576 }
577}