Struct utils_atomics::fill_queue::FillQueue
source · Available on crate feature `alloc` only. Implementations
source§impl<T> FillQueue<T>
impl<T> FillQueue<T>
sourcepub const fn new() -> Self
pub const fn new() -> Self
Creates a new FillQueue with the global allocator.
Example
use utils_atomics::prelude::*;
let queue = FillQueue::<i32>::new();
Examples found in repository?
(repository excerpt, source lines 77–122)
/// Builds a connected `Flag`/`Subscribe` pair sharing one `FlagQueue`.
/// The `Flag` owns the only strong reference; the `Subscribe` side holds a weak one.
pub fn flag () -> (Flag, Subscribe) {
    let shared = Arc::new(FlagQueue(FillQueue::new()));
    let observer = Arc::downgrade(&shared);
    let owner = Flag { _inner: shared };
    (owner, Subscribe { inner: observer })
}
// Newtype over the queue of pending locks. `repr(transparent)` keeps the
// layout identical to the inner `FillQueue<Lock>`.
#[repr(transparent)]
#[derive(Debug)]
struct FlagQueue (pub FillQueue<Lock>);
impl Drop for FlagQueue {
// When the last strong reference to the queue is released, drain every
// queued lock (non-atomically, since we have `&mut self`) and wake its waiter.
#[inline(always)]
fn drop(&mut self) {
self.0.chop_mut().for_each(super::lock_wake);
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "futures")] {
use core::{future::Future, task::{Waker, Poll}};
use futures::future::FusedFuture;
/// Creates a new pair of [`AsyncFlag`] and [`AsyncSubscribe`]
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[inline]
pub fn async_flag () -> (AsyncFlag, AsyncSubscribe) {
    // `AsyncFlag::new` is deprecated for external callers, but it is still
    // the underlying constructor this helper wraps.
    #[allow(deprecated)]
    let f = AsyncFlag::new();
    let s = f.subscribe();
    (f, s)
}
/// Async flag that will be completed when all references to [`Flag`] have been dropped or marked.
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[derive(Debug, Clone)]
pub struct AsyncFlag {
inner: Arc<AsyncFlagQueue>
}
impl AsyncFlag {
/// Creates a new flag
#[deprecated(since = "0.4.0", note = "use `async_flag` instead")]
#[inline(always)]
pub fn new () -> Self {
Self { inner: Arc::new(AsyncFlagQueue(FillQueue::new())) }
}source§impl<T, A: Allocator> FillQueue<T, A>
impl<T, A: Allocator> FillQueue<T, A>
source§impl<T, A: Allocator> FillQueue<T, A>
impl<T, A: Allocator> FillQueue<T, A>
sourcepub fn push(&self, v: T)
pub fn push(&self, v: T)
Uses atomic operations to push an element to the queue.
Panics
This method panics if alloc fails to allocate the memory needed for the node.
Example
use utils_atomics::prelude::*;
let queue = FillQueue::<i32>::new();
queue.push(1);
assert_eq!(queue.chop().next(), Some(1));
Examples found in repository?
(repository excerpt, source lines 47–172)
/// Blocks the current thread until the flag is marked or dropped.
///
/// If the flag queue has already gone away, returns immediately.
pub fn wait (&self) {
    if let Some(queue) = self.inner.upgrade() {
        #[allow(unused_mut)]
        let mut waker = lock_new();
        #[cfg(feature = "std")] {
            // Register our waker, release our strong reference (so the
            // flag's final drop can run), then park until woken.
            queue.0.push(waker);
            drop(queue);
            std::thread::park();
        }
        #[cfg(not(feature = "std"))] {
            // Without `std` we cannot park the thread: push a clone of the
            // waker and spin until the queue's drop releases the other
            // reference, at which point `try_unwrap` succeeds.
            queue.0.push(waker.clone());
            drop(queue);
            loop {
                match Arc::try_unwrap(waker) {
                    Ok(_) => break,
                    Err(e) => waker = e
                }
                // Fix: this hint was previously commented out, leaving a raw
                // busy-wait; `spin_loop` tells the CPU we are spinning so it
                // can save power / yield pipeline resources.
                core::hint::spin_loop();
            }
        }
    }
}
}
/// Creates a new pair of [`Flag`] and [`Subscribe`].
///
/// The flag will be completed when all references to [`Flag`] have been dropped or marked.
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
pub fn flag () -> (Flag, Subscribe) {
// `Flag` keeps the only strong reference; `Subscribe` observes via `Weak`,
// so dropping all `Flag`s completes the flag.
let flag = Arc::new(FlagQueue(FillQueue::new()));
let sub = Arc::downgrade(&flag);
(Flag { _inner: flag }, Subscribe { inner: sub })
}
// Newtype wrapper; `repr(transparent)` guarantees the same layout as the
// wrapped `FillQueue<Lock>`.
#[repr(transparent)]
#[derive(Debug)]
struct FlagQueue (pub FillQueue<Lock>);
impl Drop for FlagQueue {
// Drain all queued locks and wake each waiter when the queue is destroyed.
#[inline(always)]
fn drop(&mut self) {
self.0.chop_mut().for_each(super::lock_wake);
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "futures")] {
use core::{future::Future, task::{Waker, Poll}};
use futures::future::FusedFuture;
/// Creates a new pair of [`AsyncFlag`] and [`AsyncSubscribe`]
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[inline]
pub fn async_flag () -> (AsyncFlag, AsyncSubscribe) {
#[allow(deprecated)]
let flag = AsyncFlag::new();
let sub = flag.subscribe();
return (flag, sub)
}
/// Async flag that will be completed when all references to [`Flag`] have been dropped or marked.
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[derive(Debug, Clone)]
pub struct AsyncFlag {
inner: Arc<AsyncFlagQueue>
}
impl AsyncFlag {
/// Creates a new flag
#[deprecated(since = "0.4.0", note = "use `async_flag` instead")]
#[inline(always)]
pub fn new () -> Self {
Self { inner: Arc::new(AsyncFlagQueue(FillQueue::new())) }
}
/// See [`Arc::into_raw`]
///
/// # Safety
/// The cast is sound because `AsyncFlagQueue` is `#[repr(transparent)]` over
/// `FillQueue<Waker>`. The returned pointer must eventually be passed back to
/// [`Self::from_raw`] to avoid leaking the `Arc`.
#[inline(always)]
pub unsafe fn into_raw (self) -> *const FillQueue<Waker> {
Arc::into_raw(self.inner).cast()
}
/// See [`Arc::from_raw`]
///
/// # Safety
/// `ptr` must have been produced by [`Self::into_raw`], and each such pointer
/// may be reclaimed at most once (per the `Arc::from_raw` contract).
#[inline(always)]
pub unsafe fn from_raw (ptr: *const FillQueue<Waker>) -> Self {
Self { inner: Arc::from_raw(ptr.cast()) }
}
/// Marks this flag as complete, consuming it
// Dropping `self` releases one strong reference; once the last one is gone,
// `AsyncFlagQueue::drop` wakes every registered waker.
#[inline(always)]
pub fn mark (self) {}
/// Creates a new subscriber to this flag.
// Subscribers hold only a `Weak` reference, so they never keep the flag alive.
#[inline(always)]
pub fn subscribe (&self) -> AsyncSubscribe {
AsyncSubscribe {
inner: Some(Arc::downgrade(&self.inner))
}
}
}
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
/// Subscriber of an [`AsyncFlag`]
#[derive(Debug, Clone)]
pub struct AsyncSubscribe {
// `None` once the subscription has observed completion (flag dropped/marked).
inner: Option<Weak<AsyncFlagQueue>>
}
impl Future for AsyncSubscribe {
type Output = ();
#[inline(always)]
fn poll(mut self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> core::task::Poll<Self::Output> {
if let Some(ref queue) = self.inner {
if let Some(queue) = queue.upgrade() {
queue.0.push(cx.waker().clone());
return Poll::Pending;
} else {
self.inner = None;
return Poll::Ready(())
}
}
return Poll::Ready(())
}
source pub fn try_push(&self, v: T) -> Result<(), AllocError>
pub fn try_push(&self, v: T) -> Result<(), AllocError>
Uses atomic operations to push an element to the queue.
Errors
This method returns an error if alloc fails to allocate the memory needed for the node.
Example
use utils_atomics::prelude::*;
let queue = FillQueue::<i32>::new();
assert!(queue.try_push(1).is_ok());
assert_eq!(queue.chop().next(), Some(1));
source pub fn try_push_mut(&mut self, v: T) -> Result<(), AllocError>
pub fn try_push_mut(&mut self, v: T) -> Result<(), AllocError>
Uses non-atomic operations to push an element to the queue.
Safety
This method is safe because the mutable reference guarantees we are the only thread that can access this queue.
Errors
This method returns an error if alloc fails to allocate the memory needed for the node.
Example
use utils_atomics::prelude::*;
let mut queue = FillQueue::<i32>::new();
assert!(queue.try_push_mut(1).is_ok());
assert_eq!(queue.chop_mut().next(), Some(1));
source§impl<T, A: Allocator> FillQueue<T, A>
impl<T, A: Allocator> FillQueue<T, A>
sourcepub fn chop(&self) -> ChopIter<T, A> ⓘwhere
A: Clone,
pub fn chop(&self) -> ChopIter<T, A> ⓘwhere
A: Clone,
Returns a LIFO (Last In First Out) iterator over a chopped chunk of a FillQueue.
The elements that find themselves inside the chopped region of the queue will be accessed through non-atomic operations.
Example
use utils_atomics::prelude::*;
let queue = FillQueue::<i32>::new();
queue.push(1);
queue.push(2);
queue.push(3);
let mut iter = queue.chop();
assert_eq!(iter.next(), Some(3));
assert_eq!(iter.next(), Some(2));
assert_eq!(iter.next(), Some(1));
assert_eq!(iter.next(), None)
source pub fn chop_mut(&mut self) -> ChopIter<T, A> ⓘ where
A: Clone,
pub fn chop_mut(&mut self) -> ChopIter<T, A> ⓘwhere
A: Clone,
Returns a LIFO (Last In First Out) iterator over a chopped chunk of a FillQueue. The chopping is done with non-atomic operations.
Safety
This method is safe because the mutable reference guarantees we are the only thread that can access this queue.
Example
use utils_atomics::prelude::*;
let mut queue = FillQueue::<i32>::new();
queue.push_mut(1);
queue.push_mut(2);
queue.push_mut(3);
let mut iter = queue.chop_mut();
assert_eq!(iter.next(), Some(3));
assert_eq!(iter.next(), Some(2));
assert_eq!(iter.next(), Some(1));
assert_eq!(iter.next(), None)
Examples found in repository?
(repository excerpt, source lines 89–190)
// Drop body of `FlagQueue` (impl header lies above this excerpt): drain all
// queued locks non-atomically and wake each waiter.
fn drop(&mut self) {
self.0.chop_mut().for_each(super::lock_wake);
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "futures")] {
use core::{future::Future, task::{Waker, Poll}};
use futures::future::FusedFuture;
/// Creates a new pair of [`AsyncFlag`] and [`AsyncSubscribe`]
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[inline]
pub fn async_flag () -> (AsyncFlag, AsyncSubscribe) {
// `AsyncFlag::new` is deprecated for external use but remains the
// underlying constructor this helper wraps.
#[allow(deprecated)]
let flag = AsyncFlag::new();
let sub = flag.subscribe();
return (flag, sub)
}
/// Async flag that will be completed when all references to [`Flag`] have been dropped or marked.
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
#[derive(Debug, Clone)]
pub struct AsyncFlag {
// Strong handle to the shared waker queue; cloning the flag clones this `Arc`.
inner: Arc<AsyncFlagQueue>
}
impl AsyncFlag {
/// Creates a new flag
#[deprecated(since = "0.4.0", note = "use `async_flag` instead")]
#[inline(always)]
pub fn new () -> Self {
Self { inner: Arc::new(AsyncFlagQueue(FillQueue::new())) }
}
/// See [`Arc::into_raw`]
// Cast is sound: `AsyncFlagQueue` is `#[repr(transparent)]` over `FillQueue<Waker>`.
#[inline(always)]
pub unsafe fn into_raw (self) -> *const FillQueue<Waker> {
Arc::into_raw(self.inner).cast()
}
/// See [`Arc::from_raw`]
// `ptr` must originate from `into_raw`; see `Arc::from_raw` for the full contract.
#[inline(always)]
pub unsafe fn from_raw (ptr: *const FillQueue<Waker>) -> Self {
Self { inner: Arc::from_raw(ptr.cast()) }
}
/// Marks this flag as complete, consuming it
// Dropping the last strong reference triggers `AsyncFlagQueue::drop`,
// which wakes all subscribers.
#[inline(always)]
pub fn mark (self) {}
/// Creates a new subscriber to this flag.
#[inline(always)]
pub fn subscribe (&self) -> AsyncSubscribe {
AsyncSubscribe {
inner: Some(Arc::downgrade(&self.inner))
}
}
}
#[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", feature = "futures"))))]
/// Subscriber of an [`AsyncFlag`]
#[derive(Debug, Clone)]
pub struct AsyncSubscribe {
// Weak handle so subscribers never keep the flag alive; `None` after completion.
inner: Option<Weak<AsyncFlagQueue>>
}
impl Future for AsyncSubscribe {
type Output = ();
#[inline(always)]
fn poll(mut self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> core::task::Poll<Self::Output> {
// While the flag queue is still alive, register this task's waker and
// stay pending; completion is signalled by the queue being dropped,
// whose `Drop` wakes every queued waker.
if let Some(ref queue) = self.inner {
if let Some(queue) = queue.upgrade() {
// NOTE(review): each poll pushes a fresh waker clone without
// deduplication; repeated polls enqueue duplicates, causing extra
// (harmless) wakes on drain — confirm the growth is acceptable.
queue.0.push(cx.waker().clone());
return Poll::Pending;
} else {
// Upgrade failed: the flag was dropped or marked. Clear `inner`
// so `is_terminated` reports true, then complete.
self.inner = None;
return Poll::Ready(())
}
}
// Already terminated by a previous poll.
return Poll::Ready(())
}
}
impl FusedFuture for AsyncSubscribe {
#[inline(always)]
fn is_terminated(&self) -> bool {
// `poll` clears `inner` once the flag queue is gone.
self.inner.is_none()
}
}
// `repr(transparent)` guarantees the same layout as `FillQueue<Waker>`,
// which `AsyncFlag::{into_raw, from_raw}` rely on for their pointer casts.
#[repr(transparent)]
#[derive(Debug)]
struct AsyncFlagQueue (pub FillQueue<Waker>);
impl Drop for AsyncFlagQueue {
#[inline(always)]
// Waking every queued waker here is what completes all pending
// `AsyncSubscribe` futures.
fn drop(&mut self) {
self.0.chop_mut().for_each(Waker::wake);
}