use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

/// Cooperatively yields control back to the Tokio scheduler.
///
/// For complex work items this always yields, giving other tasks a chance to
/// run; for simple items it only consumes a unit of the task's cooperative
/// budget, yielding only once that budget is exhausted.
pub async fn yield_on_complex(complex: bool) {
    if complex {
        tokio::task::yield_now().await;
    } else {
        tokio::task::consume_budget().await;
    }
}
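
// A minimal usage sketch for `yield_on_complex` in a long-running loop. The
// item type and the "complex" heuristic are hypothetical stand-ins, not part
// of this module's API; the sketch assumes the Tokio runtime (`rt` feature)
// this module already depends on.
#[allow(dead_code)]
async fn example_process_items(items: Vec<String>) {
    for item in items {
        // ... real per-item work would happen here ...

        // Hypothetical heuristic: treat large items as complex.
        let complex = item.len() > 1_024;
        yield_on_complex(complex).await;
    }
}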

/// A thread-safe, clonable cancellation signal.
///
/// Cloning is cheap: all clones share the same underlying flag, so a
/// cancellation requested through any handle is observed by every other
/// handle.
#[derive(Default, Clone, Debug)]
pub struct CancellationFlag(Arc<AtomicBool>);

impl CancellationFlag {
    /// Creates a new flag in the non-cancelled state.
    pub fn new() -> Self {
        Self(Arc::new(AtomicBool::new(false)))
    }

    /// Returns `true` if cancellation has been requested.
    #[must_use]
    pub fn check(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }

    /// Requests cancellation, signalling every clone of this flag.
    pub fn cancel(&self) {
        self.0.store(true, Ordering::Release);
    }

    /// Wraps this flag so that only every `debounce`-th call to
    /// [`DebounceCancellationFlag::check`] performs the atomic load, keeping
    /// cancellation checks cheap in hot loops. A `debounce` of zero is
    /// clamped to one.
    pub fn debounce(&self, debounce: usize) -> DebounceCancellationFlag {
        DebounceCancellationFlag {
            inner: self.clone(),
            counter: 0,
            debounce: NonZeroUsize::new(debounce.max(1)).unwrap(),
        }
    }
}
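
// A minimal sketch of the intended cross-task usage: one clone of the flag is
// moved into a worker, another is kept to request cancellation. The worker
// body is hypothetical; the sketch assumes Tokio's `rt` feature for
// `tokio::spawn`.
#[allow(dead_code)]
async fn example_cancel_from_outside() {
    let flag = CancellationFlag::new();
    let worker_flag = flag.clone();

    let worker = tokio::spawn(async move {
        let mut iterations = 0_u64;
        // Spin until some other handle requests cancellation.
        while !worker_flag.check() {
            iterations += 1;
            tokio::task::yield_now().await;
        }
        iterations
    });

    // Any clone can request cancellation; the worker observes it on its next
    // `check` call.
    flag.cancel();
    let _iterations = worker.await;
}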

/// A [`CancellationFlag`] wrapper that amortises the cost of cancellation
/// checks by only consulting the underlying atomic on every `debounce`-th
/// call to [`DebounceCancellationFlag::check`].
pub struct DebounceCancellationFlag {
    inner: CancellationFlag,
    counter: usize,
    debounce: NonZeroUsize,
}

impl DebounceCancellationFlag {
    /// Consumes the wrapper, returning the underlying flag.
    pub fn into_inner(self) -> CancellationFlag {
        self.inner
    }

    /// Returns `true` if cancellation has been requested.
    ///
    /// Only every `debounce`-th call performs the atomic load; the calls in
    /// between merely bump a counter and return `false`, so a pending
    /// cancellation is observed after at most `debounce - 1` further calls.
    #[must_use]
    pub fn check(&mut self) -> bool {
        let mut cancelled = false;

        if self.counter.is_multiple_of(self.debounce.get()) {
            self.counter = 0;
            cancelled |= self.inner.check();
        }

        self.counter += 1;
        cancelled
    }

    /// Requests cancellation on the underlying flag.
    pub fn cancel(&self) {
        self.inner.cancel();
    }
}
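
// A minimal sketch of the debounced flag in a hot loop: `check` runs on every
// iteration, but the atomic load behind it only fires on every 64th call.
// The work slice and the debounce interval of 64 are hypothetical choices.
#[allow(dead_code)]
fn example_hot_loop(flag: &CancellationFlag, work: &[u64]) -> u64 {
    let mut debounced = flag.debounce(64);
    let mut sum = 0;
    for &item in work {
        // Cheap on 63 of every 64 iterations; exits promptly once cancelled.
        if debounced.check() {
            break;
        }
        sum += item;
    }
    sum
}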

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cancellation_flag() {
        let flag = CancellationFlag::new();

        assert!(!flag.check());
        flag.cancel();
        assert!(flag.check());

        let flag = CancellationFlag::new();

        let mut debounce = flag.debounce(10);
        // Call 1 checks the fresh flag; calls 2..=5 are debounced away.
        for _ in 0..5 {
            assert!(!debounce.check());
        }
        debounce.cancel();
        // Calls 6..=10 are still debounced, so the cancellation stays unseen...
        for _ in 0..5 {
            assert!(!debounce.check());
        }
        // ...until the 11th call performs the next real check.
        assert!(debounce.check());
    }
}