1use core::ops::{Deref, DerefMut};
2use std::cell::Cell;
3
/// Maximum backoff exponent: a single `spin` call never spins for more
/// than `1 << SPIN_LIMIT` iterations.
const SPIN_LIMIT: u32 = 8;

/// Exponential backoff helper for spin loops.
///
/// Tracks how many backoff rounds have elapsed and spins for
/// exponentially more iterations each round, capped at
/// `1 << SPIN_LIMIT`. Interior mutability via [`Cell`] makes the
/// methods take `&self`, but also makes the type single-threaded:
/// use one `Backoff` per spinning thread.
#[repr(transparent)]
#[derive(Debug)]
pub struct Backoff {
    // Number of completed backoff rounds; saturates at SPIN_LIMIT + 1.
    rounds: Cell<u32>,
}

impl Backoff {
    /// Creates a new backoff state with zero completed rounds.
    #[inline]
    pub fn new() -> Self {
        Backoff {
            rounds: Cell::new(0),
        }
    }

    /// Resets the round counter to zero, restarting the backoff schedule.
    #[inline]
    pub fn reset(&self) {
        self.rounds.set(0)
    }

    /// Returns the number of completed backoff rounds.
    #[inline]
    pub fn rounds(&self) -> u32 {
        self.rounds.get()
    }

    /// Emits a single spin-loop hint without advancing the round counter.
    #[inline]
    pub fn spin_once(&self) {
        std::hint::spin_loop();
    }

    /// Spins for `2^min(rounds, SPIN_LIMIT)` iterations, then advances
    /// the round counter (which saturates once it passes `SPIN_LIMIT`).
    #[inline]
    pub fn spin(&self) {
        for _ in 0..1 << self.rounds.get().min(SPIN_LIMIT) {
            std::hint::spin_loop();
        }

        // Stop counting once the cap is exceeded so neither the counter
        // nor the shift above can grow without bound.
        if self.rounds.get() <= SPIN_LIMIT {
            self.rounds.set(self.rounds.get() + 1);
        }
    }

    /// Backs off adaptively: spins while under the round limit, and
    /// yields the thread to the OS scheduler once spinning is exhausted.
    #[inline]
    pub fn snooze(&self) {
        if self.rounds.get() <= SPIN_LIMIT {
            self.spin();
        } else {
            std::thread::yield_now();
        }
    }
}

impl Default for Backoff {
    /// Equivalent to [`Backoff::new`] (clippy: `new_without_default`).
    #[inline]
    fn default() -> Self {
        Backoff::new()
    }
}
55
/// Pads and aligns `T` to (at least) a cache line so that two adjacent
/// `CachePadded` values never share a line, avoiding false sharing
/// between threads that write to neighboring data.
///
/// x86_64 and aarch64 use 128-byte alignment (these targets commonly
/// fetch cache lines in adjacent pairs); all other targets use 64 bytes.
/// NOTE(review): alignment table is coarser than crossbeam's per-arch
/// list — extend per-target if profiling warrants it.
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash)]
pub struct CachePadded<T>(T);

impl<T> CachePadded<T> {
    /// Wraps `t` in a cache-line-padded cell.
    pub const fn new(t: T) -> CachePadded<T> {
        CachePadded(t)
    }

    /// Consumes the wrapper and returns the inner value.
    pub fn into_inner(self) -> T {
        self.0
    }
}

impl<T> From<T> for CachePadded<T> {
    /// Converts `t` into a padded cell; equivalent to [`CachePadded::new`].
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    /// Borrows the inner value, so `CachePadded<T>` is used like `T`.
    fn deref(&self) -> &T {
        &self.0
    }
}

impl<T> DerefMut for CachePadded<T> {
    /// Mutably borrows the inner value.
    fn deref_mut(&mut self) -> &mut T {
        &mut self.0
    }
}