pi_async_rt/lock/spin_lock.rs

use std::sync::Arc;
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};

use super::spin;

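// For reference, a plausible shape for `spin` (an assumption; the real
// helper lives in the parent module and is not shown in this file): it
// pauses for `len` iterations and returns a grown backoff length.
//
//     pub fn spin(len: u32) -> u32 {
//         for _ in 0..len {
//             std::hint::spin_loop();
//         }
//         len.saturating_mul(2).min(1 << 10) // capped exponential backoff
//     }
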
/// RAII guard returned by `SpinLock::lock`; the lock is released when
/// the guard is dropped.
pub struct SpinLockGuard<T> {
    guarder: Arc<InnerSpinLock<T>>,
}

impl<T> Deref for SpinLockGuard<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // Safety: a live guard means this thread holds the lock, so no
        // other reference to the cell's contents can exist.
        unsafe {
            &*self.guarder.inner.get()
        }
    }
}

impl<T> DerefMut for SpinLockGuard<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Safety: as above, the guard guarantees exclusive access.
        unsafe {
            &mut *self.guarder.inner.get()
        }
    }
}

impl<T> Drop for SpinLockGuard<T> {
    fn drop(&mut self) {
        // Unlock: the Release store publishes all writes made while the
        // lock was held to the next thread that acquires it.
        self.guarder.status.store(false, Ordering::Release);
    }
}

/// A test-and-set spin lock with exponential backoff.
pub struct SpinLock<T> {
    inner: Arc<InnerSpinLock<T>>,
}

// The lock hands out `&mut T` across threads, so these impls are only
// sound when `T` itself is `Send`.
unsafe impl<T: Send> Send for SpinLock<T> {}
unsafe impl<T: Send> Sync for SpinLock<T> {}

impl<T> SpinLock<T> {
    /// Creates a new spin lock around `v`, initially unlocked.
    pub fn new(v: T) -> Self {
        let inner = Arc::new(InnerSpinLock {
            status: AtomicBool::new(false),
            inner: UnsafeCell::new(v),
        });

        SpinLock {
            inner,
        }
    }

    /// Acquires the lock, spinning with backoff until the flag is won.
    ///
    /// `compare_exchange_weak` may fail spuriously but typically compiles
    /// to cheaper code inside a retry loop, which this is.
    #[cfg(not(target_arch = "aarch64"))]
    pub fn lock(&self) -> SpinLockGuard<T> {
        let mut spin_len = 1;
        loop {
            match self.inner.status.compare_exchange_weak(false,
                                                          true,
                                                          Ordering::Acquire,
                                                          Ordering::Acquire) {
                Err(_) => {
                    // Lock is held by someone else; back off, then retry.
                    spin_len = spin(spin_len);
                    continue;
                },
                Ok(_) => {
                    return SpinLockGuard {
                        guarder: self.inner.clone(),
                    };
                },
            }
        }
    }

    /// Acquires the lock, spinning with backoff until the flag is won.
    ///
    /// On aarch64 the strong `compare_exchange` is used here, presumably
    /// to avoid the spurious failures a weak CAS can hit on LL/SC hardware.
    #[cfg(target_arch = "aarch64")]
    pub fn lock(&self) -> SpinLockGuard<T> {
        let mut spin_len = 1;
        loop {
            match self.inner.status.compare_exchange(false,
                                                     true,
                                                     Ordering::Acquire,
                                                     Ordering::Acquire) {
                Err(_) => {
                    // Lock is held by someone else; back off, then retry.
                    spin_len = spin(spin_len);
                    continue;
                },
                Ok(_) => {
                    return SpinLockGuard {
                        guarder: self.inner.clone(),
                    };
                },
            }
        }
    }
}

/// Shared state behind the `Arc`: the lock flag plus the protected value.
struct InnerSpinLock<T> {
    // `true` while some guard holds the lock.
    status: AtomicBool,
    inner: UnsafeCell<T>,
}

// As for `SpinLock`: only sound when the protected value is `Send`.
unsafe impl<T: Send> Send for InnerSpinLock<T> {}
unsafe impl<T: Send> Sync for InnerSpinLock<T> {}
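
// A minimal usage sketch, not part of the original file: several threads
// bump a shared counter through the lock. Assumes this module compiles
// inside pi_async_rt with the parent module's `spin` helper available.
#[cfg(test)]
mod tests {
    use super::SpinLock;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn concurrent_increments() {
        let lock = Arc::new(SpinLock::new(0u64));
        let mut handles = Vec::new();
        for _ in 0..4 {
            let lock = lock.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..1_000 {
                    // The guard derefs to the counter and releases the
                    // lock when it drops at the end of this statement.
                    *lock.lock() += 1;
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }
        // Every increment happened under the lock, so none were lost.
        assert_eq!(*lock.lock(), 4_000);
    }
}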