use core::{
    cell::UnsafeCell,
    mem::{replace, ManuallyDrop, MaybeUninit},
    sync::atomic::{AtomicUsize, Ordering},
};

use alloc::vec::Vec;

#[cfg(feature = "nightly")]
use alloc::alloc::{Allocator, Global};
#[cfg(not(feature = "nightly"))]
use allocator_api2::alloc::{Allocator, Global};

use parking_lot::RwLock;

use crate::local::BlinkAlloc;

/// Shared state of the cache, kept behind the `RwLock` in `BlinkAllocCache`.
struct Inner<A: Allocator> {
    /// Cached allocators ready to be handed out.
    /// Entries below `next_pop` (capped at the length) have already been taken.
    pop_array: Vec<UnsafeCell<ManuallyDrop<BlinkAlloc<A>>>>,

    /// Index of the next entry to take from `pop_array`.
    next_pop: AtomicUsize,

    /// Slots for allocators returned to the cache.
    /// Entries below `next_push` (capped at the length) are initialized.
    push_array: Vec<UnsafeCell<MaybeUninit<BlinkAlloc<A>>>>,

    /// Index of the next free slot in `push_array`.
    next_push: AtomicUsize,
}

// SAFETY: slots behind the `UnsafeCell`s are only accessed through indices
// claimed atomically while the shared lock is held, or exclusively through
// the write lock, so sharing `Inner` across threads only additionally
// requires that `BlinkAlloc<A>` can be sent between threads.
unsafe impl<A> Sync for Inner<A>
where
    A: Allocator,
    BlinkAlloc<A>: Send,
{
}
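
/// Concurrent cache of [`BlinkAlloc`] allocators that lets threads return
/// allocators with `push` and reuse them with `pop`.
///
/// The snippet below is an illustrative usage sketch (not a compiled
/// doctest); it assumes only the methods defined here and `BlinkAlloc::new()`
/// from this crate's `local` module.
///
/// ```ignore
/// let cache = BlinkAllocCache::new();
///
/// // Return an allocator to the cache so it can be reused later.
/// cache.push(BlinkAlloc::new());
///
/// // Reuse a cached allocator, or create a fresh one if the cache is empty.
/// let blink = cache.pop().unwrap_or_else(BlinkAlloc::new);
/// cache.push(blink);
/// ```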
pub struct BlinkAllocCache<A: Allocator = Global> {
    inner: RwLock<Inner<A>>,
}

impl<A> Default for BlinkAllocCache<A>
where
    A: Allocator,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<A> BlinkAllocCache<A>
where
    A: Allocator,
{
    /// Creates a new empty cache.
    pub const fn new() -> Self {
        BlinkAllocCache {
            inner: RwLock::new(Inner {
                pop_array: Vec::new(),
                next_pop: AtomicUsize::new(0),
                push_array: Vec::new(),
                next_push: AtomicUsize::new(0),
            }),
        }
    }
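
    /// Takes a cached allocator out of the cache, if any is available.
    ///
    /// Fast path: under the shared lock, a unique index into `pop_array` is
    /// claimed with `fetch_add` and the entry at that index is moved out.
    /// If that fails but allocators have been pushed, the slow path takes the
    /// exclusive lock, flushes pushed entries into `pop_array`, and pops one.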
    pub fn pop(&self) -> Option<BlinkAlloc<A>> {
        // Acquire the shared lock.
        let inner = self.inner.read();

        if !inner.pop_array.is_empty() {
            // Claim a unique index into the pop array.
            let idx = inner.next_pop.fetch_add(1, Ordering::Acquire);

            if idx < inner.pop_array.len() {
                // Safety: the index was claimed by this thread alone while the
                // shared lock is held, so no other thread touches this slot.
                let blink = unsafe { ManuallyDrop::take(&mut *inner.pop_array[idx].get()) };

                return Some(blink);
            }

            prevent_overflow(&inner.next_pop, idx, inner.pop_array.len());
        }

        if inner.next_push.load(Ordering::Relaxed) == 0 {
            // Nothing was pushed, so flushing would not produce any entries.
            return None;
        }

        drop(inner);
        // Take the exclusive lock and move pushed allocators into `pop_array`.
        let mut inner = self.inner.write();

        Self::flush(&mut inner);

        inner
            .pop_array
            .pop()
            .map(|cell| ManuallyDrop::into_inner(cell.into_inner()))
    }
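
    /// Returns an allocator to the cache for later reuse.
    ///
    /// Fast path: under the shared lock, a unique slot in `push_array` is
    /// claimed with `fetch_add` and the allocator is written into it.
    /// Otherwise the slow path takes the exclusive lock, flushes pending
    /// pushes, and appends the allocator to `pop_array` directly.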
    pub fn push(&self, blink: BlinkAlloc<A>) {
        // Acquire the shared lock.
        let inner = self.inner.read();

        if !inner.push_array.is_empty() {
            // Claim a unique slot in the push array.
            let idx = inner.next_push.fetch_add(1, Ordering::Acquire);

            if idx < inner.push_array.len() {
                // Safety: the slot was claimed by this thread alone while the
                // shared lock is held, so no other thread touches it.
                MaybeUninit::write(unsafe { &mut *inner.push_array[idx].get() }, blink);
                return;
            }

            prevent_overflow(&inner.next_push, idx, inner.push_array.len());
        }

        drop(inner);
        // Take the exclusive lock and store the allocator directly.
        let mut inner = self.inner.write();

        Self::flush(&mut inner);

        inner
            .pop_array
            .push(UnsafeCell::new(ManuallyDrop::new(blink)));
    }
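
    /// Moves pushed allocators from `push_array` into `pop_array` and resets
    /// both counters. Exclusive access through `&mut Inner` means no reader
    /// is racing with this reshuffle.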
    fn flush(inner: &mut Inner<A>) {
        // Number of initialized entries in `push_array` and of already-taken
        // entries in `pop_array`, capped to the array lengths in case the
        // counters ran past the end.
        let pushed = replace(inner.next_push.get_mut(), 0).min(inner.push_array.len());
        let popped = replace(inner.next_pop.get_mut(), 0).min(inner.pop_array.len());

        let pushed_iter = inner.push_array.drain(..pushed);

        // Replace the consumed prefix of `pop_array` with the freshly pushed
        // allocators.
        inner.pop_array.splice(
            ..popped,
            pushed_iter.map(|cell| {
                // Safety: slots below the recorded `pushed` count were
                // initialized by `push`.
                UnsafeCell::new(ManuallyDrop::new(unsafe {
                    cell.into_inner().assume_init()
                }))
            }),
        );
    }
}
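
/// Clamps the shared counter back to `upper` (the array length) once it has
/// been bumped all the way to `isize::MAX`, so that repeated failed fast-path
/// attempts cannot overflow it.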
fn prevent_overflow(atomic: &AtomicUsize, current: usize, upper: usize) {
    #[cold]
    fn cold_store(atomic: &AtomicUsize, upper: usize) {
        atomic.store(upper, Ordering::Relaxed);
    }

    if current >= isize::MAX as usize {
        cold_store(atomic, upper);
    }
}