use std::{
    alloc::{alloc_zeroed, Layout},
    cell::UnsafeCell,
    future::Future,
    mem::MaybeUninit,
    pin::Pin,
    ptr::{self, addr_of},
    sync::atomic::{AtomicPtr, AtomicU64, Ordering},
    task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};

use const_array_init::const_arr;

use crate::runtime::{Scheduler, RUNTIME};

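// A packed task pointer carries both an arena address and a slot index:
// `TaskArena` is 64-byte aligned, so the low six bits of its address are
// always zero and can hold the slot index (0..=63) instead.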
const IDX: usize = (1 << 6) - 1;
const IDX_MASK: usize = !IDX;
const LAYOUT: Layout = Layout::new::<TaskArena>();

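/// A borrowed view of a single task slot: the arena that owns it plus the
/// slot's index within that arena.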
#[derive(Clone, Copy)]
pub(crate) struct Index<'a> {
    pub(crate) arena: &'a TaskArena,
    idx: usize,
}

impl<'a> Index<'a> {
    #[inline(always)]
    pub(crate) fn new(arena: &'a TaskArena, idx: usize) -> Self {
        Self { arena, idx }
    }

    #[inline(always)]
    pub(crate) fn handle(&self) -> &TaskHandle {
        &self.arena.tasks[self.idx]
    }

    #[inline(always)]
    fn has_task(&self) -> bool {
        (self.arena.occupancy.load(Ordering::Acquire) & (1 << self.idx)).ne(&0)
    }

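    /// Builds a `RawWaker` whose data pointer is this slot's packed pointer.
    /// Waking links woken slots together through `next_enqueued` and swaps
    /// the slot into `RUNTIME.poll_tail`; the scheduler is only asked to
    /// start polling when that wake list was previously empty.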
    #[inline(always)]
    fn raw_waker(&self) -> RawWaker {
        unsafe fn clone(ptr: *const ()) -> RawWaker {
            RawWaker::new(ptr, &VTABLE)
        }

        unsafe fn wake(ptr: *const ()) {
            let slot = Index::from_raw(ptr as *mut ());

            if slot.has_task() {
                let handle = slot.handle();

                let next = handle.next_enqueued.load(Ordering::Acquire) as *const ();

                if next.is_null() {
                    let tail = RUNTIME.poll_tail.swap(ptr as *mut (), Ordering::AcqRel);

                    if tail.is_null() {
                        Scheduler::schedule_polling(ptr as *mut ());
                    } else if tail.ne(&(ptr as *mut ())) {
                        handle.next_enqueued.store(tail, Ordering::Release);
                    }
                }
            }
        }

        unsafe fn drop(_ptr: *const ()) {}

        static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake, drop);

        RawWaker::new(self.into_raw(), &VTABLE)
    }

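    /// Polls the future stored in this slot. On `Poll::Ready` the boxed
    /// future is dropped and the slot's occupancy bit is released. Returns
    /// (and clears) the pointer to the next enqueued slot so the caller can
    /// keep draining the wake list.
    ///
    /// # Safety
    ///
    /// The slot must currently hold an initialized task.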
    #[inline(always)]
    pub(crate) unsafe fn poll(&self) -> *mut () {
        let handle = self.handle();

        let poll = {
            let task = unsafe { (&mut *handle.task.get()).assume_init_mut() };

            let waker = Waker::from_raw(self.raw_waker());
            let mut cx = Context::from_waker(&waker);

            task.as_mut().poll(&mut cx)
        };

        if poll.eq(&Poll::Ready(())) {
            unsafe { (&mut *handle.task.get()).assume_init_drop() };
            self.release_occupancy();
        }

        handle.next_enqueued.swap(ptr::null_mut(), Ordering::AcqRel)
    }

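    // `from_raw`/`into_raw` convert between an `Index` and the packed
    // pointer described above. The `strict_provenance` variants perform the
    // same bit manipulation through `map_addr` so the pointer's provenance
    // is preserved.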
    #[inline(always)]
    #[cfg(not(feature = "strict_provenance"))]
    pub(crate) unsafe fn from_raw(ptr: *mut ()) -> Self {
        Self {
            arena: &*((ptr as usize & IDX_MASK) as *const _),
            idx: ptr as usize & IDX,
        }
    }

    #[inline(always)]
    #[cfg(feature = "strict_provenance")]
    pub(crate) unsafe fn from_raw(ptr: *mut ()) -> Self {
        Self {
            arena: &*(ptr.map_addr(|addr| addr & IDX_MASK) as *const _),
            idx: ptr as usize & IDX,
        }
    }

    #[inline(always)]
    #[cfg(not(feature = "strict_provenance"))]
    pub(crate) fn into_raw(&self) -> *mut () {
        ((addr_of!(*self.arena) as usize) | self.idx) as *mut ()
    }

    #[inline(always)]
    #[cfg(feature = "strict_provenance")]
    pub(crate) fn into_raw(&self) -> *mut () {
        addr_of!(*self.arena).map_addr(|addr| addr | self.idx) as *mut ()
    }

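    /// Sets this slot's bit in the arena's occupancy bitmap and returns the
    /// resulting bitmap (with the new bit included).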
    #[inline(always)]
    pub(crate) fn set_as_occupied(&self) -> u64 {
        let occupancy_bit = 1 << self.idx;

        self.arena
            .occupancy
            .fetch_or(occupancy_bit, Ordering::AcqRel)
            | occupancy_bit
    }

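    /// Clears this slot's occupancy bit. If the arena was completely full
    /// before this release, it is linked back onto the runtime's free list
    /// so the newly freed slot can be reused.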
    #[inline(always)]
    fn release_occupancy(&self) {
        // Clear this slot's bit; `fetch_and` returns the bitmap as it was
        // before the release.
        let occupancy = self
            .arena
            .occupancy
            .fetch_and(!(1 << self.idx), Ordering::AcqRel);

        if occupancy.eq(&u64::MAX) {
            let ptr = self.into_raw();
            let tail = RUNTIME.free_tail.swap(ptr, Ordering::AcqRel);

            if !tail.is_null() {
                unsafe {
                    Index::from_raw(tail)
                        .arena
                        .next
                        .store(ptr, Ordering::Release);
                }
            } else {
                let next = RUNTIME.next.load(Ordering::Acquire);

                unsafe {
                    Index::from_raw(next)
                        .arena
                        .next
                        .store(ptr, Ordering::Release);
                }
            }
        }
    }

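    /// Given the arena's occupancy bitmap, returns a packed pointer to the
    /// lowest free slot in this arena; if the arena is full, returns the
    /// next arena from its free-list link, allocating a fresh zeroed arena
    /// when no successor exists.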
    #[inline(always)]
    pub(crate) fn next_index(&'a self, occupancy: u64) -> *mut () {
        // Lowest clear bit of the occupancy bitmap, or zero if the arena is full.
        let low_bit = !occupancy & (occupancy.wrapping_add(1));

        if low_bit.ne(&0) {
            Index::new(self.arena, low_bit.trailing_zeros() as usize).into_raw()
        } else {
            let next = self.arena.next.swap(ptr::null_mut(), Ordering::AcqRel);

            if !next.is_null() {
                next
            } else {
                unsafe { alloc_zeroed(LAYOUT) as *mut () }
            }
        }
    }
}

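/// A single task slot: storage for the boxed future plus the intrusive link
/// used by the wake queue.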
pub(crate) struct TaskHandle {
    pub(crate) task: UnsafeCell<MaybeUninit<Pin<Box<dyn Future<Output = ()>>>>>,
    pub(crate) next_enqueued: AtomicPtr<()>,
}

impl TaskHandle {
    pub(crate) const fn new() -> Self {
        TaskHandle {
            task: UnsafeCell::new(MaybeUninit::zeroed()),
            next_enqueued: AtomicPtr::new(ptr::null_mut()),
        }
    }
}

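/// A 64-byte-aligned block of 64 task slots. `occupancy` tracks which slots
/// are in use as a bitmap; `next` links arenas on the runtime's free list.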
#[repr(align(64))]
pub(crate) struct TaskArena {
    occupancy: AtomicU64,
    tasks: [TaskHandle; 64],
    next: AtomicPtr<()>,
}

impl TaskArena {
    pub(crate) const fn new() -> Self {
        TaskArena {
            occupancy: AtomicU64::new(0),
            #[allow(clippy::declare_interior_mutable_const)]
            tasks: const_arr!([TaskHandle; 64], |_| TaskHandle::new()),
            next: AtomicPtr::new(ptr::null_mut()),
        }
    }
}