bastion_executor/worker.rs

//!
//! SMP parallelism based cache-affine worker implementation
//!
//! This worker implementation relies on worker run queue statistics which are held in pinned global memory,
//! where the workload distribution is calculated and amended to the workers' own local queues.

use crate::pool;

use lightproc::prelude::*;
use std::cell::Cell;
use std::ptr;
use std::time::Duration;

/// The timeout we'll use when parking before another steal attempt.
pub const THREAD_PARK_TIMEOUT: Duration = Duration::from_millis(1);

///
/// Get the current process's stack
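///
/// # Example
///
/// A minimal sketch; it assumes this module is reachable as
/// `bastion_executor::worker`, and the call panics when used outside of a
/// running proc.
///
/// ```ignore
/// let stack: ProcStack = bastion_executor::worker::current();
/// ```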
pub fn current() -> ProcStack {
    get_proc_stack(|proc| proc.clone())
        .expect("`proc::current()` called outside the context of the proc")
}

thread_local! {
    // Pointer to the `ProcStack` of the proc currently being polled on this
    // thread; null whenever no proc is running.
    static STACK: Cell<*const ProcStack> = Cell::new(ptr::null());
}

///
/// Set the current process's stack during the run of the future.
pub(crate) fn set_stack<F, R>(stack: *const ProcStack, f: F) -> R
where
    F: FnOnce() -> R,
{
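    // Guard that resets the thread-local pointer back to null when it is
    // dropped, so the pointer never outlives `f`, even if `f` panics.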
    struct ResetStack<'a>(&'a Cell<*const ProcStack>);

    impl Drop for ResetStack<'_> {
        fn drop(&mut self) {
            self.0.set(ptr::null());
        }
    }

    STACK.with(|st| {
        st.set(stack);
        let _guard = ResetStack(st);

        f()
    })
}

///
/// Run `f` against the `ProcStack` of the proc currently running on this
/// thread, if one has been set with `set_stack`.
pub(crate) fn get_proc_stack<F, R>(f: F) -> Option<R>
where
    F: FnOnce(&ProcStack) -> R,
{
    // Safety: the pointer is non-null only while `set_stack` is running, and
    // it is expected to point to a `ProcStack` that outlives that call.
    let res = STACK.try_with(|st| unsafe { st.get().as_ref().map(f) });

    match res {
        Ok(Some(val)) => Some(val),
        Ok(None) | Err(_) => None,
    }
}

///
/// Forward a proc to the global pool's scheduler.
pub(crate) fn schedule(proc: LightProc) {
    pool::schedule(proc)
}
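
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch of the set_stack/get_proc_stack round trip. It assumes
    // `ProcStack::default()` is available through the lightproc prelude;
    // adjust the construction otherwise.
    #[test]
    fn stack_is_only_visible_inside_set_stack() {
        let stack = ProcStack::default();

        // Outside of `set_stack` no stack is set for this thread.
        assert!(get_proc_stack(|_| ()).is_none());

        // Inside the closure passed to `set_stack` the stack is visible.
        let seen = set_stack(&stack as *const ProcStack, || get_proc_stack(|_| true));
        assert_eq!(seen, Some(true));

        // The drop guard clears the pointer once the closure returns.
        assert!(get_proc_stack(|_| ()).is_none());
    }
}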