//!
//! Pool of threads to run heavy processes
//!
//! We spawn futures onto the pool with the [`spawn_blocking`] method of the global run queue,
//! or with the corresponding [`Worker`]'s spawn method.
//!
//! [`Worker`]: crate::run_queue::Worker

use crate::thread_manager::{DynamicPoolManager, DynamicRunner};
use crossbeam_channel::{unbounded, Receiver, Sender};
use lazy_static::lazy_static;
use lightproc::lightproc::LightProc;
use lightproc::proc_stack::ProcStack;
use lightproc::recoverable_handle::RecoverableHandle;
use once_cell::sync::{Lazy, OnceCell};
use std::future::Future;
use std::iter::Iterator;
use std::sync::Arc;
use std::time::Duration;
use std::{env, thread};
use tracing::trace;

/// If low watermark isn't configured this is the default scaler value.
/// This value is used for the heuristics of the scaler
const DEFAULT_LOW_WATERMARK: u64 = 2;

/// How long a pool thread blocks on the task channel before giving up,
/// after which it parks (static/dynamic threads) or exits (standalone threads).
const THREAD_RECV_TIMEOUT: Duration = Duration::from_millis(100);

/// Spawns a blocking task.
///
/// The task will be spawned onto a thread pool specifically dedicated to blocking tasks.
pub fn spawn_blocking<F, R>(future: F, stack: ProcStack) -> RecoverableHandle<R>
where
    F: Future<Output = R> + Send + 'static,
    R: Send + 'static,
{
    // Wrap the future in a recoverable lightproc whose re-schedules are routed
    // through this pool's `schedule`, enqueue it once, and return the handle
    // the caller can await for the result.
    let (proc, recoverable) = LightProc::recoverable(future, schedule, stack);
    proc.schedule();
    recoverable
}

/// Runner executed by every pool thread: pulls tasks off the shared channel
/// and runs them (delegating to tokio's blocking pool when that feature is on).
struct BlockingRunner {
    // We keep a handle to the tokio runtime here to make sure
    // it will never be dropped while the DynamicPoolManager is alive,
    // in case we need to spin up some threads.
    #[cfg(feature = "tokio-runtime")]
    runtime_handle: tokio::runtime::Handle,
}

impl DynamicRunner for BlockingRunner {
    /// Loop forever on a permanent (static) pool thread: drain the task
    /// channel, then park for `park_timeout` before checking again.
    fn run_static(&self, park_timeout: Duration) -> ! {
        loop {
            loop {
                match POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) {
                    Ok(proc) => {
                        trace!("static thread: running task");
                        self.run(proc);
                    }
                    // Timed out (or channel closed): stop draining and park.
                    Err(_) => break,
                }
            }

            trace!("static: empty queue, parking with timeout");
            thread::park_timeout(park_timeout);
        }
    }

    /// Loop forever on a scaler-managed (dynamic) thread: drain the task
    /// channel, then hand control to the supplied `parker` closure.
    fn run_dynamic(&self, parker: &dyn Fn()) -> ! {
        loop {
            loop {
                match POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) {
                    Ok(proc) => {
                        trace!("dynamic thread: running task");
                        self.run(proc);
                    }
                    // Timed out (or channel closed): stop draining and park.
                    Err(_) => break,
                }
            }
            trace!(
                "dynamic thread: parking - {:?}",
                std::thread::current().id()
            );
            parker();
        }
    }

    /// Run on a one-shot (standalone) thread: process tasks until the channel
    /// stays empty for `THREAD_RECV_TIMEOUT`, then let the thread exit.
    fn run_standalone(&self) {
        loop {
            match POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) {
                Ok(proc) => self.run(proc),
                Err(_) => break,
            }
        }
        trace!("standalone thread: quitting.");
    }
}

impl BlockingRunner {
    /// Executes a single task: on the `tokio-runtime` feature it is handed to
    /// tokio's dedicated blocking pool, otherwise it runs inline on the
    /// current pool thread.
    fn run(&self, task: LightProc) {
        #[cfg(feature = "tokio-runtime")]
        {
            // The stored handle guarantees the runtime outlives this spawn.
            self.runtime_handle.spawn_blocking(|| task.run());
        }
        #[cfg(not(feature = "tokio-runtime"))]
        {
            task.run();
        }
    }
}

/// Pool interface between the scheduler and thread pool
struct Pool {
    // Producer side: `schedule` pushes incoming tasks here.
    sender: Sender<LightProc>,
    // Consumer side: pool threads pull tasks from here.
    receiver: Receiver<LightProc>,
}

// Scaler that tracks task-arrival statistics and grows/shrinks the thread set.
// Set exactly once from the `POOL` initializer below.
static DYNAMIC_POOL_MANAGER: OnceCell<DynamicPoolManager> = OnceCell::new();

// Global task channel, built lazily on first use. Initializing it also
// creates and starts the dynamic pool manager as a side effect, so the
// manager is guaranteed to exist whenever the channel does.
static POOL: Lazy<Pool> = Lazy::new(|| {
    #[cfg(feature = "tokio-runtime")]
    {
        let runner = Arc::new(BlockingRunner {
            // We use current() here instead of try_current()
            // because we want bastion to crash as soon as possible
            // if there is no available runtime.
            runtime_handle: tokio::runtime::Handle::current(),
        });

        DYNAMIC_POOL_MANAGER
            .set(DynamicPoolManager::new(*low_watermark() as usize, runner))
            .expect("couldn't create dynamic pool manager");
    }
    #[cfg(not(feature = "tokio-runtime"))]
    {
        let runner = Arc::new(BlockingRunner {});

        DYNAMIC_POOL_MANAGER
            .set(DynamicPoolManager::new(*low_watermark() as usize, runner))
            .expect("couldn't create dynamic pool manager");
    }

    // Spawn the initial (low-watermark) thread set before any task arrives.
    DYNAMIC_POOL_MANAGER
        .get()
        .expect("couldn't get static pool manager")
        .initialize();

    let (sender, receiver) = unbounded();
    Pool { sender, receiver }
});

/// Enqueues work onto the blocking pool.
///
/// First attempts a non-blocking send to the thread pool's channel and falls
/// back to a blocking send if that fails; each submission is also counted so
/// the scaler can spin up threads based on recent arrival statistics rather
/// than on whether a thread is currently ready to accept the work.
fn schedule(t: LightProc) {
    match POOL.sender.try_send(t) {
        Ok(()) => {}
        Err(rejected) => {
            // The non-blocking path failed; recover the task from the error
            // and push it with a (potentially) blocking send instead.
            POOL.sender.send(rejected.into_inner()).unwrap();
        }
    }

    // Add up for every incoming scheduled task
    DYNAMIC_POOL_MANAGER.get().unwrap().increment_frequency();
}

///
/// Low watermark value, defines the bare minimum of the pool.
/// Spawns initial thread set.
/// Can be configurable with env var `BASTION_BLOCKING_THREADS` at runtime.
#[inline]
fn low_watermark() -> &'static u64 {
    lazy_static! {
        static ref LOW_WATERMARK: u64 = {
            env::var_os("BASTION_BLOCKING_THREADS")
                .map(|x| x.to_str().unwrap().parse::<u64>().unwrap())
                .unwrap_or(DEFAULT_LOW_WATERMARK)
        };
    }

    &*LOW_WATERMARK
}