use std::{
mem,
ops::Deref,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc,
},
thread::JoinHandle,
time::Duration,
};
use crossbeam_channel::{bounded, unbounded, RecvTimeoutError, SendError, Sender};
use thiserror::Error;
pub use global::*;
pub(crate) mod global;
/// Command sent to the worker thread while it is blocked waiting for
/// pre-init mode to end.
enum Blocked {
    /// Exit the worker thread without processing any queued tasks.
    Shutdown,
    /// Unblock and start draining the pre-init queue.
    Continue,
}
/// The commands the worker thread processes from its queue.
enum Command {
    /// Run this closure on the worker thread.
    Task(Box<dyn FnOnce() + Send>),
    /// Swap from the pre-init queue to the main queue, then signal
    /// completion on the provided rendezvous channel.
    Swap(Sender<()>),
    /// Stop the worker loop.
    Shutdown,
}
/// The error returned from operations on the dispatcher.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum DispatchError {
    /// The worker panicked while running a task.
    #[error("The worker panicked while running a task")]
    WorkerPanic,

    /// The pre-init queue was at capacity; the task was dropped.
    #[error("Maximum queue size reached")]
    QueueFull,

    /// `flush_init` (or `kill`) was already called once before.
    #[error("Pre-init buffer was already flushed")]
    AlreadyFlushed,

    /// Sending a command to the worker thread failed.
    #[error("Failed to send command to worker thread")]
    SendError,

    /// Receiving from a channel failed (e.g. the other side disconnected).
    #[error("Failed to receive from channel")]
    RecvError(#[from] crossbeam_channel::RecvError),
}
impl<T> From<SendError<T>> for DispatchError {
fn from(_: SendError<T>) -> Self {
DispatchError::SendError
}
}
/// A cheaply clonable handle for submitting tasks and control commands to
/// the dispatcher's worker thread.
#[derive(Clone)]
struct DispatchGuard {
    // Shared state: all clones talk to the same queues and worker.
    inner: Arc<DispatchGuardInner>,
}
impl Deref for DispatchGuard {
    type Target = DispatchGuardInner;

    /// Hand out a reference to the shared inner state, letting guard
    /// methods read fields such as `self.sender` directly.
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
/// State shared between all `DispatchGuard` clones.
struct DispatchGuardInner {
    /// Whether to queue on the pre-init buffer or send directly to the
    /// worker's main queue.
    queue_preinit: AtomicBool,
    /// Number of tasks dropped because the pre-init queue was full.
    overflow_count: AtomicUsize,
    /// Maximum number of tasks accepted into the pre-init queue
    /// (the channel itself is unbounded; the limit is enforced in `send`).
    max_queue_size: usize,
    /// Used to release the worker thread out of its pre-init wait.
    block_sender: Sender<Blocked>,
    /// Sender for the pre-init queue.
    preinit_sender: Sender<Command>,
    /// Sender for the main, unbounded queue.
    sender: Sender<Command>,
}
impl DispatchGuard {
    /// Submits a task to be executed on the worker thread.
    pub fn launch(&self, task: impl FnOnce() + Send + 'static) -> Result<(), DispatchError> {
        let task = Command::Task(Box::new(task));
        self.send(task)
    }

    /// Shuts down the dispatch queue.
    ///
    /// The pre-init buffer (if still active) is flushed first; a flush error
    /// only means it was already flushed, so it is ignored. The shutdown
    /// command is processed after all previously queued tasks.
    pub fn shutdown(&mut self) -> Result<(), DispatchError> {
        // `Err(AlreadyFlushed)` is the expected case after normal init.
        self.flush_init().ok();
        self.send(Command::Shutdown)
    }

    /// Routes a command to the pre-init queue or the main queue, depending
    /// on the current mode.
    ///
    /// In pre-init mode the queue size is capped at `max_queue_size`;
    /// overflowing tasks are dropped and counted.
    fn send(&self, task: Command) -> Result<(), DispatchError> {
        if self.queue_preinit.load(Ordering::SeqCst) {
            if self.preinit_sender.len() < self.max_queue_size {
                self.preinit_sender.send(task)?;
                Ok(())
            } else {
                // Track drops so `flush_init` can report how many were lost.
                self.overflow_count.fetch_add(1, Ordering::SeqCst);
                Err(DispatchError::QueueFull)
            }
        } else {
            self.sender.send(task)?;
            Ok(())
        }
    }

    /// Blocks until all tasks queued on the main queue before this call
    /// have been executed.
    ///
    /// Implemented by enqueueing a marker task that signals a zero-capacity
    /// rendezvous channel; when the signal arrives, everything ahead of the
    /// marker has run.
    fn block_on_queue(&self) {
        let (tx, rx) = bounded(0);

        let task = Command::Task(Box::new(move || {
            tx.send(())
                .expect("(worker) Can't send message on single-use channel");
        }));

        self.sender
            .send(task)
            .expect("Failed to launch the blocking task");
        rx.recv()
            .expect("Failed to receive message on single-use channel");
    }

    /// Like `block_on_queue`, but gives up after `timeout`.
    ///
    /// Returns an error if the timeout elapses or the marker task's channel
    /// disconnects before signalling.
    fn block_on_queue_timeout(&self, timeout: Duration) -> Result<(), RecvTimeoutError> {
        let (tx, rx) = bounded(0);

        let task = Command::Task(Box::new(move || {
            // The receiver may have timed out and gone away; ignore that.
            _ = tx.send(());
        }));

        self.sender
            .send(task)
            .expect("Failed to launch the blocking task");
        rx.recv_timeout(timeout)
    }

    /// Shuts the worker down while still in pre-init mode, discarding any
    /// queued tasks.
    ///
    /// Returns `AlreadyFlushed` if the pre-init buffer was already flushed
    /// (or killed) before.
    fn kill(&mut self) -> Result<(), DispatchError> {
        // The atomic swap guarantees only the first caller proceeds.
        let old_val = self.queue_preinit.swap(false, Ordering::SeqCst);
        if !old_val {
            return Err(DispatchError::AlreadyFlushed);
        }
        self.block_sender.send(Blocked::Shutdown)?;
        Ok(())
    }

    /// Ends pre-init mode: unblocks the worker, lets it drain the pre-init
    /// queue, then switches it over to the main queue.
    ///
    /// Returns the number of tasks dropped due to pre-init overflow
    /// (0 if none). Returns `AlreadyFlushed` if called more than once.
    fn flush_init(&mut self) -> Result<usize, DispatchError> {
        // The atomic swap guarantees only the first caller proceeds.
        let old_val = self.queue_preinit.swap(false, Ordering::SeqCst);
        if !old_val {
            return Err(DispatchError::AlreadyFlushed);
        }

        // Release the worker from its pre-init wait.
        self.block_sender.send(Blocked::Continue)?;

        // Ask the worker to swap queues and wait for its acknowledgement;
        // once it arrives, every pre-init task has been processed.
        let (swap_sender, swap_receiver) = bounded(0);
        self.preinit_sender
            .send(Command::Swap(swap_sender))
            .map_err(|_| DispatchError::SendError)?;
        swap_receiver.recv()?;

        // Globally dispatched tasks no longer need the pre-init buffer.
        global::QUEUE_TASKS.store(false, Ordering::SeqCst);

        // Simplification: the original `if n > 0 { Ok(n) } else { Ok(0) }`
        // is identical to `Ok(n)`.
        Ok(self.overflow_count.load(Ordering::SeqCst))
    }
}
/// A task dispatcher backed by a single worker thread.
///
/// Tasks are submitted through `DispatchGuard` handles obtained from
/// `guard()`.
pub struct Dispatcher {
    /// Handle used to send tasks and control commands to the worker.
    guard: DispatchGuard,
    /// Join handle of the worker thread; `None` once it has been joined.
    worker: Option<JoinHandle<()>>,
}
impl Dispatcher {
    /// Creates a new dispatcher with a bounded pre-init queue.
    ///
    /// The dispatcher starts in pre-init mode: the spawned worker thread
    /// blocks on `block_receiver` until `flush_init` (sends
    /// `Blocked::Continue`) or `kill` (sends `Blocked::Shutdown`) releases
    /// it. Until then, launched tasks accumulate in the pre-init queue,
    /// capped at `max_queue_size` by `DispatchGuard::send`.
    pub fn new(max_queue_size: usize) -> Self {
        // Capacity 1: the single unblock message never blocks the sender.
        let (block_sender, block_receiver) = bounded(1);
        // Pre-init queue; the channel is unbounded, the size limit is
        // enforced manually on the sending side.
        let (preinit_sender, preinit_receiver) = unbounded();
        // Main queue, switched to after `flush_init`.
        let (sender, mut unbounded_receiver) = unbounded();

        let queue_preinit = AtomicBool::new(true);
        let overflow_count = AtomicUsize::new(0);

        let worker = crate::thread::spawn("glean.dispatcher", move || {
            // Phase 1: wait until we are allowed to run (or told to quit).
            match block_receiver.recv() {
                Err(_) => {
                    // All senders dropped without ever flushing.
                    log::error!("The task producer was disconnected. Worker thread will exit.");
                    return;
                }
                Ok(Blocked::Shutdown) => {
                    return;
                }
                Ok(Blocked::Continue) => {
                    // Fall through to task processing.
                }
            }

            // Phase 2: drain the pre-init queue first; a `Swap` command
            // switches us to the main queue once pre-init is flushed.
            let mut receiver = preinit_receiver;
            loop {
                use Command::*;

                match receiver.recv() {
                    Ok(Shutdown) => {
                        break;
                    }
                    Ok(Task(f)) => {
                        (f)();
                    }
                    Ok(Swap(swap_done)) => {
                        // Exchange queues, then acknowledge so the caller of
                        // `flush_init` knows all pre-init tasks have run.
                        mem::swap(&mut receiver, &mut unbounded_receiver);
                        swap_done
                            .send(())
                            .expect("The caller of `flush_init` has gone missing");
                    }
                    Err(_) => {
                        log::error!("The task producer was disconnected. Worker thread will exit.");
                        return;
                    }
                }
            }
        })
        .expect("Failed to spawn Glean's dispatcher thread");

        let inner = Arc::new(DispatchGuardInner {
            queue_preinit,
            overflow_count,
            max_queue_size,
            block_sender,
            preinit_sender,
            sender,
        });
        let guard = DispatchGuard { inner };

        Dispatcher {
            guard,
            worker: Some(worker),
        }
    }

    /// Returns a cheaply clonable handle for submitting tasks.
    fn guard(&self) -> DispatchGuard {
        self.guard.clone()
    }

    /// Waits for the worker thread to finish (test-only).
    ///
    /// Returns `DispatchError::WorkerPanic` if the worker panicked.
    #[cfg(test)]
    fn join(mut self) -> Result<(), DispatchError> {
        if let Some(worker) = self.worker.take() {
            worker.join().map_err(|_| DispatchError::WorkerPanic)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use std::sync::atomic::AtomicU8;
    use std::sync::Mutex;
    use std::thread;

    /// Initializes test logging; safe to call from every test.
    fn enable_test_logging() {
        // `try_init` tolerates repeated initialization across tests.
        let _ = env_logger::builder().is_test(true).try_init();
    }

    /// Tasks must execute on the worker thread, not the caller's thread.
    #[test]
    fn tasks_run_off_the_main_thread() {
        enable_test_logging();

        let main_thread_id = thread::current().id();
        let thread_canary = Arc::new(AtomicBool::new(false));

        let dispatcher = Dispatcher::new(100);
        // Leave pre-init mode so the task runs immediately.
        dispatcher
            .guard()
            .flush_init()
            .expect("Failed to get out of preinit queue mode");

        let canary_clone = thread_canary.clone();
        dispatcher
            .guard()
            .launch(move || {
                assert!(thread::current().id() != main_thread_id);
                assert!(!canary_clone.load(Ordering::SeqCst));
                canary_clone.store(true, Ordering::SeqCst);
            })
            .expect("Failed to dispatch the test task");

        // Wait until the task above has actually run.
        dispatcher.guard().block_on_queue();
        assert!(thread_canary.load(Ordering::SeqCst));
        assert_eq!(main_thread_id, thread::current().id());
    }

    /// Tasks launched before `flush_init` are buffered, not executed.
    #[test]
    fn launch_correctly_adds_tasks_to_preinit_queue() {
        enable_test_logging();

        let main_thread_id = thread::current().id();
        let thread_canary = Arc::new(AtomicU8::new(0));

        let dispatcher = Dispatcher::new(100);

        for _ in 0..3 {
            let canary_clone = thread_canary.clone();
            dispatcher
                .guard()
                .launch(move || {
                    assert!(thread::current().id() != main_thread_id);
                    canary_clone.fetch_add(1, Ordering::SeqCst);
                })
                .expect("Failed to dispatch the test task");
        }

        // Nothing may run while still in pre-init mode.
        assert_eq!(0, thread_canary.load(Ordering::SeqCst));

        dispatcher
            .guard()
            .flush_init()
            .expect("Failed to get out of preinit queue mode");

        // `flush_init` returns only after the pre-init queue is drained.
        assert_eq!(3, thread_canary.load(Ordering::SeqCst));
    }

    /// Pre-init tasks run before post-flush tasks, preserving submit order.
    #[test]
    fn preinit_tasks_are_processed_after_flush() {
        enable_test_logging();

        let dispatcher = Dispatcher::new(10);

        let result = Arc::new(Mutex::new(vec![]));
        for i in 1..=5 {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    result.lock().unwrap().push(i);
                })
                .unwrap();
        }

        // Pushed directly on the main thread before the flush, so it must
        // appear before all queued task results.
        result.lock().unwrap().push(0);
        dispatcher.guard().flush_init().unwrap();
        for i in 6..=10 {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    result.lock().unwrap().push(i);
                })
                .unwrap();
        }

        dispatcher.guard().block_on_queue();

        assert_eq!(
            &*result.lock().unwrap(),
            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        );
    }

    /// After `shutdown`, launched tasks are ignored by the worker.
    #[test]
    fn tasks_after_shutdown_are_not_processed() {
        enable_test_logging();

        let dispatcher = Dispatcher::new(10);

        let result = Arc::new(Mutex::new(vec![]));

        dispatcher.guard().flush_init().unwrap();

        dispatcher.guard().shutdown().unwrap();
        {
            let result = Arc::clone(&result);
            // This send may succeed, but the worker loop has exited.
            let _ = dispatcher.guard().launch(move || {
                result.lock().unwrap().push(0);
            });
        }

        dispatcher.join().unwrap();
        assert!(result.lock().unwrap().is_empty());
    }

    /// Overflowing the pre-init queue drops tasks with `QueueFull`.
    #[test]
    fn preinit_buffer_fills_up() {
        enable_test_logging();

        let dispatcher = Dispatcher::new(5);

        let result = Arc::new(Mutex::new(vec![]));

        // Fill the pre-init queue to its maximum of 5.
        for i in 1..=5 {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    result.lock().unwrap().push(i);
                })
                .unwrap();
        }

        {
            let result = Arc::clone(&result);
            // The sixth task must be rejected, not queued.
            let err = dispatcher.guard().launch(move || {
                result.lock().unwrap().push(10);
            });
            assert_eq!(Err(DispatchError::QueueFull), err);
        }

        dispatcher.guard().flush_init().unwrap();

        // After the flush the main (unbounded) queue accepts tasks again.
        {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    result.lock().unwrap().push(20);
                })
                .unwrap();
        }

        dispatcher.guard().block_on_queue();

        assert_eq!(&*result.lock().unwrap(), &[1, 2, 3, 4, 5, 20]);
    }

    /// The size cap applies only pre-init; the main queue is unbounded.
    #[test]
    fn normal_queue_is_unbounded() {
        enable_test_logging();

        let dispatcher = Dispatcher::new(5);

        let result = Arc::new(Mutex::new(vec![]));

        for i in 1..=5 {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    result.lock().unwrap().push(i);
                })
                .unwrap();
        }

        dispatcher.guard().flush_init().unwrap();

        // Queue more than 5 tasks; the sleep keeps the worker busy so the
        // queue genuinely grows past the old pre-init limit.
        for i in 6..=20 {
            let result = Arc::clone(&result);
            dispatcher
                .guard()
                .launch(move || {
                    thread::sleep(Duration::from_millis(50));
                    result.lock().unwrap().push(i);
                })
                .unwrap();
        }

        // Shutdown is processed after all 20 tasks.
        dispatcher.guard().shutdown().unwrap();
        dispatcher.join().unwrap();

        let expected = (1..=20).collect::<Vec<_>>();
        assert_eq!(&*result.lock().unwrap(), &expected);
    }
}