use std::{
sync::{
mpsc::{channel, Sender},
LazyLock, Mutex,
},
thread::{self, spawn},
};
/// Stack of parked worker threads. Each entry is the `Sender` half of a
/// worker's private job channel: `execute` pops one to dispatch work, and a
/// worker pushes its sender back right before blocking on the next job, so a
/// sender is only ever in this list while its worker is waiting in `recv`.
static IDLE_THREADS: LazyLock<Mutex<Vec<Sender<Box<dyn FnOnce() + Send + 'static>>>>> =
LazyLock::new(|| Mutex::new(Vec::new()));
/// Logical CPU count; used as the cap on how many idle workers are kept alive.
static NUM_CPUS: LazyLock<usize> = LazyLock::new(|| thread::available_parallelism().unwrap().get());
/// Priority applied to newly spawned workers when set via `set_thread_priority`.
/// Only compiled on the platforms where the `thread_priority` hook is wired up.
#[cfg(any(target_os = "windows", target_os = "linux"))]
static THREAD_PRIORITY: Mutex<Option<thread_priority::ThreadPriority>> = Mutex::new(None);
/// Returns how many worker threads are currently parked waiting for work.
#[allow(dead_code)]
pub fn get_idle_threads() -> usize {
    let idle = IDLE_THREADS.lock().unwrap();
    idle.len()
}
/// Records the priority that every subsequently spawned worker thread will
/// adopt. Workers that are already running are not affected.
#[cfg(any(target_os = "windows", target_os = "linux"))]
#[allow(dead_code)]
pub fn set_thread_priority(priority: thread_priority::ThreadPriority) {
    let mut slot = THREAD_PRIORITY.lock().unwrap();
    *slot = Some(priority);
}
/// Runs `f` on a worker thread, reusing a parked worker when one is
/// available and spawning a fresh thread otherwise.
///
/// After finishing a job, a worker parks itself in `IDLE_THREADS` until the
/// pool already holds `NUM_CPUS` idle workers, at which point it exits.
/// Total thread count is unbounded under burst load; only the *idle* set is
/// capped.
pub fn execute<F>(f: F)
where
    F: FnOnce() + Send + 'static,
{
    if let Some(sender) = IDLE_THREADS.lock().unwrap().pop() {
        // A sender is only in the list while its worker is blocked in
        // `recv`, and the worker keeps its own `tx_schedule` alive, so the
        // channel is never disconnected here and this send cannot fail.
        sender.send(Box::new(f)).unwrap();
    } else {
        let (tx_schedule, rx_schedule) = channel();
        // Snapshot the configured priority before spawning so the worker
        // does not need to touch the global again.
        #[cfg(any(target_os = "windows", target_os = "linux"))]
        let thread_priority = THREAD_PRIORITY.lock().unwrap().clone();
        spawn(move || {
            #[cfg(any(target_os = "windows", target_os = "linux"))]
            if let Some(priority) = thread_priority {
                thread_priority::set_current_thread_priority(priority).unwrap();
            }
            // Run the job we were spawned for, then become a reusable worker.
            f();
            loop {
                if let Ok(mut idle) = IDLE_THREADS.lock() {
                    // Cap the idle set at NUM_CPUS workers. `>=` (not `>`)
                    // so we never park an (NUM_CPUS + 1)-th idle thread.
                    if idle.len() >= *NUM_CPUS {
                        break;
                    }
                    idle.push(tx_schedule.clone());
                } else {
                    // Mutex poisoned by a panicking thread: exit quietly
                    // instead of propagating the panic.
                    break;
                }
                // Block until `execute` hands us another job. `recv` only
                // errors when every sender is dropped, which cannot happen
                // while we own `tx_schedule`; exiting is the safe fallback.
                if let Ok(job) = rx_schedule.recv() {
                    job();
                } else {
                    break;
                }
            }
        });
    }
}
/// Smoke test: schedule 100 increments through the pool and wait until the
/// shared counter shows that every job has run.
#[test]
fn test_threadpool() {
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::Arc;
    let counter = Arc::new(AtomicU32::new(0));
    for _ in 0..100usize {
        let c = Arc::clone(&counter);
        execute(move || {
            c.fetch_add(1, Ordering::AcqRel);
        });
    }
    // Spin until all jobs have completed; yield so workers get CPU time.
    while counter.load(Ordering::Acquire) < 100 {
        thread::yield_now();
    }
    println!("Idle threads: {}", get_idle_threads());
}