#![cfg_attr(docsrs, feature(doc_cfg))]
use std::{
cell::UnsafeCell,
ptr,
sync::atomic::{AtomicBool, AtomicPtr, AtomicU8, AtomicUsize, Ordering},
time::Duration,
};
use compio::time;
use defer_lite::defer;
pub use iter::CpuLoadIter;
use sysinfo::{CpuRefreshKind, RefreshKind, System};
mod iter;
// Delay before the sampler's first refresh; presumably gives sysinfo a
// baseline refresh to diff usage against — TODO confirm against sysinfo's
// minimum CPU update interval.
const INIT_DELAY_MS: u64 = 100;
// Sampling period used by `CpuLoad::new` (and therefore `Default`).
const DEFAULT_INTERVAL: Duration = Duration::from_secs(1);
/// State shared between a `CpuLoad` handle and its background sampler task.
/// Heap-allocated in `init`; ownership of the allocation belongs to the
/// sampler task, which frees it on exit.
struct Inner {
    // Whole-system usage, clamped to 0..=100 percent by `sample`.
    global: AtomicU8,
    // Per-core usage, clamped to 0..=100 percent; length fixed at init.
    cores: Box<[AtomicU8]>,
    // Core indices, periodically re-sorted ascending by load in `idlest`.
    rank: Box<[UnsafeCell<usize>]>,
    // Round-robin position into `rank`; also counts calls since last sort.
    cursor: AtomicUsize,
    // True while one `idlest` caller is sorting `rank`.
    sorting: AtomicBool,
    // Raised by `Drop` to ask the sampler task to exit and free this box.
    stop: AtomicBool,
}
// SAFETY: every field except `rank` is an atomic. Access to the `rank`
// cells is intended to be serialized by the `sorting` flag in `idlest`.
// NOTE(review): a reader that passes the `sorting` spin check before a
// sorter wins the CAS can still overlap the sort — confirm that race is
// acceptable before relying on this `Sync` impl.
unsafe impl Sync for Inner {}
/// Handle to a background CPU-load sampler; accessors read the latest
/// published snapshot without blocking. Dropping the handle detaches the
/// shared state and signals the sampler to stop.
pub struct CpuLoad {
    // Shared state; swapped to null by `Drop`, freed by the sampler task.
    ptr: AtomicPtr<Inner>,
}
impl CpuLoad {
    /// Refreshes `sys` and publishes the global and per-core usage,
    /// clamped into `0..=100` and stored as a whole percent, into `inner`.
    fn sample(sys: &mut System, inner: &Inner) {
        sys.refresh_cpu_all();
        let g = sys.global_cpu_usage().clamp(0.0, 100.0) as u8;
        inner.global.store(g, Ordering::Relaxed);
        // `zip` bounds the walk by BOTH slices: if sysinfo ever reports more
        // CPUs than were present at init (e.g. hotplug), extras are ignored
        // instead of the previous `get_unchecked(i)` indexing `cores` out of
        // bounds. This also removes an unsafe block at zero cost.
        for (cpu, slot) in sys.cpus().iter().zip(inner.cores.iter()) {
            let usage = cpu.cpu_usage().clamp(0.0, 100.0) as u8;
            slot.store(usage, Ordering::Relaxed);
        }
    }
    /// Shared state, or `None` after `Drop` has detached it.
    #[inline]
    fn inner(&self) -> Option<&Inner> {
        let p = self.ptr.load(Ordering::Acquire);
        if p.is_null() {
            None
        } else {
            // SAFETY: the allocation is freed only by the sampler task, and
            // only after `Drop` has both nulled `self.ptr` and raised
            // `stop`; a non-null load here therefore refers to live memory
            // (assuming the runtime has not cancelled the sampler task while
            // this handle is still alive — TODO confirm that invariant).
            Some(unsafe { &*p })
        }
    }
    /// Creates a monitor sampling every [`DEFAULT_INTERVAL`].
    #[inline]
    pub fn new() -> Self {
        Self::init(DEFAULT_INTERVAL)
    }
    /// Creates a monitor whose background task samples every `interval`.
    ///
    /// The spawned task owns the `Inner` allocation: its `defer!` guard
    /// frees the box when the task exits, either because `Drop` raised
    /// `stop` or because the runtime cancelled the task.
    pub fn init(interval: Duration) -> Self {
        let mut sys =
            System::new_with_specifics(RefreshKind::nothing().with_cpu(CpuRefreshKind::everything()));
        sys.refresh_cpu_all();
        // At least one slot so `idlest` always has something to hand out.
        let n = sys.cpus().len().max(1);
        let cores: Box<[AtomicU8]> = (0..n).map(|_| AtomicU8::new(0)).collect();
        let rank: Box<[UnsafeCell<usize>]> = (0..n).map(UnsafeCell::new).collect();
        let inner = Box::new(Inner {
            global: AtomicU8::new(0),
            cores,
            rank,
            cursor: AtomicUsize::new(0),
            sorting: AtomicBool::new(false),
            stop: AtomicBool::new(false),
        });
        let ptr = Box::into_raw(inner);
        let inst = Self {
            ptr: AtomicPtr::new(ptr),
        };
        compio::runtime::spawn(async move {
            defer! {
                // SAFETY: `ptr` came from `Box::into_raw` above and is freed
                // exactly once, here, when the task exits or is cancelled.
                unsafe { drop(Box::from_raw(ptr)) };
            }
            // Let sysinfo take a baseline before the first published sample
            // (presumably required for meaningful usage deltas — TODO
            // confirm against sysinfo's minimum CPU update interval).
            time::sleep(Duration::from_millis(INIT_DELAY_MS)).await;
            loop {
                // SAFETY: `ptr` stays valid until this task's defer frees it.
                let inner = unsafe { &*ptr };
                if inner.stop.load(Ordering::Acquire) {
                    break;
                }
                Self::sample(&mut sys, inner);
                time::sleep(interval).await;
            }
        })
        .detach();
        inst
    }
    /// Returns the index of a lightly-loaded core, cycling through cores in
    /// ascending-load order and re-sorting that order once per full cycle.
    ///
    /// Returns `0` when the monitor has been dropped or tracks no cores.
    pub fn idlest(&self) -> usize {
        let Some(inner) = self.inner() else {
            return 0;
        };
        let n = inner.rank.len();
        if n == 0 {
            return 0;
        }
        let cur = inner.cursor.fetch_add(1, Ordering::Relaxed);
        // After a full cycle, exactly one caller wins the CAS and re-sorts
        // `rank` by the current per-core load.
        if cur >= n
            && inner
                .sorting
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        {
            // SAFETY: `rank` is a contiguous slice of `UnsafeCell<usize>`,
            // layout-compatible with `usize`; mutation is serialized by the
            // `sorting` flag. NOTE(review): a reader that passed the spin
            // check below before this CAS may still read concurrently with
            // the sort — confirm that ordering skew is acceptable here.
            let rank_slice =
                unsafe { std::slice::from_raw_parts_mut(inner.rank.as_ptr() as *mut usize, n) };
            // SAFETY (inner get_unchecked): `rank` holds a permutation of
            // `0..n` (seeded in `init`, only reordered here) and
            // `cores.len() == n`.
            rank_slice
                .sort_unstable_by_key(|&i| unsafe { inner.cores.get_unchecked(i) }.load(Ordering::Relaxed));
            // Reset to 1: this call consumes logical slot 0 of the new order.
            inner.cursor.store(1, Ordering::Relaxed);
            inner.sorting.store(false, Ordering::Release);
            return rank_slice[0];
        }
        // Wait out an in-flight sort before reading the ranking.
        while inner.sorting.load(Ordering::Acquire) {
            std::hint::spin_loop();
        }
        // SAFETY: no sort is in flight (checked above) and `cur % n < n`.
        unsafe { *inner.rank.get_unchecked(cur % n).get() }
    }
    /// Latest whole-system usage in percent (`0..=100`); `0` after drop.
    #[inline]
    pub fn global(&self) -> u8 {
        self.inner().map_or(0, |i| i.global.load(Ordering::Relaxed))
    }
    /// Latest usage of core `idx` in percent, or `None` when `idx` is out
    /// of range or the monitor has been dropped.
    #[inline]
    pub fn core(&self, idx: usize) -> Option<u8> {
        self
            .inner()
            .and_then(|i| i.cores.get(idx).map(|v| v.load(Ordering::Relaxed)))
    }
    /// Number of tracked cores (`0` after drop).
    #[inline]
    pub fn len(&self) -> usize {
        self.inner().map_or(0, |i| i.cores.len())
    }
    /// `true` when no cores are tracked (i.e. the monitor was dropped).
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Borrow of the per-core slots, consumed by the iterator adapter.
    #[inline]
    fn cores(&self) -> Option<&[AtomicU8]> {
        self.inner().map(|i| &*i.cores)
    }
}
impl Default for CpuLoad {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl Drop for CpuLoad {
    /// Detaches the shared state and asks the sampler task to shut down.
    ///
    /// The swap nulls `ptr` first so no new `inner()` borrows can be
    /// created, then `stop` is raised; the sampler observes the flag on its
    /// next loop iteration, breaks, and frees the allocation via `defer!`.
    ///
    /// NOTE(review): if the compio runtime drops/cancels the sampler task
    /// (which frees the box via its `defer!` guard) before this handle is
    /// dropped, the `stop` store below writes to freed memory — confirm
    /// handles never outlive the runtime.
    fn drop(&mut self) {
        let ptr = self.ptr.swap(ptr::null_mut(), Ordering::AcqRel);
        if !ptr.is_null() {
            unsafe {
                (*ptr).stop.store(true, Ordering::Release);
            }
        }
    }
}
/// Iterating a `&CpuLoad` yields the current usage of each core as a `u8`
/// percentage, in core-index order.
impl<'a> IntoIterator for &'a CpuLoad {
    type Item = u8;
    type IntoIter = CpuLoadIter<'a>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        // `cores()` is `None` once the monitor has been dropped; the
        // iterator adapter handles that case.
        let slots = self.cores();
        CpuLoadIter::new(slots)
    }
}
/// Process-wide monitor, constructed by `static_init::dynamic` with the
/// default one-second sampling interval.
#[static_init::dynamic]
pub static CPU_LOAD: CpuLoad = CpuLoad::new();