use std::any::type_name;
#[cfg(any(test, feature = "test-util"))]
use std::borrow::Borrow;
use std::fmt;
use std::sync::{Arc, OnceLock, RwLock};
use std::thread::ThreadId;
use foldhash::HashMap;
use nonempty::NonEmpty;
#[cfg(any(test, feature = "test-util"))]
use crate::fake::FakePlatform;
#[cfg(any(test, feature = "test-util"))]
use crate::fake::HardwareBuilder;
use crate::pal::{AbstractProcessor, Platform as PlatformTrait, PlatformFacade};
use crate::{
MemoryRegionId, Processor, ProcessorId, ProcessorSet, ProcessorSetBuilder, ResourceQuota,
};
// Process-wide singleton backing `SystemHardware::current()`; initialized
// lazily on first access and never torn down.
static CURRENT_HARDWARE: OnceLock<SystemHardware> = OnceLock::new();
/// Cheaply cloneable handle to the system's hardware topology (processors,
/// memory regions) plus per-thread pin state.
///
/// All clones share the same inner state via `Arc`.
#[derive(Clone)]
pub struct SystemHardware {
    inner: Arc<SystemHardwareInner>,
}
/// Shared state behind a `SystemHardware` handle.
struct SystemHardwareInner {
    // Platform abstraction layer used to query the underlying OS/hardware.
    platform: PlatformFacade,
    // Per-thread pin status, keyed by thread ID. RwLock because pin queries
    // (reads) are far more frequent than pin updates (writes).
    thread_states: RwLock<HashMap<ThreadId, ThreadState>>,
    // Lookup table indexed by processor ID; `None` marks IDs with no
    // corresponding processor (IDs may be sparse).
    all_processors_slice: Box<[Option<Processor>]>,
    max_processor_id: ProcessorId,
    max_memory_region_id: MemoryRegionId,
    // Lazily built, quota-respecting processor set (see `processors()`).
    cached_processors: OnceLock<NonEmpty<Processor>>,
    // Lazily built full processor set (see `all_processors()`).
    cached_all_processors: OnceLock<NonEmpty<Processor>>,
}
/// Pin status of a single thread. Per `update_pin_status()`, a pinned
/// processor implies a pinned memory region; the reverse does not hold.
#[derive(Clone, Default)]
struct ThreadState {
    // Set when the thread is pinned to one specific processor.
    pinned_processor_id: Option<ProcessorId>,
    // Set when the thread is pinned to a memory region (directly or implied
    // by a processor pin).
    pinned_memory_region_id: Option<MemoryRegionId>,
}
impl SystemHardware {
/// Returns the process-wide hardware snapshot for the real target platform.
///
/// Initialized on first call; subsequent calls return the same instance.
#[must_use]
pub fn current() -> &'static Self {
    CURRENT_HARDWARE.get_or_init(|| Self::from_platform(PlatformFacade::target()))
}
/// Creates a `SystemHardware` backed by the fake platform described by
/// `builder`, for tests that need a deterministic hardware configuration.
#[cfg(any(test, feature = "test-util"))]
#[must_use]
pub fn fake(builder: impl Borrow<HardwareBuilder>) -> Self {
    let backend = FakePlatform::from_builder(builder.borrow());
    Self::from_platform(PlatformFacade::from_fake(backend))
}
/// Creates a `SystemHardware` backed by the build-target fallback platform,
/// for tests that must not depend on real OS hardware APIs.
#[cfg(test)]
#[must_use]
pub(crate) fn fallback() -> Self {
    use crate::pal::fallback::BUILD_TARGET_PLATFORM;
    Self::from_platform(PlatformFacade::Fallback(&BUILD_TARGET_PLATFORM))
}
fn from_platform(platform: PlatformFacade) -> Self {
let all_pal_processors = platform.get_all_processors();
let max_processor_id = platform.max_processor_id();
let max_memory_region_id = platform.max_memory_region_id();
let max_processor_count = (max_processor_id as usize)
.checked_add(1)
.expect("unrealistic to have more than an usize worth of processors");
let mut all_processors = vec![None; max_processor_count];
for processor in all_pal_processors {
*all_processors
.get_mut(processor.id() as usize)
.expect("encountered processor with ID above max_processor_id") =
Some(Processor::new(processor));
}
Self {
inner: Arc::new(SystemHardwareInner {
platform,
thread_states: RwLock::new(HashMap::default()),
all_processors_slice: all_processors.into_boxed_slice(),
max_processor_id,
max_memory_region_id,
cached_processors: OnceLock::new(),
cached_all_processors: OnceLock::new(),
}),
}
}
/// Returns the processors available to this process after applying the
/// platform resource quota.
///
/// The underlying set is computed once and cached for the process lifetime;
/// each call returns a fresh `ProcessorSet` view over the cached members.
#[must_use]
pub fn processors(&self) -> ProcessorSet {
    let cached = self.inner.cached_processors.get_or_init(|| {
        ProcessorSetBuilder::with_internals(self.clone())
            .enforce_resource_quota()
            .take_all()
            .expect(
                "there is always at least one processor available because we are running on it",
            )
            .into_processors()
    });
    ProcessorSet::new(cached.clone(), self.clone())
}
/// Returns the processors the current thread is pinned to, or `None` when
/// the thread is not pinned (or its pinned memory region contains no
/// known processors).
#[must_use]
#[cfg_attr(test, mutants::skip)]
pub fn thread_processors(&self) -> Option<ProcessorSet> {
    // A processor pin is the most specific case: a one-processor set.
    if let Some(pinned_id) = self.get_pinned_processor_id() {
        let pinned = self.get_processor(pinned_id).clone();
        return Some(ProcessorSet::new(NonEmpty::singleton(pinned), self.clone()));
    }

    // Otherwise a memory-region pin yields every processor in that region.
    let region_id = self.get_pinned_memory_region_id()?;
    let in_region: Vec<Processor> = self
        .inner
        .all_processors_slice
        .iter()
        .flatten()
        .filter(|candidate| candidate.memory_region_id() == region_id)
        .cloned()
        .collect();

    // An empty region produces no set, matching the unpinned case.
    NonEmpty::from_vec(in_region).map(|processors| ProcessorSet::new(processors, self.clone()))
}
/// Returns all processors on the system, ignoring any resource quota.
///
/// The underlying set is computed once and cached for the process lifetime.
#[must_use]
pub fn all_processors(&self) -> ProcessorSet {
    let cached = self.inner.cached_all_processors.get_or_init(|| {
        ProcessorSetBuilder::with_internals(self.clone())
            .take_all()
            .expect("there is always at least one processor available")
            .into_processors()
    });
    ProcessorSet::new(cached.clone(), self.clone())
}
/// Returns the highest possible processor ID on this system.
#[must_use]
#[inline]
pub fn max_processor_id(&self) -> ProcessorId {
    self.inner.max_processor_id
}
/// Returns the highest possible memory region ID on this system.
#[must_use]
#[inline]
#[cfg_attr(test, mutants::skip)]
pub fn max_memory_region_id(&self) -> MemoryRegionId {
    self.inner.max_memory_region_id
}
/// Returns the number of possible processor IDs (`max_processor_id() + 1`).
///
/// The processor lookup table is built in `from_platform()` with exactly
/// one slot per possible ID, so its length *is* this count by invariant —
/// no overflow re-check is needed here.
#[must_use]
#[inline]
pub fn max_processor_count(&self) -> usize {
    self.inner.all_processors_slice.len()
}
/// Returns the number of possible memory region IDs
/// (`max_memory_region_id() + 1`).
#[must_use]
#[inline]
pub fn max_memory_region_count(&self) -> usize {
    (self.inner.max_memory_region_id as usize)
        .checked_add(1)
        .expect("unrealistic to have more than an usize worth of memory regions")
}
/// Invokes `f` with the processor the current thread is pinned to or,
/// when unpinned, the processor it is executing on right now, and returns
/// the closure's result.
pub fn with_current_processor<F, R>(&self, f: F) -> R
where
    F: FnOnce(&Processor) -> R,
{
    let processor_id = self.current_processor_id();
    let processor = self.get_processor(processor_id);
    f(processor)
}
/// Returns the ID of the processor the current thread is pinned to or,
/// when unpinned, the one the platform reports it is executing on.
#[must_use]
#[inline]
pub fn current_processor_id(&self) -> ProcessorId {
    self.get_pinned_processor_id()
        .unwrap_or_else(|| self.inner.platform.current_processor_id())
}
/// Returns the memory region ID the current thread is pinned to or, when
/// unpinned, the region of the processor it is currently executing on.
#[must_use]
#[inline]
#[cfg_attr(test, mutants::skip)]
pub fn current_memory_region_id(&self) -> MemoryRegionId {
    self.get_pinned_memory_region_id().unwrap_or_else(|| {
        let processor_id = self.current_processor_id();
        self.get_processor(processor_id).memory_region_id()
    })
}
/// Whether the current thread is pinned to a specific processor.
#[must_use]
#[inline]
pub fn is_thread_processor_pinned(&self) -> bool {
    self.get_pinned_processor_id().is_some()
}

/// Whether the current thread is pinned to a specific memory region
/// (directly or implied by a processor pin).
#[must_use]
#[inline]
pub fn is_thread_memory_region_pinned(&self) -> bool {
    self.get_pinned_memory_region_id().is_some()
}
/// Returns the resource quota imposed on this process by the platform.
#[must_use]
#[inline]
pub fn resource_quota(&self) -> ResourceQuota {
    let max_processor_time = self.inner.platform.max_processor_time();
    ResourceQuota::new(max_processor_time)
}
/// Returns the platform-reported count of currently active processors.
#[must_use]
#[inline]
pub fn active_processor_count(&self) -> usize {
    self.inner.platform.active_processor_count()
}
/// Records the current thread's pin status.
///
/// # Panics
///
/// Panics if a processor pin is supplied without a memory region pin —
/// pinning to a processor always implies pinning to its memory region.
pub(crate) fn update_pin_status(
    &self,
    processor_id: Option<ProcessorId>,
    memory_region_id: Option<MemoryRegionId>,
) {
    assert!(
        !(memory_region_id.is_none() && processor_id.is_some()),
        "if processor is pinned, memory region is obviously also pinned"
    );
    let thread_id = std::thread::current().id();
    let mut states = self
        .inner
        .thread_states
        .write()
        .expect("thread state lock should never be poisoned");
    if processor_id.is_none() && memory_region_id.is_none() {
        // Fully unpinned: drop the entry instead of storing an all-`None`
        // state, so the map does not grow without bound as short-lived
        // threads pin and unpin over the process lifetime. Readers treat a
        // missing entry and an all-`None` entry identically.
        states.remove(&thread_id);
    } else {
        let state = states.entry(thread_id).or_default();
        state.pinned_processor_id = processor_id;
        state.pinned_memory_region_id = memory_region_id;
    }
}
// Returns the processor ID the current thread is pinned to, if any.
fn get_pinned_processor_id(&self) -> Option<ProcessorId> {
    let thread_id = std::thread::current().id();
    let states = self
        .inner
        .thread_states
        .read()
        .expect("thread state lock should never be poisoned");
    // Absent entry means the thread never pinned anything.
    states.get(&thread_id)?.pinned_processor_id
}

// Returns the memory region ID the current thread is pinned to, if any.
fn get_pinned_memory_region_id(&self) -> Option<MemoryRegionId> {
    let thread_id = std::thread::current().id();
    let states = self
        .inner
        .thread_states
        .read()
        .expect("thread state lock should never be poisoned");
    states.get(&thread_id)?.pinned_memory_region_id
}
/// Looks up a processor by ID; when the ID has no matching processor
/// (e.g. a stale pin after topology change), falls back to an arbitrary
/// known processor rather than panicking.
fn get_processor(&self, processor_id: ProcessorId) -> &Processor {
    self.inner
        .all_processors_slice
        .get(processor_id as usize)
        .and_then(Option::as_ref)
        .unwrap_or_else(|| {
            // Fallback: the first populated slot in the lookup table.
            self.inner
                .all_processors_slice
                .iter()
                .find_map(Option::as_ref)
                .expect("the system must have at least one processor for code to execute")
        })
}
// Exposes the platform backend to crate internals (e.g. tests poking the
// fake platform).
pub(crate) fn platform(&self) -> &PlatformFacade {
    &self.inner.platform
}
}
#[cfg_attr(coverage_nightly, coverage(off))]
impl fmt::Debug for SystemHardware {
    /// Summarizes the topology (counts and ID bounds) without dumping every
    /// processor.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let inner = &self.inner;
        // Count only populated slots; the lookup table may contain holes.
        let processor_count = inner.all_processors_slice.iter().flatten().count();
        f.debug_struct(type_name::<Self>())
            .field("processor_count", &processor_count)
            .field("max_processor_id", &inner.max_processor_id)
            .field("max_memory_region_id", &inner.max_memory_region_id)
            .finish()
    }
}
#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests {
    use std::panic::{RefUnwindSafe, UnwindSafe};

    use itertools::Itertools;
    use static_assertions::assert_impl_all;

    use super::*;

    // The public handle must remain usable across unwind boundaries.
    assert_impl_all!(SystemHardware: UnwindSafe, RefUnwindSafe);

    // `current()` always hands out the same shared inner state.
    #[test]
    fn current_hardware_is_singleton() {
        let h1 = SystemHardware::current();
        let h2 = SystemHardware::current();
        assert!(Arc::ptr_eq(&h1.inner, &h2.inner));
    }

    // The reported current processor ID stays within the advertised range.
    #[test]
    fn returns_valid_processor_id() {
        let hardware = SystemHardware::current();
        let id = hardware.current_processor_id();
        assert!(id <= hardware.max_processor_id());
    }

    // Counts are max IDs + 1, so they are at least 1.
    #[test]
    fn counts_are_positive_values() {
        let hardware = SystemHardware::current();
        assert!(hardware.max_processor_count() >= 1);
        assert!(hardware.max_memory_region_count() >= 1);
    }

    // Pin status round-trips through set and clear for the calling thread.
    #[test]
    fn pin_status_tracking_per_thread() {
        let hardware = SystemHardware::current();
        assert!(!hardware.is_thread_processor_pinned());
        assert!(!hardware.is_thread_memory_region_pinned());
        hardware.update_pin_status(Some(0), Some(0));
        assert!(hardware.is_thread_processor_pinned());
        assert!(hardware.is_thread_memory_region_pinned());
        hardware.update_pin_status(None, None);
        assert!(!hardware.is_thread_processor_pinned());
        assert!(!hardware.is_thread_memory_region_pinned());
    }

    // The quota-respecting set is never empty.
    #[test]
    fn processors_returns_set() {
        let hardware = SystemHardware::current();
        let processors = hardware.processors();
        assert!(processors.len() >= 1);
    }

    // Each spawned-and-pinned thread observes its own processor as current,
    // and no two threads observe the same one.
    #[test]
    fn pinned_current_processor_id_is_unique() {
        let hw = SystemHardware::current();
        let mut processor_ids = hw
            .processors()
            .spawn_threads(|processor| {
                let processor_id = processor.id();
                let current_processor_id = hw.current_processor_id();
                assert_eq!(processor_id, current_processor_id);
                current_processor_id
            })
            .into_iter()
            .map(|x| x.join().unwrap())
            .collect_vec();
        processor_ids.sort();
        let unique_id_count = processor_ids.iter().dedup().count();
        assert_eq!(unique_id_count, processor_ids.len());
    }

    // The platform's active count bounds the full processor set size.
    #[test]
    fn active_processor_count_is_at_least_processor_set_len() {
        let hw = SystemHardware::current();
        let all_processors = hw.all_processors();
        let active_processors = hw.active_processor_count();
        assert!(active_processors >= all_processors.len());
    }

    // The default `processors()` set size matches the floored processor-time
    // quota (with a minimum of one processor).
    #[test]
    #[expect(
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss,
        reason = "unavoidable f64-usize casting but we know the value is positive"
    )]
    fn resource_quota_is_followed_by_default() {
        let hw = SystemHardware::current();
        let max_processor_time = hw.resource_quota().max_processor_time();
        let processors = hw.processors();
        let quota_to_processor_time = (max_processor_time.floor() as usize).max(1);
        assert_eq!(quota_to_processor_time, processors.len());
    }

    // A processor pin without a memory region pin violates the invariant
    // asserted in `update_pin_status()`.
    #[test]
    #[should_panic]
    fn panic_if_pinned_processor_with_unpinned_memory_region() {
        let hardware = SystemHardware::current();
        hardware.update_pin_status(Some(0), None);
    }
}
#[cfg(all(test, feature = "test-util"))]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests_fake {
    use new_zealand::nz;

    use crate::fake::{HardwareBuilder, ProcessorBuilder};
    use crate::{EfficiencyClass, SystemHardware};

    // Counts come straight from the builder configuration.
    #[test]
    fn fake_hardware_with_simple_config() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
        assert_eq!(hardware.max_processor_count(), 4);
        assert_eq!(hardware.max_memory_region_count(), 1);
    }

    // Multiple memory regions are reflected in the region count.
    #[test]
    fn fake_hardware_with_memory_regions() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(6), nz!(3)));
        assert_eq!(hardware.max_processor_count(), 6);
        assert_eq!(hardware.max_memory_region_count(), 3);
    }

    // Explicitly configured processors determine the max IDs (and thus counts).
    #[test]
    fn fake_hardware_with_custom_processors() {
        let hardware = SystemHardware::fake(
            HardwareBuilder::new()
                .processor(
                    ProcessorBuilder::new()
                        .id(0)
                        .memory_region(0)
                        .efficiency_class(EfficiencyClass::Performance),
                )
                .processor(
                    ProcessorBuilder::new()
                        .id(1)
                        .memory_region(1)
                        .efficiency_class(EfficiencyClass::Efficiency),
                ),
        );
        assert_eq!(hardware.max_processor_count(), 2);
        assert_eq!(hardware.max_memory_region_count(), 2);
    }

    // The fake platform reports a current processor ID within range.
    #[test]
    fn fake_hardware_returns_valid_processor_id() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(8), nz!(1)));
        let id = hardware.current_processor_id();
        assert!(id < 8);
    }

    // Pin tracking works the same against a fake platform.
    #[test]
    fn fake_hardware_pin_status_tracking() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
        assert!(!hardware.is_thread_processor_pinned());
        assert!(!hardware.is_thread_memory_region_pinned());
        hardware.update_pin_status(Some(0), Some(0));
        assert!(hardware.is_thread_processor_pinned());
        assert!(hardware.is_thread_memory_region_pinned());
        hardware.update_pin_status(None, None);
        assert!(!hardware.is_thread_processor_pinned());
        assert!(!hardware.is_thread_memory_region_pinned());
    }

    // Mixed performance/efficiency processors are all counted.
    #[test]
    fn fake_hardware_with_hybrid_processors() {
        let hardware = SystemHardware::fake(
            HardwareBuilder::new()
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Performance))
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Performance))
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Performance))
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Performance))
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Efficiency))
                .processor(ProcessorBuilder::new().efficiency_class(EfficiencyClass::Efficiency)),
        );
        assert_eq!(hardware.max_processor_count(), 6);
        assert_eq!(hardware.active_processor_count(), 6);
    }

    // The configured processor-time quota is surfaced via `resource_quota()`.
    #[test]
    fn fake_hardware_with_resource_quota() {
        let hardware = SystemHardware::fake(
            HardwareBuilder::from_counts(nz!(8), nz!(1)).max_processor_time(2.5),
        );
        let quota = hardware.resource_quota();
        assert!((quota.max_processor_time() - 2.5).abs() < f64::EPSILON);
    }

    // Distinct fake instances do not share configuration or pin state.
    #[test]
    fn multiple_fake_hardware_are_isolated() {
        let hardware1 = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
        let hardware2 = SystemHardware::fake(HardwareBuilder::from_counts(nz!(8), nz!(1)));
        assert_eq!(hardware1.max_processor_count(), 4);
        assert_eq!(hardware2.max_processor_count(), 8);
        hardware1.update_pin_status(Some(0), Some(0));
        assert!(hardware1.is_thread_processor_pinned());
        assert!(!hardware2.is_thread_processor_pinned());
    }

    // Clones of the same instance share inner state, including pin tracking.
    #[test]
    fn fake_hardware_clones_share_state() {
        let hardware1 = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
        let hardware2 = hardware1.clone();
        hardware1.update_pin_status(Some(0), Some(0));
        assert!(hardware1.is_thread_processor_pinned());
        assert!(hardware2.is_thread_processor_pinned());
    }

    // `processors()` returns all configured processors for the fake platform.
    #[test]
    fn fake_hardware_processors_returns_set() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
        let processors = hardware.processors();
        assert_eq!(processors.len(), 4);
    }

    // Spawned in a fresh thread so no pin state from other tests leaks in.
    #[test]
    fn thread_processors_returns_none_when_not_pinned() {
        std::thread::spawn(|| {
            let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
            let result = hardware.thread_processors();
            assert!(result.is_none());
        })
        .join()
        .unwrap();
    }

    // Pinning to one processor yields a singleton set with that processor.
    #[test]
    fn thread_processors_returns_single_processor_when_pinned_to_one() {
        std::thread::spawn(|| {
            let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
            let single = hardware.processors().take(nz!(1)).unwrap();
            single.pin_current_thread_to();
            let result = hardware.thread_processors().unwrap();
            assert_eq!(result.len(), 1);
            assert_eq!(
                result.processors().first().id(),
                single.processors().first().id()
            );
        })
        .join()
        .unwrap();
    }

    // Pinning to multiple processors in one region widens the result to the
    // whole region (all 4 processors share the single region here).
    #[test]
    fn thread_processors_returns_all_region_processors_when_pinned_to_same_region() {
        std::thread::spawn(|| {
            let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(4), nz!(1)));
            let two = hardware.processors().take(nz!(2)).unwrap();
            two.pin_current_thread_to();
            let result = hardware.thread_processors().unwrap();
            assert_eq!(result.len(), 4);
        })
        .join()
        .unwrap();
    }

    // Max processor ID is count - 1.
    #[test]
    fn max_processor_id_returns_configured_value() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(8), nz!(4)));
        assert_eq!(hardware.max_processor_id(), 7);
    }

    // Max memory region ID is count - 1.
    #[test]
    fn max_memory_region_id_returns_configured_value() {
        let hardware = SystemHardware::fake(HardwareBuilder::from_counts(nz!(8), nz!(4)));
        assert_eq!(hardware.max_memory_region_id(), 3);
    }

    // With sparse processor IDs (0 and 5) and the platform reporting an
    // unknown ID (3), lookups fall back to some known processor.
    #[test]
    fn get_processor_falls_back_for_unknown_id() {
        let hardware = SystemHardware::fake(
            HardwareBuilder::new()
                .processor(ProcessorBuilder::new().id(0).memory_region(0))
                .processor(ProcessorBuilder::new().id(5).memory_region(0)),
        );
        hardware
            .platform()
            .as_fake()
            .set_processor_id_override(Some(3));
        hardware.with_current_processor(|processor| {
            assert!(processor.id() == 0 || processor.id() == 5);
        });
        let region = hardware.current_memory_region_id();
        assert_eq!(region, 0);
        hardware
            .platform()
            .as_fake()
            .set_processor_id_override(None);
    }
}