#![cfg(test)]
#![allow(unused_imports)]
use super::super::affinity::*;
use super::super::config::*;
use super::super::types::*;
use super::super::worker::*;
use super::testing::*;
use super::*;
use std::collections::BTreeSet;
use std::time::Duration;
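
// Handle lifecycle tests: spawn a WorkloadHandle from a WorkloadConfig, drive it
// via start()/set_affinity()/stop_and_collect(), and inspect the per-worker reports.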
#[test]
fn set_affinity_via_handle() {
let config = WorkloadConfig {
num_workers: 1,
affinity: AffinityIntent::Inherit,
work_type: WorkType::SpinWait,
sched_policy: SchedPolicy::Normal,
..Default::default()
};
let mut h = WorkloadHandle::spawn(&config).unwrap();
h.start();
let cpus: BTreeSet<usize> = [0].into_iter().collect();
let result = h.set_affinity(0, &cpus);
assert!(result.is_ok());
std::thread::sleep(Duration::from_millis(100));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 1);
}
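
// start() must be safe to call more than once; the repeated call changes nothing
// and the single worker still makes progress.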
#[test]
fn start_idempotent() {
let config = WorkloadConfig {
num_workers: 1,
affinity: AffinityIntent::Inherit,
work_type: WorkType::SpinWait,
sched_policy: SchedPolicy::Normal,
..Default::default()
};
let mut h = WorkloadHandle::spawn(&config).unwrap();
h.start();
h.start();
std::thread::sleep(Duration::from_millis(100));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 1);
assert!(reports[0].work_units > 0);
}
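
// A region_kb of usize::MAX can never be allocated; the worker is expected to bail
// out of its loop without completing a page-fault cycle, so both counters stay at zero.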
#[test]
fn page_fault_churn_region_kb_overflow_worker_exits_cleanly() {
let config = WorkloadConfig {
num_workers: 1,
affinity: AffinityIntent::Inherit,
work_type: WorkType::PageFaultChurn {
region_kb: usize::MAX,
touches_per_cycle: 16,
spin_iters: 32,
},
sched_policy: SchedPolicy::Normal,
..Default::default()
};
let mut h = WorkloadHandle::spawn(&config).unwrap();
h.start();
std::thread::sleep(Duration::from_millis(100));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 1, "exactly one worker was spawned");
let r = &reports[0];
assert_eq!(
r.iterations, 0,
"worker with overflowing region_kb must break out of the outer loop \
without completing any page-fault cycle; got iterations={}",
r.iterations,
);
assert_eq!(
r.work_units, 0,
"overflow path must not increment work_units; got {}",
r.work_units,
);
}
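
// With several contenders on one mutex, at least one worker should report a
// non-empty resume_latencies_ns after a short run.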
#[test]
fn mutex_contention_records_wake_latency() {
let config = WorkloadConfig {
num_workers: 4,
affinity: AffinityIntent::Inherit,
work_type: WorkType::MutexContention {
contenders: 4,
hold_iters: 64,
work_iters: 256,
},
sched_policy: SchedPolicy::Normal,
..Default::default()
};
let mut h = WorkloadHandle::spawn(&config).unwrap();
h.start();
std::thread::sleep(Duration::from_millis(500));
let reports = h.stop_and_collect();
let has_latencies = reports.iter().any(|r| !r.resume_latencies_ns.is_empty());
assert!(has_latencies, "contenders should record wake latencies");
}
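
// Pathology smoke tests: each WorkType variant below is spawned with a small
// configuration, run for a short wall-clock window, and checked for forward
// progress via the collected reports.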
#[test]
fn pathology_page_fault_churn_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::PageFaultChurn {
region_kb: 256,
touches_per_cycle: 16,
spin_iters: 32,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("PageFaultChurn must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
for r in &reports {
assert!(
r.iterations > 0,
"PageFaultChurn worker must iterate: {r:?}"
);
}
}
#[test]
fn pathology_mutex_contention_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::MutexContention {
contenders: 2,
hold_iters: 64,
work_iters: 128,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("MutexContention must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
for r in &reports {
assert!(
r.iterations > 0,
"MutexContention worker must iterate: {r:?}"
);
}
}
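
// Cohort-style pathologies (ThunderingHerd, PriorityInversion, producer/consumer,
// RtStarvation, AsymmetricWaker) assert aggregate progress across the cohort rather
// than per worker, since an individual role may not iterate on its own.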
#[test]
fn pathology_thundering_herd_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::ThunderingHerd {
waiters: 1,
batches: 50,
inter_batch_ms: 1,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("ThunderingHerd must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
let total: u64 = reports.iter().map(|r| r.iterations).sum();
assert!(total > 0, "ThunderingHerd cohort must iterate: {reports:?}");
}
#[test]
fn pathology_priority_inversion_iterates() {
let cfg = WorkloadConfig {
num_workers: 3,
work_type: WorkType::PriorityInversion {
high_count: 1,
medium_count: 1,
low_count: 1,
hold_iters: 256,
work_iters: 128,
pi_mode: FutexLockMode::Plain,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("PriorityInversion must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 3);
let total: u64 = reports.iter().map(|r| r.iterations).sum();
assert!(
total > 0,
"PriorityInversion cohort must iterate: {reports:?}"
);
}
#[test]
fn pathology_producer_consumer_imbalance_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::ProducerConsumerImbalance {
producers: 1,
consumers: 1,
produce_rate_hz: 200,
consume_iters: 64,
queue_depth_target: 16,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("ProducerConsumerImbalance must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
let total: u64 = reports.iter().map(|r| r.iterations).sum();
assert!(
total > 0,
"Producer/Consumer cohort must iterate: {reports:?}"
);
}
#[test]
fn pathology_rt_starvation_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::RtStarvation {
rt_workers: 1,
cfs_workers: 1,
rt_priority: 50,
burst_iters: 64,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("RtStarvation must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
let total: u64 = reports.iter().map(|r| r.iterations).sum();
assert!(total > 0, "RtStarvation cohort must iterate: {reports:?}");
}
#[test]
fn pathology_asymmetric_waker_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::AsymmetricWaker {
waker_class: SchedClass::Cfs,
wakee_class: SchedClass::Cfs,
burst_iters: 128,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("AsymmetricWaker must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
let total: u64 = reports.iter().map(|r| r.iterations).sum();
assert!(total > 0, "AsymmetricWaker pair must iterate: {reports:?}");
}
#[test]
fn pathology_alu_hot_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::AluHot {
width: AluWidth::Widest,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("AluHot must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
for r in &reports {
assert!(r.iterations > 0, "AluHot worker must iterate: {r:?}");
}
}
#[test]
fn pathology_alu_hot_scalar_iterates() {
let cfg = WorkloadConfig {
num_workers: 1,
work_type: WorkType::AluHot {
width: AluWidth::Scalar,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("AluHot Scalar must spawn");
h.start();
std::thread::sleep(Duration::from_millis(100));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 1);
assert!(
reports[0].iterations > 0,
"AluHot Scalar worker must iterate: {:?}",
reports[0]
);
}
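
// Beyond forward progress, AluHot is also expected to publish per-iteration cost
// samples (iteration_costs_ns / iteration_cost_sample_total).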
#[test]
fn pathology_alu_hot_populates_iteration_costs() {
let cfg = WorkloadConfig {
num_workers: 1,
work_type: WorkType::AluHot {
width: AluWidth::Scalar,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("AluHot must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 1);
let r = &reports[0];
assert!(
!r.iteration_costs_ns.is_empty(),
"AluHot must populate iteration_costs_ns: {r:?}",
);
assert!(
r.iteration_cost_sample_total >= 1,
"AluHot must record at least one iteration-cost sample: {r:?}",
);
}
#[test]
fn pathology_smt_sibling_spin_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::SmtSiblingSpin,
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("SmtSiblingSpin must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
for r in &reports {
assert!(
r.iterations > 0,
"SmtSiblingSpin worker must iterate: {r:?}"
);
}
}
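
// SmtSiblingSpin groups workers in pairs, so an odd num_workers must be rejected
// at spawn time with a typed NonDivisibleWorkerCount validation error.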
#[test]
fn smt_sibling_spin_odd_workers_rejects() {
let cfg = WorkloadConfig {
num_workers: 3,
work_type: WorkType::SmtSiblingSpin,
..Default::default()
};
let err = WorkloadHandle::spawn(&cfg)
.err()
.expect("SmtSiblingSpin with odd num_workers must be rejected");
let typed = err
.downcast_ref::<WorkTypeValidationError>()
.expect("error must downcast to WorkTypeValidationError");
assert!(
matches!(
typed,
WorkTypeValidationError::NonDivisibleWorkerCount {
name,
group_idx: 0,
group_size: 2,
num_workers: 3,
} if name == "SmtSiblingSpin"
),
"expected NonDivisibleWorkerCount for SmtSiblingSpin; got: {typed:?}",
);
}
#[test]
fn pathology_ipc_variance_iterates() {
let cfg = WorkloadConfig {
num_workers: 2,
work_type: WorkType::IpcVariance {
hot_iters: 1024,
cold_iters: 64,
period_iters: 4,
},
..Default::default()
};
let mut h = WorkloadHandle::spawn(&cfg).expect("IpcVariance must spawn");
h.start();
std::thread::sleep(Duration::from_millis(200));
let reports = h.stop_and_collect();
assert_eq!(reports.len(), 2);
for r in &reports {
assert!(r.iterations > 0, "IpcVariance worker must iterate: {r:?}");
}
}