use crate::workload::WorkerReport;
use std::collections::{BTreeMap, BTreeSet};
/// One mapping parsed from `/proc/<pid>/numa_maps`: the mapping's start
/// address plus how many pages it has resident on each NUMA node.
#[derive(Debug, Clone, Default)]
pub struct NumaMapsEntry {
    // Start address of the mapping (first hex field of the line).
    pub addr: u64,
    // NUMA node id -> page count, from `N<node>=<pages>` tokens.
    pub node_pages: BTreeMap<usize, u64>,
}
pub fn parse_numa_maps(content: &str) -> Vec<NumaMapsEntry> {
let mut entries = Vec::new();
for line in content.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut parts = line.split_whitespace();
let addr = match parts.next().and_then(|s| u64::from_str_radix(s, 16).ok()) {
Some(a) => a,
None => continue,
};
let _ = parts.next();
let mut entry = NumaMapsEntry {
addr,
..Default::default()
};
for token in parts {
if let Some(rest) = token.strip_prefix('N')
&& let Some((node_str, count_str)) = rest.split_once('=')
&& let (Ok(node), Ok(count)) = (node_str.parse::<usize>(), count_str.parse::<u64>())
{
*entry.node_pages.entry(node).or_insert(0) += count;
}
}
if !entry.node_pages.is_empty() {
entries.push(entry);
}
}
entries
}
/// Fraction of all pages (across `entries`) that live on one of the
/// `expected_nodes`. Returns 0.0 when no pages were observed at all.
pub fn page_locality(entries: &[NumaMapsEntry], expected_nodes: &BTreeSet<usize>) -> f64 {
    let mut all_pages: u64 = 0;
    let mut on_expected: u64 = 0;
    for entry in entries {
        for (node, pages) in &entry.node_pages {
            all_pages += *pages;
            if expected_nodes.contains(node) {
                on_expected += *pages;
            }
        }
    }
    // Guard the no-data case rather than dividing by zero.
    if all_pages == 0 {
        0.0
    } else {
        on_expected as f64 / all_pages as f64
    }
}
/// Extract the `numa_pages_migrated` counter from `/proc/vmstat` text.
///
/// Returns the first line whose (trimmed) text starts with the counter
/// name and whose remainder parses as a `u64`; `None` if no such line.
pub fn parse_vmstat_numa_pages_migrated(content: &str) -> Option<u64> {
    content.lines().find_map(|raw| {
        raw.trim()
            .strip_prefix("numa_pages_migrated")
            .and_then(|tail| tail.trim().parse::<u64>().ok())
    })
}
/// Default max scheduling-gap threshold; debug builds run slower, so a
/// longer stall is tolerated before a worker counts as stuck.
fn gap_threshold_ms() -> u64 {
    match cfg!(debug_assertions) {
        true => 3000,
        false => 2000,
    }
}
/// Default max off-CPU-percentage spread; relaxed in debug builds where
/// timing is noisier.
fn spread_threshold_pct() -> f64 {
    match cfg!(debug_assertions) {
        true => 35.0,
        false => 15.0,
    }
}
/// Category tag attached to every [`AssertDetail`], identifying which
/// check produced the message.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum DetailKind {
    // Worker made no progress (0 work units).
    Starved,
    // Worker's max scheduling gap exceeded the threshold.
    Stuck,
    // Off-CPU-percentage spread across workers exceeded the threshold.
    Unfair,
    // Worker ran on CPUs outside its expected cpuset.
    Isolation,
    Benchmark,
    // Migration-to-iteration ratio exceeded the threshold.
    Migration,
    // Page locality fell below the required fraction.
    PageLocality,
    // Cross-node page-migration ratio exceeded the threshold.
    CrossNodeMigration,
    // Too many pages resident on non-CPU ("slow tier") NUMA nodes.
    SlowTier,
    Monitor,
    SchedulerDied,
    SchedulerEvent,
    // Reason attached by `AssertResult::skip`.
    Skip,
    // Informational message added via `AssertResult::note`.
    Note,
    Other,
}
/// Common prefix for all "scheduler died" diagnostics, so callers can
/// match these messages by prefix.
pub(crate) const SCHED_DIED_PREFIX: &str = "scheduler process died";

/// Message for a scheduler death detected between steps.
pub(crate) fn format_sched_died_after_step(
    step_idx: usize,
    total_steps: usize,
    elapsed_s: f64,
) -> String {
    format!(
        "{} unexpectedly after completing step {} of {} ({:.1}s into test)",
        SCHED_DIED_PREFIX, step_idx, total_steps, elapsed_s
    )
}

/// Message for a scheduler death detected only after the final step.
pub(crate) fn format_sched_died_after_all_steps(total_steps: usize, elapsed_s: f64) -> String {
    format!(
        "{} unexpectedly (detected after all {} steps completed, {:.1}s elapsed)",
        SCHED_DIED_PREFIX, total_steps, elapsed_s
    )
}

/// Message for a scheduler death detected while the workload was running.
pub(crate) fn format_sched_died_during_workload(elapsed_s: f64) -> String {
    format!(
        "{} unexpectedly during workload ({:.1}s into test)",
        SCHED_DIED_PREFIX, elapsed_s
    )
}
/// A single finding from an assertion: a [`DetailKind`] tag plus a
/// human-readable message. String comparisons (see the `PartialEq`
/// impls below) consider the message only.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct AssertDetail {
    pub kind: DetailKind,
    pub message: String,
}
impl PartialEq<&str> for AssertDetail {
fn eq(&self, other: &&str) -> bool {
self.message == *other
}
}
impl PartialEq<str> for AssertDetail {
fn eq(&self, other: &str) -> bool {
self.message == *other
}
}
impl PartialEq<String> for AssertDetail {
fn eq(&self, other: &String) -> bool {
self.message == *other
}
}
impl AsRef<str> for AssertDetail {
fn as_ref(&self) -> &str {
&self.message
}
}
impl AssertDetail {
    /// Build a detail from a kind tag and any string-like message.
    pub fn new(kind: DetailKind, message: impl Into<String>) -> Self {
        let message = message.into();
        Self { kind, message }
    }

    /// Wrap this detail in an adapter whose `Display` output carries a
    /// leading `[Kind]` tag.
    pub fn display_with_kind(&self) -> AssertDetailWithKind<'_> {
        AssertDetailWithKind { detail: self }
    }
}
/// Display adapter that renders an [`AssertDetail`] as `[Kind] message`.
#[must_use = "AssertDetailWithKind only renders when formatted"]
pub struct AssertDetailWithKind<'a> {
    detail: &'a AssertDetail,
}
impl std::fmt::Display for AssertDetailWithKind<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit the debug-formatted kind tag, then the raw message.
        write!(f, "[{:?}] ", self.detail.kind)?;
        f.write_str(&self.detail.message)
    }
}
impl From<String> for AssertDetail {
fn from(message: String) -> Self {
Self {
kind: DetailKind::Other,
message,
}
}
}
impl From<&str> for AssertDetail {
fn from(s: &str) -> Self {
Self {
kind: DetailKind::Other,
message: s.to_string(),
}
}
}
// Deref to the message text so callers can use `&str` methods directly.
impl std::ops::Deref for AssertDetail {
    type Target = str;
    fn deref(&self) -> &str {
        self.message.as_str()
    }
}
// Display prints the bare message; use `display_with_kind` for the tag.
impl std::fmt::Display for AssertDetail {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.message)
    }
}
/// Loosely-typed measurement value stored in `AssertResult::measurements`.
/// Serialized `untagged`, so JSON carries the bare value with no variant name.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
pub enum NoteValue {
    Int(i64),
    Uint(u64),
    Float(f64),
    Bool(bool),
    Text(String),
}
impl From<i64> for NoteValue {
fn from(v: i64) -> Self {
Self::Int(v)
}
}
impl From<u64> for NoteValue {
fn from(v: u64) -> Self {
Self::Uint(v)
}
}
impl From<f64> for NoteValue {
fn from(v: f64) -> Self {
Self::Float(v)
}
}
impl From<bool> for NoteValue {
fn from(v: bool) -> Self {
Self::Bool(v)
}
}
impl From<String> for NoteValue {
fn from(v: String) -> Self {
Self::Text(v)
}
}
impl From<&str> for NoteValue {
fn from(v: &str) -> Self {
Self::Text(v.to_string())
}
}
/// Outcome of one assertion (or a merge of several): pass/skip flags,
/// per-check details, aggregated stats, and named measurements.
#[must_use = "test verdict is lost if not checked"]
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct AssertResult {
    pub passed: bool,
    // Set only by `skip()`; skipped results also carry `passed == true`.
    pub skipped: bool,
    pub details: Vec<AssertDetail>,
    pub stats: ScenarioStats,
    // Free-form named measurements; on merge, same-named keys are overwritten.
    #[serde(default)]
    pub measurements: std::collections::BTreeMap<String, NoteValue>,
}
/// Per-cgroup aggregate of worker scheduling metrics.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize, crate::Claim)]
pub struct CgroupStats {
    pub num_workers: usize,
    // Number of distinct CPUs the cgroup's workers were observed on.
    pub num_cpus: usize,
    // Off-CPU time as a percentage of wall time, across workers.
    pub avg_off_cpu_pct: f64,
    pub min_off_cpu_pct: f64,
    pub max_off_cpu_pct: f64,
    // max_off_cpu_pct - min_off_cpu_pct; fairness indicator.
    pub spread: f64,
    // Longest observed scheduling gap and the CPU it occurred on.
    pub max_gap_ms: u64,
    pub max_gap_cpu: usize,
    pub total_migrations: u64,
    // Migrations per iteration (0.0 when no iterations recorded).
    pub migration_ratio: f64,
    pub p99_wake_latency_us: f64,
    pub median_wake_latency_us: f64,
    // Coefficient of variation of wake latency (stddev / mean).
    pub wake_latency_cv: f64,
    pub total_iterations: u64,
    pub mean_run_delay_us: f64,
    pub worst_run_delay_us: f64,
    pub page_locality: f64,
    pub cross_node_migration_ratio: f64,
    // Scheduler-specific extension metrics keyed by name.
    #[serde(default)]
    pub ext_metrics: BTreeMap<String, f64>,
}
impl CgroupStats {
    /// Ratio of p99 to median wake latency; 0.0 when the median is zero
    /// (no latency data).
    pub fn wake_latency_tail_ratio(&self) -> f64 {
        let median = self.median_wake_latency_us;
        if median <= 0.0 {
            return 0.0;
        }
        self.p99_wake_latency_us / median
    }

    /// Mean iterations per worker; 0.0 when there are no workers.
    pub fn iterations_per_worker(&self) -> f64 {
        if self.num_workers == 0 {
            return 0.0;
        }
        self.total_iterations as f64 / self.num_workers as f64
    }
}
/// Scenario-wide aggregate: per-cgroup stats plus totals and the
/// worst-case value of each metric across cgroups.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize, crate::Claim)]
pub struct ScenarioStats {
    pub cgroups: Vec<CgroupStats>,
    pub total_workers: usize,
    pub total_cpus: usize,
    pub total_migrations: u64,
    // "worst_*" fields keep the most pessimistic value seen across merges:
    // max for higher-is-worse metrics, lowest nonzero for lower-is-worse
    // ones (page locality, iterations per worker).
    pub worst_spread: f64,
    pub worst_gap_ms: u64,
    // CPU on which the worst gap was observed.
    pub worst_gap_cpu: usize,
    pub worst_migration_ratio: f64,
    pub worst_p99_wake_latency_us: f64,
    pub worst_median_wake_latency_us: f64,
    pub worst_wake_latency_cv: f64,
    pub total_iterations: u64,
    pub worst_mean_run_delay_us: f64,
    pub worst_run_delay_us: f64,
    pub worst_page_locality: f64,
    pub worst_cross_node_migration_ratio: f64,
    pub worst_wake_latency_tail_ratio: f64,
    pub worst_iterations_per_worker: f64,
    // Scheduler-specific extension metrics, combined per metric definition.
    #[serde(default)]
    pub ext_metrics: BTreeMap<String, f64>,
}
impl AssertResult {
    /// A passing, non-skipped result with no details and default stats.
    pub fn pass() -> Self {
        Self {
            passed: true,
            skipped: false,
            details: vec![],
            stats: Default::default(),
            measurements: std::collections::BTreeMap::new(),
        }
    }

    /// A skipped result. It is also marked `passed` so a skip does not
    /// fail an overall verdict; the reason is kept as a `Skip` detail.
    pub fn skip(reason: impl Into<String>) -> Self {
        Self {
            passed: true,
            skipped: true,
            details: vec![AssertDetail::new(DetailKind::Skip, reason)],
            stats: Default::default(),
            measurements: std::collections::BTreeMap::new(),
        }
    }

    /// A failing result carrying a single pre-built detail.
    pub fn fail(detail: AssertDetail) -> Self {
        Self {
            passed: false,
            skipped: false,
            details: vec![detail],
            stats: Default::default(),
            measurements: std::collections::BTreeMap::new(),
        }
    }

    /// Convenience: fail with a plain message tagged `DetailKind::Other`.
    pub fn fail_msg(msg: impl Into<String>) -> Self {
        Self::fail(AssertDetail::new(DetailKind::Other, msg))
    }

    /// Append an informational `Note` detail; does not affect pass/fail.
    pub fn note(&mut self, msg: impl Into<String>) -> &mut Self {
        self.details.push(AssertDetail::new(DetailKind::Note, msg));
        self
    }

    /// Owned-builder form of [`AssertResult::note`].
    pub fn with_note(mut self, msg: impl Into<String>) -> Self {
        self.note(msg);
        self
    }

    pub fn is_skipped(&self) -> bool {
        self.skipped
    }

    pub fn is_failed(&self) -> bool {
        !self.passed
    }

    /// Fold `other` into `self`: pass/fail is AND-ed, details are
    /// concatenated, totals are summed, and each "worst" metric is
    /// combined pessimistically.
    pub fn merge(&mut self, other: AssertResult) {
        // For metrics where LOWER is worse (page locality, iterations
        // per worker): keep the smallest nonzero value; 0.0 means "no data".
        fn fold_lowest_nonzero(self_field: &mut f64, other_field: f64) {
            if other_field > 0.0 && (*self_field == 0.0 || other_field < *self_field) {
                *self_field = other_field;
            }
        }
        if !other.passed {
            self.passed = false;
        }
        // NOTE(review): since `pass()` starts with skipped=false, a fold
        // seeded from `pass()` (e.g. `all_of`) can never end up skipped,
        // even when every merged result was a skip — confirm intended.
        self.skipped = self.skipped && other.skipped;
        self.details.extend(other.details);
        let s = &mut self.stats;
        let o = &other.stats;
        s.total_workers += o.total_workers;
        s.total_cpus += o.total_cpus;
        s.total_migrations += o.total_migrations;
        s.total_iterations += o.total_iterations;
        // Higher-is-worse metrics: take the max.
        s.worst_spread = s.worst_spread.max(o.worst_spread);
        s.worst_migration_ratio = s.worst_migration_ratio.max(o.worst_migration_ratio);
        s.worst_p99_wake_latency_us = s.worst_p99_wake_latency_us.max(o.worst_p99_wake_latency_us);
        s.worst_median_wake_latency_us = s
            .worst_median_wake_latency_us
            .max(o.worst_median_wake_latency_us);
        s.worst_wake_latency_cv = s.worst_wake_latency_cv.max(o.worst_wake_latency_cv);
        s.worst_run_delay_us = s.worst_run_delay_us.max(o.worst_run_delay_us);
        s.worst_mean_run_delay_us = s.worst_mean_run_delay_us.max(o.worst_mean_run_delay_us);
        s.worst_cross_node_migration_ratio = s
            .worst_cross_node_migration_ratio
            .max(o.worst_cross_node_migration_ratio);
        s.worst_wake_latency_tail_ratio = s
            .worst_wake_latency_tail_ratio
            .max(o.worst_wake_latency_tail_ratio);
        fold_lowest_nonzero(
            &mut s.worst_iterations_per_worker,
            o.worst_iterations_per_worker,
        );
        // Gap ms and its CPU travel together: only update the CPU when
        // the gap itself is the new worst.
        if o.worst_gap_ms > s.worst_gap_ms {
            s.worst_gap_ms = o.worst_gap_ms;
            s.worst_gap_cpu = o.worst_gap_cpu;
        }
        fold_lowest_nonzero(&mut s.worst_page_locality, o.worst_page_locality);
        // Extension metrics: direction of "worse" comes from the metric
        // definition, falling back to a name-based heuristic.
        for (k, v) in &other.stats.ext_metrics {
            let higher_is_worse = crate::stats::metric_def(k)
                .map(|m| m.higher_is_worse())
                .unwrap_or_else(|| crate::stats::infer_higher_is_worse(k));
            let entry = self.stats.ext_metrics.entry(k.clone()).or_insert(*v);
            *entry = if higher_is_worse {
                entry.max(*v)
            } else {
                entry.min(*v)
            };
        }
        self.stats.cgroups.extend(other.stats.cgroups);
        // Measurements: later merges win on key collision.
        for (k, v) in other.measurements {
            self.measurements.insert(k, v);
        }
    }

    /// Record a named measurement, overwriting any previous value.
    pub fn note_value(&mut self, key: impl Into<String>, value: impl Into<NoteValue>) -> &mut Self {
        self.measurements.insert(key.into(), value.into());
        self
    }

    /// Disjunction: passes if any branch genuinely passed (not skipped).
    /// The first passing branch's result is returned, enriched with the
    /// measurements of every other passing branch; on total failure,
    /// all branches' details are kept, prefixed with their index.
    pub fn any_of(branches: impl IntoIterator<Item = AssertResult>) -> AssertResult {
        let mut branches: Vec<AssertResult> = branches.into_iter().collect();
        if branches.is_empty() {
            return AssertResult::fail(AssertDetail::new(
                DetailKind::Other,
                "any_of: empty branch list — a disjunction of zero alternatives is logically false",
            ));
        }
        let first_pass_idx = branches.iter().position(|b| b.passed && !b.skipped);
        if let Some(idx) = first_pass_idx {
            let mut chosen = branches.swap_remove(idx);
            // Collect measurements from the other passing branches too.
            for b in branches {
                if b.passed && !b.skipped {
                    for (k, v) in b.measurements {
                        chosen.measurements.insert(k, v);
                    }
                }
            }
            chosen.details.push(AssertDetail::new(
                DetailKind::Note,
                format!("any_of: branch {idx} satisfied the disjunction"),
            ));
            chosen
        } else {
            // All branches failed (or were skipped): keep the first
            // branch's stats/measurements, and every branch's details.
            let total_branches = branches.len();
            let mut iter = branches.into_iter().enumerate();
            let (_, first) = iter.next().expect("non-empty checked above");
            let mut acc = AssertResult {
                passed: false,
                skipped: false,
                details: Vec::new(),
                stats: first.stats,
                measurements: first.measurements,
            };
            for d in first.details {
                acc.details.push(AssertDetail::new(
                    d.kind,
                    format!("any_of[0]: {}", d.message),
                ));
            }
            for (idx, b) in iter {
                for d in b.details {
                    acc.details.push(AssertDetail::new(
                        d.kind,
                        format!("any_of[{idx}]: {}", d.message),
                    ));
                }
            }
            acc.details.push(AssertDetail::new(
                DetailKind::Other,
                format!("any_of: all {total_branches} branches failed"),
            ));
            acc
        }
    }

    /// Conjunction: merge every branch; any failure fails the whole.
    pub fn all_of(branches: impl IntoIterator<Item = AssertResult>) -> AssertResult {
        let mut acc = AssertResult::pass();
        for b in branches {
            acc.merge(b);
        }
        acc
    }
}
/// Resolved worker-level check configuration (lowered from [`Assert`]):
/// booleans are concrete and each threshold is `Some` only when the
/// corresponding check is enabled.
#[derive(Clone, Debug)]
pub(crate) struct AssertPlan {
    pub(crate) not_starved: bool,
    pub(crate) isolation: bool,
    pub(crate) max_gap_ms: Option<u64>,
    pub(crate) max_spread_pct: Option<f64>,
    pub(crate) max_throughput_cv: Option<f64>,
    pub(crate) min_work_rate: Option<f64>,
    pub(crate) max_p99_wake_latency_ns: Option<u64>,
    pub(crate) max_wake_latency_cv: Option<f64>,
    pub(crate) min_iteration_rate: Option<f64>,
    pub(crate) max_migration_ratio: Option<f64>,
    pub(crate) min_page_locality: Option<f64>,
    pub(crate) max_cross_node_migration_ratio: Option<f64>,
    pub(crate) max_slow_tier_ratio: Option<f64>,
}
impl AssertPlan {
    /// A plan with every check disabled.
    pub(crate) fn new() -> Self {
        Self {
            not_starved: false,
            isolation: false,
            max_gap_ms: None,
            max_spread_pct: None,
            max_throughput_cv: None,
            min_work_rate: None,
            max_p99_wake_latency_ns: None,
            max_wake_latency_cv: None,
            min_iteration_rate: None,
            max_migration_ratio: None,
            min_page_locality: None,
            max_cross_node_migration_ratio: None,
            max_slow_tier_ratio: None,
        }
    }

    /// Run every enabled check against one cgroup's worker reports and
    /// merge the outcomes into a single [`AssertResult`].
    ///
    /// `cpuset` is required for the isolation check; `numa_nodes` gates
    /// the page-locality and slow-tier checks.
    pub(crate) fn assert_cgroup(
        &self,
        reports: &[WorkerReport],
        cpuset: Option<&BTreeSet<usize>>,
        numa_nodes: Option<&BTreeSet<usize>>,
    ) -> AssertResult {
        let mut r = AssertResult::pass();
        if self.not_starved {
            let mut cgroup_result = assert_not_starved(reports);
            // An explicit spread threshold overrides the built-in one:
            // drop the default Unfair findings and re-evaluate.
            if let Some(spread_limit) = self.max_spread_pct {
                cgroup_result
                    .details
                    .retain(|d| d.kind != DetailKind::Unfair);
                if let Some(cg) = cgroup_result.stats.cgroups.first() {
                    if cg.spread > spread_limit && cg.num_workers >= 2 {
                        cgroup_result.passed = false;
                        cgroup_result.details.push(AssertDetail::new(
                            DetailKind::Unfair,
                            format!(
                                "unfair cgroup: spread={:.0}% ({:.0}-{:.0}%) {} workers on {} cpus (threshold {:.0}%)",
                                cg.spread, cg.min_off_cpu_pct, cg.max_off_cpu_pct,
                                cg.num_workers, cg.num_cpus, spread_limit
                            ),
                        ));
                    } else {
                        // Spread is fine under the override; pass/fail now
                        // hinges on the remaining Starved/Stuck findings.
                        cgroup_result.passed = !cgroup_result
                            .details
                            .iter()
                            .any(|d| matches!(d.kind, DetailKind::Starved | DetailKind::Stuck));
                    }
                }
            }
            // An explicit gap threshold likewise replaces the default
            // Stuck findings. NOTE(review): both overrides only take
            // effect when `not_starved` is also set — confirm intended.
            if let Some(threshold) = self.max_gap_ms {
                cgroup_result
                    .details
                    .retain(|d| d.kind != DetailKind::Stuck);
                let had_gap_failure = reports.iter().any(|w| w.max_gap_ms > threshold);
                if had_gap_failure {
                    cgroup_result.passed = false;
                    // One Stuck detail per offending worker.
                    for w in reports {
                        if w.max_gap_ms > threshold {
                            cgroup_result.details.push(AssertDetail::new(
                                DetailKind::Stuck,
                                format!(
                                    "tid {} stuck {}ms on cpu{} at +{}ms (threshold {}ms)",
                                    w.tid, w.max_gap_ms, w.max_gap_cpu, w.max_gap_at_ms, threshold,
                                ),
                            ));
                        }
                    }
                } else {
                    cgroup_result.passed = !cgroup_result
                        .details
                        .iter()
                        .any(|d| matches!(d.kind, DetailKind::Starved | DetailKind::Unfair));
                }
            }
            r.merge(cgroup_result);
        }
        if self.isolation
            && let Some(cs) = cpuset
        {
            r.merge(assert_isolation(reports, cs));
        }
        if self.max_throughput_cv.is_some() || self.min_work_rate.is_some() {
            r.merge(assert_throughput_parity(
                reports,
                self.max_throughput_cv,
                self.min_work_rate,
            ));
        }
        if self.max_p99_wake_latency_ns.is_some()
            || self.max_wake_latency_cv.is_some()
            || self.min_iteration_rate.is_some()
        {
            r.merge(assert_benchmarks(
                reports,
                self.max_p99_wake_latency_ns,
                self.max_wake_latency_cv,
                self.min_iteration_rate,
            ));
        }
        if let Some(max_ratio) = self.max_migration_ratio {
            // Migration ratio = total migrations per iteration across
            // all workers in the cgroup.
            let total_mig: u64 = reports.iter().map(|w| w.migration_count).sum();
            let total_iters: u64 = reports.iter().map(|w| w.iterations).sum();
            let ratio = if total_iters > 0 {
                total_mig as f64 / total_iters as f64
            } else {
                0.0
            };
            if ratio > max_ratio {
                r.passed = false;
                r.details.push(AssertDetail::new(
                    DetailKind::Migration,
                    format!(
                        "migration ratio {:.4} exceeds threshold {:.4} ({} migrations / {} iterations)",
                        ratio, max_ratio, total_mig, total_iters,
                    ),
                ));
            }
        }
        if let Some(min_locality) = self.min_page_locality
            && let Some(nodes) = numa_nodes
        {
            // Locality = fraction of all workers' pages resident on the
            // expected NUMA nodes.
            let mut total: u64 = 0;
            let mut local: u64 = 0;
            for w in reports {
                for (&node, &count) in &w.numa_pages {
                    total += count;
                    if nodes.contains(&node) {
                        local += count;
                    }
                }
            }
            let locality = if total > 0 {
                local as f64 / total as f64
            } else {
                0.0
            };
            r.merge(assert_page_locality(
                locality,
                Some(min_locality),
                total,
                local,
            ));
        }
        if let Some(max_ratio) = self.max_cross_node_migration_ratio {
            let total_pages: u64 = reports
                .iter()
                .map(|w| w.numa_pages.values().sum::<u64>())
                .sum();
            // NOTE(review): takes the max across workers — presumably the
            // vmstat counter is a shared system-wide reading rather than
            // per-worker; confirm against the report collector.
            let migrated_pages: u64 = reports
                .iter()
                .map(|w| w.vmstat_numa_pages_migrated)
                .max()
                .unwrap_or(0);
            r.merge(assert_cross_node_migration(
                migrated_pages,
                total_pages,
                Some(max_ratio),
            ));
        }
        if let Some(max_ratio) = self.max_slow_tier_ratio
            && numa_nodes.is_some()
        {
            // Slow-tier check runs per worker; workers with no page data
            // are skipped.
            for w in reports {
                if w.numa_pages.is_empty() {
                    continue;
                }
                let total: u64 = w.numa_pages.values().sum();
                if total > 0 {
                    r.merge(assert_slow_tier_ratio(
                        &w.numa_pages,
                        max_ratio,
                        total,
                        numa_nodes,
                    ));
                }
            }
        }
        r
    }
}
/// Fail if too large a fraction of `numa_pages` lives on nodes outside
/// `numa_nodes` (i.e. on slow-tier, non-CPU nodes). Passes trivially
/// when no node set is supplied. `total_pages` must be the caller's
/// precomputed sum of all counts.
fn assert_slow_tier_ratio(
    numa_pages: &BTreeMap<usize, u64>,
    max_ratio: f64,
    total_pages: u64,
    numa_nodes: Option<&BTreeSet<usize>>,
) -> AssertResult {
    let mut result = AssertResult::pass();
    let cpu_nodes = match numa_nodes {
        Some(nodes) => nodes,
        None => return result,
    };
    // Pages on any node NOT in the CPU-node set count as slow-tier.
    let slow_pages: u64 = numa_pages
        .iter()
        .filter(|(node, _)| !cpu_nodes.contains(node))
        .map(|(_, count)| *count)
        .sum();
    let ratio = slow_pages as f64 / total_pages as f64;
    if ratio > max_ratio {
        result.passed = false;
        result.details.push(AssertDetail::new(
            DetailKind::SlowTier,
            format!(
                "slow-tier page ratio {ratio:.4} ({pct:.2}%) exceeds threshold {max_ratio:.4} ({thr_pct:.2}%) \
                 ({slow_pages}/{total_pages} pages on non-CPU nodes)",
                pct = ratio * 100.0,
                thr_pct = max_ratio * 100.0,
            ),
        ));
    }
    result
}
/// Fail when the observed page-locality fraction falls below the
/// threshold; a `None` threshold disables the check. The page counts
/// are only used to enrich the failure message.
pub fn assert_page_locality(
    observed: f64,
    min_locality: Option<f64>,
    total_pages: u64,
    local_pages: u64,
) -> AssertResult {
    let mut result = AssertResult::pass();
    let Some(threshold) = min_locality else {
        return result;
    };
    if observed < threshold {
        result.passed = false;
        result.details.push(AssertDetail::new(
            DetailKind::PageLocality,
            format!(
                "page locality {observed:.4} ({pct:.2}%) below threshold {threshold:.4} ({thr_pct:.2}%) ({local_pages}/{total_pages} pages local)",
                pct = observed * 100.0,
                thr_pct = threshold * 100.0,
            ),
        ));
    }
    result
}
/// Fail when the cross-node migration ratio (migrated / observed pages)
/// exceeds the threshold; a `None` threshold disables the check.
/// With zero observed pages, only the inconsistent case (migrations
/// reported anyway) fails.
pub fn assert_cross_node_migration(
    migrated_pages: u64,
    total_pages: u64,
    max_ratio: Option<f64>,
) -> AssertResult {
    let mut result = AssertResult::pass();
    let Some(threshold) = max_ratio else {
        return result;
    };
    if total_pages == 0 {
        if migrated_pages > 0 {
            result.passed = false;
            result.details.push(AssertDetail::new(
                DetailKind::CrossNodeMigration,
                format!(
                    "cross-node migration inconsistent: {migrated_pages} pages migrated but 0 pages observed in numa_maps (threshold {threshold:.4})",
                ),
            ));
        }
        return result;
    }
    let ratio = migrated_pages as f64 / total_pages as f64;
    if ratio > threshold {
        result.passed = false;
        result.details.push(AssertDetail::new(
            DetailKind::CrossNodeMigration,
            format!(
                "cross-node migration ratio {ratio:.4} ({pct:.2}%) exceeds threshold {threshold:.4} ({thr_pct:.2}%) ({migrated_pages}/{total_pages} pages migrated)",
                pct = ratio * 100.0,
                thr_pct = threshold * 100.0,
            ),
        ));
    }
    result
}
// `Default` is the all-checks-disabled plan, same as `new()`.
impl Default for AssertPlan {
    fn default() -> Self {
        Self::new()
    }
}
// Test-only builder helpers mirroring the public `Assert` builder API.
#[cfg(test)]
impl AssertPlan {
    fn check_not_starved(mut self) -> Self {
        self.not_starved = true;
        self
    }
    fn check_isolation(mut self) -> Self {
        self.isolation = true;
        self
    }
    fn max_gap_ms(mut self, ms: u64) -> Self {
        self.max_gap_ms = Some(ms);
        self
    }
}
/// Builder-style assertion configuration. Every field is an override:
/// `None` means "leave the layered/default value alone" (see `merge`).
/// Worker-level checks are lowered to an [`AssertPlan`]; the monitor
/// fields feed `monitor_thresholds`.
#[must_use = "builder methods return a new Assert; discard means config is lost"]
#[derive(Clone, Copy, Debug)]
pub struct Assert {
    pub not_starved: Option<bool>,
    pub isolation: Option<bool>,
    pub max_gap_ms: Option<u64>,
    pub max_spread_pct: Option<f64>,
    pub max_throughput_cv: Option<f64>,
    pub min_work_rate: Option<f64>,
    pub max_p99_wake_latency_ns: Option<u64>,
    pub max_wake_latency_cv: Option<f64>,
    pub min_iteration_rate: Option<f64>,
    pub max_migration_ratio: Option<f64>,
    // Monitor-only thresholds (not part of the worker plan).
    pub max_imbalance_ratio: Option<f64>,
    pub max_local_dsq_depth: Option<u32>,
    pub fail_on_stall: Option<bool>,
    pub sustained_samples: Option<usize>,
    pub max_fallback_rate: Option<f64>,
    pub max_keep_last_rate: Option<f64>,
    pub min_page_locality: Option<f64>,
    pub max_cross_node_migration_ratio: Option<f64>,
    pub max_slow_tier_ratio: Option<f64>,
}
impl Assert {
    /// Render every field as an aligned `name: value` table for logs,
    /// printing "none" for unset overrides.
    pub fn format_human(&self) -> String {
        use std::fmt::Write;
        let mut out = String::new();
        // One table row; `{name:<38}` left-aligns names into a fixed column.
        fn row<T: std::fmt::Display>(out: &mut String, name: &str, v: &Option<T>) {
            match v {
                Some(x) => writeln!(out, " {name:<38}: {x}").unwrap(),
                None => writeln!(out, " {name:<38}: none").unwrap(),
            }
        }
        row(&mut out, "not_starved", &self.not_starved);
        row(&mut out, "isolation", &self.isolation);
        row(&mut out, "max_gap_ms", &self.max_gap_ms);
        row(&mut out, "max_spread_pct", &self.max_spread_pct);
        row(&mut out, "max_throughput_cv", &self.max_throughput_cv);
        row(&mut out, "min_work_rate", &self.min_work_rate);
        row(
            &mut out,
            "max_p99_wake_latency_ns",
            &self.max_p99_wake_latency_ns,
        );
        row(&mut out, "max_wake_latency_cv", &self.max_wake_latency_cv);
        row(&mut out, "min_iteration_rate", &self.min_iteration_rate);
        row(&mut out, "max_migration_ratio", &self.max_migration_ratio);
        row(&mut out, "max_imbalance_ratio", &self.max_imbalance_ratio);
        row(&mut out, "max_local_dsq_depth", &self.max_local_dsq_depth);
        row(&mut out, "fail_on_stall", &self.fail_on_stall);
        row(&mut out, "sustained_samples", &self.sustained_samples);
        row(&mut out, "max_fallback_rate", &self.max_fallback_rate);
        row(&mut out, "max_keep_last_rate", &self.max_keep_last_rate);
        row(&mut out, "min_page_locality", &self.min_page_locality);
        row(
            &mut out,
            "max_cross_node_migration_ratio",
            &self.max_cross_node_migration_ratio,
        );
        row(&mut out, "max_slow_tier_ratio", &self.max_slow_tier_ratio);
        out
    }
    /// All fields `None`: layering this over another config changes nothing.
    pub const NO_OVERRIDES: Assert = Assert {
        not_starved: None,
        isolation: None,
        max_gap_ms: None,
        max_spread_pct: None,
        max_throughput_cv: None,
        min_work_rate: None,
        max_p99_wake_latency_ns: None,
        max_wake_latency_cv: None,
        min_iteration_rate: None,
        max_migration_ratio: None,
        max_imbalance_ratio: None,
        max_local_dsq_depth: None,
        fail_on_stall: None,
        sustained_samples: None,
        max_fallback_rate: None,
        max_keep_last_rate: None,
        min_page_locality: None,
        max_cross_node_migration_ratio: None,
        max_slow_tier_ratio: None,
    };
    /// Baseline check set: starvation detection plus the default
    /// monitor thresholds; everything else disabled.
    pub const fn default_checks() -> Assert {
        use crate::monitor::MonitorThresholds;
        Assert {
            not_starved: Some(true),
            isolation: None,
            max_gap_ms: None,
            max_spread_pct: None,
            max_throughput_cv: None,
            min_work_rate: None,
            max_p99_wake_latency_ns: None,
            max_wake_latency_cv: None,
            min_iteration_rate: None,
            max_migration_ratio: None,
            max_imbalance_ratio: Some(MonitorThresholds::DEFAULT.max_imbalance_ratio),
            max_local_dsq_depth: Some(MonitorThresholds::DEFAULT.max_local_dsq_depth),
            fail_on_stall: Some(MonitorThresholds::DEFAULT.fail_on_stall),
            sustained_samples: Some(MonitorThresholds::DEFAULT.sustained_samples),
            max_fallback_rate: Some(MonitorThresholds::DEFAULT.max_fallback_rate),
            max_keep_last_rate: Some(MonitorThresholds::DEFAULT.max_keep_last_rate),
            min_page_locality: None,
            max_cross_node_migration_ratio: None,
            max_slow_tier_ratio: None,
        }
    }
    /// Start building a claim-based verdict from this configuration.
    pub fn verdict(self) -> Verdict {
        Verdict::with_assert(self)
    }
    /// Alias for [`Assert::NO_OVERRIDES`].
    pub const fn empty() -> Self {
        Self::NO_OVERRIDES
    }
    /// Alias for [`Assert::default_checks`].
    pub const fn defaults() -> Self {
        Self::default_checks()
    }
    // --- const builder methods: each sets one override and returns self ---
    pub const fn check_not_starved(mut self) -> Self {
        self.not_starved = Some(true);
        self
    }
    pub const fn check_isolation(mut self) -> Self {
        self.isolation = Some(true);
        self
    }
    pub const fn max_gap_ms(mut self, ms: u64) -> Self {
        self.max_gap_ms = Some(ms);
        self
    }
    pub const fn max_spread_pct(mut self, pct: f64) -> Self {
        self.max_spread_pct = Some(pct);
        self
    }
    pub const fn max_throughput_cv(mut self, v: f64) -> Self {
        self.max_throughput_cv = Some(v);
        self
    }
    pub const fn min_work_rate(mut self, v: f64) -> Self {
        self.min_work_rate = Some(v);
        self
    }
    pub const fn max_p99_wake_latency_ns(mut self, v: u64) -> Self {
        self.max_p99_wake_latency_ns = Some(v);
        self
    }
    pub const fn max_wake_latency_cv(mut self, v: f64) -> Self {
        self.max_wake_latency_cv = Some(v);
        self
    }
    pub const fn min_iteration_rate(mut self, v: f64) -> Self {
        self.min_iteration_rate = Some(v);
        self
    }
    pub const fn max_migration_ratio(mut self, v: f64) -> Self {
        self.max_migration_ratio = Some(v);
        self
    }
    pub const fn max_imbalance_ratio(mut self, v: f64) -> Self {
        self.max_imbalance_ratio = Some(v);
        self
    }
    pub const fn max_local_dsq_depth(mut self, v: u32) -> Self {
        self.max_local_dsq_depth = Some(v);
        self
    }
    pub const fn fail_on_stall(mut self, v: bool) -> Self {
        self.fail_on_stall = Some(v);
        self
    }
    pub const fn sustained_samples(mut self, v: usize) -> Self {
        self.sustained_samples = Some(v);
        self
    }
    pub const fn max_fallback_rate(mut self, v: f64) -> Self {
        self.max_fallback_rate = Some(v);
        self
    }
    pub const fn max_keep_last_rate(mut self, v: f64) -> Self {
        self.max_keep_last_rate = Some(v);
        self
    }
    pub const fn min_page_locality(mut self, v: f64) -> Self {
        self.min_page_locality = Some(v);
        self
    }
    pub const fn max_cross_node_migration_ratio(mut self, v: f64) -> Self {
        self.max_cross_node_migration_ratio = Some(v);
        self
    }
    pub const fn max_slow_tier_ratio(mut self, v: f64) -> Self {
        self.max_slow_tier_ratio = Some(v);
        self
    }
    /// True when any worker-level check is configured (monitor-only
    /// fields do not count).
    pub const fn has_worker_checks(&self) -> bool {
        self.not_starved.is_some()
            || self.isolation.is_some()
            || self.max_gap_ms.is_some()
            || self.max_spread_pct.is_some()
            || self.max_throughput_cv.is_some()
            || self.min_work_rate.is_some()
            || self.max_p99_wake_latency_ns.is_some()
            || self.max_wake_latency_cv.is_some()
            || self.min_iteration_rate.is_some()
            || self.max_migration_ratio.is_some()
            || self.min_page_locality.is_some()
            || self.max_cross_node_migration_ratio.is_some()
            || self.max_slow_tier_ratio.is_some()
    }
    /// Field-wise overlay: `other`'s `Some` values win, otherwise keep
    /// `self`'s. Written as explicit `match`es because `Option::or` is
    /// not usable in a `const fn` here.
    pub const fn merge(&self, other: &Assert) -> Assert {
        Assert {
            not_starved: match other.not_starved {
                Some(v) => Some(v),
                None => self.not_starved,
            },
            isolation: match other.isolation {
                Some(v) => Some(v),
                None => self.isolation,
            },
            max_gap_ms: match other.max_gap_ms {
                Some(v) => Some(v),
                None => self.max_gap_ms,
            },
            max_spread_pct: match other.max_spread_pct {
                Some(v) => Some(v),
                None => self.max_spread_pct,
            },
            max_throughput_cv: match other.max_throughput_cv {
                Some(v) => Some(v),
                None => self.max_throughput_cv,
            },
            min_work_rate: match other.min_work_rate {
                Some(v) => Some(v),
                None => self.min_work_rate,
            },
            max_p99_wake_latency_ns: match other.max_p99_wake_latency_ns {
                Some(v) => Some(v),
                None => self.max_p99_wake_latency_ns,
            },
            max_wake_latency_cv: match other.max_wake_latency_cv {
                Some(v) => Some(v),
                None => self.max_wake_latency_cv,
            },
            min_iteration_rate: match other.min_iteration_rate {
                Some(v) => Some(v),
                None => self.min_iteration_rate,
            },
            max_migration_ratio: match other.max_migration_ratio {
                Some(v) => Some(v),
                None => self.max_migration_ratio,
            },
            max_imbalance_ratio: match other.max_imbalance_ratio {
                Some(v) => Some(v),
                None => self.max_imbalance_ratio,
            },
            max_local_dsq_depth: match other.max_local_dsq_depth {
                Some(v) => Some(v),
                None => self.max_local_dsq_depth,
            },
            fail_on_stall: match other.fail_on_stall {
                Some(v) => Some(v),
                None => self.fail_on_stall,
            },
            sustained_samples: match other.sustained_samples {
                Some(v) => Some(v),
                None => self.sustained_samples,
            },
            max_fallback_rate: match other.max_fallback_rate {
                Some(v) => Some(v),
                None => self.max_fallback_rate,
            },
            max_keep_last_rate: match other.max_keep_last_rate {
                Some(v) => Some(v),
                None => self.max_keep_last_rate,
            },
            min_page_locality: match other.min_page_locality {
                Some(v) => Some(v),
                None => self.min_page_locality,
            },
            max_cross_node_migration_ratio: match other.max_cross_node_migration_ratio {
                Some(v) => Some(v),
                None => self.max_cross_node_migration_ratio,
            },
            max_slow_tier_ratio: match other.max_slow_tier_ratio {
                Some(v) => Some(v),
                None => self.max_slow_tier_ratio,
            },
        }
    }
    /// Lower this config into a concrete worker-level [`AssertPlan`];
    /// unset boolean checks default to disabled.
    pub(crate) fn worker_plan(&self) -> AssertPlan {
        AssertPlan {
            not_starved: self.not_starved.unwrap_or(false),
            isolation: self.isolation.unwrap_or(false),
            max_gap_ms: self.max_gap_ms,
            max_spread_pct: self.max_spread_pct,
            max_throughput_cv: self.max_throughput_cv,
            min_work_rate: self.min_work_rate,
            max_p99_wake_latency_ns: self.max_p99_wake_latency_ns,
            max_wake_latency_cv: self.max_wake_latency_cv,
            min_iteration_rate: self.min_iteration_rate,
            max_migration_ratio: self.max_migration_ratio,
            min_page_locality: self.min_page_locality,
            max_cross_node_migration_ratio: self.max_cross_node_migration_ratio,
            max_slow_tier_ratio: self.max_slow_tier_ratio,
        }
    }
    /// Run the worker plan without NUMA context (NUMA-gated checks skip).
    pub fn assert_cgroup(
        &self,
        reports: &[crate::workload::WorkerReport],
        cpuset: Option<&BTreeSet<usize>>,
    ) -> AssertResult {
        self.worker_plan().assert_cgroup(reports, cpuset, None)
    }
    /// Run the worker plan with explicit NUMA-node context.
    pub fn assert_cgroup_with_numa(
        &self,
        reports: &[crate::workload::WorkerReport],
        cpuset: Option<&BTreeSet<usize>>,
        numa_nodes: Option<&BTreeSet<usize>>,
    ) -> AssertResult {
        self.worker_plan()
            .assert_cgroup(reports, cpuset, numa_nodes)
    }
    /// Check an externally computed locality value against this config's
    /// `min_page_locality` threshold.
    pub fn assert_page_locality(
        &self,
        observed: f64,
        total_pages: u64,
        local_pages: u64,
    ) -> AssertResult {
        assert_page_locality(observed, self.min_page_locality, total_pages, local_pages)
    }
    /// Check migration counts against this config's
    /// `max_cross_node_migration_ratio` threshold.
    pub fn assert_cross_node_migration(
        &self,
        migrated_pages: u64,
        total_pages: u64,
    ) -> AssertResult {
        assert_cross_node_migration(
            migrated_pages,
            total_pages,
            self.max_cross_node_migration_ratio,
        )
    }
    /// Build monitor thresholds, filling unset overrides from
    /// `MonitorThresholds::DEFAULT`.
    pub(crate) fn monitor_thresholds(&self) -> crate::monitor::MonitorThresholds {
        use crate::monitor::MonitorThresholds;
        let d = MonitorThresholds::DEFAULT;
        MonitorThresholds {
            max_imbalance_ratio: self.max_imbalance_ratio.unwrap_or(d.max_imbalance_ratio),
            max_local_dsq_depth: self.max_local_dsq_depth.unwrap_or(d.max_local_dsq_depth),
            fail_on_stall: self.fail_on_stall.unwrap_or(d.fail_on_stall),
            sustained_samples: self.sustained_samples.unwrap_or(d.sustained_samples),
            max_fallback_rate: self.max_fallback_rate.unwrap_or(d.max_fallback_rate),
            max_keep_last_rate: self.max_keep_last_rate.unwrap_or(d.max_keep_last_rate),
        }
    }
}
pub mod claim;
pub use claim::{ClaimBuilder, SeqClaim, SetClaim, Verdict};
/// Fail if any worker ran on a CPU outside the `expected` cpuset, with
/// one `Isolation` detail per offending worker.
pub fn assert_isolation(reports: &[WorkerReport], expected: &BTreeSet<usize>) -> AssertResult {
    let mut result = AssertResult::pass();
    for worker in reports {
        // CPUs this worker touched that are not in the allowed set.
        let stray: BTreeSet<usize> = worker.cpus_used.difference(expected).copied().collect();
        if stray.is_empty() {
            continue;
        }
        result.passed = false;
        result.details.push(AssertDetail::new(
            DetailKind::Isolation,
            format!("tid {} ran on unexpected CPUs {:?}", worker.tid, stray),
        ));
    }
    result
}
/// Nearest-rank percentile of an already-sorted slice: the value at
/// rank ceil(n*p), clamped into bounds. Returns 0 for an empty slice.
fn percentile(sorted: &[u64], p: f64) -> u64 {
    let n = sorted.len();
    if n == 0 {
        return 0;
    }
    debug_assert!(
        sorted.windows(2).all(|w| w[0] <= w[1]),
        "percentile() requires sorted input; got slice with out-of-order pair",
    );
    // Nearest-rank index; saturating_sub covers p <= 0, min covers p > 1.
    let rank = (n as f64 * p).ceil() as usize;
    sorted[rank.saturating_sub(1).min(n - 1)]
}
/// Asserts that no worker was starved or stuck, and that off-CPU time is
/// spread fairly across workers.
///
/// Failure kinds recorded:
/// - `Starved`: a worker completed zero work units;
/// - `Unfair`: the spread of per-worker off-CPU percentages exceeds
///   `spread_threshold_pct()` (judged only with at least two samples);
/// - `Stuck`: a worker's longest scheduling gap exceeds `gap_threshold_ms()`.
///
/// Regardless of pass/fail, aggregates per-cgroup and scenario-wide stats
/// (wake-latency percentiles, run delay, migration ratio) into `r.stats`.
/// An empty `reports` slice passes trivially with default stats.
pub fn assert_not_starved(reports: &[WorkerReport]) -> AssertResult {
    let mut r = AssertResult::pass();
    if reports.is_empty() {
        return r;
    }
    // Distinct set of CPUs any worker ran on, for reporting.
    let cpus: BTreeSet<usize> = reports
        .iter()
        .flat_map(|w| w.cpus_used.iter().copied())
        .collect();
    // Per-worker off-CPU time as a percentage of wall time; workers with
    // zero wall time contribute no sample.
    let mut pcts: Vec<f64> = Vec::new();
    for w in reports {
        if w.work_units == 0 {
            r.passed = false;
            r.details.push(AssertDetail::new(
                DetailKind::Starved,
                format!("tid {} starved (0 work units)", w.tid),
            ));
        }
        if w.wall_time_ns > 0 {
            pcts.push(w.off_cpu_ns as f64 / w.wall_time_ns as f64 * 100.0);
        }
    }
    let min = pcts.iter().cloned().reduce(f64::min).unwrap_or(0.0);
    let max = pcts.iter().cloned().reduce(f64::max).unwrap_or(0.0);
    let avg = if pcts.is_empty() {
        0.0
    } else {
        pcts.iter().sum::<f64>() / pcts.len() as f64
    };
    // Fairness metric: distance between the most and least off-CPU workers.
    let spread = max - min;
    let worst_gap = reports.iter().max_by_key(|w| w.max_gap_ms);
    let (gap_ms, gap_cpu) = worst_gap
        .map(|w| (w.max_gap_ms, w.max_gap_cpu))
        .unwrap_or((0, 0));
    // Pool resume (wake) latencies from all workers into one sample set.
    let all_latencies: Vec<u64> = reports
        .iter()
        .flat_map(|w| w.resume_latencies_ns.iter().copied())
        .collect();
    let (p99_us, median_us, lat_cv) = if all_latencies.is_empty() {
        (0.0, 0.0, 0.0)
    } else {
        let mut sorted = all_latencies.clone();
        sorted.sort_unstable();
        // percentile() yields nanoseconds; report in microseconds.
        let p99 = percentile(&sorted, 0.99) as f64 / 1000.0;
        let median = percentile(&sorted, 0.5) as f64 / 1000.0;
        let n = all_latencies.len() as f64;
        let mean_ns = all_latencies.iter().sum::<u64>() as f64 / n;
        // Coefficient of variation (stddev / mean) of wake latency; 0 when
        // the mean is 0 to avoid dividing by zero.
        let cv = if mean_ns > 0.0 {
            let variance = all_latencies
                .iter()
                .map(|&v| (v as f64 - mean_ns).powi(2))
                .sum::<f64>()
                / n;
            variance.sqrt() / mean_ns
        } else {
            0.0
        };
        (p99, median, cv)
    };
    let total_iters: u64 = reports.iter().map(|w| w.iterations).sum();
    // Per-worker scheduler run delay, converted ns -> us.
    let run_delays: Vec<f64> = reports
        .iter()
        .map(|w| w.schedstat_run_delay_ns as f64 / 1000.0)
        .collect();
    let mean_run_delay = if run_delays.is_empty() {
        0.0
    } else {
        run_delays.iter().sum::<f64>() / run_delays.len() as f64
    };
    let worst_run_delay = run_delays.iter().cloned().reduce(f64::max).unwrap_or(0.0);
    let total_mig: u64 = reports.iter().map(|w| w.migration_count).sum();
    // Migrations normalized by total iterations (0 when no iterations ran).
    let mig_ratio = if total_iters > 0 {
        total_mig as f64 / total_iters as f64
    } else {
        0.0
    };
    let cg = CgroupStats {
        num_workers: reports.len(),
        num_cpus: cpus.len(),
        avg_off_cpu_pct: avg,
        min_off_cpu_pct: min,
        max_off_cpu_pct: max,
        spread,
        max_gap_ms: gap_ms,
        max_gap_cpu: gap_cpu,
        total_migrations: total_mig,
        migration_ratio: mig_ratio,
        p99_wake_latency_us: p99_us,
        median_wake_latency_us: median_us,
        wake_latency_cv: lat_cv,
        total_iterations: total_iters,
        mean_run_delay_us: mean_run_delay,
        worst_run_delay_us: worst_run_delay,
        // NUMA metrics are not computed by this assertion; left zeroed.
        page_locality: 0.0,
        cross_node_migration_ratio: 0.0,
        ext_metrics: BTreeMap::new(),
    };
    // Unfairness check: needs >= 2 samples for a meaningful spread.
    let spread_limit = spread_threshold_pct();
    if spread > spread_limit && pcts.len() >= 2 {
        r.passed = false;
        r.details.push(AssertDetail::new(
            DetailKind::Unfair,
            format!(
                "unfair cgroup: spread={:.0}% ({:.0}-{:.0}%) {} workers on {} cpus (threshold {:.0}%)",
                spread,
                min,
                max,
                reports.len(),
                cpus.len(),
                spread_limit,
            ),
        ));
    }
    // Stuck check: every worker whose longest gap exceeds the threshold is
    // reported individually, not just the worst one.
    let gap_limit = gap_threshold_ms();
    for w in reports {
        if w.max_gap_ms > gap_limit {
            r.passed = false;
            r.details.push(AssertDetail::new(
                DetailKind::Stuck,
                format!(
                    "tid {} stuck {}ms on cpu{} at +{}ms (threshold {}ms)",
                    w.tid, w.max_gap_ms, w.max_gap_cpu, w.max_gap_at_ms, gap_limit,
                ),
            ));
        }
    }
    r.stats = ScenarioStats {
        total_workers: reports.len(),
        total_cpus: cpus.len(),
        total_migrations: reports.iter().map(|w| w.migration_count).sum(),
        worst_spread: spread,
        worst_gap_ms: gap_ms,
        worst_gap_cpu: gap_cpu,
        worst_migration_ratio: cg.migration_ratio,
        worst_p99_wake_latency_us: cg.p99_wake_latency_us,
        worst_median_wake_latency_us: cg.median_wake_latency_us,
        worst_wake_latency_cv: cg.wake_latency_cv,
        total_iterations: cg.total_iterations,
        worst_mean_run_delay_us: cg.mean_run_delay_us,
        worst_run_delay_us: cg.worst_run_delay_us,
        // NUMA metrics are not computed by this assertion; left zeroed.
        worst_page_locality: 0.0,
        worst_cross_node_migration_ratio: 0.0,
        worst_wake_latency_tail_ratio: cg.wake_latency_tail_ratio(),
        worst_iterations_per_worker: cg.iterations_per_worker(),
        ext_metrics: cg.ext_metrics.clone(),
        cgroups: vec![cg],
    };
    r
}
/// Checks that per-worker throughput (work units per CPU-second) is both
/// consistent across workers and above an optional floor.
///
/// `max_cv` bounds the coefficient of variation of the per-worker rates;
/// `min_rate` is a hard floor every worker must meet. A `None` limit
/// disables the corresponding check. An empty slice passes trivially.
pub fn assert_throughput_parity(
    reports: &[WorkerReport],
    max_cv: Option<f64>,
    min_rate: Option<f64>,
) -> AssertResult {
    let mut result = AssertResult::pass();
    if reports.is_empty() {
        return result;
    }
    // Work units per second of *CPU* time; a worker with no recorded CPU
    // time contributes a rate of zero.
    let rates: Vec<f64> = reports
        .iter()
        .map(|w| match w.cpu_time_ns {
            0 => 0.0,
            ns => w.work_units as f64 / (ns as f64 / 1e9),
        })
        .collect();
    let count = rates.len() as f64;
    let mean = rates.iter().sum::<f64>() / count;
    if let Some(cv_limit) = max_cv {
        if reports.iter().all(|w| w.cpu_time_ns == 0) {
            // CV is meaningless when every rate is zero by construction.
            result.passed = false;
            result.details.push(AssertDetail::new(
                DetailKind::Benchmark,
                format!(
                    "throughput CV undefined: all {} workers recorded zero cpu_time_ns (limit {cv_limit:.3})",
                    reports.len()
                ),
            ));
        } else if mean > 0.0 && rates.len() >= 2 {
            // Population variance of the per-worker rates.
            let variance =
                rates.iter().map(|rate| (rate - mean).powi(2)).sum::<f64>() / count;
            let cv = variance.sqrt() / mean;
            if cv > cv_limit {
                result.passed = false;
                result.details.push(AssertDetail::new(
                    DetailKind::Benchmark,
                    format!(
                        "throughput CV {cv:.3} exceeds limit {cv_limit:.3} (mean={mean:.0} work/cpu_s)"
                    ),
                ));
            }
        }
    }
    if let Some(floor) = min_rate {
        // Report every worker below the floor, not just the first.
        for (w, &rate) in reports.iter().zip(&rates) {
            if rate < floor {
                result.passed = false;
                result.details.push(AssertDetail::new(
                    DetailKind::Benchmark,
                    format!(
                        "worker {} throughput {rate:.0} work/cpu_s below floor {floor:.0}",
                        w.tid
                    ),
                ));
            }
        }
    }
    result
}
/// Runs latency/throughput benchmark assertions over worker reports.
///
/// Optional limits: `max_p99_ns` bounds the pooled p99 wake latency,
/// `max_cv` bounds the wake-latency coefficient of variation, and
/// `min_iter_rate` is a per-worker iterations-per-second floor. `None`
/// disables the corresponding check; an empty slice yields a skip result.
pub fn assert_benchmarks(
    reports: &[WorkerReport],
    max_p99_ns: Option<u64>,
    max_cv: Option<f64>,
    min_iter_rate: Option<f64>,
) -> AssertResult {
    let mut result = AssertResult::pass();
    if reports.is_empty() {
        return AssertResult::skip("no worker reports — benchmark skipped");
    }
    // Pool resume (wake) latencies from every worker into one sample set.
    let latencies: Vec<u64> = reports
        .iter()
        .flat_map(|w| w.resume_latencies_ns.iter().copied())
        .collect();
    if let Some(p99_limit) = max_p99_ns
        && !latencies.is_empty()
    {
        let mut ordered = latencies.clone();
        ordered.sort_unstable();
        let p99 = percentile(&ordered, 0.99);
        if p99 > p99_limit {
            result.passed = false;
            result.details.push(AssertDetail::new(
                DetailKind::Benchmark,
                format!(
                    "p99 wake latency {p99}ns exceeds limit {p99_limit}ns ({} samples)",
                    ordered.len()
                ),
            ));
        }
    }
    if let Some(cv_limit) = max_cv
        && latencies.len() >= 2
    {
        let samples = latencies.len() as f64;
        let mean = latencies.iter().sum::<u64>() as f64 / samples;
        // A zero mean would make the CV undefined; skip silently then.
        if mean > 0.0 {
            let variance = latencies
                .iter()
                .map(|&v| (v as f64 - mean).powi(2))
                .sum::<f64>()
                / samples;
            let cv = variance.sqrt() / mean;
            if cv > cv_limit {
                result.passed = false;
                result.details.push(AssertDetail::new(
                    DetailKind::Benchmark,
                    format!(
                        "wake latency CV {cv:.3} exceeds limit {cv_limit:.3} (mean={mean:.0}ns)"
                    ),
                ));
            }
        }
    }
    if let Some(rate_floor) = min_iter_rate {
        // Workers with no recorded wall time have no meaningful rate.
        for w in reports.iter().filter(|w| w.wall_time_ns > 0) {
            let rate = w.iterations as f64 / (w.wall_time_ns as f64 / 1e9);
            if rate < rate_floor {
                result.passed = false;
                result.details.push(AssertDetail::new(
                    DetailKind::Benchmark,
                    format!(
                        "worker {} iteration rate {rate:.1}/s below floor {rate_floor:.1}/s",
                        w.tid
                    ),
                ));
            }
        }
    }
    result
}
/// Verifies that scheduler (scx) event counters are within bounds.
///
/// With `max_count == None` every counter must be exactly zero; with
/// `Some(bound)` each counter must lie in `0..=bound`. One failure
/// detail is recorded per out-of-bounds event.
pub fn assert_scx_events_clean(events: &[(&str, i64)], max_count: Option<i64>) -> AssertResult {
    let mut result = AssertResult::pass();
    for &(name, count) in events {
        let out_of_bounds = match max_count {
            None => count != 0,
            Some(bound) => !(0..=bound).contains(&count),
        };
        if out_of_bounds {
            result.passed = false;
            let bound_desc = max_count.map_or_else(|| "0".to_string(), |b| b.to_string());
            result.details.push(AssertDetail::new(
                DetailKind::SchedulerEvent,
                format!("scx event `{name}` count {count} exceeds bound {bound_desc}",),
            ));
        }
    }
    result
}
/// Optional limits evaluated by [`assert_baseline`]: each `None` field
/// disables the corresponding check.
#[must_use = "SchedulerBaseline only takes effect when passed to assert_baseline"]
#[derive(Debug, Clone, Copy, Default)]
pub struct SchedulerBaseline {
    pub max_p99_wake_latency_ns: Option<u64>,
    pub max_iteration_cost_p99_ns: Option<u64>,
    pub max_migrations: Option<u64>,
    pub min_work_units: Option<u64>,
}
impl SchedulerBaseline {
    /// A baseline with every check disabled.
    pub const EMPTY: SchedulerBaseline = SchedulerBaseline {
        max_p99_wake_latency_ns: None,
        max_iteration_cost_p99_ns: None,
        max_migrations: None,
        min_work_units: None,
    };
    /// A tight baseline: 10ms p99 wake latency, 1ms p99 iteration cost,
    /// at most 1000 migrations, and at least one work unit per worker.
    pub const fn strict() -> Self {
        Self {
            max_p99_wake_latency_ns: Some(10_000_000),
            max_iteration_cost_p99_ns: Some(1_000_000),
            max_migrations: Some(1000),
            min_work_units: Some(1),
        }
    }
    /// Sets the p99 wake-latency limit (nanoseconds).
    pub const fn with_max_p99_wake_latency_ns(self, v: u64) -> Self {
        Self {
            max_p99_wake_latency_ns: Some(v),
            ..self
        }
    }
    /// Sets the p99 iteration-cost limit (nanoseconds).
    pub const fn with_max_iteration_cost_p99_ns(self, v: u64) -> Self {
        Self {
            max_iteration_cost_p99_ns: Some(v),
            ..self
        }
    }
    /// Sets the total-migrations limit.
    pub const fn with_max_migrations(self, v: u64) -> Self {
        Self {
            max_migrations: Some(v),
            ..self
        }
    }
    /// Sets the per-worker work-unit floor.
    pub const fn with_min_work_units(self, v: u64) -> Self {
        Self {
            min_work_units: Some(v),
            ..self
        }
    }
}
/// Evaluates worker reports against a [`SchedulerBaseline`].
///
/// Each configured limit is checked independently and all failures are
/// accumulated, so one run reports every violated bound. Returns a skip
/// result when there are no reports to evaluate.
pub fn assert_baseline(reports: &[WorkerReport], baseline: &SchedulerBaseline) -> AssertResult {
    if reports.is_empty() {
        return AssertResult::skip("no worker reports to evaluate");
    }
    let mut r = AssertResult::pass();
    // Delegate the wake-latency p99 check to assert_benchmarks so the
    // detail formatting stays consistent with direct benchmark runs.
    if baseline.max_p99_wake_latency_ns.is_some() {
        r.merge(assert_benchmarks(
            reports,
            baseline.max_p99_wake_latency_ns,
            None,
            None,
        ));
    }
    if let Some(cost_limit) = baseline.max_iteration_cost_p99_ns {
        // Pool iteration-cost samples across all workers and sort in place;
        // the previous version cloned the vector before sorting for no reason.
        let mut sorted: Vec<u64> = reports
            .iter()
            .flat_map(|w| w.iteration_costs_ns.iter().copied())
            .collect();
        if !sorted.is_empty() {
            sorted.sort_unstable();
            let p99 = percentile(&sorted, 0.99);
            if p99 > cost_limit {
                r.passed = false;
                r.details.push(AssertDetail::new(
                    DetailKind::Benchmark,
                    format!(
                        "p99 iteration cost {p99}ns exceeds limit {cost_limit}ns ({} samples)",
                        sorted.len(),
                    ),
                ));
            }
        }
    }
    if let Some(max_mig) = baseline.max_migrations {
        let total_mig: u64 = reports.iter().map(|w| w.migration_count).sum();
        if total_mig > max_mig {
            r.passed = false;
            r.details.push(AssertDetail::new(
                DetailKind::Migration,
                format!(
                    "total migrations {total_mig} exceeds limit {max_mig} ({} workers)",
                    reports.len(),
                ),
            ));
        }
    }
    if let Some(min_units) = baseline.min_work_units {
        // Each worker is judged individually against the work-unit floor.
        for w in reports {
            if w.work_units < min_units {
                r.passed = false;
                r.details.push(AssertDetail::new(
                    DetailKind::Starved,
                    format!(
                        "tid {} work_units {} below floor {min_units}",
                        w.tid, w.work_units,
                    ),
                ));
            }
        }
    }
    r
}
// Unit-test submodules; compiled only for `cargo test` builds.
#[cfg(test)]
mod tests_assert;
#[cfg(test)]
mod tests_benchmarks;
#[cfg(test)]
mod tests_common;
#[cfg(test)]
mod tests_merge;
#[cfg(test)]
mod tests_note;
#[cfg(test)]
mod tests_numa;
#[cfg(test)]
mod tests_percentile;
#[cfg(test)]
mod tests_plan;
#[cfg(test)]
mod tests_sched_died;
#[cfg(test)]
mod tests_serde;
#[cfg(test)]
mod tests_stats;
#[cfg(test)]
mod tests_verdict;
#[cfg(test)]
mod tests_worker;