pub mod affinity;
pub mod backdrop;
pub mod basic;
mod catalog;
pub mod cpuset;
pub mod dynamic;
pub mod interaction;
pub mod nested;
pub mod ops;
pub mod payload_run;
pub mod performance;
pub mod scenarios;
pub mod stress;
pub use backdrop::Backdrop;
pub use catalog::all_scenarios;
use std::collections::BTreeSet;
use std::thread;
use std::time::Duration;
use anyhow::Result;
use nix::sys::signal::kill;
use nix::unistd::Pid;
use crate::assert::{self, AssertResult};
use crate::topology::TestTopology;
use crate::workload::*;
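/// Liveness probe via the null signal: `kill(pid, 0)` checks for existence
/// without delivering anything. Any error, including EPERM, is reported as
/// not alive, and non-positive pids are rejected up front so the probe can
/// never target a process group.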
fn process_alive(pid: libc::pid_t) -> bool {
if pid <= 0 {
return false;
}
kill(Pid::from_raw(pid), None).is_ok()
}
pub(crate) use crate::read_kmsg;
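/// Scheduler flag declarations. `ALL_DECLS` is the single source of truth:
/// the string constants, `ALL`, and the compile-time assertions below are
/// all derived from it, so adding a flag means touching this module once.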
pub mod flags {
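/// One scheduler flag: its short name, any extra argument strings, and the
/// flags it depends on. `requires` points at the other statics so the
/// dependency graph lives in the declarations themselves.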
pub struct FlagDecl {
pub name: &'static str,
pub args: &'static [&'static str],
pub requires: &'static [&'static FlagDecl],
}
impl std::fmt::Debug for FlagDecl {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let req_names: Vec<&str> = self.requires.iter().map(|d| d.name).collect();
f.debug_struct("FlagDecl")
.field("name", &self.name)
.field("args", &self.args)
.field("requires", &req_names)
.finish()
}
}
pub static LLC_DECL: FlagDecl = FlagDecl {
name: "llc",
args: &[],
requires: &[],
};
pub static BORROW_DECL: FlagDecl = FlagDecl {
name: "borrow",
args: &[],
requires: &[],
};
pub static STEAL_DECL: FlagDecl = FlagDecl {
name: "steal",
args: &[],
requires: &[&LLC_DECL],
};
pub static REBAL_DECL: FlagDecl = FlagDecl {
name: "rebal",
args: &[],
requires: &[],
};
pub static REJECT_PIN_DECL: FlagDecl = FlagDecl {
name: "reject-pin",
args: &[],
requires: &[],
};
pub static NO_CTRL_DECL: FlagDecl = FlagDecl {
name: "no-ctrl",
args: &[],
requires: &[],
};
pub static ALL_DECLS: &[&FlagDecl] = &[
&LLC_DECL,
&BORROW_DECL,
&STEAL_DECL,
&REBAL_DECL,
&REJECT_PIN_DECL,
&NO_CTRL_DECL,
];
pub const N_FLAGS: usize = 6;
const _: () = assert!(
ALL_DECLS.len() == N_FLAGS,
"N_FLAGS must equal ALL_DECLS.len(); update both together",
);
pub const LLC: &str = ALL_DECLS[0].name;
pub const BORROW: &str = ALL_DECLS[1].name;
pub const STEAL: &str = ALL_DECLS[2].name;
pub const REBAL: &str = ALL_DECLS[3].name;
pub const REJECT_PIN: &str = ALL_DECLS[4].name;
pub const NO_CTRL: &str = ALL_DECLS[5].name;
const _: () = {
const fn bytes_eq(a: &[u8], b: &[u8]) -> bool {
if a.len() != b.len() {
return false;
}
let mut i = 0;
while i < a.len() {
if a[i] != b[i] {
return false;
}
i += 1;
}
true
}
assert!(
bytes_eq(LLC.as_bytes(), b"llc"),
"ALL_DECLS[0] must be `llc`"
);
assert!(
bytes_eq(BORROW.as_bytes(), b"borrow"),
"ALL_DECLS[1] must be `borrow`"
);
assert!(
bytes_eq(STEAL.as_bytes(), b"steal"),
"ALL_DECLS[2] must be `steal`"
);
assert!(
bytes_eq(REBAL.as_bytes(), b"rebal"),
"ALL_DECLS[3] must be `rebal`"
);
assert!(
bytes_eq(REJECT_PIN.as_bytes(), b"reject-pin"),
"ALL_DECLS[4] must be `reject-pin`"
);
assert!(
bytes_eq(NO_CTRL.as_bytes(), b"no-ctrl"),
"ALL_DECLS[5] must be `no-ctrl`"
);
};
const fn build_all() -> [&'static str; N_FLAGS] {
let mut out = [""; N_FLAGS];
let mut i = 0;
while i < N_FLAGS {
out[i] = ALL_DECLS[i].name;
i += 1;
}
out
}
pub static ALL: &[&str] = &build_all();
pub fn from_short_name(s: &str) -> Option<&'static str> {
ALL.iter().find(|&&f| f == s).copied()
}
pub fn decl_by_name(name: &str) -> Option<&'static FlagDecl> {
ALL_DECLS.iter().find(|d| d.name == name).copied()
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct FlagDeclJson {
pub name: String,
pub args: Vec<String>,
pub requires: Vec<String>,
}
impl FlagDeclJson {
pub fn from_decl(decl: &FlagDecl) -> Self {
Self {
name: decl.name.to_string(),
args: decl.args.iter().map(|s| s.to_string()).collect(),
requires: decl.requires.iter().map(|r| r.name.to_string()).collect(),
}
}
}
}
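/// A concrete flag combination for one scenario run. `name()` joins the
/// flags with `+`, or yields `"default"` for the empty profile.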
#[derive(Debug, Clone)]
pub struct FlagProfile {
pub flags: Vec<&'static str>,
}
impl FlagProfile {
pub fn name(&self) -> String {
if self.flags.is_empty() {
"default".into()
} else {
self.flags.join("+")
}
}
}
pub use crate::workload::AffinityKind;
fn flag_requires(flag: &str) -> Vec<&'static str> {
flags::decl_by_name(flag)
.map(|d| d.requires.iter().map(|r| r.name).collect())
.unwrap_or_default()
}
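/// Enumerates the power set of the optional flags with a `u32` bitmask,
/// always including `required` and never `excluded`, then drops any subset
/// whose `requires_fn` dependencies are unmet. Survivors are sorted into
/// `all_names` order so profile names stay stable.
///
/// A minimal sketch of the dependency filtering (doctest `ignore`d since it
/// would need this crate's path):
///
/// ```ignore
/// let profiles = compute_flag_profiles(
///     &["llc", "steal"],
///     |f| if *f == "steal" { vec!["llc"] } else { vec![] },
///     &[],
///     &[],
/// );
/// // "steal" alone is dropped because it requires "llc":
/// assert_eq!(profiles, vec![vec![], vec!["llc"], vec!["llc", "steal"]]);
/// ```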
pub fn compute_flag_profiles<T, F>(
all_names: &[T],
requires_fn: F,
required: &[T],
excluded: &[T],
) -> Vec<Vec<T>>
where
T: Clone + PartialEq,
F: Fn(&T) -> Vec<T>,
{
let optional: Vec<T> = all_names
.iter()
.filter(|f| !required.contains(f) && !excluded.contains(f))
.cloned()
.collect();
debug_assert!(
optional.len() < 32,
"compute_flag_profiles: {} optional flags would overflow u32 power-set mask",
optional.len(),
);
let mut out = Vec::new();
for mask in 0..(1u32 << optional.len()) {
let mut fl: Vec<T> = required.to_vec();
for (i, f) in optional.iter().enumerate() {
if mask & (1 << i) != 0 {
fl.push(f.clone());
}
}
let valid = fl
.iter()
.all(|f| requires_fn(f).iter().all(|r| fl.contains(r)));
if valid {
fl.sort_by_key(|f| all_names.iter().position(|a| a == f).unwrap_or(usize::MAX));
out.push(fl);
}
}
out
}
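/// All valid `FlagProfile`s for a scenario's required/excluded sets, wired
/// to the real flag declarations.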
fn generate_profiles(required: &[&'static str], excluded: &[&'static str]) -> Vec<FlagProfile> {
compute_flag_profiles(flags::ALL, |&f| flag_requires(f), required, excluded)
.into_iter()
.map(|flags| FlagProfile { flags })
.collect()
}
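/// How a scenario carves CPUs into per-cgroup cpusets. `None` leaves cpusets
/// unset; the fractional variants take a ratio in `(0, 1)` controlling,
/// respectively, how much the sets overlap, how large the first set is, and
/// what share of CPUs is withheld from every set.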
#[derive(Clone, Debug)]
pub enum CpusetPartition {
None,
LlcAligned,
SplitHalf,
SplitMisaligned,
Overlap(f64),
Uneven(f64),
Holdback(f64),
}
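/// A custom scenario body: takes the run context and produces its own
/// `AssertResult`, bypassing the default cgroup/workload orchestration.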
pub type CustomFn = std::sync::Arc<dyn Fn(&Ctx) -> Result<AssertResult> + Send + Sync>;
#[derive(Clone)]
pub enum Action {
Steady,
Custom(CustomFn),
}
impl Action {
pub fn custom<F>(f: F) -> Self
where
F: Fn(&Ctx) -> Result<AssertResult> + Send + Sync + 'static,
{
Action::Custom(std::sync::Arc::new(f))
}
}
impl std::fmt::Debug for Action {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Action::Steady => write!(f, "Steady"),
Action::Custom(_) => write!(f, "Custom(<closure>)"),
}
}
}
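/// A declarative test case: flag constraints, cgroup count and cpuset
/// layout, per-cgroup workloads, and the action. `run_scenario` interprets
/// everything except `Action::Custom`, which short-circuits to its closure.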
#[derive(Clone, Debug)]
pub struct Scenario {
pub name: &'static str,
pub category: &'static str,
pub description: &'static str,
pub required_flags: &'static [&'static flags::FlagDecl],
pub excluded_flags: &'static [&'static flags::FlagDecl],
pub num_cgroups: usize,
pub cpuset_partition: CpusetPartition,
pub cgroup_works: Vec<Work>,
pub action: Action,
}
impl Scenario {
pub fn profiles(&self) -> Vec<FlagProfile> {
let req: Vec<&'static str> = self.required_flags.iter().map(|d| d.name).collect();
let excl: Vec<&'static str> = self.excluded_flags.iter().map(|d| d.name).collect();
generate_profiles(&req, &excl)
}
pub fn qualified_name(&self, p: &FlagProfile) -> String {
format!("{}/{}", self.name, p.name())
}
}
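/// RAII owner of the cgroups a scenario creates: `Drop` removes them in
/// reverse creation order, silently tolerating ENOENT and logging anything
/// else with an errno-specific hint.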
#[must_use = "dropping a CgroupGroup immediately destroys the cgroups it manages"]
pub struct CgroupGroup<'a> {
cgroups: &'a dyn crate::cgroup::CgroupOps,
names: Vec<String>,
}
impl std::fmt::Debug for CgroupGroup<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CgroupGroup")
.field("cgroups", &self.cgroups.parent_path())
.field("names", &self.names)
.finish()
}
}
impl<'a> CgroupGroup<'a> {
pub fn new(cgroups: &'a dyn crate::cgroup::CgroupOps) -> Self {
Self {
cgroups,
names: Vec::new(),
}
}
pub fn add_cgroup(&mut self, name: &str, cpuset: &BTreeSet<usize>) -> Result<()> {
self.cgroups.create_cgroup(name)?;
self.cgroups.set_cpuset(name, cpuset)?;
self.names.push(name.to_string());
Ok(())
}
pub fn add_cgroup_no_cpuset(&mut self, name: &str) -> Result<()> {
self.cgroups.create_cgroup(name)?;
self.names.push(name.to_string());
Ok(())
}
pub fn names(&self) -> &[String] {
&self.names
}
}
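/// True when the root cause of `err` is an `io::Error` with kind `NotFound`
/// (ENOENT), the benign "already removed" case during teardown.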
pub(crate) fn is_io_not_found(err: &anyhow::Error) -> bool {
err.root_cause()
.downcast_ref::<std::io::Error>()
.is_some_and(|io| io.kind() == std::io::ErrorKind::NotFound)
}
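/// Maps well-known `remove_cgroup` errnos to a remediation hint for the
/// teardown warning; unclassified errnos yield `None` so the log line stays
/// terse.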
pub(crate) fn remove_cgroup_errno_hint(err: &anyhow::Error) -> Option<&'static str> {
let raw = err
.root_cause()
.downcast_ref::<std::io::Error>()?
.raw_os_error()?;
match raw {
libc::EBUSY => {
Some("EBUSY: cgroup still has live tasks — workloads were not drained before teardown")
}
libc::EACCES => {
Some("EACCES: permission denied — check cgroup owner / `user.slice` delegation")
}
_ => None,
}
}
impl Drop for CgroupGroup<'_> {
fn drop(&mut self) {
for name in self.names.iter().rev() {
if let Err(err) = self.cgroups.remove_cgroup(name) {
if is_io_not_found(&err) {
continue;
}
let hint = remove_cgroup_errno_hint(&err).unwrap_or("");
tracing::warn!(
cgroup = %name,
err = %format!("{err:#}"),
hint,
"CgroupGroup::drop: remove_cgroup returned non-ENOENT error",
);
}
}
}
}
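/// Per-run context threaded through every scenario: the cgroup backend,
/// topology, timing knobs, the scheduler pid (if one is under test), and
/// the assertion configuration.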
pub struct Ctx<'a> {
pub cgroups: &'a dyn crate::cgroup::CgroupOps,
pub topo: &'a TestTopology,
pub duration: Duration,
pub workers_per_cgroup: usize,
pub sched_pid: Option<libc::pid_t>,
pub settle: Duration,
pub work_type_override: Option<WorkType>,
pub assert: crate::assert::Assert,
pub wait_for_map_write: bool,
}
impl std::fmt::Debug for Ctx<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Ctx")
.field("cgroups", &self.cgroups.parent_path())
.field("topo", &self.topo)
.field("duration", &self.duration)
.field("workers_per_cgroup", &self.workers_per_cgroup)
.field("sched_pid", &self.sched_pid)
.field("settle", &self.settle)
.field("work_type_override", &self.work_type_override)
.field("assert", &self.assert)
.field("wait_for_map_write", &self.wait_for_map_write)
.finish()
}
}
impl Ctx<'_> {
pub(crate) fn active_sched_pid(&self) -> Option<libc::pid_t> {
match self.sched_pid {
Some(p) if p > 0 => Some(p),
Some(p) => {
tracing::warn!(
pid = p,
"Ctx::active_sched_pid: sched_pid=Some({p}) squashed to None; \
only positive pids are configured-scheduler values — use \
None for the unconfigured shape instead of a 0-sentinel or \
negative pid"
);
None
}
None => None,
}
}
}
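/// Builder for `Ctx`; `Ctx::builder` supplies the defaults. Illustrative
/// usage (`ignore`d: needs a live `CgroupOps` impl and topology):
///
/// ```ignore
/// let ctx = Ctx::builder(&cgroups, &topo)
///     .duration(Duration::from_secs(5))
///     .workers_per_cgroup(4)
///     .build();
/// ```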
pub struct CtxBuilder<'a> {
cgroups: &'a dyn crate::cgroup::CgroupOps,
topo: &'a TestTopology,
duration: Duration,
workers_per_cgroup: usize,
sched_pid: Option<libc::pid_t>,
settle: Duration,
work_type_override: Option<WorkType>,
assert: crate::assert::Assert,
wait_for_map_write: bool,
}
impl<'a> CtxBuilder<'a> {
pub fn duration(mut self, d: Duration) -> Self {
self.duration = d;
self
}
pub fn workers_per_cgroup(mut self, n: usize) -> Self {
self.workers_per_cgroup = n;
self
}
pub fn sched_pid(mut self, pid: Option<libc::pid_t>) -> Self {
self.sched_pid = pid;
self
}
pub fn settle(mut self, s: Duration) -> Self {
self.settle = s;
self
}
pub fn work_type_override(mut self, wt: Option<WorkType>) -> Self {
self.work_type_override = wt;
self
}
pub fn assert(mut self, a: crate::assert::Assert) -> Self {
self.assert = a;
self
}
pub fn wait_for_map_write(mut self, v: bool) -> Self {
self.wait_for_map_write = v;
self
}
pub fn build(self) -> Ctx<'a> {
Ctx {
cgroups: self.cgroups,
topo: self.topo,
duration: self.duration,
workers_per_cgroup: self.workers_per_cgroup,
sched_pid: self.sched_pid,
settle: self.settle,
work_type_override: self.work_type_override,
assert: self.assert,
wait_for_map_write: self.wait_for_map_write,
}
}
}
impl<'a> Ctx<'a> {
pub fn builder(
cgroups: &'a dyn crate::cgroup::CgroupOps,
topo: &'a TestTopology,
) -> CtxBuilder<'a> {
CtxBuilder {
cgroups,
topo,
duration: Duration::from_secs(1),
workers_per_cgroup: 1,
sched_pid: None,
settle: Duration::from_millis(0),
work_type_override: None,
assert: crate::assert::Assert::default_checks(),
wait_for_map_write: false,
}
}
pub fn payload(
&'a self,
p: &'static crate::test_support::Payload,
) -> crate::scenario::payload_run::PayloadRun<'a> {
crate::scenario::payload_run::PayloadRun::new(self, p)
}
}
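/// Spawns one workload per cgroup, moves each workload's pids into its
/// cgroup, and only then starts every handle, so all workers begin running
/// from inside their cgroups at roughly the same moment.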
fn spawn_and_move<F>(ctx: &Ctx, names: &[String], mut cfg_fn: F) -> Result<Vec<WorkloadHandle>>
where
F: FnMut(usize, &str) -> Result<WorkloadConfig>,
{
let mut handles = Vec::with_capacity(names.len());
for (i, name) in names.iter().enumerate() {
let wl = cfg_fn(i, name.as_str())?;
let h = WorkloadHandle::spawn(&wl)?;
tracing::debug!(
cgroup = %name,
workers = wl.num_workers,
pids = h.worker_pids().len(),
"spawned workers",
);
ctx.cgroups.move_tasks(name.as_str(), &h.worker_pids())?;
handles.push(h);
}
for h in &mut handles {
h.start();
}
Ok(handles)
}
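/// Drives a declarative scenario end to end: resolve cpusets (returning a
/// skip if any resolved set is empty), create cgroups, settle, verify the
/// scheduler survived setup, spawn and place workloads, run for
/// `ctx.duration` while polling scheduler liveness, then stop everything and
/// fold per-cgroup assertions into one result. Failures get the kernel log
/// attached; a scheduler death is recorded as its own failing detail.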
pub fn run_scenario(scenario: &Scenario, ctx: &Ctx) -> Result<AssertResult> {
tracing::info!(scenario = scenario.name, "running");
if let Action::Custom(f) = &scenario.action {
return f(ctx);
}
let cpusets = resolve_cpusets(&scenario.cpuset_partition, scenario.num_cgroups, ctx.topo);
if let Some(ref cs) = cpusets
&& cs.iter().any(|s| s.is_empty())
{
return Ok(AssertResult::skip("not enough CPUs/LLCs"));
}
let scenario_start = std::time::Instant::now();
let names: Vec<String> = (0..scenario.num_cgroups)
.map(|i| format!("cg_{i}"))
.collect();
let mut cgroup_guard = CgroupGroup::new(ctx.cgroups);
for (i, name) in names.iter().enumerate() {
cgroup_guard.add_cgroup_no_cpuset(name)?;
// `.get(i)` instead of indexing: resolve_cpusets can return fewer sets
// than num_cgroups (e.g. LlcAligned capped at the LLC count); cgroups
// beyond the resolved sets simply run unrestricted.
if let Some(set) = cpusets.as_ref().and_then(|cs| cs.get(i)) {
ctx.cgroups.set_cpuset(name, set)?;
}
}
tracing::debug!(cgroups = scenario.num_cgroups, "cgroups created, settling");
thread::sleep(ctx.settle);
if let Some(pid) = ctx.active_sched_pid()
&& !process_alive(pid)
{
anyhow::bail!(
"{} after cgroup creation (pid={})",
crate::assert::SCHED_DIED_PREFIX,
pid,
);
}
let handles = spawn_and_move(ctx, &names, |i, name| {
let cw = scenario
.cgroup_works
.get(i)
.or(scenario.cgroup_works.first())
.cloned()
.unwrap_or_default();
if let Err(reason) = cw.mem_policy.validate() {
anyhow::bail!("cgroup '{}': {}", name, reason);
}
let n = resolve_num_workers(&cw, ctx.workers_per_cgroup, name)?;
let cpuset = cpusets.as_deref().and_then(|cs| cs.get(i));
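// Reaching here with an out-of-range index means the scenario and the
// resolved cpusets disagree: the always-false assert fails fast in debug
// builds, while release builds warn and fall back to the unrestricted pool.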
if let Some(cs) = cpusets.as_deref()
&& i >= cs.len()
{
debug_assert!(
i < cs.len(),
"cgroup_idx {i} out of range for cpusets of len {}",
cs.len(),
);
tracing::warn!(
cgroup_idx = i,
cpusets_len = cs.len(),
"cgroup index out of range for cpusets array; falling back to unrestricted pool"
);
}
let affinity = resolve_affinity_for_cgroup(&cw.affinity, cpuset, ctx.topo);
let effective_work_type = crate::workload::resolve_work_type(
&cw.work_type,
ctx.work_type_override.as_ref(),
matches!(cw.work_type, WorkType::CpuSpin),
n,
);
Ok(WorkloadConfig {
num_workers: n,
affinity,
work_type: effective_work_type,
sched_policy: cw.sched_policy,
mem_policy: cw.mem_policy.clone(),
mpol_flags: cw.mpol_flags,
})
})?;
tracing::debug!(duration_s = ctx.duration.as_secs(), "running workload");
let deadline = std::time::Instant::now() + ctx.duration;
let mut sched_dead = false;
if let Some(pid) = ctx.active_sched_pid() {
while std::time::Instant::now() < deadline {
if !process_alive(pid) {
sched_dead = true;
tracing::warn!("scheduler process died during workload phase");
break;
}
let remaining = deadline.saturating_duration_since(std::time::Instant::now());
thread::sleep(remaining.min(Duration::from_millis(500)));
}
if !sched_dead {
sched_dead = !process_alive(pid);
}
} else {
let remaining = deadline.saturating_duration_since(std::time::Instant::now());
thread::sleep(remaining);
}
let mut result = AssertResult::pass();
for (i, h) in handles.into_iter().enumerate() {
let reports = h.stop_and_collect();
// Defensive `.get`: keep collection in step with spawn-time handling of
// cpuset vectors shorter than the cgroup list.
let cs = cpusets.as_ref().and_then(|v| v.get(i));
let numa_nodes = cs.map(|c| ctx.topo.numa_nodes_for_cpuset(c));
result.merge(
ctx.assert
.assert_cgroup_with_numa(&reports, cs, numa_nodes.as_ref()),
);
}
if !result.passed {
for line in read_kmsg().lines() {
result.details.push(line.to_string().into());
}
}
if sched_dead {
result.passed = false;
result.details.push(crate::assert::AssertDetail::new(
crate::assert::DetailKind::SchedulerDied,
crate::assert::format_sched_died_during_workload(
scenario_start.elapsed().as_secs_f64(),
),
));
}
Ok(result)
}
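/// Translates a `CpusetPartition` into concrete per-cgroup cpusets for this
/// topology. `None` means no cpuset restriction at all; an empty entry (e.g.
/// `LlcAligned` on a single-LLC machine) tells the caller to skip the
/// scenario.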
fn resolve_cpusets(
mode: &CpusetPartition,
n: usize,
topo: &TestTopology,
) -> Option<Vec<BTreeSet<usize>>> {
let all = topo.all_cpus();
let usable = topo.usable_cpus();
match mode {
CpusetPartition::None => None,
CpusetPartition::LlcAligned => {
let llcs = topo.split_by_llc();
if llcs.len() < 2 {
return Some(vec![BTreeSet::new()]);
}
let mut sets: Vec<BTreeSet<usize>> = llcs[..n.min(llcs.len())].to_vec();
if let Some(last) = sets.last_mut()
&& last.len() > 1
{
last.remove(&all[all.len() - 1]);
}
Some(sets)
}
CpusetPartition::SplitHalf => {
let mid = usable.len() / 2;
Some(vec![
usable[..mid].iter().copied().collect(),
usable[mid..].iter().copied().collect(),
])
}
CpusetPartition::SplitMisaligned => {
let split = if topo.num_llcs() > 1 {
topo.cpus_in_llc(0).len() / 2
} else {
usable.len() / 2
};
Some(vec![
usable[..split].iter().copied().collect(),
usable[split..].iter().copied().collect(),
])
}
CpusetPartition::Overlap(frac) => Some(topo.overlapping_cpusets(n, *frac)),
CpusetPartition::Uneven(frac) => {
let split = (usable.len() as f64 * frac) as usize;
Some(vec![
usable[..split.max(1)].iter().copied().collect(),
usable[split.max(1)..].iter().copied().collect(),
])
}
CpusetPartition::Holdback(frac) => {
let keep = all.len() - (all.len() as f64 * frac) as usize;
let mid = keep / 2;
Some(vec![
all[..mid.max(1)].iter().copied().collect(),
all[mid.max(1)..keep].iter().copied().collect(),
])
}
}
}
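/// Per-cgroup worker count: an explicit `Work::num_workers` wins over the
/// context default, and zero is rejected because assertions over an empty
/// set of reports would vacuously pass.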
pub(crate) fn resolve_num_workers(work: &Work, default_n: usize, label: &str) -> Result<usize> {
let n = work.num_workers.unwrap_or(default_n);
if n == 0 {
anyhow::bail!(
"cgroup '{}': num_workers=0 is not allowed — assertions would \
vacuously pass with no WorkerReports; use at least 1 worker or \
drop this Work entry",
label,
);
}
Ok(n)
}
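/// Lowers an `AffinityKind` to a concrete `AffinityMode`, clamped to the
/// cgroup's cpuset when one exists. `LlcAligned` picks the LLC with the
/// largest overlap with the pool; any empty intersection degrades to
/// `AffinityMode::None` rather than producing an unsatisfiable mask.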
pub fn resolve_affinity_for_cgroup(
kind: &AffinityKind,
cpuset: Option<&BTreeSet<usize>>,
topo: &TestTopology,
) -> AffinityMode {
match kind {
AffinityKind::Inherit => AffinityMode::None,
AffinityKind::RandomSubset => {
let pool = cpuset.cloned().unwrap_or_else(|| topo.all_cpuset());
if pool.is_empty() {
tracing::debug!(
"RandomSubset: empty cpuset and empty topology pool, \
falling back to AffinityMode::None"
);
AffinityMode::None
} else {
let count = (pool.len() / 2).max(1);
AffinityMode::Random { from: pool, count }
}
}
AffinityKind::LlcAligned => {
let pool = cpuset.cloned().unwrap_or_else(|| topo.all_cpuset());
let mut best_llc = topo.llc_aligned_cpuset(0);
let mut best_overlap = best_llc.intersection(&pool).count();
for idx in 1..topo.num_llcs() {
let llc = topo.llc_aligned_cpuset(idx);
let overlap = llc.intersection(&pool).count();
if overlap > best_overlap {
best_llc = llc;
best_overlap = overlap;
}
}
let effective: BTreeSet<usize> = best_llc.intersection(&pool).copied().collect();
if effective.is_empty() {
AffinityMode::None
} else {
AffinityMode::Fixed(effective)
}
}
AffinityKind::CrossCgroup => AffinityMode::Fixed(topo.all_cpuset()),
AffinityKind::SingleCpu => {
let pool = cpuset.cloned().unwrap_or_else(|| topo.all_cpuset());
if let Some(&cpu) = pool.iter().next() {
AffinityMode::SingleCpu(cpu)
} else {
AffinityMode::None
}
}
AffinityKind::Exact(cpus) => {
if let Some(cs) = cpuset {
let effective: BTreeSet<usize> = cpus.intersection(cs).copied().collect();
if effective.is_empty() {
AffinityMode::None
} else {
AffinityMode::Fixed(effective)
}
} else {
AffinityMode::Fixed(cpus.clone())
}
}
}
}
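/// Setup helper for custom actions: creates `n` cgroups without cpusets,
/// settles, checks scheduler liveness, then spawns the same workload config
/// into each. The returned `CgroupGroup` removes the cgroups when dropped,
/// so keep it alive for the duration of the test.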
pub fn setup_cgroups<'a>(
ctx: &'a Ctx,
n: usize,
wl: &WorkloadConfig,
) -> Result<(Vec<WorkloadHandle>, CgroupGroup<'a>)> {
let mut guard = CgroupGroup::new(ctx.cgroups);
for i in 0..n {
guard.add_cgroup_no_cpuset(&format!("cg_{i}"))?;
}
thread::sleep(ctx.settle);
if let Some(pid) = ctx.active_sched_pid()
&& !process_alive(pid)
{
anyhow::bail!(
"{} after cgroup creation (pid={})",
crate::assert::SCHED_DIED_PREFIX,
pid,
);
}
let names: Vec<String> = (0..n).map(|i| format!("cg_{i}")).collect();
let handles = spawn_and_move(ctx, &names, |_, _| Ok(wl.clone()))?;
Ok((handles, guard))
}
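/// Stops every handle and folds its worker reports into one `AssertResult`.
/// With worker-level checks configured this runs the full cpuset/NUMA
/// assertions; otherwise it falls back to a plain not-starved check.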
pub(crate) fn collect_handles<'a>(
handles: impl IntoIterator<Item = (WorkloadHandle, Option<&'a BTreeSet<usize>>)>,
checks: &crate::assert::Assert,
topo: Option<&crate::topology::TestTopology>,
) -> AssertResult {
let mut r = AssertResult::pass();
for (h, cpuset) in handles {
let reports = h.stop_and_collect();
if checks.has_worker_checks() {
let numa_nodes = cpuset.and_then(|cs| topo.map(|t| t.numa_nodes_for_cpuset(cs)));
r.merge(checks.assert_cgroup_with_numa(&reports, cpuset, numa_nodes.as_ref()));
} else {
r.merge(assert::assert_not_starved(&reports));
}
}
r
}
pub fn collect_all(handles: Vec<WorkloadHandle>, checks: &crate::assert::Assert) -> AssertResult {
collect_handles(handles.into_iter().map(|h| (h, None)), checks, None)
}
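/// Default workload for a context: `workers_per_cgroup` workers, everything
/// else from `WorkloadConfig::default()`.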
pub fn dfl_wl(ctx: &Ctx) -> WorkloadConfig {
WorkloadConfig {
num_workers: ctx.workers_per_cgroup,
..Default::default()
}
}
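/// Test-only helper: splits the usable CPUs into two contiguous halves.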
#[cfg(test)]
pub fn split_half(ctx: &Ctx) -> (BTreeSet<usize>, BTreeSet<usize>) {
let usable = ctx.topo.usable_cpus();
let mid = usable.len() / 2;
(
usable[..mid].iter().copied().collect(),
usable[mid..].iter().copied().collect(),
)
}
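/// Spawns a rotating mix of work types (spin, bursty, sync IO, mixed,
/// yield-heavy) across the given cgroups. IoSync cgroups are pinned to two
/// workers regardless of `ctx.workers_per_cgroup`.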
pub fn spawn_diverse(ctx: &Ctx, cgroup_names: &[&str]) -> Result<Vec<WorkloadHandle>> {
let types = [
WorkType::CpuSpin,
WorkType::bursty(50, 100),
WorkType::IoSync,
WorkType::Mixed,
WorkType::YieldHeavy,
];
let mut handles = Vec::new();
for (i, name) in cgroup_names.iter().enumerate() {
let wt = types[i % types.len()].clone();
let n = if matches!(wt, WorkType::IoSync) {
2
} else {
ctx.workers_per_cgroup
};
let mut h = WorkloadHandle::spawn(&WorkloadConfig {
num_workers: n,
work_type: wt,
..Default::default()
})?;
ctx.cgroups.move_tasks(name, &h.worker_pids())?;
h.start();
handles.push(h);
}
Ok(handles)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn flag_short_name_roundtrip() {
for &f in flags::ALL {
assert_eq!(flags::from_short_name(f), Some(f));
}
}
#[test]
fn flag_all_unique_short_names() {
let unique: std::collections::HashSet<&&str> = flags::ALL.iter().collect();
assert_eq!(flags::ALL.len(), unique.len());
}
#[test]
fn flag_from_short_name_unknown() {
assert_eq!(flags::from_short_name("nonexistent"), None);
}
#[test]
fn profile_name_default() {
assert_eq!(FlagProfile { flags: vec![] }.name(), "default");
}
#[test]
fn profile_name_with_flags() {
let p = FlagProfile {
flags: vec![flags::LLC, flags::BORROW],
};
assert_eq!(p.name(), "llc+borrow");
}
#[test]
fn generate_profiles_no_constraints() {
assert_eq!(generate_profiles(&[], &[]).len(), 48);
}
#[test]
fn generate_profiles_work_stealing_requires_llc() {
let profiles = generate_profiles(&[flags::STEAL], &[]);
for p in &profiles {
assert!(
p.flags.contains(&flags::LLC),
"steal without llc: {:?}",
p.flags
);
}
}
#[test]
fn generate_profiles_excluded_never_present() {
let profiles = generate_profiles(&[], &[flags::NO_CTRL]);
for p in &profiles {
assert!(!p.flags.contains(&flags::NO_CTRL));
}
}
#[test]
fn generate_profiles_required_always_present() {
let profiles = generate_profiles(&[flags::BORROW], &[]);
for p in &profiles {
assert!(p.flags.contains(&flags::BORROW));
}
}
#[test]
fn generate_profiles_required_and_excluded() {
let profiles = generate_profiles(&[flags::BORROW], &[flags::REBAL]);
for p in &profiles {
assert!(p.flags.contains(&flags::BORROW));
assert!(!p.flags.contains(&flags::REBAL));
}
}
#[test]
fn all_scenarios_non_empty() {
assert!(!all_scenarios().is_empty());
}
#[test]
fn all_scenarios_unique_names() {
let scenarios = all_scenarios();
let names: Vec<&str> = scenarios.iter().map(|s| s.name).collect();
let unique: std::collections::HashSet<&&str> = names.iter().collect();
assert_eq!(names.len(), unique.len(), "duplicate scenario names");
}
#[test]
fn all_scenarios_have_profiles() {
for s in &all_scenarios() {
assert!(!s.profiles().is_empty(), "{} has no valid profiles", s.name);
}
}
#[test]
fn resolve_cpusets_none_returns_none() {
let t = crate::topology::TestTopology::synthetic(8, 2);
assert!(resolve_cpusets(&CpusetPartition::None, 2, &t).is_none());
}
#[test]
fn resolve_cpusets_split_half_covers_usable() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::SplitHalf, 2, &t).unwrap();
assert_eq!(r.len(), 2);
let total: usize = r.iter().map(|s| s.len()).sum();
assert_eq!(total, 7);
}
#[test]
fn resolve_cpusets_llc_aligned() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::LlcAligned, 2, &t).unwrap();
assert_eq!(r.len(), 2);
assert!(!r[0].is_empty());
assert!(!r[1].is_empty());
}
#[test]
fn resolve_cpusets_uneven() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Uneven(0.75), 2, &t).unwrap();
assert!(r[0].len() > r[1].len(), "75/25 split should be uneven");
}
#[test]
fn resolve_cpusets_holdback() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Holdback(0.5), 2, &t).unwrap();
let total: usize = r.iter().map(|s| s.len()).sum();
assert!(total < 8, "holdback should use fewer CPUs");
}
#[test]
fn resolve_cpusets_overlap() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Overlap(0.5), 3, &t).unwrap();
assert_eq!(r.len(), 3);
}
#[test]
fn resolve_affinity_inherit() {
let t = crate::topology::TestTopology::synthetic(8, 2);
assert!(matches!(
resolve_affinity_for_cgroup(&AffinityKind::Inherit, None, &t),
AffinityMode::None
));
}
#[test]
fn resolve_affinity_single_cpu() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityKind::SingleCpu, None, &t) {
AffinityMode::SingleCpu(c) => assert_eq!(c, 0),
other => panic!("expected SingleCpu, got {:?}", other),
}
}
#[test]
fn resolve_affinity_cross_cgroup() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityKind::CrossCgroup, None, &t) {
AffinityMode::Fixed(cpus) => assert_eq!(cpus.len(), 8),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityKind::LlcAligned, None, &t) {
AffinityMode::Fixed(cpus) => assert_eq!(cpus, [0, 1, 2, 3].into_iter().collect()),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned_with_cpuset() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![
[0, 1, 2, 3].into_iter().collect(),
[4, 5, 6, 7].into_iter().collect(),
];
match resolve_affinity_for_cgroup(&AffinityKind::LlcAligned, cpusets.get(1), &t) {
AffinityMode::Fixed(cpus) => assert_eq!(cpus, [4, 5, 6, 7].into_iter().collect()),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_random_subset() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![[0, 1, 2, 3].into_iter().collect()];
match resolve_affinity_for_cgroup(&AffinityKind::RandomSubset, cpusets.first(), &t) {
AffinityMode::Random { from, count } => {
assert_eq!(from, cpusets[0]);
assert_eq!(count, 2);
}
other => panic!("expected Random, got {:?}", other),
}
}
#[test]
fn resolve_cpusets_split_misaligned() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::SplitMisaligned, 2, &t).unwrap();
assert_eq!(r.len(), 2);
let total: usize = r.iter().map(|s| s.len()).sum();
assert!(total > 0);
assert_ne!(r[0].len(), 4, "misaligned should NOT split at LLC boundary");
}
#[test]
fn resolve_cpusets_llc_aligned_single_llc() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let r = resolve_cpusets(&CpusetPartition::LlcAligned, 2, &t).unwrap();
assert!(
r.iter().any(|s| s.is_empty()),
"should signal skip with empty set"
);
}
#[test]
fn resolve_cpusets_small_topology() {
let t = crate::topology::TestTopology::synthetic(2, 1);
let r = resolve_cpusets(&CpusetPartition::SplitHalf, 2, &t).unwrap();
assert_eq!(r.len(), 2);
assert_eq!(r[0].len(), 1);
assert_eq!(r[1].len(), 1);
}
#[test]
fn cgroup_work_default() {
let cw = Work::default();
assert_eq!(cw.num_workers, None);
assert!(matches!(cw.work_type, WorkType::CpuSpin));
assert!(matches!(cw.sched_policy, SchedPolicy::Normal));
assert!(matches!(cw.affinity, AffinityKind::Inherit));
assert!(matches!(cw.mem_policy, MemPolicy::Default));
}
#[test]
fn scenario_qualified_name() {
let s = &all_scenarios()[0];
let p = FlagProfile { flags: vec![] };
assert_eq!(s.qualified_name(&p), format!("{}/default", s.name));
}
#[test]
fn scenario_qualified_name_with_flags() {
let s = &all_scenarios()[0];
let p = FlagProfile {
flags: vec![flags::LLC, flags::BORROW],
};
assert_eq!(s.qualified_name(&p), format!("{}/llc+borrow", s.name));
}
#[test]
fn all_scenarios_count() {
let scenarios = all_scenarios();
assert!(
scenarios.len() >= 30,
"expected >=30 scenarios, got {}",
scenarios.len()
);
}
#[test]
fn scenario_categories_valid() {
let valid = [
"basic",
"cpuset",
"affinity",
"sched_class",
"dynamic",
"stress",
"stall",
"advanced",
"nested",
"interaction",
"performance",
];
for s in &all_scenarios() {
assert!(
valid.contains(&s.category),
"unknown category '{}' in {}",
s.category,
s.name
);
}
}
#[test]
fn generate_profiles_single_required_count() {
assert_eq!(generate_profiles(&[flags::BORROW], &[]).len(), 24);
}
#[test]
fn profiles_sorted_by_flag_order() {
for p in &generate_profiles(&[], &[]) {
for w in p.flags.windows(2) {
let pos0 = flags::ALL.iter().position(|a| a == &w[0]).unwrap();
let pos1 = flags::ALL.iter().position(|a| a == &w[1]).unwrap();
assert!(pos0 < pos1, "flags not sorted: {:?}", p.flags);
}
}
}
#[test]
fn resolve_cpusets_holdback_reserves_cpus() {
let t = crate::topology::TestTopology::synthetic(12, 3);
let r = resolve_cpusets(&CpusetPartition::Holdback(0.33), 2, &t).unwrap();
let total: usize = r.iter().map(|s| s.len()).sum();
assert_eq!(total, 9, "holdback 33% of 12 should keep 9");
assert!(total < 12, "holdback should use fewer CPUs than total");
assert_eq!(r.len(), 2);
}
#[test]
fn resolve_cpusets_overlap_sets_overlap() {
let t = crate::topology::TestTopology::synthetic(12, 1);
let r = resolve_cpusets(&CpusetPartition::Overlap(0.5), 2, &t).unwrap();
let overlap: BTreeSet<usize> = r[0].intersection(&r[1]).copied().collect();
assert!(
!overlap.is_empty(),
"50% overlap should have overlapping CPUs"
);
}
#[test]
fn resolve_affinity_random_no_cpusets() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityKind::RandomSubset, None, &t) {
AffinityMode::Random { from, count } => {
assert_eq!(from.len(), 8);
assert_eq!(count, 4);
}
other => panic!("expected Random, got {:?}", other),
}
}
#[test]
fn resolve_affinity_random_subset_empty_pool_is_none() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let empty: BTreeSet<usize> = BTreeSet::new();
match resolve_affinity_for_cgroup(&AffinityKind::RandomSubset, Some(&empty), &t) {
AffinityMode::None => {}
other => panic!("expected None for empty cpuset, got {:?}", other),
}
}
#[test]
fn resolve_affinity_oob_cgroup_idx_falls_back_to_unrestricted() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let cpusets: Vec<BTreeSet<usize>> = vec![[0, 1].into_iter().collect()];
let oob_idx = 5;
let cpuset = cpusets.get(oob_idx);
assert!(cpuset.is_none(), "OOB index must yield None cpuset");
match resolve_affinity_for_cgroup(&AffinityKind::RandomSubset, cpuset, &t) {
AffinityMode::Random { from, count } => {
assert_eq!(from.len(), 4, "OOB idx falls back to full topology");
assert_eq!(count, 2);
}
other => panic!("expected Random with full pool, got {:?}", other),
}
}
#[test]
fn split_half_even() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 4,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let (a, b) = split_half(&ctx);
assert_eq!(a.len() + b.len(), 7);
assert!(a.intersection(&b).count() == 0, "halves should not overlap");
}
#[test]
fn split_half_small() {
let t = crate::topology::TestTopology::synthetic(2, 1);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 1,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let (a, b) = split_half(&ctx);
assert_eq!(a.len() + b.len(), 2);
}
#[test]
fn dfl_wl_propagates_workers() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 7,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let wl = dfl_wl(&ctx);
assert_eq!(wl.num_workers, 7);
assert!(matches!(wl.work_type, WorkType::CpuSpin));
}
#[test]
fn process_alive_self_is_true() {
let pid: libc::pid_t = unsafe { libc::getpid() };
assert!(process_alive(pid));
}
#[test]
fn ctx_active_sched_pid_treats_nonpositive_as_unconfigured() {
let cg = crate::cgroup::CgroupManager::new("/nonexistent");
let topo = crate::topology::TestTopology::synthetic(1, 1);
let ctx_zero = Ctx::builder(&cg, &topo).sched_pid(Some(0)).build();
assert_eq!(
ctx_zero.sched_pid,
Some(0),
"builder must preserve the literal value — the gate lives in the accessor",
);
assert_eq!(
ctx_zero.active_sched_pid(),
None,
"Some(0) must be treated as unconfigured, otherwise the liveness \
bails fire on tests that never ran a scheduler",
);
let ctx_neg = Ctx::builder(&cg, &topo).sched_pid(Some(-1)).build();
assert_eq!(
ctx_neg.active_sched_pid(),
None,
"negative pid must be treated as unconfigured",
);
let ctx_min = Ctx::builder(&cg, &topo)
.sched_pid(Some(libc::pid_t::MIN))
.build();
assert_eq!(
ctx_min.active_sched_pid(),
None,
"pid_t::MIN must be treated as unconfigured — the filter \
is `p > 0`, and the most-negative pid_t stays unconfigured \
under that predicate by construction",
);
let ctx_pos = Ctx::builder(&cg, &topo).sched_pid(Some(1234)).build();
assert_eq!(
ctx_pos.active_sched_pid(),
Some(1234),
"positive pid must pass through unchanged",
);
let ctx_max = Ctx::builder(&cg, &topo)
.sched_pid(Some(libc::pid_t::MAX))
.build();
assert_eq!(
ctx_max.active_sched_pid(),
Some(libc::pid_t::MAX),
"pid_t::MAX must pass the filter — `p > 0` accepts it. \
Liveness determination is the responsibility of the \
downstream `process_alive` call, not this accessor.",
);
let ctx_none = Ctx::builder(&cg, &topo).sched_pid(None).build();
assert_eq!(
ctx_none.active_sched_pid(),
None,
"None must pass through unchanged",
);
}
#[test]
fn process_alive_zero_is_false() {
assert!(!process_alive(0));
}
#[test]
fn process_alive_negative_is_false() {
assert!(!process_alive(-1));
assert!(!process_alive(libc::pid_t::MIN));
}
#[test]
fn process_alive_nonexistent_pid() {
assert!(!process_alive(libc::pid_t::MAX));
}
#[test]
fn cgroup_group_new_empty() {
let cg = crate::cgroup::CgroupManager::new("/nonexistent");
let group = CgroupGroup::new(&cg);
assert!(group.names().is_empty());
}
#[test]
fn decl_by_name_valid() {
for &name in flags::ALL {
assert!(flags::decl_by_name(name).is_some(), "should find {name}");
}
}
#[test]
fn decl_by_name_unknown() {
assert!(flags::decl_by_name("nonexistent").is_none());
}
#[test]
fn decl_by_name_steal_requires_llc() {
let steal = flags::decl_by_name("steal").unwrap();
assert_eq!(steal.requires.len(), 1);
assert_eq!(steal.requires[0].name, "llc");
}
#[test]
fn decl_by_name_borrow_no_requires() {
let borrow = flags::decl_by_name("borrow").unwrap();
assert!(borrow.requires.is_empty());
}
#[test]
fn flag_requires_steal_returns_llc() {
let req = flag_requires("steal");
assert_eq!(req, vec!["llc"]);
}
#[test]
fn flag_requires_borrow_returns_empty() {
assert!(flag_requires("borrow").is_empty());
}
#[test]
fn flag_requires_unknown_returns_empty() {
assert!(flag_requires("nonexistent").is_empty());
}
#[test]
fn profile_name_three_flags() {
let p = FlagProfile {
flags: vec![flags::LLC, flags::BORROW, flags::REBAL],
};
assert_eq!(p.name(), "llc+borrow+rebal");
}
#[test]
fn resolve_cpusets_split_misaligned_single_llc() {
let t = crate::topology::TestTopology::synthetic(8, 1);
let r = resolve_cpusets(&CpusetPartition::SplitMisaligned, 2, &t).unwrap();
assert_eq!(r.len(), 2);
let total: usize = r.iter().map(|s| s.len()).sum();
assert!(total > 0);
}
#[test]
fn resolve_cpusets_uneven_small_frac() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Uneven(0.1), 2, &t).unwrap();
assert!(
r[0].len() < r[1].len(),
"0.1 fraction should give smaller first set"
);
}
#[test]
fn scenario_profiles_count_bounded() {
for s in &all_scenarios() {
let n = s.profiles().len();
assert!(n >= 1, "{} has {} profiles", s.name, n);
assert!(n <= 48, "{} has {} profiles (>48)", s.name, n);
}
}
#[test]
fn all_decls_matches_all_strings() {
assert_eq!(flags::ALL_DECLS.len(), flags::ALL.len());
for (decl, &name) in flags::ALL_DECLS.iter().zip(flags::ALL.iter()) {
assert_eq!(decl.name, name);
}
}
#[test]
fn resolve_affinity_single_cpu_with_cpuset() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let cpusets: Vec<BTreeSet<usize>> = vec![[2, 3].into_iter().collect()];
match resolve_affinity_for_cgroup(&AffinityKind::SingleCpu, cpusets.first(), &t) {
AffinityMode::SingleCpu(c) => assert_eq!(c, 2),
other => panic!("expected SingleCpu, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned_picks_best_overlap() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![[3, 4, 5, 6, 7].into_iter().collect()];
match resolve_affinity_for_cgroup(&AffinityKind::LlcAligned, cpusets.first(), &t) {
AffinityMode::Fixed(cpus) => {
assert_eq!(cpus, [4, 5, 6, 7].into_iter().collect());
}
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn qualified_name_all_scenarios_with_default() {
let p = FlagProfile { flags: vec![] };
for s in &all_scenarios() {
assert_eq!(
s.qualified_name(&p),
format!("{}/default", s.name),
"qualified_name mismatch for {}",
s.name
);
}
}
#[test]
fn qualified_name_single_flag() {
let s = &all_scenarios()[0];
let p = FlagProfile {
flags: vec![flags::REBAL],
};
assert_eq!(s.qualified_name(&p), format!("{}/rebal", s.name));
}
#[test]
fn qualified_name_three_flags_joined() {
let s = &all_scenarios()[0];
let p = FlagProfile {
flags: vec![flags::LLC, flags::STEAL, flags::BORROW],
};
assert_eq!(s.qualified_name(&p), format!("{}/llc+steal+borrow", s.name));
}
#[test]
fn generate_profiles_steal_only_forces_llc() {
let profiles = generate_profiles(&[flags::STEAL], &[]);
assert!(!profiles.is_empty());
for p in &profiles {
assert!(
p.flags.contains(&flags::STEAL),
"steal missing: {:?}",
p.flags
);
assert!(
p.flags.contains(&flags::LLC),
"llc missing when steal present: {:?}",
p.flags
);
}
}
#[test]
fn generate_profiles_all_excluded_returns_single_empty() {
let profiles = generate_profiles(&[], flags::ALL);
assert_eq!(profiles.len(), 1);
assert!(profiles[0].flags.is_empty());
}
#[test]
fn generate_profiles_required_and_excluded_all_others() {
let excluded: Vec<&str> = flags::ALL
.iter()
.copied()
.filter(|f| *f != flags::BORROW)
.collect();
let profiles = generate_profiles(&[flags::BORROW], &excluded);
assert_eq!(profiles.len(), 1);
assert_eq!(profiles[0].flags, vec![flags::BORROW]);
}
#[test]
fn resolve_cpusets_split_half_exact_cpu_assignment() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::SplitHalf, 2, &t).unwrap();
assert_eq!(r[0], [0, 1, 2].into_iter().collect());
assert_eq!(r[1], [3, 4, 5, 6].into_iter().collect());
}
#[test]
fn resolve_cpusets_uneven_75_exact_split() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Uneven(0.75), 2, &t).unwrap();
assert_eq!(r[0].len(), 5);
assert_eq!(r[1].len(), 2);
assert_eq!(r[0], [0, 1, 2, 3, 4].into_iter().collect());
assert_eq!(r[1], [5, 6].into_iter().collect());
}
#[test]
fn resolve_cpusets_holdback_50_exact() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let r = resolve_cpusets(&CpusetPartition::Holdback(0.5), 2, &t).unwrap();
let total: usize = r.iter().map(|s| s.len()).sum();
assert_eq!(total, 4, "holdback 50% of 8 should keep 4");
assert_eq!(r[0], [0, 1].into_iter().collect());
assert_eq!(r[1], [2, 3].into_iter().collect());
}
#[test]
fn resolve_num_workers_zero_rejected_with_label() {
let w = Work {
num_workers: Some(0),
..Default::default()
};
let err = resolve_num_workers(&w, 4, "victim").unwrap_err();
let msg = format!("{err}");
assert!(
msg.contains("cgroup 'victim'"),
"label must appear in error: {msg}"
);
assert!(
msg.contains("num_workers=0"),
"error must name the offending field: {msg}"
);
}
#[test]
fn resolve_num_workers_zero_default_also_rejected() {
let w = Work {
num_workers: None,
..Default::default()
};
assert!(resolve_num_workers(&w, 0, "cg").is_err());
}
#[test]
fn resolve_num_workers_falls_back_to_default() {
let w = Work {
num_workers: None,
..Default::default()
};
assert_eq!(resolve_num_workers(&w, 3, "cg").unwrap(), 3);
}
#[test]
fn resolve_num_workers_explicit_wins_over_default() {
let w = Work {
num_workers: Some(7),
..Default::default()
};
assert_eq!(resolve_num_workers(&w, 3, "cg").unwrap(), 7);
}
#[test]
fn catalog_has_no_zero_worker_cgroup_works() {
for scenario in crate::scenario::all_scenarios() {
for (idx, cw) in scenario.cgroup_works.iter().enumerate() {
assert_ne!(
cw.num_workers,
Some(0),
"scenario {:?} cgroup_works[{}] declares num_workers=Some(0)",
scenario.name,
idx,
);
}
}
}
struct DropErrCgroupOps {
parent: std::path::PathBuf,
remove_kind: std::io::ErrorKind,
raw_os_error: Option<i32>,
remove_calls: std::sync::Mutex<Vec<String>>,
}
impl DropErrCgroupOps {
fn new(kind: std::io::ErrorKind, raw: Option<i32>) -> Self {
Self {
parent: std::path::PathBuf::from("/mock/cgroup"),
remove_kind: kind,
raw_os_error: raw,
remove_calls: std::sync::Mutex::new(Vec::new()),
}
}
fn calls(&self) -> Vec<String> {
self.remove_calls.lock().unwrap().clone()
}
}
impl crate::cgroup::CgroupOps for DropErrCgroupOps {
fn parent_path(&self) -> &std::path::Path {
&self.parent
}
fn setup(&self, _: bool) -> Result<()> {
Ok(())
}
fn create_cgroup(&self, _: &str) -> Result<()> {
Ok(())
}
fn remove_cgroup(&self, name: &str) -> Result<()> {
self.remove_calls.lock().unwrap().push(name.to_string());
let io = match self.raw_os_error {
Some(errno) => std::io::Error::from_raw_os_error(errno),
None => std::io::Error::from(self.remove_kind),
};
Err(anyhow::Error::new(io).context("remove_dir cgroup"))
}
fn set_cpuset(&self, _: &str, _: &BTreeSet<usize>) -> Result<()> {
Ok(())
}
fn clear_cpuset(&self, _: &str) -> Result<()> {
Ok(())
}
fn set_cpuset_mems(&self, _: &str, _: &BTreeSet<usize>) -> Result<()> {
Ok(())
}
fn clear_cpuset_mems(&self, _: &str) -> Result<()> {
Ok(())
}
fn move_task(&self, _: &str, _: libc::pid_t) -> Result<()> {
Ok(())
}
fn move_tasks(&self, _: &str, _: &[libc::pid_t]) -> Result<()> {
Ok(())
}
fn clear_subtree_control(&self, _: &str) -> Result<()> {
Ok(())
}
fn drain_tasks(&self, _: &str) -> Result<()> {
Ok(())
}
fn cleanup_all(&self) -> Result<()> {
Ok(())
}
}
#[test]
fn cgroup_group_drop_is_panic_free_on_every_error_kind() {
for (label, kind, raw) in [
("ENOENT", std::io::ErrorKind::NotFound, Some(libc::ENOENT)),
("EBUSY", std::io::ErrorKind::Other, Some(libc::EBUSY)),
(
"EACCES",
std::io::ErrorKind::PermissionDenied,
Some(libc::EACCES),
),
("generic-IO", std::io::ErrorKind::Other, None),
] {
let mock = DropErrCgroupOps::new(kind, raw);
{
let mut group = CgroupGroup::new(&mock);
group.names.push("child-a".to_string());
group.names.push("child-b".to_string());
}
let calls = mock.calls();
assert_eq!(
calls,
vec!["child-b".to_string(), "child-a".to_string()],
"[{label}] Drop must call remove_cgroup for every tracked name in reverse order",
);
}
}
#[test]
fn is_io_not_found_matches_only_notfound() {
let wrap = |k: std::io::ErrorKind| -> anyhow::Error {
anyhow::Error::new(std::io::Error::from(k)).context("wrap")
};
assert!(is_io_not_found(&wrap(std::io::ErrorKind::NotFound)));
assert!(!is_io_not_found(&wrap(
std::io::ErrorKind::PermissionDenied
)));
assert!(!is_io_not_found(&wrap(std::io::ErrorKind::Other)));
let no_io = anyhow::anyhow!("cgroup not found in parent");
assert!(!is_io_not_found(&no_io));
}
#[test]
fn remove_cgroup_errno_hint_covers_ebusy_and_eacces() {
let busy =
anyhow::Error::new(std::io::Error::from_raw_os_error(libc::EBUSY)).context("wrap");
let acces =
anyhow::Error::new(std::io::Error::from_raw_os_error(libc::EACCES)).context("wrap");
let enotempty =
anyhow::Error::new(std::io::Error::from_raw_os_error(libc::ENOTEMPTY)).context("wrap");
let non_io = anyhow::anyhow!("not an io error");
assert!(
remove_cgroup_errno_hint(&busy)
.is_some_and(|h| h.contains("EBUSY") && h.contains("drain")),
"EBUSY hint must name the errno and the drain remediation",
);
assert!(
remove_cgroup_errno_hint(&acces)
.is_some_and(|h| h.contains("EACCES") && h.contains("permission")),
"EACCES hint must name the errno and the permission angle",
);
assert_eq!(
remove_cgroup_errno_hint(&enotempty),
None,
"unclassified errnos must yield no hint so warn stays terse",
);
assert_eq!(
remove_cgroup_errno_hint(&non_io),
None,
"non-io root causes must yield no hint",
);
}
}