pub mod affinity;
pub mod backdrop;
pub mod basic;
pub mod cpuset;
pub mod dynamic;
pub mod interaction;
pub mod nested;
pub mod ops;
pub mod payload_run;
pub mod performance;
pub mod scenarios;
pub mod snapshot;
pub mod stress;
pub use backdrop::Backdrop;
use std::collections::BTreeSet;
use std::thread;
use std::time::Duration;
use anyhow::Result;
use nix::sys::signal::kill;
use nix::unistd::Pid;
use crate::assert::{self, AssertResult};
use crate::topology::TestTopology;
use crate::workload::*;
/// Best-effort liveness probe: true iff `pid` is positive and signal 0 can be
/// delivered to it (the process exists and is visible to us).
fn process_alive(pid: libc::pid_t) -> bool {
    // Non-positive pids are sentinel/unconfigured values, never a live process.
    pid > 0 && kill(Pid::from_raw(pid), None).is_ok()
}
/// Scheduler feature-flag registry.
///
/// Every tunable flag is declared exactly once as a static [`FlagDecl`];
/// the short-name string constants, the `ALL` list, and `N_FLAGS` are all
/// derived from those declarations and cross-checked at compile time so
/// they cannot drift apart.
pub mod flags {
    /// Compile-time declaration of one scheduler flag.
    pub struct FlagDecl {
        // Short CLI-style name, e.g. "llc" or "reject-pin".
        pub name: &'static str,
        // Extra argument names the flag accepts (all current flags take none).
        pub args: &'static [&'static str],
        // Other flags that must be enabled together with this one.
        pub requires: &'static [&'static FlagDecl],
    }
    impl std::fmt::Debug for FlagDecl {
        // Manual Debug: print `requires` as a list of flag names rather than
        // recursing into the referenced declarations.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let req_names: Vec<&str> = self.requires.iter().map(|d| d.name).collect();
            f.debug_struct("FlagDecl")
                .field("name", &self.name)
                .field("args", &self.args)
                .field("requires", &req_names)
                .finish()
        }
    }
    pub static LLC_DECL: FlagDecl = FlagDecl {
        name: "llc",
        args: &[],
        requires: &[],
    };
    pub static BORROW_DECL: FlagDecl = FlagDecl {
        name: "borrow",
        args: &[],
        requires: &[],
    };
    // `steal` is only valid in combination with `llc`.
    pub static STEAL_DECL: FlagDecl = FlagDecl {
        name: "steal",
        args: &[],
        requires: &[&LLC_DECL],
    };
    pub static REBAL_DECL: FlagDecl = FlagDecl {
        name: "rebal",
        args: &[],
        requires: &[],
    };
    pub static REJECT_PIN_DECL: FlagDecl = FlagDecl {
        name: "reject-pin",
        args: &[],
        requires: &[],
    };
    pub static NO_CTRL_DECL: FlagDecl = FlagDecl {
        name: "no-ctrl",
        args: &[],
        requires: &[],
    };
    /// All known flag declarations, in canonical order. Everything below
    /// (the string constants, `ALL`, `N_FLAGS`) is derived from this slice.
    pub static ALL_DECLS: &[&FlagDecl] = &[
        &LLC_DECL,
        &BORROW_DECL,
        &STEAL_DECL,
        &REBAL_DECL,
        &REJECT_PIN_DECL,
        &NO_CTRL_DECL,
    ];
    /// Number of declared flags; must track `ALL_DECLS.len()` (enforced below).
    pub const N_FLAGS: usize = 6;
    // Compile-time guard: adding/removing a declaration without updating
    // N_FLAGS fails the build here instead of misbehaving at runtime.
    const _: () = assert!(
        ALL_DECLS.len() == N_FLAGS,
        "N_FLAGS must equal ALL_DECLS.len(); update both together",
    );
    // Short-name string constants, indexed into ALL_DECLS so they can never
    // diverge from the declarations themselves.
    pub const LLC: &str = ALL_DECLS[0].name;
    pub const BORROW: &str = ALL_DECLS[1].name;
    pub const STEAL: &str = ALL_DECLS[2].name;
    pub const REBAL: &str = ALL_DECLS[3].name;
    pub const REJECT_PIN: &str = ALL_DECLS[4].name;
    pub const NO_CTRL: &str = ALL_DECLS[5].name;
    // Compile-time guard: pin each positional constant to the expected name
    // so a reordering of ALL_DECLS is caught at build time. `bytes_eq` exists
    // because `str` equality is not available in const context.
    const _: () = {
        // Const-context byte-slice comparison (no `==` for &str in const fn).
        const fn bytes_eq(a: &[u8], b: &[u8]) -> bool {
            if a.len() != b.len() {
                return false;
            }
            let mut i = 0;
            while i < a.len() {
                if a[i] != b[i] {
                    return false;
                }
                i += 1;
            }
            true
        }
        assert!(
            bytes_eq(LLC.as_bytes(), b"llc"),
            "ALL_DECLS[0] must be `llc`"
        );
        assert!(
            bytes_eq(BORROW.as_bytes(), b"borrow"),
            "ALL_DECLS[1] must be `borrow`"
        );
        assert!(
            bytes_eq(STEAL.as_bytes(), b"steal"),
            "ALL_DECLS[2] must be `steal`"
        );
        assert!(
            bytes_eq(REBAL.as_bytes(), b"rebal"),
            "ALL_DECLS[3] must be `rebal`"
        );
        assert!(
            bytes_eq(REJECT_PIN.as_bytes(), b"reject-pin"),
            "ALL_DECLS[4] must be `reject-pin`"
        );
        assert!(
            bytes_eq(NO_CTRL.as_bytes(), b"no-ctrl"),
            "ALL_DECLS[5] must be `no-ctrl`"
        );
    };
    // Builds the short-name array from ALL_DECLS at compile time.
    const fn build_all() -> [&'static str; N_FLAGS] {
        let mut out = [""; N_FLAGS];
        let mut i = 0;
        while i < N_FLAGS {
            out[i] = ALL_DECLS[i].name;
            i += 1;
        }
        out
    }
    /// All flag short names, in the same order as `ALL_DECLS`.
    pub static ALL: &[&str] = &build_all();
    /// Looks up the canonical `&'static str` for a flag short name, or `None`
    /// if the name is unknown.
    pub fn from_short_name(s: &str) -> Option<&'static str> {
        ALL.iter().find(|&&f| f == s).copied()
    }
    /// Looks up the full declaration for a flag short name, or `None` if the
    /// name is unknown.
    pub fn decl_by_name(name: &str) -> Option<&'static FlagDecl> {
        ALL_DECLS.iter().find(|d| d.name == name).copied()
    }
    /// Serializable mirror of [`FlagDecl`] with `requires` flattened to flag
    /// names (the static graph of references cannot be serialized directly).
    #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
    pub struct FlagDeclJson {
        pub name: String,
        pub args: Vec<String>,
        pub requires: Vec<String>,
    }
    impl FlagDeclJson {
        /// Converts a static declaration into its owned, serializable form.
        pub fn from_decl(decl: &FlagDecl) -> Self {
            Self {
                name: decl.name.to_string(),
                args: decl.args.iter().map(|s| s.to_string()).collect(),
                requires: decl.requires.iter().map(|r| r.name.to_string()).collect(),
            }
        }
    }
}
/// A concrete combination of scheduler flags used to label one test run.
#[derive(Debug, Clone)]
pub struct FlagProfile {
    pub flags: Vec<&'static str>,
}

impl FlagProfile {
    /// Human-readable profile label: `"default"` for the empty flag set,
    /// otherwise the flags joined with `+` (e.g. `"llc+borrow"`).
    pub fn name(&self) -> String {
        match self.flags.as_slice() {
            [] => String::from("default"),
            fl => fl.join("+"),
        }
    }
}
pub use crate::workload::AffinityIntent;
/// Enumerates every valid flag combination (power set of the optional flags,
/// each merged with `required`), filtered by the dependency relation
/// `requires_fn` and ordered canonically by position in `all_names`.
///
/// * `all_names` — the full universe of flags, defining canonical order.
/// * `requires_fn` — returns the flags a given flag depends on; a profile is
///   kept only if every dependency of every member is also in the profile.
/// * `required` — flags present in every emitted profile.
/// * `excluded` — flags never considered.
///
/// # Panics
/// Panics if there are 32 or more optional flags (the power-set mask is a
/// `u32`).
pub fn compute_flag_profiles<T, F>(
    all_names: &[T],
    requires_fn: F,
    required: &[T],
    excluded: &[T],
) -> Vec<Vec<T>>
where
    T: Clone + PartialEq,
    F: Fn(&T) -> Vec<T>,
{
    let optional: Vec<T> = all_names
        .iter()
        .filter(|f| !required.contains(f) && !excluded.contains(f))
        .cloned()
        .collect();
    // Hard assert, not debug_assert: with >= 32 optional flags the
    // `1u32 << optional.len()` below is a masked shift in release builds,
    // which would silently enumerate a truncated power set instead of
    // failing loudly.
    assert!(
        optional.len() < 32,
        "compute_flag_profiles: {} optional flags would overflow u32 power-set mask",
        optional.len(),
    );
    let mut out = Vec::new();
    for mask in 0..(1u32 << optional.len()) {
        // Start from the required flags, then add each optional flag whose
        // bit is set in this mask.
        let mut fl: Vec<T> = required.to_vec();
        for (i, f) in optional.iter().enumerate() {
            if mask & (1 << i) != 0 {
                fl.push(f.clone());
            }
        }
        // Keep only profiles whose members' dependencies are all satisfied
        // within the profile itself.
        let valid = fl
            .iter()
            .all(|f| requires_fn(f).iter().all(|r| fl.contains(r)));
        if valid {
            // Canonical order: position in `all_names` (unknown flags sort last).
            fl.sort_by_key(|f| all_names.iter().position(|a| a == f).unwrap_or(usize::MAX));
            out.push(fl);
        }
    }
    out
}
/// Owns a set of cgroups created for one scenario and removes them (in
/// reverse creation order) when dropped.
#[must_use = "dropping a CgroupGroup immediately destroys the cgroups it manages"]
pub struct CgroupGroup<'a> {
    // Backend used to create/remove cgroups; a trait object so tests can mock it.
    cgroups: &'a dyn crate::cgroup::CgroupOps,
    // Names of cgroups created so far, in creation order.
    names: Vec<String>,
}
impl std::fmt::Debug for CgroupGroup<'_> {
    // Manual Debug: the trait object is not Debug, so print its parent path.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CgroupGroup")
            .field("cgroups", &self.cgroups.parent_path())
            .field("names", &self.names)
            .finish()
    }
}
impl<'a> CgroupGroup<'a> {
    /// Creates an empty group whose cgroups will be managed via `cgroups`.
    pub fn new(cgroups: &'a dyn crate::cgroup::CgroupOps) -> Self {
        Self {
            cgroups,
            names: Vec::new(),
        }
    }

    /// Creates a cgroup, pins it to `cpuset`, and registers it for cleanup.
    /// Ensures the cpuset controller is enabled before creating the cgroup.
    pub fn add_cgroup(&mut self, name: &str, cpuset: &BTreeSet<usize>) -> Result<()> {
        let required: BTreeSet<_> = [crate::cgroup::Controller::Cpuset].into_iter().collect();
        self.cgroups.setup(&required)?;
        self.cgroups.create_cgroup(name)?;
        self.cgroups.set_cpuset(name, cpuset)?;
        self.names.push(name.to_owned());
        Ok(())
    }

    /// Creates a cgroup without configuring its cpuset and registers it for
    /// cleanup on drop.
    pub fn add_cgroup_no_cpuset(&mut self, name: &str) -> Result<()> {
        self.cgroups.create_cgroup(name)?;
        self.names.push(name.to_owned());
        Ok(())
    }

    /// Names of all cgroups this group currently owns, in creation order.
    pub fn names(&self) -> &[String] {
        &self.names
    }
}
/// True when `err`'s root cause is an `io::Error` of kind `NotFound`
/// (e.g. ENOENT from a cgroup that has already been removed).
pub(crate) fn is_io_not_found(err: &anyhow::Error) -> bool {
    match err.root_cause().downcast_ref::<std::io::Error>() {
        Some(io) => io.kind() == std::io::ErrorKind::NotFound,
        None => false,
    }
}
/// Maps well-known errnos from a failed cgroup removal to an actionable hint
/// string for log output. Returns `None` when the root cause is not an
/// `io::Error`, carries no raw errno, or the errno has no dedicated hint.
pub(crate) fn remove_cgroup_errno_hint(err: &anyhow::Error) -> Option<&'static str> {
    let io = err.root_cause().downcast_ref::<std::io::Error>()?;
    let hint = match io.raw_os_error()? {
        libc::EBUSY => {
            "EBUSY: cgroup still has live tasks — workloads were not drained before teardown"
        }
        libc::EACCES => {
            "EACCES: permission denied — check cgroup owner / `user.slice` delegation"
        }
        _ => return None,
    };
    Some(hint)
}
impl Drop for CgroupGroup<'_> {
    /// Removes owned cgroups in reverse creation order. ENOENT counts as
    /// already cleaned up; any other failure is logged with an errno-specific
    /// hint — Drop must never panic or propagate errors.
    fn drop(&mut self) {
        for name in self.names.iter().rev() {
            let Err(err) = self.cgroups.remove_cgroup(name) else {
                continue;
            };
            if is_io_not_found(&err) {
                continue;
            }
            let hint = remove_cgroup_errno_hint(&err).unwrap_or("");
            tracing::warn!(
                cgroup = %name,
                err = %format!("{err:#}"),
                hint,
                "CgroupGroup::drop: remove_cgroup returned non-ENOENT error",
            );
        }
    }
}
/// Shared per-scenario context: cgroup backend, topology, timing knobs, and
/// the assertion configuration applied when collecting worker reports.
pub struct Ctx<'a> {
    pub cgroups: &'a dyn crate::cgroup::CgroupOps,
    pub topo: &'a TestTopology,
    // Scenario run duration (consumed by scenario implementations, not here).
    pub duration: Duration,
    pub workers_per_cgroup: usize,
    // Pid of the scheduler under test; None — or a non-positive value, see
    // `active_sched_pid` — means no scheduler is configured.
    pub sched_pid: Option<libc::pid_t>,
    // Pause inserted after cgroup creation (see `setup_cgroups`).
    pub settle: Duration,
    // NOTE(review): presumably overrides each WorkSpec's work type when set —
    // not consumed in this file; confirm against scenario implementations.
    pub work_type_override: Option<WorkType>,
    pub assert: crate::assert::Assert,
    pub wait_for_map_write: bool,
}
impl std::fmt::Debug for Ctx<'_> {
    // Manual Debug because `cgroups` is a trait object; its parent path is
    // printed in its place. All other fields are shown verbatim.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Ctx")
            .field("cgroups", &self.cgroups.parent_path())
            .field("topo", &self.topo)
            .field("duration", &self.duration)
            .field("workers_per_cgroup", &self.workers_per_cgroup)
            .field("sched_pid", &self.sched_pid)
            .field("settle", &self.settle)
            .field("work_type_override", &self.work_type_override)
            .field("assert", &self.assert)
            .field("wait_for_map_write", &self.wait_for_map_write)
            .finish()
    }
}
impl Ctx<'_> {
    /// Returns the configured scheduler pid, or `None` when no scheduler is
    /// configured. Stored non-positive pids are squashed to `None` (with a
    /// warning) — only positive pids count as a configured scheduler.
    pub(crate) fn active_sched_pid(&self) -> Option<libc::pid_t> {
        let p = self.sched_pid?;
        if p > 0 {
            return Some(p);
        }
        tracing::warn!(
            pid = p,
            "Ctx::active_sched_pid: sched_pid=Some({p}) squashed to None; \
             only positive pids are configured-scheduler values — use \
             None for the unconfigured shape instead of a 0-sentinel or \
             negative pid"
        );
        None
    }
}
/// Builder for [`Ctx`]; fields mirror `Ctx` one-for-one and are seeded with
/// defaults by `Ctx::builder`.
pub struct CtxBuilder<'a> {
    cgroups: &'a dyn crate::cgroup::CgroupOps,
    topo: &'a TestTopology,
    duration: Duration,
    workers_per_cgroup: usize,
    sched_pid: Option<libc::pid_t>,
    settle: Duration,
    work_type_override: Option<WorkType>,
    assert: crate::assert::Assert,
    wait_for_map_write: bool,
}
impl<'a> CtxBuilder<'a> {
    /// Sets the scenario run duration.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn duration(mut self, d: Duration) -> Self {
        self.duration = d;
        self
    }
    /// Sets how many workers each cgroup spawns by default.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn workers_per_cgroup(mut self, n: usize) -> Self {
        self.workers_per_cgroup = n;
        self
    }
    /// Sets the scheduler pid. The literal value is stored as given;
    /// non-positive pids are only filtered later, in `Ctx::active_sched_pid`.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn sched_pid(mut self, pid: Option<libc::pid_t>) -> Self {
        self.sched_pid = pid;
        self
    }
    /// Sets the settle pause inserted after cgroup creation.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn settle(mut self, s: Duration) -> Self {
        self.settle = s;
        self
    }
    /// Sets an optional work-type override for the scenario.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn work_type_override(mut self, wt: Option<WorkType>) -> Self {
        self.work_type_override = wt;
        self
    }
    /// Replaces the assertion configuration.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn assert(mut self, a: crate::assert::Assert) -> Self {
        self.assert = a;
        self
    }
    /// Sets whether the scenario waits for a map write before asserting.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn wait_for_map_write(mut self, v: bool) -> Self {
        self.wait_for_map_write = v;
        self
    }
    /// Finalizes the builder into a [`Ctx`], moving every field across
    /// unchanged.
    #[must_use = "dropping a Ctx without running the scenario discards the test setup"]
    pub fn build(self) -> Ctx<'a> {
        Ctx {
            cgroups: self.cgroups,
            topo: self.topo,
            duration: self.duration,
            workers_per_cgroup: self.workers_per_cgroup,
            sched_pid: self.sched_pid,
            settle: self.settle,
            work_type_override: self.work_type_override,
            assert: self.assert,
            wait_for_map_write: self.wait_for_map_write,
        }
    }
}
impl<'a> Ctx<'a> {
    /// Starts a [`CtxBuilder`] with defaults: 1s duration, 1 worker per
    /// cgroup, no scheduler pid, zero settle time, no work-type override,
    /// default assertion checks, and `wait_for_map_write = false`.
    #[must_use = "discarding a CtxBuilder drops the scenario context defaults; chain setters and call .build()"]
    pub fn builder(
        cgroups: &'a dyn crate::cgroup::CgroupOps,
        topo: &'a TestTopology,
    ) -> CtxBuilder<'a> {
        CtxBuilder {
            cgroups,
            topo,
            duration: Duration::from_secs(1),
            workers_per_cgroup: 1,
            sched_pid: None,
            settle: Duration::from_millis(0),
            work_type_override: None,
            assert: crate::assert::Assert::default_checks(),
            wait_for_map_write: false,
        }
    }
    /// Wraps this context and a payload into a `PayloadRun` configuration.
    #[must_use = "dropping a PayloadRun discards the payload configuration; chain setters and call .run()"]
    pub fn payload(
        &'a self,
        p: &'static crate::test_support::Payload,
    ) -> crate::scenario::payload_run::PayloadRun<'a> {
        crate::scenario::payload_run::PayloadRun::new(self, p)
    }
}
/// Spawns one workload per cgroup name (config produced by `cfg_fn`), moves
/// each workload's workers into its cgroup, and only then starts them all.
fn spawn_and_move<F>(ctx: &Ctx, names: &[String], mut cfg_fn: F) -> Result<Vec<WorkloadHandle>>
where
    F: FnMut(usize, &str) -> Result<WorkloadConfig>,
{
    let mut handles: Vec<WorkloadHandle> = Vec::with_capacity(names.len());
    for (idx, name) in names.iter().enumerate() {
        let cfg = cfg_fn(idx, name.as_str())?;
        let handle = WorkloadHandle::spawn(&cfg)?;
        tracing::debug!(
            cgroup = %name,
            workers = cfg.num_workers,
            pids = handle.worker_pids().len(),
            "spawned workers",
        );
        ctx.cgroups
            .move_tasks(name.as_str(), &handle.worker_pids_for_cgroup_procs()?)?;
        handles.push(handle);
    }
    // Second pass: start every workload only after all of them have been
    // spawned and moved into their cgroups.
    for handle in &mut handles {
        handle.start();
    }
    Ok(handles)
}
/// Resolves the worker count for one cgroup: the spec's explicit value wins,
/// otherwise `default_n`. A resolved count of zero is rejected — zero workers
/// would make worker assertions pass vacuously. `label` names the cgroup in
/// the error message.
pub(crate) fn resolve_num_workers(work: &WorkSpec, default_n: usize, label: &str) -> Result<usize> {
    match work.num_workers.unwrap_or(default_n) {
        0 => anyhow::bail!(
            "cgroup '{}': num_workers=0 is not allowed — assertions would \
             vacuously pass with no WorkerReports; use at least 1 worker or \
             drop this WorkSpec entry",
            label,
        ),
        n => Ok(n),
    }
}
/// Translates a declarative [`AffinityIntent`] into a concrete
/// `ResolvedAffinity`, constrained to the cgroup's `cpuset` when one is
/// given; with no cpuset, intents fall back to the full topology.
///
/// Degenerate results (empty pools/intersections) resolve to
/// `ResolvedAffinity::None` rather than erroring, except `SmtSiblingPair`,
/// which errors when no eligible sibling pair exists.
pub fn resolve_affinity_for_cgroup(
    kind: &AffinityIntent,
    cpuset: Option<&BTreeSet<usize>>,
    topo: &TestTopology,
) -> Result<ResolvedAffinity> {
    match kind {
        // No explicit affinity: workers keep whatever mask they inherit.
        AffinityIntent::Inherit => Ok(ResolvedAffinity::None),
        AffinityIntent::RandomSubset { from, count } => {
            // Restrict the candidate pool to the cgroup's cpuset when present.
            let pool = if let Some(cs) = cpuset {
                from.intersection(cs).copied().collect::<BTreeSet<usize>>()
            } else {
                from.clone()
            };
            if pool.is_empty() || *count == 0 {
                // Nothing to pick from (or zero picks requested): degrade to
                // no affinity instead of failing the scenario.
                tracing::debug!(
                    pool_len = pool.len(),
                    count = *count,
                    "RandomSubset: empty pool or zero count after \
                    cpuset intersection, falling back to \
                    ResolvedAffinity::None"
                );
                Ok(ResolvedAffinity::None)
            } else {
                Ok(ResolvedAffinity::Random {
                    from: pool,
                    count: *count,
                })
            }
        }
        AffinityIntent::LlcAligned => {
            let pool = cpuset.cloned().unwrap_or_else(|| topo.all_cpuset());
            // Pick the LLC with the largest overlap with the pool. Ties keep
            // the lowest-index LLC: only a strictly greater overlap replaces
            // the current best.
            let mut best_llc = topo.llc_aligned_cpuset(0);
            let mut best_overlap = best_llc.intersection(&pool).count();
            for idx in 1..topo.num_llcs() {
                let llc = topo.llc_aligned_cpuset(idx);
                let overlap = llc.intersection(&pool).count();
                if overlap > best_overlap {
                    best_llc = llc;
                    best_overlap = overlap;
                }
            }
            let effective: BTreeSet<usize> = best_llc.intersection(&pool).copied().collect();
            if effective.is_empty() {
                Ok(ResolvedAffinity::None)
            } else {
                Ok(ResolvedAffinity::Fixed(effective))
            }
        }
        // Intentionally spans the whole topology, ignoring the cgroup cpuset.
        AffinityIntent::CrossCgroup => {
            Ok(ResolvedAffinity::Fixed(topo.all_cpuset()))
        }
        AffinityIntent::SingleCpu => {
            let pool = cpuset.cloned().unwrap_or_else(|| topo.all_cpuset());
            // BTreeSet iterates in ascending order, so this is the
            // lowest-numbered cpu in the pool.
            if let Some(&cpu) = pool.iter().next() {
                Ok(ResolvedAffinity::SingleCpu(cpu))
            } else {
                Ok(ResolvedAffinity::None)
            }
        }
        AffinityIntent::Exact(cpus) => {
            // Clip the requested cpus to the cgroup cpuset when one exists;
            // an empty intersection degrades to no affinity.
            if let Some(cs) = cpuset {
                let effective: BTreeSet<usize> = cpus.intersection(cs).copied().collect();
                if effective.is_empty() {
                    Ok(ResolvedAffinity::None)
                } else {
                    Ok(ResolvedAffinity::Fixed(effective))
                }
            } else {
                Ok(ResolvedAffinity::Fixed(cpus.clone()))
            }
        }
        AffinityIntent::SmtSiblingPair => resolve_smt_sibling_pair(cpuset, topo),
    }
}
/// Finds the first physical core (scanning LLCs in order) with at least two
/// SMT siblings inside the effective cpuset and returns that pair as a fixed
/// affinity. Errors when no such pair exists.
fn resolve_smt_sibling_pair(
    cpuset: Option<&BTreeSet<usize>>,
    topo: &TestTopology,
) -> Result<ResolvedAffinity> {
    let pool = match cpuset {
        Some(cs) => cs.clone(),
        None => topo.all_cpuset(),
    };
    for llc in topo.llcs() {
        for siblings in llc.cores().values() {
            // Take the first two siblings of this core that survive the
            // cpuset filter; a core contributing fewer than two is skipped.
            let mut eligible = siblings.iter().copied().filter(|cpu| pool.contains(cpu));
            if let (Some(a), Some(b)) = (eligible.next(), eligible.next()) {
                let pair: BTreeSet<usize> = [a, b].into_iter().collect();
                return Ok(ResolvedAffinity::Fixed(pair));
            }
        }
    }
    anyhow::bail!(
        "AffinityIntent::SmtSiblingPair requires a physical core with at \
         least two SMT siblings present in the effective cpuset. The \
         current topology and cpuset expose no such pair — \
         threads_per_core may be 1 (SMT disabled or non-SMT host), the \
         cpuset may have isolated each sibling onto a different cgroup, \
         or the topology was built without per-core sibling data. \
         Switch to a different AffinityIntent for non-SMT scheduling \
         tests, or run on a host whose VM topology has \
         threads_per_core >= 2.",
    );
}
/// Resolves an intent against the cgroup cpuset and topology, then flattens
/// the result back into a spawn-ready `AffinityIntent`.
pub(crate) fn intent_for_spawn(
    kind: &AffinityIntent,
    cpuset: Option<&BTreeSet<usize>>,
    topo: &TestTopology,
) -> Result<AffinityIntent> {
    let resolved = resolve_affinity_for_cgroup(kind, cpuset, topo)?;
    Ok(flatten_for_spawn(resolved))
}
/// Converts a `ResolvedAffinity` back into the `AffinityIntent` shape used
/// at spawn time. Degenerate resolutions (empty sets, zero counts) flatten
/// to `Inherit`.
fn flatten_for_spawn(resolved: ResolvedAffinity) -> AffinityIntent {
    match resolved {
        ResolvedAffinity::None => AffinityIntent::Inherit,
        ResolvedAffinity::Fixed(set) if set.is_empty() => AffinityIntent::Inherit,
        ResolvedAffinity::Fixed(set) => AffinityIntent::Exact(set),
        ResolvedAffinity::SingleCpu(cpu) => {
            AffinityIntent::Exact(std::iter::once(cpu).collect())
        }
        ResolvedAffinity::Random { from, count } if count == 0 || from.is_empty() => {
            AffinityIntent::Inherit
        }
        ResolvedAffinity::Random { from, count } => AffinityIntent::RandomSubset { from, count },
    }
}
/// Creates `n` cgroups named `cg_0..cg_{n-1}`, waits out the settle period,
/// verifies the configured scheduler (if any) is still alive, then spawns
/// one clone of `wl` into each cgroup. Returns the workload handles and the
/// guard that removes the cgroups on drop.
pub fn setup_cgroups<'a>(
    ctx: &'a Ctx,
    n: usize,
    wl: &WorkloadConfig,
) -> Result<(Vec<WorkloadHandle>, CgroupGroup<'a>)> {
    let names: Vec<String> = (0..n).map(|i| format!("cg_{i}")).collect();
    let mut guard = CgroupGroup::new(ctx.cgroups);
    for name in &names {
        guard.add_cgroup_no_cpuset(name)?;
    }
    thread::sleep(ctx.settle);
    // Bail early if the scheduler died while we were creating cgroups, so
    // the failure is attributed to the scheduler rather than the workloads.
    if let Some(pid) = ctx.active_sched_pid()
        && !process_alive(pid)
    {
        anyhow::bail!(
            "{} after cgroup creation (pid={})",
            crate::assert::SCHED_DIED_PREFIX,
            pid,
        );
    }
    let handles = spawn_and_move(ctx, &names, |_, _| Ok(wl.clone()))?;
    Ok((handles, guard))
}
/// Stops every workload, collects its worker reports, and merges the
/// per-cgroup assertion results. With worker checks configured, the full
/// cgroup/NUMA assertions run (NUMA nodes derived from the cpuset when both
/// cpuset and topology are available); otherwise only a starvation check.
pub(crate) fn collect_handles<'a>(
    handles: impl IntoIterator<Item = (WorkloadHandle, Option<&'a BTreeSet<usize>>)>,
    checks: &crate::assert::Assert,
    topo: Option<&crate::topology::TestTopology>,
) -> AssertResult {
    let mut result = AssertResult::pass();
    for (handle, cpuset) in handles {
        let reports = handle.stop_and_collect();
        let outcome = if checks.has_worker_checks() {
            let numa_nodes = match (cpuset, topo) {
                (Some(cs), Some(t)) => Some(t.numa_nodes_for_cpuset(cs)),
                _ => None,
            };
            checks.assert_cgroup_with_numa(&reports, cpuset, numa_nodes.as_ref())
        } else {
            assert::assert_not_starved(&reports)
        };
        result.merge(outcome);
    }
    result
}
/// Convenience wrapper around `collect_handles` for workloads that have no
/// associated cpuset (and therefore no NUMA derivation).
pub fn collect_all(handles: Vec<WorkloadHandle>, checks: &crate::assert::Assert) -> AssertResult {
    let paired = handles.into_iter().map(|h| (h, None));
    collect_handles(paired, checks, None)
}
pub fn dfl_wl(ctx: &Ctx) -> WorkloadConfig {
WorkloadConfig {
num_workers: ctx.workers_per_cgroup,
..Default::default()
}
}
/// Test helper: splits the usable cpus into two disjoint halves (the first
/// half gets the lower-numbered cpus; with an odd count the second half is
/// one larger).
#[cfg(test)]
pub fn split_half(ctx: &Ctx) -> (BTreeSet<usize>, BTreeSet<usize>) {
    let usable = ctx.topo.usable_cpus();
    let (front, back) = usable.split_at(usable.len() / 2);
    (
        front.iter().copied().collect(),
        back.iter().copied().collect(),
    )
}
/// Spawns a rotating mix of work types across the given cgroups (cycling
/// through spin-wait, bursty, sync-write I/O, mixed, and yield-heavy).
/// I/O-heavy cgroups are capped at 2 workers; each workload is moved into
/// its cgroup and started before the next one is spawned.
pub fn spawn_diverse(ctx: &Ctx, cgroup_names: &[&str]) -> Result<Vec<WorkloadHandle>> {
    let types = [
        WorkType::SpinWait,
        WorkType::bursty(Duration::from_millis(50), Duration::from_millis(100)),
        WorkType::IoSyncWrite,
        WorkType::Mixed,
        WorkType::YieldHeavy,
    ];
    let mut handles = Vec::with_capacity(cgroup_names.len());
    for (i, name) in cgroup_names.iter().enumerate() {
        let work_type = types[i % types.len()].clone();
        let num_workers = match work_type {
            WorkType::IoSyncWrite => 2,
            _ => ctx.workers_per_cgroup,
        };
        let mut handle = WorkloadHandle::spawn(&WorkloadConfig {
            num_workers,
            work_type,
            ..Default::default()
        })?;
        ctx.cgroups
            .move_tasks(name, &handle.worker_pids_for_cgroup_procs()?)?;
        handle.start();
        handles.push(handle);
    }
    Ok(handles)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn flag_short_name_roundtrip() {
for &f in flags::ALL {
assert_eq!(flags::from_short_name(f), Some(f));
}
}
#[test]
fn flag_all_unique_short_names() {
let unique: std::collections::HashSet<&&str> = flags::ALL.iter().collect();
assert_eq!(flags::ALL.len(), unique.len());
}
#[test]
fn flag_from_short_name_unknown() {
assert_eq!(flags::from_short_name("nonexistent"), None);
}
#[test]
fn profile_name_default() {
assert_eq!(FlagProfile { flags: vec![] }.name(), "default");
}
#[test]
fn profile_name_with_flags() {
let p = FlagProfile {
flags: vec![flags::LLC, flags::BORROW],
};
assert_eq!(p.name(), "llc+borrow");
}
#[test]
fn resolve_affinity_inherit() {
let t = crate::topology::TestTopology::synthetic(8, 2);
assert!(matches!(
resolve_affinity_for_cgroup(&AffinityIntent::Inherit, None, &t).unwrap(),
ResolvedAffinity::None
));
}
#[test]
fn resolve_affinity_single_cpu() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityIntent::SingleCpu, None, &t).unwrap() {
ResolvedAffinity::SingleCpu(c) => assert_eq!(c, 0),
other => panic!("expected SingleCpu, got {:?}", other),
}
}
#[test]
fn resolve_affinity_cross_cgroup() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityIntent::CrossCgroup, None, &t).unwrap() {
ResolvedAffinity::Fixed(cpus) => assert_eq!(cpus.len(), 8),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned() {
let t = crate::topology::TestTopology::synthetic(8, 2);
match resolve_affinity_for_cgroup(&AffinityIntent::LlcAligned, None, &t).unwrap() {
ResolvedAffinity::Fixed(cpus) => assert_eq!(cpus, [0, 1, 2, 3].into_iter().collect()),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned_with_cpuset() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![
[0, 1, 2, 3].into_iter().collect(),
[4, 5, 6, 7].into_iter().collect(),
];
match resolve_affinity_for_cgroup(&AffinityIntent::LlcAligned, cpusets.get(1), &t).unwrap()
{
ResolvedAffinity::Fixed(cpus) => assert_eq!(cpus, [4, 5, 6, 7].into_iter().collect()),
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_affinity_random_subset() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![[0, 1, 2, 3].into_iter().collect()];
let intent = AffinityIntent::random_subset(t.all_cpus().iter().copied(), 2);
match resolve_affinity_for_cgroup(&intent, cpusets.first(), &t).unwrap() {
ResolvedAffinity::Random { from, count } => {
assert_eq!(from, cpusets[0]);
assert_eq!(count, 2); }
other => panic!("expected Random, got {:?}", other),
}
}
#[test]
fn cgroup_work_default() {
let cw = WorkSpec::default();
assert_eq!(cw.num_workers, None);
assert!(matches!(cw.work_type, WorkType::SpinWait));
assert!(matches!(cw.sched_policy, SchedPolicy::Normal));
assert!(matches!(cw.affinity, AffinityIntent::Inherit));
assert!(matches!(cw.mem_policy, MemPolicy::Default));
}
#[test]
fn resolve_affinity_random_no_cpusets() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let intent = AffinityIntent::random_subset(t.all_cpus().iter().copied(), 4);
match resolve_affinity_for_cgroup(&intent, None, &t).unwrap() {
ResolvedAffinity::Random { from, count } => {
assert_eq!(from.len(), 8); assert_eq!(count, 4); }
other => panic!("expected Random, got {:?}", other),
}
}
#[test]
fn resolve_affinity_random_subset_empty_pool_is_none() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let empty: BTreeSet<usize> = BTreeSet::new();
let intent = AffinityIntent::random_subset(t.all_cpus().iter().copied(), 1);
match resolve_affinity_for_cgroup(&intent, Some(&empty), &t).unwrap() {
ResolvedAffinity::None => {}
other => panic!("expected None for empty cpuset, got {:?}", other),
}
}
#[test]
fn resolve_affinity_oob_cgroup_idx_falls_back_to_unrestricted() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let cpusets: Vec<BTreeSet<usize>> = vec![[0, 1].into_iter().collect()];
let oob_idx = 5;
let cpuset = cpusets.get(oob_idx);
assert!(cpuset.is_none(), "OOB index must yield None cpuset");
let intent = AffinityIntent::random_subset(t.all_cpus().iter().copied(), 2);
match resolve_affinity_for_cgroup(&intent, cpuset, &t).unwrap() {
ResolvedAffinity::Random { from, count } => {
assert_eq!(from.len(), 4, "OOB idx falls back to full topology");
assert_eq!(count, 2);
}
other => panic!("expected Random with full pool, got {:?}", other),
}
}
#[test]
fn resolve_affinity_smt_sibling_pair_uses_first_core() {
let vmt = crate::vmm::topology::Topology::new(1, 1, 2, 2);
let t = crate::topology::TestTopology::from_vm_topology(&vmt);
match resolve_affinity_for_cgroup(&AffinityIntent::SmtSiblingPair, None, &t).unwrap() {
ResolvedAffinity::Fixed(cpus) => {
assert_eq!(
cpus,
[0usize, 1].into_iter().collect(),
"SmtSiblingPair must pick the first core's siblings"
);
}
other => panic!("expected Fixed({{0, 1}}), got {:?}", other),
}
}
#[test]
fn resolve_affinity_smt_sibling_pair_skips_partial_cores() {
let vmt = crate::vmm::topology::Topology::new(1, 1, 2, 2);
let t = crate::topology::TestTopology::from_vm_topology(&vmt);
let cpuset: BTreeSet<usize> = [2usize, 3].into_iter().collect();
match resolve_affinity_for_cgroup(&AffinityIntent::SmtSiblingPair, Some(&cpuset), &t)
.unwrap()
{
ResolvedAffinity::Fixed(cpus) => {
assert_eq!(
cpus,
[2usize, 3].into_iter().collect(),
"SmtSiblingPair must skip core 0 when cpuset excludes one of its \
siblings and pick the next eligible pair"
);
}
other => panic!("expected Fixed({{2, 3}}), got {:?}", other),
}
}
#[test]
fn resolve_affinity_smt_sibling_pair_errors_without_smt() {
let vmt = crate::vmm::topology::Topology::new(1, 1, 4, 1);
let t = crate::topology::TestTopology::from_vm_topology(&vmt);
let err = resolve_affinity_for_cgroup(&AffinityIntent::SmtSiblingPair, None, &t)
.expect_err("threads_per_core=1 must produce an error, not silent fallback");
let msg = err.to_string();
assert!(
msg.contains("SmtSiblingPair"),
"diagnostic must name the variant, got: {msg}"
);
assert!(
msg.contains("two SMT siblings"),
"diagnostic must explain the missing precondition, got: {msg}"
);
}
#[test]
fn resolve_affinity_smt_sibling_pair_errors_when_cpuset_breaks_pairs() {
let vmt = crate::vmm::topology::Topology::new(1, 1, 2, 2);
let t = crate::topology::TestTopology::from_vm_topology(&vmt);
let cpuset: BTreeSet<usize> = [0usize, 2].into_iter().collect();
let err = resolve_affinity_for_cgroup(&AffinityIntent::SmtSiblingPair, Some(&cpuset), &t)
.expect_err("cpuset that breaks every sibling pair must error");
let msg = err.to_string();
assert!(
msg.contains("SmtSiblingPair"),
"diagnostic must name the variant, got: {msg}"
);
}
#[test]
fn resolve_affinity_smt_sibling_pair_errors_on_synthetic_topology() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let err = resolve_affinity_for_cgroup(&AffinityIntent::SmtSiblingPair, None, &t)
.expect_err("synthetic topology has no per-core sibling data — must error");
let msg = err.to_string();
assert!(
msg.contains("SmtSiblingPair"),
"diagnostic must name the variant, got: {msg}"
);
}
#[test]
fn split_half_even() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 4,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let (a, b) = split_half(&ctx);
assert_eq!(a.len() + b.len(), 7);
assert!(a.intersection(&b).count() == 0, "halves should not overlap");
}
#[test]
fn split_half_small() {
let t = crate::topology::TestTopology::synthetic(2, 1);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 1,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let (a, b) = split_half(&ctx);
assert_eq!(a.len() + b.len(), 2);
}
#[test]
fn dfl_wl_propagates_workers() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let ctx_cg = crate::cgroup::CgroupManager::new("/nonexistent");
let ctx = Ctx {
cgroups: &ctx_cg,
topo: &t,
duration: std::time::Duration::from_secs(1),
workers_per_cgroup: 7,
sched_pid: None,
settle: Duration::from_millis(3000),
work_type_override: None,
assert: assert::Assert::default_checks(),
wait_for_map_write: false,
};
let wl = dfl_wl(&ctx);
assert_eq!(wl.num_workers, 7);
assert!(matches!(wl.work_type, WorkType::SpinWait));
}
#[test]
fn process_alive_self_is_true() {
let pid: libc::pid_t = unsafe { libc::getpid() };
assert!(process_alive(pid));
}
#[test]
fn ctx_active_sched_pid_treats_nonpositive_as_unconfigured() {
let cg = crate::cgroup::CgroupManager::new("/nonexistent");
let topo = crate::topology::TestTopology::synthetic(1, 1);
let ctx_zero = Ctx::builder(&cg, &topo).sched_pid(Some(0)).build();
assert_eq!(
ctx_zero.sched_pid,
Some(0),
"builder must preserve the literal value — the gate lives in the accessor",
);
assert_eq!(
ctx_zero.active_sched_pid(),
None,
"Some(0) must be treated as unconfigured, otherwise the liveness \
bails fire on tests that never ran a scheduler",
);
let ctx_neg = Ctx::builder(&cg, &topo).sched_pid(Some(-1)).build();
assert_eq!(
ctx_neg.active_sched_pid(),
None,
"negative pid must be treated as unconfigured",
);
let ctx_min = Ctx::builder(&cg, &topo)
.sched_pid(Some(libc::pid_t::MIN))
.build();
assert_eq!(
ctx_min.active_sched_pid(),
None,
"pid_t::MIN must be treated as unconfigured — the filter \
is `p > 0`, and the most-negative pid_t stays unconfigured \
under that predicate by construction",
);
let ctx_pos = Ctx::builder(&cg, &topo).sched_pid(Some(1234)).build();
assert_eq!(
ctx_pos.active_sched_pid(),
Some(1234),
"positive pid must pass through unchanged",
);
let ctx_max = Ctx::builder(&cg, &topo)
.sched_pid(Some(libc::pid_t::MAX))
.build();
assert_eq!(
ctx_max.active_sched_pid(),
Some(libc::pid_t::MAX),
"pid_t::MAX must pass the filter — `p > 0` accepts it. \
Liveness determination is the responsibility of the \
downstream `process_alive` call, not this accessor.",
);
let ctx_none = Ctx::builder(&cg, &topo).sched_pid(None).build();
assert_eq!(
ctx_none.active_sched_pid(),
None,
"None must pass through unchanged",
);
}
#[test]
fn process_alive_zero_is_false() {
assert!(!process_alive(0));
}
#[test]
fn process_alive_negative_is_false() {
assert!(!process_alive(-1));
assert!(!process_alive(libc::pid_t::MIN));
}
#[test]
fn process_alive_nonexistent_pid() {
assert!(!process_alive(libc::pid_t::MAX));
}
#[test]
fn cgroup_group_new_empty() {
let cg = crate::cgroup::CgroupManager::new("/nonexistent");
let group = CgroupGroup::new(&cg);
assert!(group.names().is_empty());
}
#[test]
fn decl_by_name_valid() {
for &name in flags::ALL {
assert!(flags::decl_by_name(name).is_some(), "should find {name}");
}
}
#[test]
fn decl_by_name_unknown() {
assert!(flags::decl_by_name("nonexistent").is_none());
}
#[test]
fn decl_by_name_steal_requires_llc() {
let steal = flags::decl_by_name("steal").unwrap();
assert_eq!(steal.requires.len(), 1);
assert_eq!(steal.requires[0].name, "llc");
}
#[test]
fn decl_by_name_borrow_no_requires() {
let borrow = flags::decl_by_name("borrow").unwrap();
assert!(borrow.requires.is_empty());
}
#[test]
fn profile_name_three_flags() {
let p = FlagProfile {
flags: vec![flags::LLC, flags::BORROW, flags::REBAL],
};
assert_eq!(p.name(), "llc+borrow+rebal");
}
#[test]
fn all_decls_matches_all_strings() {
assert_eq!(flags::ALL_DECLS.len(), flags::ALL.len());
for (decl, &name) in flags::ALL_DECLS.iter().zip(flags::ALL.iter()) {
assert_eq!(decl.name, name);
}
}
#[test]
fn resolve_affinity_single_cpu_with_cpuset() {
let t = crate::topology::TestTopology::synthetic(4, 1);
let cpusets: Vec<BTreeSet<usize>> = vec![[2, 3].into_iter().collect()];
match resolve_affinity_for_cgroup(&AffinityIntent::SingleCpu, cpusets.first(), &t).unwrap()
{
ResolvedAffinity::SingleCpu(c) => assert_eq!(c, 2),
other => panic!("expected SingleCpu, got {:?}", other),
}
}
#[test]
fn resolve_affinity_llc_aligned_picks_best_overlap() {
let t = crate::topology::TestTopology::synthetic(8, 2);
let cpusets: Vec<BTreeSet<usize>> = vec![[3, 4, 5, 6, 7].into_iter().collect()];
match resolve_affinity_for_cgroup(&AffinityIntent::LlcAligned, cpusets.first(), &t).unwrap()
{
ResolvedAffinity::Fixed(cpus) => {
assert_eq!(cpus, [4, 5, 6, 7].into_iter().collect());
}
other => panic!("expected Fixed, got {:?}", other),
}
}
#[test]
fn resolve_num_workers_zero_rejected_with_label() {
let w = WorkSpec {
num_workers: Some(0),
..Default::default()
};
let err = resolve_num_workers(&w, 4, "victim").unwrap_err();
let msg = format!("{err}");
assert!(
msg.contains("cgroup 'victim'"),
"label must appear in error: {msg}"
);
assert!(
msg.contains("num_workers=0"),
"error must name the offending field: {msg}"
);
}
#[test]
fn resolve_num_workers_zero_default_also_rejected() {
let w = WorkSpec {
num_workers: None,
..Default::default()
};
assert!(resolve_num_workers(&w, 0, "cg").is_err());
}
#[test]
fn resolve_num_workers_falls_back_to_default() {
let w = WorkSpec {
num_workers: None,
..Default::default()
};
assert_eq!(resolve_num_workers(&w, 3, "cg").unwrap(), 3);
}
#[test]
fn resolve_num_workers_explicit_wins_over_default() {
let w = WorkSpec {
num_workers: Some(7),
..Default::default()
};
assert_eq!(resolve_num_workers(&w, 3, "cg").unwrap(), 7);
}
struct DropErrCgroupOps {
parent: std::path::PathBuf,
remove_kind: std::io::ErrorKind,
raw_os_error: Option<i32>,
remove_calls: std::sync::Mutex<Vec<String>>,
}
impl DropErrCgroupOps {
fn new(kind: std::io::ErrorKind, raw: Option<i32>) -> Self {
Self {
parent: std::path::PathBuf::from("/mock/cgroup"),
remove_kind: kind,
raw_os_error: raw,
remove_calls: std::sync::Mutex::new(Vec::new()),
}
}
fn calls(&self) -> Vec<String> {
self.remove_calls.lock().unwrap().clone()
}
}
// CgroupOps impl where every operation is a successful no-op except
// `remove_cgroup`, which records the requested name and then fails with
// the configured error. This isolates CgroupGroup's Drop-time error
// handling from the rest of the cgroup machinery.
impl crate::cgroup::CgroupOps for DropErrCgroupOps {
    fn parent_path(&self) -> &std::path::Path {
        &self.parent
    }
    fn setup(&self, _: &std::collections::BTreeSet<crate::cgroup::Controller>) -> Result<()> {
        Ok(())
    }
    fn create_cgroup(&self, _: &str) -> Result<()> {
        Ok(())
    }
    // Record the call, then fail: from the raw errno when configured,
    // otherwise from the bare ErrorKind. The anyhow context layer ensures
    // callers that downcast through context chains are exercised too.
    fn remove_cgroup(&self, name: &str) -> Result<()> {
        self.remove_calls.lock().unwrap().push(name.to_string());
        let io = match self.raw_os_error {
            Some(errno) => std::io::Error::from_raw_os_error(errno),
            None => std::io::Error::from(self.remove_kind),
        };
        Err(anyhow::Error::new(io).context("remove_dir cgroup"))
    }
    // Everything below is a successful no-op: these knobs are irrelevant
    // to the removal-failure scenarios this mock exists for.
    fn set_cpuset(&self, _: &str, _: &BTreeSet<usize>) -> Result<()> {
        Ok(())
    }
    fn clear_cpuset(&self, _: &str) -> Result<()> {
        Ok(())
    }
    fn set_cpuset_mems(&self, _: &str, _: &BTreeSet<usize>) -> Result<()> {
        Ok(())
    }
    fn clear_cpuset_mems(&self, _: &str) -> Result<()> {
        Ok(())
    }
    fn set_cpu_max(&self, _: &str, _: Option<u64>, _: u64) -> Result<()> {
        Ok(())
    }
    fn set_cpu_weight(&self, _: &str, _: u32) -> Result<()> {
        Ok(())
    }
    fn set_memory_max(&self, _: &str, _: Option<u64>) -> Result<()> {
        Ok(())
    }
    fn set_memory_high(&self, _: &str, _: Option<u64>) -> Result<()> {
        Ok(())
    }
    fn set_memory_low(&self, _: &str, _: Option<u64>) -> Result<()> {
        Ok(())
    }
    fn set_io_weight(&self, _: &str, _: u16) -> Result<()> {
        Ok(())
    }
    fn set_freeze(&self, _: &str, _: bool) -> Result<()> {
        Ok(())
    }
    fn set_pids_max(&self, _: &str, _: Option<u64>) -> Result<()> {
        Ok(())
    }
    fn set_memory_swap_max(&self, _: &str, _: Option<u64>) -> Result<()> {
        Ok(())
    }
    fn move_task(&self, _: &str, _: libc::pid_t) -> Result<()> {
        Ok(())
    }
    fn move_tasks(&self, _: &str, _: &[libc::pid_t]) -> Result<()> {
        Ok(())
    }
    fn clear_subtree_control(&self, _: &str) -> Result<()> {
        Ok(())
    }
    fn drain_tasks(&self, _: &str) -> Result<()> {
        Ok(())
    }
    fn cleanup_all(&self) -> Result<()> {
        Ok(())
    }
}
#[test]
fn cgroup_group_drop_is_panic_free_on_every_error_kind() {
    // Drop must swallow remove_cgroup failures of any flavor and still
    // attempt removal of every tracked child, newest first.
    let cases = [
        ("ENOENT", std::io::ErrorKind::NotFound, Some(libc::ENOENT)),
        ("EBUSY", std::io::ErrorKind::Other, Some(libc::EBUSY)),
        (
            "EACCES",
            std::io::ErrorKind::PermissionDenied,
            Some(libc::EACCES),
        ),
        ("generic-IO", std::io::ErrorKind::Other, None),
    ];
    for (label, kind, raw) in cases {
        let mock = DropErrCgroupOps::new(kind, raw);
        {
            // Scope the group so Drop fires here, while `mock` is still alive.
            let mut group = CgroupGroup::new(&mock);
            for child in ["child-a", "child-b"] {
                group.names.push(child.to_string());
            }
        }
        assert_eq!(
            mock.calls(),
            vec!["child-b".to_string(), "child-a".to_string()],
            "[{label}] Drop must call remove_cgroup for every tracked name in reverse order",
        );
    }
}
#[test]
fn is_io_not_found_matches_only_notfound() {
    // Wrap an io error in an anyhow context layer, the same shape
    // production errors arrive in.
    fn wrap(k: std::io::ErrorKind) -> anyhow::Error {
        anyhow::Error::new(std::io::Error::from(k)).context("wrap")
    }
    assert!(is_io_not_found(&wrap(std::io::ErrorKind::NotFound)));
    assert!(!is_io_not_found(&wrap(
        std::io::ErrorKind::PermissionDenied
    )));
    assert!(!is_io_not_found(&wrap(std::io::ErrorKind::Other)));
    // A non-io error must never match, even when its text says "not found".
    let no_io = anyhow::anyhow!("cgroup not found in parent");
    assert!(!is_io_not_found(&no_io));
}
#[test]
fn remove_cgroup_errno_hint_covers_ebusy_and_eacces() {
    // Build a context-wrapped io error from a raw errno, mirroring the
    // shape removal failures take in this module's mock above.
    fn errno_err(errno: i32) -> anyhow::Error {
        anyhow::Error::new(std::io::Error::from_raw_os_error(errno)).context("wrap")
    }
    let busy = errno_err(libc::EBUSY);
    let acces = errno_err(libc::EACCES);
    let enotempty = errno_err(libc::ENOTEMPTY);
    let non_io = anyhow::anyhow!("not an io error");
    assert!(
        remove_cgroup_errno_hint(&busy)
            .is_some_and(|h| h.contains("EBUSY") && h.contains("drain")),
        "EBUSY hint must name the errno and the drain remediation",
    );
    assert!(
        remove_cgroup_errno_hint(&acces)
            .is_some_and(|h| h.contains("EACCES") && h.contains("permission")),
        "EACCES hint must name the errno and the permission angle",
    );
    assert_eq!(
        remove_cgroup_errno_hint(&enotempty),
        None,
        "unclassified errnos must yield no hint so warn stays terse",
    );
    assert_eq!(
        remove_cgroup_errno_hint(&non_io),
        None,
        "non-io root causes must yield no hint",
    );
}
#[test]
fn flatten_for_spawn_none_to_inherit() {
    // No resolved affinity means the worker inherits its parent's mask.
    match flatten_for_spawn(ResolvedAffinity::None) {
        AffinityIntent::Inherit => {}
        out => panic!("ResolvedAffinity::None must flatten to Inherit, got {out:?}"),
    }
}
#[test]
fn flatten_for_spawn_fixed_to_exact() {
    // A non-empty Fixed set must survive flattening unchanged, as Exact.
    let wanted: BTreeSet<usize> = BTreeSet::from([1usize, 3, 5]);
    match flatten_for_spawn(ResolvedAffinity::Fixed(wanted.clone())) {
        AffinityIntent::Exact(got) => {
            assert_eq!(got, wanted, "Fixed payload must round-trip into Exact");
        }
        other => panic!("expected Exact, got {other:?}"),
    }
}
#[test]
fn flatten_for_spawn_fixed_empty_to_inherit() {
    // An empty CPU mask would EINVAL at sched_setaffinity, so Fixed(empty)
    // must degrade to Inherit rather than be passed through.
    match flatten_for_spawn(ResolvedAffinity::Fixed(BTreeSet::new())) {
        AffinityIntent::Inherit => {}
        out => panic!(
            "Fixed(empty) must degrade to Inherit (an empty mask would \
             EINVAL at sched_setaffinity), got {out:?}"
        ),
    }
}
#[test]
fn flatten_for_spawn_single_cpu_to_exact_singleton() {
    // SingleCpu(n) becomes Exact({n}): one concrete CPU in the mask.
    let expected = BTreeSet::from([7usize]);
    match flatten_for_spawn(ResolvedAffinity::SingleCpu(7)) {
        AffinityIntent::Exact(got) => {
            assert_eq!(got, expected, "SingleCpu must flatten to a 1-CPU Exact set");
        }
        other => panic!("expected Exact({{7}}), got {other:?}"),
    }
}
#[test]
fn flatten_for_spawn_random_to_random_subset() {
    // Random { from, count } must pass both fields through untouched.
    let pool = BTreeSet::from([0usize, 1, 2, 3]);
    let flattened = flatten_for_spawn(ResolvedAffinity::Random {
        from: pool.clone(),
        count: 2,
    });
    match flattened {
        AffinityIntent::RandomSubset { from, count } => {
            assert_eq!(from, pool, "Random.from must round-trip verbatim");
            assert_eq!(count, 2, "Random.count must round-trip verbatim");
        }
        other => panic!("expected RandomSubset, got {other:?}"),
    }
}
#[test]
fn flatten_for_spawn_random_empty_pool_to_inherit() {
    // With nothing to pick from, Random must collapse to Inherit instead
    // of producing a RandomSubset the spawn-time gate would reject.
    let flattened = flatten_for_spawn(ResolvedAffinity::Random {
        from: BTreeSet::new(),
        count: 4,
    });
    match flattened {
        AffinityIntent::Inherit => {}
        out => panic!(
            "Random with empty pool must degrade to Inherit (the \
             spawn-time gate rejects empty-pool RandomSubset), got {out:?}"
        ),
    }
}
#[test]
fn flatten_for_spawn_random_zero_count_to_inherit() {
    // A zero pick count must collapse to Inherit instead of producing a
    // RandomSubset the spawn-time gate would reject.
    let pool = BTreeSet::from([0usize, 1, 2, 3]);
    match flatten_for_spawn(ResolvedAffinity::Random { from: pool, count: 0 }) {
        AffinityIntent::Inherit => {}
        out => panic!(
            "Random with count=0 must degrade to Inherit (the \
             spawn-time gate rejects count=0 RandomSubset), got {out:?}"
        ),
    }
}
#[test]
fn intent_for_spawn_full_pipeline() {
    // Run each intent variant through the full resolve+flatten pipeline on
    // a 1-node / 2-LLC / 2-core / 2-thread topology (8 CPUs total).
    let vmt = crate::vmm::topology::Topology::new(1, 2, 2, 2);
    let t = crate::topology::TestTopology::from_vm_topology(&vmt);

    // Unwrap an Exact mask or fail with the same message the original
    // per-variant checks used.
    fn expect_exact(out: AffinityIntent) -> BTreeSet<usize> {
        match out {
            AffinityIntent::Exact(set) => set,
            other => panic!("expected Exact, got {other:?}"),
        }
    }

    let out = intent_for_spawn(&AffinityIntent::Inherit, None, &t).unwrap();
    assert!(
        matches!(out, AffinityIntent::Inherit),
        "Inherit must round-trip, got {out:?}"
    );

    let set = expect_exact(intent_for_spawn(&AffinityIntent::SingleCpu, None, &t).unwrap());
    assert_eq!(set.len(), 1, "SingleCpu flattens to a 1-CPU Exact set");

    let set = expect_exact(intent_for_spawn(&AffinityIntent::CrossCgroup, None, &t).unwrap());
    assert_eq!(set.len(), 8, "CrossCgroup flattens to all-CPU Exact set");

    let set = expect_exact(intent_for_spawn(&AffinityIntent::SmtSiblingPair, None, &t).unwrap());
    assert_eq!(set.len(), 2, "SmtSiblingPair flattens to a 2-CPU Exact set");
    assert_eq!(
        set,
        [0usize, 1].into_iter().collect(),
        "SmtSiblingPair must pick the first core's siblings"
    );

    let pool: BTreeSet<usize> = [0usize, 1, 2, 3].into_iter().collect();
    let intent = AffinityIntent::random_subset(pool.iter().copied(), 2);
    match intent_for_spawn(&intent, None, &t).unwrap() {
        AffinityIntent::RandomSubset { from, count } => {
            assert_eq!(from, pool, "RandomSubset.from must round-trip");
            assert_eq!(count, 2, "RandomSubset.count must round-trip");
        }
        other => panic!("expected RandomSubset, got {other:?}"),
    }

    // An empty cpuset leaves nothing to intersect with, so the pipeline
    // must flatten RandomSubset all the way down to Inherit.
    let empty_cpuset: BTreeSet<usize> = BTreeSet::new();
    let intent = AffinityIntent::random_subset(t.all_cpus().iter().copied(), 1);
    let out = intent_for_spawn(&intent, Some(&empty_cpuset), &t).unwrap();
    assert!(
        matches!(out, AffinityIntent::Inherit),
        "RandomSubset with empty cpuset intersection must flatten \
         to Inherit, got {out:?}"
    );
}
}