use std::borrow::Cow;
use std::collections::BTreeSet;
use std::time::Duration;
use crate::scenario::Ctx;
use crate::workload::{AffinityIntent, WorkSpec, WorkType};
/// A single scripted action executed during a scenario step.
///
/// `strum::EnumDiscriminants` derives a field-less mirror enum named `OpKind`
/// (public, iterable via `EnumIter`) used for the bit-index mapping in
/// `OpKind::bit_index`.
#[derive(Clone, Debug, strum::EnumDiscriminants)]
#[strum_discriminants(name(OpKind))]
#[strum_discriminants(derive(strum::EnumIter))]
#[strum_discriminants(vis(pub))]
#[non_exhaustive]
pub enum Op {
    /// Create a cgroup with the given name.
    AddCgroup { name: Cow<'static, str> },
    /// Remove an existing cgroup.
    RemoveCgroup { cgroup: Cow<'static, str> },
    /// Assign the cpuset described by `cpus` to `cgroup`.
    SetCpuset {
        cgroup: Cow<'static, str>,
        cpus: CpusetSpec,
    },
    /// Clear `cgroup`'s cpuset.
    ClearCpuset { cgroup: Cow<'static, str> },
    /// Exchange the cpusets of cgroups `a` and `b`.
    SwapCpusets {
        a: Cow<'static, str>,
        b: Cow<'static, str>,
    },
    /// Start `work` inside `cgroup`.
    Spawn {
        cgroup: Cow<'static, str>,
        work: WorkSpec,
    },
    /// Stop the work running in `cgroup`.
    StopCgroup { cgroup: Cow<'static, str> },
    /// Apply an affinity intent to `cgroup`.
    SetAffinity {
        cgroup: Cow<'static, str>,
        affinity: AffinityIntent,
    },
    /// Start `work` on the host, outside any managed cgroup.
    SpawnHost { work: WorkSpec },
    /// Move every task from cgroup `from` into cgroup `to`.
    MoveAllTasks {
        from: Cow<'static, str>,
        to: Cow<'static, str>,
    },
    /// Launch a payload binary with `args`, optionally inside `cgroup`
    /// (`None` means not tied to a managed cgroup).
    RunPayload {
        payload: &'static crate::test_support::Payload,
        args: Vec<String>,
        cgroup: Option<Cow<'static, str>>,
    },
    /// Wait on the payload identified by `name`, optionally scoped to `cgroup`.
    WaitPayload {
        name: Cow<'static, str>,
        cgroup: Option<Cow<'static, str>>,
    },
    /// Kill the payload identified by `name`, optionally scoped to `cgroup`.
    KillPayload {
        name: Cow<'static, str>,
        cgroup: Option<Cow<'static, str>>,
    },
    /// Freeze `cgroup` (cgroup freezer semantics — executed elsewhere;
    /// TODO confirm against the executor).
    FreezeCgroup { cgroup: Cow<'static, str> },
    /// Unfreeze a previously frozen `cgroup`.
    UnfreezeCgroup { cgroup: Cow<'static, str> },
    /// Record a snapshot under `name`.
    Snapshot { name: Cow<'static, str> },
    /// Watch a snapshot `symbol` (semantics live in the executor —
    /// TODO confirm).
    WatchSnapshot { symbol: Cow<'static, str> },
}
/// Declarative description of which CPUs a cgroup should be confined to;
/// turned into a concrete CPU id set by [`CpusetSpec::resolve`] against a
/// live topology.
#[derive(Clone, Debug, PartialEq)]
#[non_exhaustive]
pub enum CpusetSpec {
    /// All CPUs sharing last-level cache `index`.
    Llc(usize),
    /// All CPUs on NUMA node `index`.
    Numa(usize),
    /// The fractional slice `[start_frac, end_frac)` of the usable CPU list.
    Range { start_frac: f64, end_frac: f64 },
    /// Partition `index` when the usable CPUs are split into `of` disjoint
    /// chunks (the last chunk absorbs any remainder).
    Disjoint { index: usize, of: usize },
    /// Like `Disjoint`, but each chunk is widened on both sides by `frac`
    /// of a chunk so neighboring partitions overlap.
    Overlap { index: usize, of: usize, frac: f64 },
    /// A literal, explicit CPU id set.
    Exact(BTreeSet<usize>),
}
impl CpusetSpec {
pub fn exact(cpus: impl IntoIterator<Item = usize>) -> Self {
CpusetSpec::Exact(cpus.into_iter().collect())
}
pub fn disjoint(index: usize, of: usize) -> Self {
CpusetSpec::Disjoint { index, of }
}
pub fn overlap(index: usize, of: usize, frac: f64) -> Self {
CpusetSpec::Overlap { index, of, frac }
}
pub fn range(start_frac: f64, end_frac: f64) -> Self {
CpusetSpec::Range {
start_frac,
end_frac,
}
}
pub fn llc(index: usize) -> Self {
CpusetSpec::Llc(index)
}
pub fn numa(index: usize) -> Self {
CpusetSpec::Numa(index)
}
}
/// CPU controller limits for a cgroup (cpu.max quota/period and cpu.weight).
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[non_exhaustive]
pub struct CpuLimits {
    /// Quota in microseconds per period; `None` means unlimited.
    pub max_quota_us: Option<u64>,
    /// Period in microseconds (builders seed this with 100_000).
    pub max_period_us: u64,
    /// cpu.weight; `None` leaves the controller default.
    pub weight: Option<u32>,
}
/// Memory controller limits for a cgroup; `None` in any field means
/// "leave unset / unlimited".
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[non_exhaustive]
pub struct MemoryLimits {
    /// memory.max in bytes.
    pub max: Option<u64>,
    /// memory.high in bytes.
    pub high: Option<u64>,
    /// memory.low in bytes.
    pub low: Option<u64>,
    /// memory.swap.max in bytes.
    pub swap_max: Option<u64>,
}
/// Pids controller limit for a cgroup; `None` means unlimited.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[non_exhaustive]
pub struct PidsLimits {
    /// pids.max.
    pub max: Option<u64>,
}
/// IO controller settings for a cgroup; `None` leaves the default.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[non_exhaustive]
pub struct IoLimits {
    /// io.weight.
    pub weight: Option<u16>,
}
/// Full specification of one cgroup in a scenario: identity, cpuset,
/// workloads, resource limits, and per-work defaults that
/// [`CgroupDef::merged_works`] folds into each `WorkSpec`.
#[derive(Clone, Debug)]
pub struct CgroupDef {
    /// Cgroup name (the `Default` impl uses "cg_0").
    pub name: Cow<'static, str>,
    /// Optional cpuset to apply to the cgroup.
    pub cpuset: Option<CpusetSpec>,
    /// Workload specs; an empty list is treated as one default `WorkSpec`
    /// by `merged_works`.
    pub works: Vec<WorkSpec>,
    /// Whether this cgroup participates in cpuset swapping —
    /// TODO(review): confirm exact semantics against the executor.
    pub swappable: bool,
    /// Optional userspace binary payload attached via `workload()`.
    pub payload: Option<&'static crate::test_support::Payload>,
    /// Optional cpuset.mems NUMA-node restriction.
    pub cpuset_mems: Option<BTreeSet<usize>>,
    /// CPU controller limits, if any.
    pub cpu: Option<CpuLimits>,
    /// Memory controller limits, if any.
    pub memory: Option<MemoryLimits>,
    /// IO controller settings, if any.
    pub io: Option<IoLimits>,
    /// Pids controller limits, if any.
    pub pids: Option<PidsLimits>,
    // The default_* fields below are fallbacks merged into each WorkSpec
    // that leaves the corresponding field unset (see merged_works).
    pub default_nice: Option<i32>,
    pub default_comm: Option<Cow<'static, str>>,
    pub default_uid: Option<u32>,
    pub default_gid: Option<u32>,
    pub default_numa_node: Option<u32>,
}
impl CgroupDef {
    /// Starts a definition named `name`; every other field takes its
    /// `Default` value (no works, no cpuset, no resource limits).
    #[must_use = "dropping a CgroupDef discards the cgroup specification"]
    pub fn named(name: impl Into<Cow<'static, str>>) -> Self {
        Self {
            name: name.into(),
            ..Default::default()
        }
    }
    /// Sets the cpuset specification for this cgroup.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn with_cpuset(mut self, cpus: CpusetSpec) -> Self {
        self.cpuset = Some(cpus);
        self
    }
    /// Appends a workload spec to this cgroup.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn work(mut self, w: WorkSpec) -> Self {
        self.works.push(w);
        self
    }
    /// Ensures `works[0]` exists so the per-work shortcut builders below
    /// have a `WorkSpec` to write into.
    fn ensure_default_work(&mut self) {
        if self.works.is_empty() {
            self.works.push(WorkSpec::default());
        }
    }
    /// Sets the worker count on the first `WorkSpec` only (creating a
    /// default one if none exists).
    #[must_use = "builder methods consume self; bind the result"]
    pub fn workers(mut self, n: usize) -> Self {
        self.ensure_default_work();
        self.works[0].num_workers = Some(n);
        self
    }
    /// Sets the work type on the first `WorkSpec` only.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn work_type(mut self, wt: WorkType) -> Self {
        self.ensure_default_work();
        self.works[0].work_type = wt;
        self
    }
    /// Sets the scheduling policy on the first `WorkSpec` only.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn sched_policy(mut self, p: crate::workload::SchedPolicy) -> Self {
        self.ensure_default_work();
        self.works[0].sched_policy = p;
        self
    }
    /// Sets the affinity intent on the first `WorkSpec` only.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn affinity(mut self, a: crate::workload::AffinityIntent) -> Self {
        self.ensure_default_work();
        self.works[0].affinity = a;
        self
    }
    /// Sets the memory policy on the first `WorkSpec` only.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn mem_policy(mut self, p: crate::workload::MemPolicy) -> Self {
        self.ensure_default_work();
        self.works[0].mem_policy = p;
        self
    }
    /// Sets the mempolicy flags on the first `WorkSpec` only.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn mpol_flags(mut self, f: crate::workload::MpolFlags) -> Self {
        self.ensure_default_work();
        self.works[0].mpol_flags = f;
        self
    }
    /// Default nice value, merged into each `WorkSpec` that sets none
    /// (see `merged_works`).
    #[must_use = "builder methods consume self; bind the result"]
    pub fn nice(mut self, n: i32) -> Self {
        self.default_nice = Some(n);
        self
    }
    /// Default comm, merged into each `WorkSpec` that sets none.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn comm(mut self, name: impl Into<std::borrow::Cow<'static, str>>) -> Self {
        self.default_comm = Some(name.into());
        self
    }
    /// Default uid, merged into each `WorkSpec` that sets none.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn uid(mut self, uid: u32) -> Self {
        self.default_uid = Some(uid);
        self
    }
    /// Default gid, merged into each `WorkSpec` that sets none.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn gid(mut self, gid: u32) -> Self {
        self.default_gid = Some(gid);
        self
    }
    /// Fan-out: overwrites `pcomm` on every `WorkSpec` present at call time
    /// (pushing a default `WorkSpec` first if there are none). This is NOT a
    /// merge-if-unset default — `WorkSpec`s appended later keep their own
    /// `pcomm`.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn pcomm(mut self, name: impl Into<Cow<'static, str>>) -> Self {
        let name: Cow<'static, str> = name.into();
        // Use the shared helper instead of duplicating its body inline.
        self.ensure_default_work();
        for w in &mut self.works {
            w.pcomm = Some(name.clone());
        }
        self
    }
    /// Default NUMA node, merged into each `WorkSpec` that sets none.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn numa_node(mut self, node: u32) -> Self {
        self.default_numa_node = Some(node);
        self
    }
    /// Marks whether this cgroup may take part in cpuset swaps —
    /// TODO(review): confirm exact semantics against the executor.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn swappable(mut self, swappable: bool) -> Self {
        self.swappable = swappable;
        self
    }
    /// Attaches a userspace binary payload to this cgroup.
    ///
    /// # Panics
    /// Panics if `p` is a scheduler-kind payload; scheduler placement goes
    /// through the test attribute instead.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn workload(mut self, p: &'static crate::test_support::Payload) -> Self {
        assert!(
            !p.is_scheduler(),
            "CgroupDef::workload called with a scheduler-kind Payload ({}); \
             CgroupDef.workload is for userspace binary payloads only. \
             Use #[ktstr_test(scheduler = ...)] for scheduler placement.",
            p.name,
        );
        self.payload = Some(p);
        self
    }
    /// Restricts cpuset.mems to the given NUMA nodes.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn with_cpuset_mems(mut self, nodes: BTreeSet<usize>) -> Self {
        self.cpuset_mems = Some(nodes);
        self
    }
    /// Sets a CPU bandwidth quota as a percentage of one CPU: the period is
    /// fixed at 100ms, so quota_us = pct * 1000.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn cpu_quota_pct(mut self, pct: u32) -> Self {
        let cpu = self.cpu.get_or_insert_with(default_cpu_limits);
        cpu.max_period_us = 100_000;
        cpu.max_quota_us = Some((pct as u64) * 1_000);
        self
    }
    /// Sets an explicit quota/period pair (truncated to whole microseconds).
    #[must_use = "builder methods consume self; bind the result"]
    pub fn cpu_quota(mut self, quota: Duration, period: Duration) -> Self {
        let cpu = self.cpu.get_or_insert_with(default_cpu_limits);
        cpu.max_quota_us = Some(quota.as_micros() as u64);
        cpu.max_period_us = period.as_micros() as u64;
        self
    }
    /// Removes any CPU quota while keeping period/weight settings.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn cpu_unlimited(mut self) -> Self {
        let cpu = self.cpu.get_or_insert_with(default_cpu_limits);
        cpu.max_quota_us = None;
        self
    }
    /// Sets cpu.weight.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn cpu_weight(mut self, weight: u32) -> Self {
        let cpu = self.cpu.get_or_insert_with(default_cpu_limits);
        cpu.weight = Some(weight);
        self
    }
    /// Sets memory.max in bytes.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_max(mut self, bytes: u64) -> Self {
        let m = self.memory.get_or_insert_with(MemoryLimits::default);
        m.max = Some(bytes);
        self
    }
    /// Sets memory.high in bytes.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_high(mut self, bytes: u64) -> Self {
        let m = self.memory.get_or_insert_with(MemoryLimits::default);
        m.high = Some(bytes);
        self
    }
    /// Sets memory.low in bytes.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_low(mut self, bytes: u64) -> Self {
        let m = self.memory.get_or_insert_with(MemoryLimits::default);
        m.low = Some(bytes);
        self
    }
    /// Resets ALL memory limits (max/high/low/swap_max) by replacing the
    /// whole `MemoryLimits` with its default — not just memory.max.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_unlimited(mut self) -> Self {
        self.memory = Some(MemoryLimits::default());
        self
    }
    /// Sets io.weight.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn io_weight(mut self, weight: u16) -> Self {
        let io = self.io.get_or_insert_with(IoLimits::default);
        io.weight = Some(weight);
        self
    }
    /// Sets memory.swap.max in bytes.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_swap_max(mut self, bytes: u64) -> Self {
        let m = self.memory.get_or_insert_with(MemoryLimits::default);
        m.swap_max = Some(bytes);
        self
    }
    /// Clears the swap limit; a no-op when no memory limits are configured
    /// (deliberately does not create a `MemoryLimits` entry).
    #[must_use = "builder methods consume self; bind the result"]
    pub fn memory_swap_unlimited(mut self) -> Self {
        if let Some(m) = self.memory.as_mut() {
            m.swap_max = None;
        }
        self
    }
    /// Sets pids.max.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn pids_max(mut self, n: u64) -> Self {
        let pids = self.pids.get_or_insert_with(PidsLimits::default);
        pids.max = Some(n);
        self
    }
    /// Clears pids.max (creating the `PidsLimits` entry if absent).
    #[must_use = "builder methods consume self; bind the result"]
    pub fn pids_unlimited(mut self) -> Self {
        let pids = self.pids.get_or_insert_with(PidsLimits::default);
        pids.max = None;
        self
    }
    /// Returns the effective workload specs: an empty `works` list becomes
    /// one default `WorkSpec`, then the per-cgroup defaults
    /// (nice/comm/uid/gid/numa_node) fill any field the `WorkSpec` left
    /// unset. Explicit `WorkSpec` values always win. Does not mutate `self`.
    pub fn merged_works(&self) -> Vec<WorkSpec> {
        let base: Vec<WorkSpec> = if self.works.is_empty() {
            vec![WorkSpec::default()]
        } else {
            self.works.clone()
        };
        base.into_iter()
            .map(|mut w| {
                if w.nice.is_none()
                    && let Some(n) = self.default_nice
                {
                    w.nice = Some(n);
                }
                if w.comm.is_none() {
                    w.comm = self.default_comm.clone();
                }
                if w.uid.is_none() {
                    w.uid = self.default_uid;
                }
                if w.gid.is_none() {
                    w.gid = self.default_gid;
                }
                if w.numa_node.is_none() {
                    w.numa_node = self.default_numa_node;
                }
                w
            })
            .collect()
    }
}
fn default_cpu_limits() -> CpuLimits {
CpuLimits {
max_quota_us: None,
max_period_us: 100_000,
weight: None,
}
}
impl Default for CgroupDef {
    /// An empty definition named "cg_0": no works, no cpuset, no payload,
    /// no resource limits, and no per-work defaults.
    fn default() -> Self {
        Self {
            name: Cow::Borrowed("cg_0"),
            works: Vec::new(),
            cpuset: None,
            cpuset_mems: None,
            swappable: false,
            payload: None,
            cpu: None,
            memory: None,
            io: None,
            pids: None,
            default_nice: None,
            default_comm: None,
            default_uid: None,
            default_gid: None,
            default_numa_node: None,
        }
    }
}
/// How a step's cgroups are produced: either a literal list of definitions
/// or a factory invoked with the scenario context at resolve time.
pub enum Setup {
    /// A fixed list of cgroup definitions.
    Defs(Vec<CgroupDef>),
    /// A function that builds the definitions from the live `Ctx`.
    Factory(fn(&Ctx) -> Vec<CgroupDef>),
}
impl Clone for Setup {
fn clone(&self) -> Self {
match self {
Setup::Defs(defs) => Setup::Defs(defs.clone()),
Setup::Factory(f) => Setup::Factory(*f),
}
}
}
impl std::fmt::Debug for Setup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Setup::Defs(defs) => f.debug_tuple("Defs").field(defs).finish(),
Setup::Factory(_) => f
.debug_tuple("Factory")
.field(&"fn(&Ctx) -> Vec<CgroupDef>")
.finish(),
}
}
}
impl Setup {
    /// Produces the concrete cgroup definitions for this setup, invoking
    /// the factory with `ctx` when one was supplied.
    pub(super) fn resolve(&self, ctx: &Ctx) -> Vec<CgroupDef> {
        match self {
            Self::Factory(factory) => factory(ctx),
            Self::Defs(defs) => defs.clone(),
        }
    }
    /// True only for an empty literal `Defs` list; a `Factory` is treated
    /// as non-empty because it cannot be inspected without a `Ctx`.
    pub(super) fn is_empty(&self) -> bool {
        matches!(self, Self::Defs(defs) if defs.is_empty())
    }
}
impl From<Vec<CgroupDef>> for Setup {
fn from(defs: Vec<CgroupDef>) -> Self {
Setup::Defs(defs)
}
}
/// One phase of a scenario: optional cgroup setup, a list of ops to run,
/// and a hold that keeps the phase alive afterwards.
#[derive(Clone, Debug)]
pub struct Step {
    /// Cgroups to create/resolve for this phase.
    pub setup: Setup,
    /// Ops executed during the phase.
    pub ops: Vec<Op>,
    /// How long to hold after setup/ops.
    pub hold: HoldSpec,
}
impl Step {
    /// A step with no cgroup setup: just `ops` followed by `hold`.
    #[must_use = "dropping a Step discards its ops and hold for that scenario phase"]
    pub fn new(ops: Vec<Op>, hold: HoldSpec) -> Self {
        Self {
            setup: Setup::Defs(Vec::new()),
            ops,
            hold,
        }
    }
    /// A step that creates `defs` and then holds; no explicit ops.
    #[must_use = "dropping a Step discards its CgroupDef setup and hold for that scenario phase"]
    pub fn with_defs(defs: Vec<CgroupDef>, hold: HoldSpec) -> Self {
        Self {
            setup: defs.into(),
            ops: Vec::new(),
            hold,
        }
    }
    /// Replaces this step's op list wholesale.
    #[must_use = "builder methods consume self; bind the result"]
    pub fn set_ops(mut self, ops: Vec<Op>) -> Self {
        self.ops = ops;
        self
    }
    /// A step whose only op launches `payload` (with no args, outside any
    /// cgroup), then holds.
    #[must_use = "dropping a Step discards its payload and hold for that scenario phase"]
    pub fn with_payload(payload: &'static crate::test_support::Payload, hold: HoldSpec) -> Self {
        Self::new(vec![Op::run_payload(payload, vec![])], hold)
    }
}
/// How long a step holds after its ops run.
#[derive(Clone, Debug)]
pub enum HoldSpec {
    /// A fraction of some scenario-level duration — TODO(review): confirm
    /// the base duration against the executor. Must be finite and > 0.
    Frac(f64),
    /// A fixed wall-clock duration.
    Fixed(Duration),
    /// Hold indefinitely, re-checking the deadline every `interval`.
    Loop { interval: Duration },
}
impl HoldSpec {
    /// Hold for the full fractional duration (`Frac(1.0)`).
    pub const FULL: HoldSpec = HoldSpec::Frac(1.0);
    /// Rejects specs that would panic (non-finite/non-positive fractions in
    /// `Duration::from_secs_f64`) or busy-spin (zero loop interval) when
    /// the hold is executed.
    pub fn validate(&self) -> std::result::Result<(), String> {
        match self {
            HoldSpec::Fixed(_) => Ok(()),
            HoldSpec::Frac(f) => {
                if !f.is_finite() {
                    Err(format!(
                        "HoldSpec::Frac({f}) is not finite (NaN/Inf) — would panic in Duration::from_secs_f64"
                    ))
                } else if *f <= 0.0 {
                    Err(format!(
                        "HoldSpec::Frac({f}) must be > 0.0; negative values panic in Duration::from_secs_f64 and zero is vacuous"
                    ))
                } else {
                    Ok(())
                }
            }
            HoldSpec::Loop { interval } => {
                if interval.is_zero() {
                    Err(
                        "HoldSpec::Loop { interval: Duration::ZERO } would busy-spin the deadline check without yielding; use a non-zero interval"
                            .into(),
                    )
                } else {
                    Ok(())
                }
            }
        }
    }
}
impl Op {
    /// Stable bit index identifying this op's kind; delegates to
    /// `OpKind::bit_index` via the strum-derived `From<&Op> for OpKind`.
    pub(super) fn discriminant(&self) -> u32 {
        OpKind::from(self).bit_index()
    }
}
impl OpKind {
    /// Maps each op kind to a fixed bit position (0..=16).
    ///
    /// The mapping is spelled out as an exhaustive match (rather than a
    /// cast of the discriminant) so adding a new `Op` variant forces a
    /// compile error here and the existing indices stay explicit.
    pub(super) fn bit_index(self) -> u32 {
        match self {
            OpKind::AddCgroup => 0,
            OpKind::RemoveCgroup => 1,
            OpKind::SetCpuset => 2,
            OpKind::ClearCpuset => 3,
            OpKind::SwapCpusets => 4,
            OpKind::Spawn => 5,
            OpKind::StopCgroup => 6,
            OpKind::SetAffinity => 7,
            OpKind::SpawnHost => 8,
            OpKind::MoveAllTasks => 9,
            OpKind::RunPayload => 10,
            OpKind::WaitPayload => 11,
            OpKind::KillPayload => 12,
            OpKind::FreezeCgroup => 13,
            OpKind::UnfreezeCgroup => 14,
            OpKind::Snapshot => 15,
            OpKind::WatchSnapshot => 16,
        }
    }
}
impl Op {
    /// Op that creates a cgroup named `name`.
    pub fn add_cgroup(name: impl Into<Cow<'static, str>>) -> Self {
        Self::AddCgroup { name: name.into() }
    }
    /// Op that removes `cgroup`.
    pub fn remove_cgroup(cgroup: impl Into<Cow<'static, str>>) -> Self {
        Self::RemoveCgroup { cgroup: cgroup.into() }
    }
    /// Op that assigns `cpus` to `cgroup`.
    pub fn set_cpuset(cgroup: impl Into<Cow<'static, str>>, cpus: CpusetSpec) -> Self {
        Self::SetCpuset { cgroup: cgroup.into(), cpus }
    }
    /// Op that clears `cgroup`'s cpuset.
    pub fn clear_cpuset(cgroup: impl Into<Cow<'static, str>>) -> Self {
        Self::ClearCpuset { cgroup: cgroup.into() }
    }
    /// Op that exchanges the cpusets of cgroups `a` and `b`.
    pub fn swap_cpusets(a: impl Into<Cow<'static, str>>, b: impl Into<Cow<'static, str>>) -> Self {
        Self::SwapCpusets { a: a.into(), b: b.into() }
    }
    /// Op that starts `work` inside `cgroup`.
    pub fn spawn(cgroup: impl Into<Cow<'static, str>>, work: WorkSpec) -> Self {
        Self::Spawn { cgroup: cgroup.into(), work }
    }
    /// Op that stops the work in `cgroup`.
    pub fn stop_cgroup(cgroup: impl Into<Cow<'static, str>>) -> Self {
        Self::StopCgroup { cgroup: cgroup.into() }
    }
    /// Op that applies `affinity` to `cgroup`.
    pub fn set_affinity(cgroup: impl Into<Cow<'static, str>>, affinity: AffinityIntent) -> Self {
        Self::SetAffinity { cgroup: cgroup.into(), affinity }
    }
    /// Op that starts `work` on the host, outside any managed cgroup.
    pub fn spawn_host(work: WorkSpec) -> Self {
        Self::SpawnHost { work }
    }
    /// Op that moves every task in `from` into `to`.
    pub fn move_all_tasks(
        from: impl Into<Cow<'static, str>>,
        to: impl Into<Cow<'static, str>>,
    ) -> Self {
        Self::MoveAllTasks { from: from.into(), to: to.into() }
    }
    /// Op that launches `payload` with `args`, not tied to any cgroup.
    pub fn run_payload(payload: &'static crate::test_support::Payload, args: Vec<String>) -> Self {
        Self::RunPayload { payload, args, cgroup: None }
    }
    /// Op that launches `payload` with `args` inside `cgroup`.
    pub fn run_payload_in_cgroup(
        payload: &'static crate::test_support::Payload,
        args: Vec<String>,
        cgroup: impl Into<Cow<'static, str>>,
    ) -> Self {
        Self::RunPayload { payload, args, cgroup: Some(cgroup.into()) }
    }
    /// Op that waits on payload `name`.
    pub fn wait_payload(name: impl Into<Cow<'static, str>>) -> Self {
        Self::WaitPayload { name: name.into(), cgroup: None }
    }
    /// Op that waits on payload `name`, scoped to `cgroup`.
    pub fn wait_payload_in_cgroup(
        name: impl Into<Cow<'static, str>>,
        cgroup: impl Into<Cow<'static, str>>,
    ) -> Self {
        Self::WaitPayload { name: name.into(), cgroup: Some(cgroup.into()) }
    }
    /// Op that kills payload `name`.
    pub fn kill_payload(name: impl Into<Cow<'static, str>>) -> Self {
        Self::KillPayload { name: name.into(), cgroup: None }
    }
    /// Op that kills payload `name`, scoped to `cgroup`.
    pub fn kill_payload_in_cgroup(
        name: impl Into<Cow<'static, str>>,
        cgroup: impl Into<Cow<'static, str>>,
    ) -> Self {
        Self::KillPayload { name: name.into(), cgroup: Some(cgroup.into()) }
    }
    /// Op that freezes `cgroup`.
    pub fn freeze_cgroup(cgroup: impl Into<Cow<'static, str>>) -> Self {
        Self::FreezeCgroup { cgroup: cgroup.into() }
    }
    /// Op that unfreezes `cgroup`.
    pub fn unfreeze_cgroup(cgroup: impl Into<Cow<'static, str>>) -> Self {
        Self::UnfreezeCgroup { cgroup: cgroup.into() }
    }
    /// Op that records a snapshot under `name`.
    pub fn snapshot(name: impl Into<Cow<'static, str>>) -> Self {
        Self::Snapshot { name: name.into() }
    }
    /// Op that watches snapshot `symbol`.
    pub fn watch_snapshot(symbol: impl Into<Cow<'static, str>>) -> Self {
        Self::WatchSnapshot { symbol: symbol.into() }
    }
}
impl CpusetSpec {
    /// Validates this spec against the topology in `ctx`.
    ///
    /// Returns a descriptive error for anything `resolve` could only handle
    /// by clamping (out-of-range indices, bad fractions) or that would
    /// produce an unusable cpuset (empty or out-of-topology `Exact` sets).
    /// Arm order matters: earlier guards shadow later ones for the same
    /// variant.
    pub fn validate(&self, ctx: &Ctx) -> std::result::Result<(), String> {
        let usable = ctx.topo.usable_cpus();
        match self {
            CpusetSpec::Llc(idx) if *idx >= ctx.topo.num_llcs() => Err(format!(
                "Llc({idx}) out of range: topology has {} LLCs",
                ctx.topo.num_llcs()
            )),
            CpusetSpec::Numa(node) if *node >= ctx.topo.num_numa_nodes() => Err(format!(
                "Numa({node}) out of range: topology has {} NUMA nodes",
                ctx.topo.num_numa_nodes()
            )),
            CpusetSpec::Disjoint { of, .. } | CpusetSpec::Overlap { of, .. } if *of == 0 => {
                Err("partition count (of) must be > 0".into())
            }
            CpusetSpec::Disjoint { index, of, .. } | CpusetSpec::Overlap { index, of, .. }
                if *index >= *of =>
            {
                Err(format!("index {index} >= partition count {of}"))
            }
            CpusetSpec::Range {
                start_frac,
                end_frac,
            } if !start_frac.is_finite() || !end_frac.is_finite() => Err(format!(
                "Range start_frac ({start_frac}) or end_frac ({end_frac}) is not finite"
            )),
            // Combined with the start >= end guard below, this also rejects
            // start_frac > 1.0 and end_frac < 0.0.
            CpusetSpec::Range {
                start_frac,
                end_frac,
            } if *start_frac < 0.0 || *end_frac > 1.0 => Err(format!(
                "Range fracs must lie in [0.0, 1.0]: start_frac={start_frac}, end_frac={end_frac}"
            )),
            CpusetSpec::Range {
                start_frac,
                end_frac,
            } if start_frac >= end_frac => Err(format!(
                "Range start_frac ({start_frac}) >= end_frac ({end_frac})"
            )),
            CpusetSpec::Overlap { frac, .. } if !frac.is_finite() => {
                Err(format!("Overlap frac ({frac}) is not finite"))
            }
            CpusetSpec::Overlap { frac, .. } if *frac < 0.0 || *frac > 1.0 => {
                Err(format!("Overlap frac ({frac}) must lie in [0.0, 1.0]"))
            }
            // Every partition must be able to receive at least one CPU.
            CpusetSpec::Disjoint { of, .. } | CpusetSpec::Overlap { of, .. }
                if usable.len() < *of =>
            {
                Err(format!(
                    "not enough usable CPUs ({}) for {} partitions",
                    usable.len(),
                    of
                ))
            }
            CpusetSpec::Exact(cpus) if cpus.is_empty() => {
                Err("CpusetSpec::Exact(empty) would assign no CPUs to the \
                 cgroup; cpuset.cpus rejects an empty mask and the \
                 cgroup would become unschedulable"
                    .into())
            }
            CpusetSpec::Exact(cpus) => {
                // Exact sets are checked against the full physical topology
                // (all_cpuset), not just the usable subset.
                let all = ctx.topo.all_cpuset();
                let missing: Vec<usize> =
                    cpus.iter().copied().filter(|c| !all.contains(c)).collect();
                if !missing.is_empty() {
                    return Err(format!(
                        "CpusetSpec::Exact contains CPU(s) {missing:?} \
                 outside the topology's physical CPU set (max \
                 CPU index: {}); writing them to cpuset.cpus \
                 would fail with EINVAL",
                        all.iter().next_back().copied().unwrap_or(0),
                    ));
                }
                Ok(())
            }
            _ => Ok(()),
        }
    }
    /// Resolves this spec into a concrete CPU id set using `ctx`'s topology.
    ///
    /// Unlike `validate`, bad inputs are tolerated here: out-of-range
    /// Llc/Numa indices are clamped (with a warning), non-finite fractions
    /// fall back to 0.0, and slice bounds are clamped to the usable-CPU
    /// count so indexing cannot go out of range.
    pub fn resolve(&self, ctx: &Ctx) -> BTreeSet<usize> {
        let usable = ctx.topo.usable_cpus();
        match self {
            CpusetSpec::Llc(idx) => {
                if *idx >= ctx.topo.num_llcs() {
                    // Degrade to the last LLC rather than panicking.
                    let clamped = ctx.topo.num_llcs().saturating_sub(1);
                    tracing::warn!(
                        llc_idx = idx,
                        num_llcs = ctx.topo.num_llcs(),
                        clamped,
                        "CpusetSpec::Llc index out of range, clamping",
                    );
                    ctx.topo.llc_aligned_cpuset(clamped)
                } else {
                    ctx.topo.llc_aligned_cpuset(*idx)
                }
            }
            CpusetSpec::Numa(idx) => {
                if *idx >= ctx.topo.num_numa_nodes() {
                    // Degrade to the last NUMA node rather than panicking.
                    let clamped = ctx.topo.num_numa_nodes().saturating_sub(1);
                    tracing::warn!(
                        numa_node = idx,
                        num_numa_nodes = ctx.topo.num_numa_nodes(),
                        clamped,
                        "CpusetSpec::Numa index out of range, clamping",
                    );
                    ctx.topo.numa_aligned_cpuset(clamped)
                } else {
                    ctx.topo.numa_aligned_cpuset(*idx)
                }
            }
            CpusetSpec::Range {
                start_frac,
                end_frac,
            } => {
                let len = usable.len();
                // Non-finite fractions are treated as 0.0 (yields an empty
                // or truncated range instead of a panic).
                let sf = if start_frac.is_finite() {
                    *start_frac
                } else {
                    0.0
                };
                let ef = if end_frac.is_finite() { *end_frac } else { 0.0 };
                let start = (len as f64 * sf) as usize;
                let end = (len as f64 * ef) as usize;
                // Clamp to the slice and force s <= e so usable[s..e]
                // cannot panic.
                let s = start.min(len);
                let e = end.min(len).max(s);
                usable[s..e].iter().copied().collect()
            }
            CpusetSpec::Disjoint { index, of } => {
                if *of == 0 {
                    tracing::warn!("CpusetSpec::Disjoint with of=0 — returning empty cpuset");
                    return BTreeSet::new();
                }
                let chunk = usable.len() / of;
                let start = index * chunk;
                // The last partition absorbs the division remainder.
                let end = if *index == of - 1 {
                    usable.len()
                } else {
                    (index + 1) * chunk
                };
                let s = start.min(usable.len());
                let e = end.min(usable.len()).max(s);
                usable[s..e].iter().copied().collect()
            }
            CpusetSpec::Overlap { index, of, frac } => {
                if *of == 0 {
                    tracing::warn!("CpusetSpec::Overlap with of=0 — returning empty cpuset");
                    return BTreeSet::new();
                }
                let chunk = usable.len() / of;
                let frac = if frac.is_finite() {
                    frac.clamp(0.0, 1.0)
                } else {
                    0.0
                };
                // Widen each chunk by `overlap` CPUs on both sides, except
                // at the ends of the usable range.
                let overlap = (chunk as f64 * frac) as usize;
                let start = if *index == 0 {
                    0
                } else {
                    (index * chunk).saturating_sub(overlap)
                };
                let end = if *index == of - 1 {
                    usable.len()
                } else {
                    ((index + 1) * chunk + overlap).min(usable.len())
                };
                let s = start.min(usable.len());
                let e = end.min(usable.len()).max(s);
                usable[s..e].iter().copied().collect()
            }
            CpusetSpec::Exact(cpus) => cpus.clone(),
        }
    }
}
#[cfg(test)]
mod cgroup_def_default_tests {
    // Tests for CgroupDef's default-merging contract: per-cgroup defaults
    // (nice/comm/uid/gid/numa_node) apply regardless of builder-call order,
    // never override values a WorkSpec sets explicitly, and merged_works()
    // is a pure read that never mutates the definition. pcomm() is a
    // fan-out over existing WorkSpecs, not a merge-if-unset default.
    use super::*;
    use crate::workload::WorkSpec;
    #[test]
    fn merged_works_nice_order_independent() {
        let pre = CgroupDef::named("cg").nice(7).work(WorkSpec::default());
        let post = CgroupDef::named("cg").work(WorkSpec::default()).nice(7);
        assert_eq!(pre.merged_works()[0].nice, Some(7));
        assert_eq!(post.merged_works()[0].nice, Some(7));
    }
    #[test]
    fn merged_works_comm_order_independent() {
        let pre = CgroupDef::named("cg").comm("hot").work(WorkSpec::default());
        let post = CgroupDef::named("cg").work(WorkSpec::default()).comm("hot");
        assert_eq!(pre.merged_works()[0].comm.as_deref(), Some("hot"));
        assert_eq!(post.merged_works()[0].comm.as_deref(), Some("hot"));
    }
    #[test]
    fn merged_works_uid_order_independent() {
        let pre = CgroupDef::named("cg").uid(1234).work(WorkSpec::default());
        let post = CgroupDef::named("cg").work(WorkSpec::default()).uid(1234);
        assert_eq!(pre.merged_works()[0].uid, Some(1234));
        assert_eq!(post.merged_works()[0].uid, Some(1234));
    }
    #[test]
    fn merged_works_gid_order_independent() {
        let pre = CgroupDef::named("cg").gid(4321).work(WorkSpec::default());
        let post = CgroupDef::named("cg").work(WorkSpec::default()).gid(4321);
        assert_eq!(pre.merged_works()[0].gid, Some(4321));
        assert_eq!(post.merged_works()[0].gid, Some(4321));
    }
    #[test]
    fn merged_works_numa_node_order_independent() {
        let pre = CgroupDef::named("cg")
            .numa_node(2)
            .work(WorkSpec::default());
        let post = CgroupDef::named("cg")
            .work(WorkSpec::default())
            .numa_node(2);
        assert_eq!(pre.merged_works()[0].numa_node, Some(2));
        assert_eq!(post.merged_works()[0].numa_node, Some(2));
    }
    #[test]
    fn merged_works_workspec_overrides_default() {
        // Explicit WorkSpec values must win over every per-cgroup default.
        let spec = WorkSpec::default()
            .nice(3)
            .comm("override")
            .uid(11)
            .gid(22)
            .numa_node(5);
        let def = CgroupDef::named("cg")
            .nice(7)
            .comm("default")
            .uid(99)
            .gid(88)
            .numa_node(0)
            .work(spec);
        let merged = def.merged_works();
        assert_eq!(merged.len(), 1);
        let w = &merged[0];
        assert_eq!(w.nice, Some(3), "WorkSpec nice must beat default_nice");
        assert_eq!(
            w.comm.as_deref(),
            Some("override"),
            "WorkSpec comm must beat default_comm",
        );
        assert_eq!(w.uid, Some(11), "WorkSpec uid must beat default_uid");
        assert_eq!(w.gid, Some(22), "WorkSpec gid must beat default_gid");
        assert_eq!(
            w.numa_node,
            Some(5),
            "WorkSpec numa_node must beat default_numa_node",
        );
    }
    #[test]
    fn merged_works_workspec_nice_some_zero_opts_out_of_default() {
        // Some(0) is a deliberate setting, not "unset".
        let spec = WorkSpec::default().nice(0);
        let def = CgroupDef::named("cg").nice(7).work(spec);
        let merged = def.merged_works();
        assert_eq!(
            merged[0].nice,
            Some(0),
            "Some(0) must opt out of cgroup default nice(7)"
        );
    }
    #[test]
    fn pcomm_after_work_overrides() {
        let spec = WorkSpec::default().pcomm("explicit");
        let def = CgroupDef::named("cg").work(spec).pcomm("forced");
        let works = def.merged_works();
        assert_eq!(works.len(), 1);
        assert_eq!(
            works[0].pcomm.as_deref(),
            Some("forced"),
            "pcomm() after work() must overwrite the WorkSpec's own pcomm \
             (the convenience method is a fan-out, not a merge-if-unset \
             default)",
        );
    }
    #[test]
    fn pcomm_then_work_appends() {
        let extra = WorkSpec::default().pcomm("appended");
        let def = CgroupDef::named("cg").pcomm("initial").work(extra);
        let works = def.merged_works();
        assert_eq!(works.len(), 2, "pcomm() pushes one default + work() one");
        assert_eq!(
            works[0].pcomm.as_deref(),
            Some("initial"),
            "pcomm() before any work() pushes a default WorkSpec carrying \
             the pcomm value",
        );
        assert_eq!(
            works[1].pcomm.as_deref(),
            Some("appended"),
            "WorkSpec appended after pcomm() keeps its own pcomm — \
             pcomm() only writes to WorkSpecs that exist at call time",
        );
    }
    #[test]
    fn merged_works_empty_works_substitutes_default() {
        let def = CgroupDef::named("cg")
            .nice(11)
            .comm("default")
            .uid(7)
            .gid(8)
            .numa_node(1);
        let works = def.merged_works();
        assert_eq!(
            works.len(),
            1,
            "empty works must yield exactly one default WorkSpec"
        );
        let w = &works[0];
        assert_eq!(w.nice, Some(11));
        assert_eq!(w.comm.as_deref(), Some("default"));
        assert_eq!(w.uid, Some(7));
        assert_eq!(w.gid, Some(8));
        assert_eq!(w.numa_node, Some(1));
    }
    #[test]
    fn merged_works_does_not_mutate_self() {
        let def = CgroupDef::named("cg")
            .nice(5)
            .comm("named")
            .uid(101)
            .gid(202)
            .numa_node(3)
            .work(WorkSpec::default());
        let first = def.merged_works();
        let second = def.merged_works();
        assert_eq!(first.len(), second.len());
        assert_eq!(first.len(), 1);
        let a = &first[0];
        let b = &second[0];
        assert_eq!(a.nice, b.nice);
        assert_eq!(a.comm, b.comm);
        assert_eq!(a.uid, b.uid);
        assert_eq!(a.gid, b.gid);
        assert_eq!(a.numa_node, b.numa_node);
        assert_eq!(def.works.len(), 1);
        assert_eq!(def.default_nice, Some(5));
        assert_eq!(def.default_comm.as_deref(), Some("named"));
        assert_eq!(def.default_uid, Some(101));
        assert_eq!(def.default_gid, Some(202));
        assert_eq!(def.default_numa_node, Some(3));
    }
}