use anyhow::{Context, Result};
/// Error signalling that the requested host CPUs/LLCs are currently held
/// by another process; callers may retry with different resources.
#[derive(Debug)]
pub struct ResourceContention {
    pub reason: String,
}

impl std::fmt::Display for ResourceContention {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The reason string is already human-readable; emit it verbatim.
        f.write_str(&self.reason)
    }
}

impl std::error::Error for ResourceContention {}
/// One last-level-cache (LLC) domain: the set of host CPU ids that share it.
#[derive(Debug, Clone)]
pub struct LlcGroup {
    /// Host CPU ids belonging to this LLC group.
    pub cpus: Vec<usize>,
}
/// Snapshot of the host CPU layout used for vCPU pinning decisions.
#[derive(Debug, Clone)]
pub struct HostTopology {
    /// LLC domains discovered on the host, in discovery order.
    pub llc_groups: Vec<LlcGroup>,
    /// All online host CPU ids.
    pub online_cpus: Vec<usize>,
    /// CPU id -> NUMA node id; CPUs with no discoverable node are absent.
    pub cpu_to_node: std::collections::HashMap<usize, usize>,
}
/// Result of a pinning computation: where each vCPU runs on the host.
#[derive(Debug)]
pub struct PinningPlan {
    /// (vCPU id, host CPU id) pairs, one per vCPU, in vCPU-id order.
    pub assignments: Vec<(u32, usize)>,
    /// Host CPU reserved for non-vCPU service threads, if one was requested.
    pub service_cpu: Option<usize>,
    /// flock fds held for the plan's lifetime; dropping them releases the
    /// corresponding CPU/LLC reservations.
    #[allow(dead_code)]
    pub(crate) locks: Vec<std::os::fd::OwnedFd>,
}
impl HostTopology {
    /// Reads the live host topology (online CPUs, LLC groups, CPU→NUMA map)
    /// from sysfs via the crate's topology reader.
    pub fn from_sysfs() -> Result<Self> {
        let topo = crate::topology::TestTopology::from_system()
            .context("read host topology from sysfs")?;
        let online_cpus = topo.all_cpus().to_vec();
        let llc_groups = topo
            .llcs()
            .iter()
            .map(|llc| LlcGroup {
                cpus: llc.cpus().to_vec(),
            })
            .collect();
        // NUMA discovery is best-effort: CPUs with no node entry are absent.
        let cpu_to_node = discover_cpu_numa_nodes(&online_cpus);
        Ok(Self {
            llc_groups,
            online_cpus,
            cpu_to_node,
        })
    }

    /// CPU count of the largest LLC group (0 when there are none).
    pub fn max_cores_per_llc(&self) -> usize {
        self.llc_groups
            .iter()
            .map(|g| g.cpus.len())
            .max()
            .unwrap_or(0)
    }

    /// Sorted, deduplicated NUMA node ids covering `cpus`; CPUs missing from
    /// the map are treated as node 0.
    pub fn numa_nodes_for_cpus(&self, cpus: &[usize]) -> Vec<usize> {
        let mut nodes: Vec<usize> = cpus
            .iter()
            .map(|c| self.cpu_to_node.get(c).copied().unwrap_or(0))
            .collect();
        nodes.sort_unstable();
        nodes.dedup();
        nodes
    }

    /// Number of online host CPUs.
    pub fn total_cpus(&self) -> usize {
        self.online_cpus.len()
    }

    /// Computes a vCPU → host-CPU pinning for a guest with `sockets` virtual
    /// sockets of `cores` cores × `threads` threads each.
    ///
    /// Each virtual socket is placed entirely inside one host LLC group
    /// (round-robin starting at `llc_offset`), so the vCPUs of a socket
    /// share a cache. When `reserve_service_cpu` is set, one additional host
    /// CPU outside the vCPU set is reserved for service threads.
    ///
    /// # Errors
    /// Fails when the host lacks enough CPUs overall, has fewer LLC groups
    /// than requested sockets, or some LLC group is too small for one socket.
    pub fn compute_pinning(
        &self,
        sockets: u32,
        cores: u32,
        threads: u32,
        reserve_service_cpu: bool,
        llc_offset: usize,
    ) -> Result<PinningPlan> {
        let vcpus_per_socket = cores * threads;
        let total_vcpus = sockets * vcpus_per_socket;
        let total_needed = total_vcpus as usize + if reserve_service_cpu { 1 } else { 0 };
        anyhow::ensure!(
            total_needed <= self.total_cpus(),
            "performance_mode: need {} CPUs ({} vCPUs + {} service) \
             but only {} host CPUs available",
            total_needed,
            total_vcpus,
            if reserve_service_cpu { 1 } else { 0 },
            self.total_cpus(),
        );
        let num_llcs = self.llc_groups.len();
        anyhow::ensure!(
            sockets as usize <= num_llcs,
            "performance_mode: need {} LLCs for {} virtual sockets, \
             but host has {} LLC groups",
            sockets,
            sockets,
            num_llcs,
        );
        let mut assignments = Vec::with_capacity(total_vcpus as usize);
        let mut used_cpus = std::collections::HashSet::new();
        for socket in 0..sockets {
            // Round-robin virtual sockets over LLC groups from `llc_offset`.
            let llc_idx = (socket as usize + llc_offset) % num_llcs;
            let llc = &self.llc_groups[llc_idx];
            let available: Vec<usize> = llc
                .cpus
                .iter()
                .copied()
                .filter(|c| !used_cpus.contains(c))
                .collect();
            anyhow::ensure!(
                available.len() >= vcpus_per_socket as usize,
                "performance_mode: LLC group {} has {} available CPUs, \
                 need {} for virtual socket {}",
                llc_idx,
                available.len(),
                vcpus_per_socket,
                socket,
            );
            for vcpu_in_socket in 0..vcpus_per_socket {
                // vCPU ids are sequential across sockets: socket s owns ids
                // [s * vcpus_per_socket, (s + 1) * vcpus_per_socket).
                let vcpu_id = socket * vcpus_per_socket + vcpu_in_socket;
                let host_cpu = available[vcpu_in_socket as usize];
                used_cpus.insert(host_cpu);
                assignments.push((vcpu_id, host_cpu));
            }
        }
        let service_cpu = if reserve_service_cpu {
            // First online CPU not claimed by a vCPU. Note it may still share
            // an LLC with the guest; only vCPU-exclusivity is guaranteed.
            let cpu = self
                .online_cpus
                .iter()
                .copied()
                .find(|c| !used_cpus.contains(c));
            anyhow::ensure!(
                cpu.is_some(),
                "performance_mode: no free host CPU for service threads \
                 after assigning {} vCPUs",
                total_vcpus,
            );
            cpu
        } else {
            None
        };
        Ok(PinningPlan {
            assignments,
            service_cpu,
            locks: Vec::new(),
        })
    }
}
/// How LLC-level lock files are taken when reserving resources.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LlcLockMode {
    /// Sole ownership of the whole LLC (no per-CPU locks are taken).
    Exclusive,
    /// LLC may be shared; individual CPUs get exclusive locks instead.
    Shared,
}
/// Outcome of a resource-lock attempt: either every lock was taken, or a
/// human-readable reason why they could not be.
#[derive(Debug)]
pub enum LockOutcome {
    /// All requested locks are held.
    Acquired {
        /// First entry of the requested LLC indices (0 when empty).
        llc_offset: usize,
        /// Held lock fds; dropping them releases the locks.
        locks: Vec<std::os::fd::OwnedFd>,
    },
    /// Locks could not be taken; the string names the busy resource.
    Unavailable(String),
}
fn try_flock(path: &str, mode: i32) -> Result<Option<std::os::fd::OwnedFd>> {
use std::os::fd::FromRawFd;
let cpath = std::ffi::CString::new(path).unwrap();
let fd = unsafe {
libc::open(
cpath.as_ptr(),
libc::O_CREAT | libc::O_RDWR,
0o666 as libc::mode_t,
)
};
if fd < 0 {
let err = std::io::Error::last_os_error();
anyhow::bail!("open {path}: {err}");
}
let rc = unsafe { libc::flock(fd, mode | libc::LOCK_NB) };
if rc == 0 {
Ok(Some(unsafe { std::os::fd::OwnedFd::from_raw_fd(fd) }))
} else {
let err = std::io::Error::last_os_error();
unsafe { libc::close(fd) };
if err.raw_os_error() == Some(libc::EWOULDBLOCK) {
Ok(None)
} else {
anyhow::bail!("flock {path}: {err}");
}
}
}
/// Attempts to take every lock required by `plan`: one lock per LLC in
/// `llc_indices` (mode per `llc_mode`) plus, in shared mode, per-CPU locks.
/// Contention is reported as `LockOutcome::Unavailable`, never as `Err`.
pub fn acquire_resource_locks(
    plan: &PinningPlan,
    llc_indices: &[usize],
    llc_mode: LlcLockMode,
) -> Result<LockOutcome> {
    let outcome = match try_acquire_all(plan, llc_indices, llc_mode) {
        Err(reason) => LockOutcome::Unavailable(reason),
        Ok(locks) => {
            let llc_offset = llc_indices.first().copied().unwrap_or(0);
            LockOutcome::Acquired { llc_offset, locks }
        }
    };
    Ok(outcome)
}
/// Lock-acquisition worker for [`acquire_resource_locks`]: returns the held
/// fds on success or a human-readable contention reason on failure. Locks
/// taken before a failure are dropped (released) automatically.
fn try_acquire_all(
    plan: &PinningPlan,
    llc_indices: &[usize],
    llc_mode: LlcLockMode,
) -> std::result::Result<Vec<std::os::fd::OwnedFd>, String> {
    let flock_mode = if llc_mode == LlcLockMode::Exclusive {
        libc::LOCK_EX
    } else {
        libc::LOCK_SH
    };
    let mut acquired = Vec::new();
    // One lock file per LLC group, shared or exclusive as requested.
    for &llc_idx in llc_indices {
        let path = format!("/tmp/ktstr-llc-{llc_idx}.lock");
        match try_flock(&path, flock_mode) {
            Ok(Some(fd)) => acquired.push(fd),
            Ok(None) => return Err(format!("LLC {llc_idx} busy")),
            Err(e) => return Err(format!("LLC {llc_idx}: {e}")),
        }
    }
    // An exclusive LLC lock already covers every CPU in the group; only in
    // shared mode must each CPU additionally be locked exclusively.
    if llc_mode != LlcLockMode::Exclusive {
        let mut cpu_targets: Vec<(usize, &str)> = plan
            .assignments
            .iter()
            .map(|&(_vcpu, host_cpu)| (host_cpu, "CPU"))
            .collect();
        if let Some(cpu) = plan.service_cpu {
            cpu_targets.push((cpu, "service CPU"));
        }
        for (cpu, label) in cpu_targets {
            let path = format!("/tmp/ktstr-cpu-{cpu}.lock");
            match try_flock(&path, libc::LOCK_EX) {
                Ok(Some(fd)) => acquired.push(fd),
                Ok(None) => return Err(format!("{label} {cpu} busy")),
                Err(e) => return Err(format!("{label} {cpu}: {e}")),
            }
        }
    }
    Ok(acquired)
}
/// Locks `count` consecutive host CPUs (by CPU id), scanning candidate
/// windows left to right; when topology is provided, shared LLC locks for
/// the window are taken too. Returns a `ResourceContention` error when no
/// window can be fully locked.
pub fn acquire_cpu_locks(
    count: usize,
    total_host_cpus: usize,
    host_topo: Option<&HostTopology>,
) -> Result<Vec<std::os::fd::OwnedFd>> {
    if count == 0 {
        return Ok(Vec::new());
    }
    if count <= total_host_cpus {
        for offset in 0..=(total_host_cpus - count) {
            let Ok(mut window) = try_acquire_cpu_window(offset, count) else {
                continue;
            };
            if let Some(topo) = host_topo {
                let cpus: Vec<usize> = (offset..offset + count).collect();
                match acquire_llc_shared_locks(topo, &cpus) {
                    Ok(llc_locks) => window.extend(llc_locks),
                    // `window` is dropped here, releasing the CPU locks
                    // before we try the next candidate offset.
                    Err(_) => continue,
                }
            }
            return Ok(window);
        }
    }
    Err(anyhow::Error::new(ResourceContention {
        reason: format!("no {count} consecutive CPUs available"),
    }))
}
/// Takes a shared flock on every LLC group containing any CPU in `cpus`,
/// preserving first-seen discovery order (lock order is stable across
/// processes). A shared lock fails only if the LLC is held exclusively.
fn acquire_llc_shared_locks(
    topo: &HostTopology,
    cpus: &[usize],
) -> std::result::Result<Vec<std::os::fd::OwnedFd>, String> {
    // Distinct LLC indices touched by `cpus`, in first-seen order.
    let mut wanted: Vec<usize> = Vec::new();
    for &cpu in cpus {
        let hits = topo
            .llc_groups
            .iter()
            .enumerate()
            .filter(|(_, group)| group.cpus.contains(&cpu))
            .map(|(idx, _)| idx);
        for idx in hits {
            if !wanted.contains(&idx) {
                wanted.push(idx);
            }
        }
    }
    let mut held = Vec::new();
    for &llc_idx in &wanted {
        let path = format!("/tmp/ktstr-llc-{llc_idx}.lock");
        match try_flock(&path, libc::LOCK_SH) {
            Ok(Some(fd)) => held.push(fd),
            Ok(None) => return Err(format!("LLC {llc_idx} exclusively held")),
            Err(e) => return Err(format!("LLC {llc_idx}: {e}")),
        }
    }
    Ok(held)
}
/// Exclusively locks CPUs `offset..offset + count`. On the first busy CPU or
/// lock error, every lock acquired so far is dropped (released) and the
/// reason is returned.
fn try_acquire_cpu_window(
    offset: usize,
    count: usize,
) -> std::result::Result<Vec<std::os::fd::OwnedFd>, String> {
    // Collecting into Result short-circuits at the first failure, dropping
    // (and thereby releasing) any fds already acquired.
    (offset..offset + count)
        .map(|cpu| {
            let path = format!("/tmp/ktstr-cpu-{cpu}.lock");
            match try_flock(&path, libc::LOCK_EX) {
                Ok(Some(fd)) => Ok(fd),
                Ok(None) => Err(format!("CPU {cpu} busy")),
                Err(e) => Err(format!("CPU {cpu}: {e}")),
            }
        })
        .collect()
}
/// Builds a CPU → NUMA-node map by scanning each CPU's sysfs directory for a
/// `node<N>` entry. CPUs whose directory cannot be read, or that have no
/// parsable node entry, are simply omitted from the map.
fn discover_cpu_numa_nodes(online_cpus: &[usize]) -> std::collections::HashMap<usize, usize> {
    let mut map = std::collections::HashMap::new();
    for &cpu in online_cpus {
        let cpu_dir = format!("/sys/devices/system/cpu/cpu{cpu}");
        let Ok(entries) = std::fs::read_dir(&cpu_dir) else {
            continue;
        };
        // First entry named "node<id>" identifies this CPU's NUMA node.
        let node = entries.flatten().find_map(|entry| {
            entry
                .file_name()
                .to_string_lossy()
                .strip_prefix("node")?
                .parse::<usize>()
                .ok()
        });
        if let Some(node_id) = node {
            map.insert(cpu, node_id);
        }
    }
    map
}
/// Best-effort binding of the memory range `[addr, addr + len)` to the given
/// NUMA `nodes` via the raw `mbind(2)` syscall with a strict bind policy.
/// Failures are logged to stderr and never propagated.
pub fn mbind_to_nodes(addr: *mut u8, len: usize, nodes: &[usize]) {
    if nodes.is_empty() || len == 0 {
        return;
    }
    let max_node = nodes.iter().copied().max().unwrap_or(0);
    // maxnode is sized one past the highest node plus one spare bit
    // (presumably to satisfy the kernel's exclusive-bound handling of the
    // maxnode argument — see mbind(2); TODO confirm the exact requirement).
    let mask_bits = max_node + 2;
    let mask_longs = mask_bits.div_ceil(usize::BITS as usize);
    let mut nodemask = vec![0usize; mask_longs];
    for &node in nodes {
        nodemask[node / (usize::BITS as usize)] |= 1 << (node % (usize::BITS as usize));
    }
    // MPOL_BIND: allocate strictly from the listed nodes.
    const MPOL_BIND: i32 = 2;
    // SAFETY: relies on the caller passing a mapping it owns; an invalid
    // range makes the syscall fail (logged below) rather than fault.
    let rc = unsafe {
        libc::syscall(
            libc::SYS_mbind,
            addr as *mut libc::c_void,
            len,
            MPOL_BIND,
            nodemask.as_ptr(),
            mask_bits as libc::c_ulong,
            0u32, // flags: none (no MPOL_MF_* move/strict flags)
        )
    };
    if rc == 0 {
        eprintln!(
            "performance_mode: mbind {} MB to NUMA node(s) {:?}",
            len >> 20,
            nodes,
        );
    } else {
        let err = std::io::Error::last_os_error();
        eprintln!(
            "performance_mode: WARNING: mbind to node(s) {:?} failed: {err}",
            nodes,
        );
    }
}
use crate::topology::parse_cpu_list_lenient;
/// Number of free 2 MiB hugepages reported by the kernel, or 0 when the
/// sysfs file is missing or unparsable.
pub fn hugepages_free() -> u64 {
    let path = "/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages";
    match std::fs::read_to_string(path) {
        Ok(text) => text.trim().parse().unwrap_or(0),
        Err(_) => 0,
    }
}
/// Number of 2 MiB hugepages required to back `memory_mb` megabytes of guest
/// memory, rounding an odd trailing megabyte up to a whole page.
pub fn hugepages_needed(memory_mb: u32) -> u64 {
    let mb = u64::from(memory_mb);
    (mb + 1) / 2
}
/// Rough host-load snapshot: (`procs_running` from /proc/stat, count of
/// online CPUs). Returns `None` when either source is unreadable or fails
/// to parse.
pub fn host_load_estimate() -> Option<(usize, usize)> {
    let stat = std::fs::read_to_string("/proc/stat").ok()?;
    let running_line = stat.lines().find(|l| l.starts_with("procs_running "))?;
    let procs_running: usize = running_line.split_whitespace().nth(1)?.parse().ok()?;
    let online = std::fs::read_to_string("/sys/devices/system/cpu/online").ok()?;
    let total = parse_cpu_list_lenient(&online).len();
    Some((procs_running, total))
}
#[cfg(test)]
mod tests {
    use super::*;

    // ---- CPU-list parsing (exercises crate::topology::parse_cpu_list_lenient) ----

    #[test]
    fn parse_cpu_list_range() {
        assert_eq!(parse_cpu_list_lenient("0-3"), vec![0, 1, 2, 3]);
    }
    #[test]
    fn parse_cpu_list_single() {
        assert_eq!(parse_cpu_list_lenient("5"), vec![5]);
    }
    #[test]
    fn parse_cpu_list_mixed() {
        assert_eq!(
            parse_cpu_list_lenient("0-2,5,7-9"),
            vec![0, 1, 2, 5, 7, 8, 9]
        );
    }
    #[test]
    fn parse_cpu_list_empty() {
        assert!(parse_cpu_list_lenient("").is_empty());
    }
    #[test]
    fn parse_cpu_list_whitespace() {
        assert_eq!(parse_cpu_list_lenient("0-3\n"), vec![0, 1, 2, 3]);
    }

    // ---- Live sysfs topology (requires a Linux host) ----

    #[test]
    fn host_topology_from_sysfs() {
        let topo = HostTopology::from_sysfs();
        assert!(topo.is_ok(), "should read host topology: {:?}", topo.err());
        let topo = topo.unwrap();
        assert!(!topo.online_cpus.is_empty());
        assert!(!topo.llc_groups.is_empty());
    }
    #[test]
    fn pinning_plan_simple() {
        let topo = HostTopology::from_sysfs().unwrap();
        // Skip on single-CPU hosts; the plan below needs two CPUs.
        if topo.total_cpus() < 2 {
            return;
        }
        let plan = topo.compute_pinning(1, 2, 1, false, 0);
        assert!(plan.is_ok(), "pinning should succeed: {:?}", plan.err());
        let plan = plan.unwrap();
        assert_eq!(plan.assignments.len(), 2);
        let cpus: Vec<usize> = plan.assignments.iter().map(|a| a.1).collect();
        let unique: std::collections::HashSet<usize> = cpus.iter().copied().collect();
        assert_eq!(cpus.len(), unique.len());
    }
    #[test]
    fn pinning_plan_oversubscribed() {
        let topo = HostTopology::from_sysfs().unwrap();
        let too_many = topo.total_cpus() as u32 + 1;
        let plan = topo.compute_pinning(1, too_many, 1, false, 0);
        assert!(plan.is_err());
    }

    // ---- Hugepage helpers ----

    #[test]
    fn hugepages_needed_values() {
        assert_eq!(hugepages_needed(2), 1);
        assert_eq!(hugepages_needed(4), 2);
        assert_eq!(hugepages_needed(2048), 1024);
        assert_eq!(hugepages_needed(3), 2);
    }
    #[test]
    fn hugepages_free_runs() {
        let _ = hugepages_free();
    }
    #[test]
    fn host_load_estimate_runs() {
        let result = host_load_estimate();
        assert!(result.is_some());
        let (running, total) = result.unwrap();
        assert!(total > 0);
        // At least this test process is running.
        assert!(running >= 1);
    }

    // ---- Lenient parsing edge cases ----

    #[test]
    fn parse_cpu_list_trailing_comma() {
        assert_eq!(parse_cpu_list_lenient("0,1,2,"), vec![0, 1, 2]);
    }
    #[test]
    fn parse_cpu_list_leading_comma() {
        assert_eq!(parse_cpu_list_lenient(",0,1"), vec![0, 1]);
    }
    #[test]
    fn parse_cpu_list_single_zero() {
        assert_eq!(parse_cpu_list_lenient("0"), vec![0]);
    }
    #[test]
    fn parse_cpu_list_large_ids() {
        assert_eq!(parse_cpu_list_lenient("127,255"), vec![127, 255]);
    }
    #[test]
    fn parse_cpu_list_reversed_range() {
        assert!(parse_cpu_list_lenient("5-3").is_empty());
    }
    #[test]
    fn parse_cpu_list_non_numeric() {
        assert!(parse_cpu_list_lenient("abc").is_empty());
    }

    /// Builds a fake topology where each input group is one LLC and also one
    /// NUMA node, so placement tests run deterministically on any host.
    fn synthetic_topo(groups: Vec<Vec<usize>>) -> HostTopology {
        let all_cpus: Vec<usize> = groups.iter().flatten().copied().collect();
        let mut cpu_to_node = std::collections::HashMap::new();
        for (node, group) in groups.iter().enumerate() {
            for &cpu in group {
                cpu_to_node.insert(cpu, node);
            }
        }
        let llc_groups = groups.into_iter().map(|cpus| LlcGroup { cpus }).collect();
        HostTopology {
            llc_groups,
            online_cpus: all_cpus,
            cpu_to_node,
        }
    }

    // ---- Deterministic pinning behavior on synthetic topologies ----

    #[test]
    fn mapping_single_socket_single_llc() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3]]);
        let plan = topo.compute_pinning(1, 2, 1, false, 0).unwrap();
        assert_eq!(plan.assignments.len(), 2);
        assert_eq!(plan.assignments[0], (0, 0));
        assert_eq!(plan.assignments[1], (1, 1));
    }
    #[test]
    fn mapping_two_sockets_two_llcs() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3], vec![4, 5, 6, 7]]);
        let plan = topo.compute_pinning(2, 2, 1, false, 0).unwrap();
        assert_eq!(plan.assignments.len(), 4);
        assert_eq!(plan.assignments[0], (0, 0));
        assert_eq!(plan.assignments[1], (1, 1));
        assert_eq!(plan.assignments[2], (2, 4));
        assert_eq!(plan.assignments[3], (3, 5));
    }
    #[test]
    fn mapping_with_smt() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3, 4, 5, 6, 7]]);
        let plan = topo.compute_pinning(1, 2, 2, false, 0).unwrap();
        assert_eq!(plan.assignments.len(), 4);
        let cpus: Vec<usize> = plan.assignments.iter().map(|a| a.1).collect();
        let unique: std::collections::HashSet<usize> = cpus.iter().copied().collect();
        assert_eq!(cpus.len(), unique.len());
    }
    #[test]
    fn mapping_exact_fit() {
        let topo = synthetic_topo(vec![vec![0, 1], vec![2, 3]]);
        let plan = topo.compute_pinning(2, 2, 1, false, 0).unwrap();
        assert_eq!(plan.assignments.len(), 4);
    }

    // ---- Error-message contracts of compute_pinning ----

    #[test]
    fn mapping_error_too_many_vcpus() {
        let topo = synthetic_topo(vec![vec![0, 1]]);
        let err = topo.compute_pinning(1, 4, 1, false, 0).unwrap_err();
        let msg = format!("{err}");
        assert!(
            msg.contains("4 vCPUs") && msg.contains("2 host CPUs"),
            "error should mention CPU counts: {msg}",
        );
    }
    #[test]
    fn mapping_error_too_many_sockets() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3]]);
        let err = topo.compute_pinning(2, 1, 1, false, 0).unwrap_err();
        let msg = format!("{err}");
        assert!(
            msg.contains("2 LLCs") && msg.contains("1 LLC groups"),
            "error should mention LLC count mismatch: {msg}",
        );
    }
    #[test]
    fn mapping_error_llc_too_small() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3], vec![4]]);
        let err = topo.compute_pinning(2, 2, 1, false, 0).unwrap_err();
        let msg = format!("{err}");
        assert!(
            msg.contains("LLC group 1") && msg.contains("1 available"),
            "error should identify the undersized LLC: {msg}",
        );
    }
    #[test]
    fn mapping_no_cross_llc_sharing() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10, 11]]);
        let plan = topo.compute_pinning(3, 2, 1, false, 0).unwrap();
        for (vcpu_id, host_cpu) in &plan.assignments {
            // Each socket has 2 vCPUs; its LLC spans 4 consecutive CPU ids.
            let socket = vcpu_id / 2;
            let llc_start = socket as usize * 4;
            let llc_end = llc_start + 3;
            assert!(
                *host_cpu >= llc_start && *host_cpu <= llc_end,
                "vCPU {vcpu_id} (socket {socket}) pinned to CPU {host_cpu}, \
                 expected range {llc_start}..={llc_end}",
            );
        }
    }
    #[test]
    fn mapping_all_assignments_unique() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3], vec![4, 5, 6, 7]]);
        let plan = topo.compute_pinning(2, 4, 1, false, 0).unwrap();
        let cpus: Vec<usize> = plan.assignments.iter().map(|a| a.1).collect();
        let unique: std::collections::HashSet<usize> = cpus.iter().copied().collect();
        assert_eq!(
            cpus.len(),
            unique.len(),
            "all host CPU assignments must be unique: {:?}",
            cpus,
        );
    }
    #[test]
    fn mapping_vcpu_ids_sequential() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3]]);
        let plan = topo.compute_pinning(1, 4, 1, false, 0).unwrap();
        let vcpu_ids: Vec<u32> = plan.assignments.iter().map(|a| a.0).collect();
        assert_eq!(vcpu_ids, vec![0, 1, 2, 3]);
    }
    #[test]
    fn mapping_single_vcpu() {
        let topo = synthetic_topo(vec![vec![42]]);
        let plan = topo.compute_pinning(1, 1, 1, false, 0).unwrap();
        assert_eq!(plan.assignments.len(), 1);
        assert_eq!(plan.assignments[0], (0, 42));
    }

    // ---- Sanity checks against the real host sysfs ----

    #[test]
    fn sysfs_llc_groups_cover_all_cpus() {
        let topo = HostTopology::from_sysfs().unwrap();
        let llc_cpus: Vec<usize> = topo
            .llc_groups
            .iter()
            .flat_map(|g| g.cpus.iter().copied())
            .collect();
        for cpu in &topo.online_cpus {
            assert!(
                llc_cpus.contains(cpu),
                "CPU {} is online but not in any LLC group",
                cpu,
            );
        }
    }
    #[test]
    fn sysfs_total_cpus_matches_online() {
        let topo = HostTopology::from_sysfs().unwrap();
        assert_eq!(topo.total_cpus(), topo.online_cpus.len());
    }
    #[test]
    fn sysfs_llc_groups_nonempty() {
        let topo = HostTopology::from_sysfs().unwrap();
        for (i, group) in topo.llc_groups.iter().enumerate() {
            assert!(
                !group.cpus.is_empty(),
                "LLC group {} should have at least one CPU",
                i,
            );
        }
    }
    #[test]
    fn sysfs_pinning_respects_llc_boundaries() {
        let topo = HostTopology::from_sysfs().unwrap();
        // Skip on hosts whose topology cannot host 2 sockets x 2 vCPUs.
        if topo.llc_groups.len() < 2 || topo.total_cpus() < 4 {
            return;
        }
        let min_llc_size = topo.llc_groups.iter().map(|g| g.cpus.len()).min().unwrap();
        if min_llc_size < 2 {
            return;
        }
        let plan = topo.compute_pinning(2, 2, 1, false, 0).unwrap();
        for (vcpu_id, host_cpu) in &plan.assignments {
            let socket = vcpu_id / 2;
            let llc = &topo.llc_groups[socket as usize];
            assert!(
                llc.cpus.contains(host_cpu),
                "vCPU {} mapped to CPU {} which is not in LLC group {}",
                vcpu_id,
                host_cpu,
                socket,
            );
        }
    }
    #[test]
    fn hugepages_needed_boundary() {
        assert_eq!(hugepages_needed(1), 1);
        assert_eq!(hugepages_needed(0), 0);
    }
    #[test]
    fn hugepages_needed_exact_multiple() {
        assert_eq!(hugepages_needed(1024), 512);
    }

    // ---- Service-CPU reservation ----

    #[test]
    fn reserve_service_cpu_picks_unpinned() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3]]);
        let plan = topo.compute_pinning(1, 2, 1, true, 0).unwrap();
        assert_eq!(plan.assignments.len(), 2);
        let service = plan.service_cpu.expect("service_cpu should be set");
        let vcpu_cpus: std::collections::HashSet<usize> =
            plan.assignments.iter().map(|a| a.1).collect();
        assert!(
            !vcpu_cpus.contains(&service),
            "service CPU {service} must not be assigned to a vCPU",
        );
    }
    #[test]
    fn reserve_service_cpu_false_returns_none() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3]]);
        let plan = topo.compute_pinning(1, 2, 1, false, 0).unwrap();
        assert!(plan.service_cpu.is_none());
    }
    #[test]
    fn reserve_service_cpu_exact_fit() {
        let topo = synthetic_topo(vec![vec![0, 1, 2]]);
        let plan = topo.compute_pinning(1, 2, 1, true, 0).unwrap();
        assert!(plan.service_cpu.is_some());
    }
    #[test]
    fn reserve_service_cpu_insufficient_fails() {
        let topo = synthetic_topo(vec![vec![0, 1]]);
        let err = topo.compute_pinning(1, 2, 1, true, 0).unwrap_err();
        let msg = format!("{err}");
        assert!(
            msg.contains("3 CPUs") && msg.contains("2 host CPUs"),
            "error should mention CPU shortage: {msg}",
        );
    }
    #[test]
    fn reserve_service_cpu_multi_llc() {
        let topo = synthetic_topo(vec![vec![0, 1, 2], vec![3, 4, 5]]);
        let plan = topo.compute_pinning(2, 2, 1, true, 0).unwrap();
        let service = plan.service_cpu.unwrap();
        let vcpu_cpus: std::collections::HashSet<usize> =
            plan.assignments.iter().map(|a| a.1).collect();
        assert!(!vcpu_cpus.contains(&service));
    }

    // ---- NUMA helpers ----

    #[test]
    fn sysfs_cpu_to_node_populated() {
        let topo = HostTopology::from_sysfs().unwrap();
        // NUMA discovery is best-effort; only validate what was found.
        if !topo.cpu_to_node.is_empty() {
            for (&cpu, &node) in &topo.cpu_to_node {
                assert!(
                    topo.online_cpus.contains(&cpu),
                    "NUMA mapping for CPU {cpu} but not in online set",
                );
                assert!(node < 1024, "unexpected NUMA node ID {node} for CPU {cpu}");
            }
        }
    }
    #[test]
    fn numa_nodes_for_cpus_synthetic() {
        let topo = synthetic_topo(vec![vec![0, 1], vec![2, 3]]);
        assert_eq!(topo.numa_nodes_for_cpus(&[0, 1]), vec![0]);
        assert_eq!(topo.numa_nodes_for_cpus(&[2, 3]), vec![1]);
        let mut nodes = topo.numa_nodes_for_cpus(&[0, 2]);
        nodes.sort();
        assert_eq!(nodes, vec![0, 1]);
    }
    #[test]
    fn numa_nodes_for_cpus_empty() {
        let topo = synthetic_topo(vec![vec![0, 1]]);
        assert!(topo.numa_nodes_for_cpus(&[]).is_empty());
    }
    #[test]
    fn max_cores_per_llc_synthetic() {
        let topo = synthetic_topo(vec![vec![0, 1, 2, 3], vec![4, 5]]);
        assert_eq!(topo.max_cores_per_llc(), 4);
    }
    #[test]
    fn max_cores_per_llc_uniform() {
        let topo = synthetic_topo(vec![vec![0, 1, 2], vec![3, 4, 5]]);
        assert_eq!(topo.max_cores_per_llc(), 3);
    }
    #[test]
    fn mbind_to_nodes_empty_is_noop() {
        // Empty node list / zero length must return before the syscall,
        // so a null pointer is safe here.
        mbind_to_nodes(std::ptr::null_mut(), 0, &[]);
        mbind_to_nodes(std::ptr::null_mut(), 4096, &[]);
    }
}