use anyhow::{Context, Result, bail};
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::path::Path;
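/// One last-level-cache (LLC) domain: the CPUs sharing it, the NUMA node it
/// sits on, its size when readable, and a core-id -> SMT-siblings map.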
#[derive(Debug, Clone)]
pub struct LlcInfo {
cpus: Vec<usize>,
numa_node: usize,
cache_size_kb: Option<u64>,
cores: BTreeMap<usize, Vec<usize>>,
}
impl LlcInfo {
pub fn cpus(&self) -> &[usize] {
&self.cpus
}
pub fn numa_node(&self) -> usize {
self.numa_node
}
pub fn cache_size_kb(&self) -> Option<u64> {
self.cache_size_kb
}
pub fn cores(&self) -> &BTreeMap<usize, Vec<usize>> {
&self.cores
}
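/// Number of physical cores, falling back to the CPU count when no core
/// map was recorded (as in synthetic topologies).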
pub fn num_cores(&self) -> usize {
if self.cores.is_empty() {
self.cpus.len()
} else {
self.cores.len()
}
}
}
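/// Memory totals for one NUMA node, in kB, as read from
/// /sys/devices/system/node/nodeN/meminfo.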
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NodeMemInfo {
pub total_kb: u64,
pub free_kb: u64,
}
impl NodeMemInfo {
pub fn used_kb(&self) -> u64 {
self.total_kb.saturating_sub(self.free_kb)
}
}
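/// Machine topology as seen by tests: online CPUs, LLC domains, NUMA node
/// ids with a flattened row-major distance matrix, per-node memory, and the
/// set of memory-only nodes. Built from sysfs, from a VM description, or
/// synthetically.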
#[derive(Debug, Clone)]
pub struct TestTopology {
cpus: Vec<usize>,
llcs: Vec<LlcInfo>,
numa_nodes: BTreeSet<usize>,
numa_distances: Vec<u8>,
node_mem: BTreeMap<usize, NodeMemInfo>,
memory_only_nodes: BTreeSet<usize>,
}
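/// Parses a kernel-style CPU list such as "0-2,5,7-9" into a sorted `Vec`
/// ([0, 1, 2, 5, 7, 8, 9]); malformed elements and reversed ranges are
/// errors.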
pub fn parse_cpu_list(s: &str) -> Result<Vec<usize>> {
let mut cpus = Vec::new();
for part in s.trim().split(',') {
let part = part.trim();
if part.is_empty() {
continue;
}
if let Some((lo, hi)) = part.split_once('-') {
let lo: usize = lo.parse().with_context(|| format!("bad range start in {part:?}"))?;
let hi: usize = hi.parse().with_context(|| format!("bad range end in {part:?}"))?;
if lo > hi {
bail!("reversed cpu range {part:?}");
}
cpus.extend(lo..=hi);
} else {
cpus.push(part.parse().with_context(|| format!("bad cpu id {part:?}"))?);
}
}
cpus.sort();
Ok(cpus)
}
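/// Best-effort variant of [`parse_cpu_list`]: malformed elements are skipped
/// instead of failing, e.g. "0,abc,2,xyz-3,4" parses to [0, 2, 4]. Used for
/// sysfs files whose contents we do not fully trust.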
pub fn parse_cpu_list_lenient(s: &str) -> Vec<usize> {
let mut cpus = Vec::new();
for part in s.trim().split(',') {
let part = part.trim();
if part.is_empty() {
continue;
}
if let Some((lo, hi)) = part.split_once('-') {
if let (Ok(lo), Ok(hi)) = (lo.parse::<usize>(), hi.parse::<usize>()) {
cpus.extend(lo..=hi);
}
} else if let Ok(cpu) = part.parse::<usize>() {
cpus.push(cpu);
}
}
cpus.sort();
cpus
}
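/// Walks /sys/devices/system/cpu/cpuN/cache/index*/ and returns the index
/// directory with the highest `level` file (the last-level cache).
/// Unreadable levels and malformed index names are skipped; if nothing
/// parses, index 0 is the fallback.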
fn find_llc_index(cpu: usize) -> Result<usize> {
let cache_dir = format!("/sys/devices/system/cpu/cpu{cpu}/cache");
let mut max_level = 0usize;
let mut llc_index = 0usize;
for entry in fs::read_dir(&cache_dir).with_context(|| format!("read {cache_dir}"))? {
let entry = entry?;
let name = entry.file_name();
let name = name.to_string_lossy();
if !name.starts_with("index") {
continue;
}
let level_path = entry.path().join("level");
if let Ok(level_str) = fs::read_to_string(&level_path)
&& let Ok(level) = level_str.trim().parse::<usize>()
&& level > max_level
{
let idx_str = name
.strip_prefix("index")
.expect("filtered by starts_with(\"index\") above");
match idx_str.parse::<usize>() {
Ok(idx) => {
max_level = level;
llc_index = idx;
}
Err(e) => {
tracing::warn!(
cache_dir = %cache_dir,
entry = %name,
err = %e,
"malformed sysfs cache index name; skipping entry",
);
}
}
}
}
Ok(llc_index)
}
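/// Resolves a stable LLC id for `cpu`: the sysfs `id` file when present,
/// otherwise the smallest CPU in `shared_cpu_list` (so every sharer derives
/// the same id), and finally 0 as a last resort.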
fn read_llc_id(cpu: usize) -> Result<usize> {
let llc_index = find_llc_index(cpu)?;
let id_path = format!("/sys/devices/system/cpu/cpu{cpu}/cache/index{llc_index}/id");
if let Ok(id_str) = fs::read_to_string(&id_path)
&& let Ok(id) = id_str.trim().parse::<usize>()
{
return Ok(id);
}
let shared_path =
format!("/sys/devices/system/cpu/cpu{cpu}/cache/index{llc_index}/shared_cpu_list");
if let Ok(shared_str) = fs::read_to_string(&shared_path) {
let siblings = parse_cpu_list_lenient(shared_str.trim());
if let Some(&min_cpu) = siblings.iter().min() {
return Ok(min_cpu);
}
}
Ok(0)
}
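/// Finds the NUMA node of `cpu` by looking for a nodeN entry under its
/// sysfs directory; defaults to node 0 when none is found.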
fn read_numa_node(cpu: usize) -> Result<usize> {
let cpu_dir = format!("/sys/devices/system/cpu/cpu{cpu}");
for entry in fs::read_dir(&cpu_dir).with_context(|| format!("read {cpu_dir}"))? {
let entry = entry?;
let name = entry.file_name();
let name = name.to_string_lossy();
// strip_prefix subsumes the starts_with("node") check.
if let Some(id_str) = name.strip_prefix("node")
&& let Ok(id) = id_str.parse::<usize>()
{
return Ok(id);
}
}
Ok(0)
}
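/// Reads the LLC size for `cpu` in KiB, or `None` if any sysfs step fails.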
fn read_llc_cache_size(cpu: usize) -> Option<u64> {
let llc_index = find_llc_index(cpu).ok()?;
let size_path = format!("/sys/devices/system/cpu/cpu{cpu}/cache/index{llc_index}/size");
let size_str = fs::read_to_string(&size_path).ok()?;
parse_cache_size(size_str.trim())
}
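/// Parses a sysfs cache `size` string into KiB: "32768K" -> 32768,
/// "32M" -> 32768; a bare number is taken as bytes and rounded up to whole
/// KiB ("65536" -> 64).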
fn parse_cache_size(s: &str) -> Option<u64> {
let s = s.trim();
if let Some(kb) = s.strip_suffix('K') {
kb.parse().ok()
} else if let Some(mb) = s.strip_suffix('M') {
mb.parse::<u64>().ok().map(|v| v * 1024)
} else {
s.parse::<u64>().ok().map(|v| v.div_ceil(1024))
}
}
fn read_core_id(cpu: usize) -> Option<usize> {
let path = format!("/sys/devices/system/cpu/cpu{cpu}/topology/core_id");
fs::read_to_string(&path)
.ok()
.and_then(|s| s.trim().parse().ok())
}
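/// Extracts MemTotal/MemFree from nodeN/meminfo. Lines look like
/// "Node 0 MemTotal: 131072 kB", so the "kB" suffix is stripped and the
/// last whitespace-separated field parsed. Both values must be present.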
fn read_node_meminfo(node: usize) -> Option<NodeMemInfo> {
let path = format!("/sys/devices/system/node/node{node}/meminfo");
let content = fs::read_to_string(path).ok()?;
let mut total_kb = None;
let mut free_kb = None;
for line in content.lines() {
if let Some(rest) = line.strip_suffix("kB").map(str::trim_end) {
if rest.contains("MemTotal") {
total_kb = rest
.rsplit_once(char::is_whitespace)
.and_then(|(_, v)| v.parse().ok());
} else if rest.contains("MemFree") {
free_kb = rest
.rsplit_once(char::is_whitespace)
.and_then(|(_, v)| v.parse().ok());
}
}
}
Some(NodeMemInfo {
total_kb: total_kb?,
free_kb: free_kb?,
})
}
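/// Reads one row of the NUMA distance matrix from nodeN/distance as
/// whitespace-separated integers; `None` when missing, unparseable, or
/// empty.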
fn read_node_distances(node: usize) -> Option<Vec<u8>> {
let path = format!("/sys/devices/system/node/node{node}/distance");
let content = fs::read_to_string(path).ok()?;
let values: Option<Vec<u8>> = content.split_whitespace().map(|s| s.parse().ok()).collect();
// An empty-but-readable file is treated the same as a missing one.
values.filter(|v| !v.is_empty())
}
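/// A node is memory-only (CPU-less, e.g. CXL-attached memory) when its
/// cpulist file reads as empty. A missing file is treated as "has CPUs".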
fn is_node_memory_only(node: usize) -> bool {
let path = format!("/sys/devices/system/node/node{node}/cpulist");
match fs::read_to_string(path) {
Ok(s) => s.trim().is_empty(),
Err(_) => false,
}
}
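/// Builds a degenerate LLC covering `cpus` for when cache topology is
/// undiscoverable; every CPU becomes its own single-sibling core.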
fn synthesize_fallback_llc(cpus: &[usize], numa_node: usize) -> LlcInfo {
let cores: BTreeMap<usize, Vec<usize>> = cpus.iter().map(|&c| (c, vec![c])).collect();
LlcInfo {
cpus: cpus.to_vec(),
numa_node,
cache_size_kb: None,
cores,
}
}
impl TestTopology {
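/// Discovers the topology of the running machine from sysfs. Missing or
/// malformed entries degrade gracefully with a warning instead of failing;
/// only an unreadable or empty online-CPU list is a hard error.
///
/// A minimal sketch of intended use (marked `ignore` since the crate path
/// is elided):
///
/// ```ignore
/// let topo = TestTopology::from_system()?;
/// for llc in topo.llcs() {
///     let set: std::collections::BTreeSet<usize> = llc.cpus().iter().copied().collect();
///     println!("LLC on node {}: {}", llc.numa_node(), TestTopology::cpuset_string(&set));
/// }
/// ```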
pub fn from_system() -> Result<Self> {
let online_str =
fs::read_to_string("/sys/devices/system/cpu/online").context("read online cpus")?;
let online_cpus = parse_cpu_list(&online_str)?;
if online_cpus.is_empty() {
bail!("no online CPUs found");
}
let mut cpus = BTreeSet::new();
let mut llc_map: BTreeMap<usize, LlcInfo> = BTreeMap::new();
let mut numa_nodes = BTreeSet::new();
let mut llc_cache_sizes: BTreeMap<usize, Option<u64>> = BTreeMap::new();
for &cpu_id in &online_cpus {
let cpu_path = format!("/sys/devices/system/cpu/cpu{cpu_id}");
if !Path::new(&cpu_path).exists() {
tracing::warn!(
cpu = cpu_id,
path = %cpu_path,
"/sys/devices/system/cpu/online listed this CPU but \
/sys/devices/system/cpu/cpuN/ is absent; skipping — \
the CPU will not appear in TestTopology.all_cpus()"
);
continue;
}
cpus.insert(cpu_id);
let llc_id = match read_llc_id(cpu_id) {
Ok(id) => id,
Err(e) => {
tracing::warn!(
cpu = cpu_id,
error = %e,
"LLC id unreadable from sysfs; bucketing CPU into fallback LLC 0 — \
LlcAligned affinity will merge this CPU with any other unreadable CPUs"
);
0
}
};
let node_id = match read_numa_node(cpu_id) {
Ok(id) => id,
Err(e) => {
tracing::warn!(
cpu = cpu_id,
error = %e,
"NUMA node unreadable from sysfs; bucketing CPU into fallback node 0 — \
NUMA-aware placement may be incorrect for this CPU"
);
0
}
};
let core_id = read_core_id(cpu_id).unwrap_or_else(|| {
tracing::warn!(
cpu = cpu_id,
"core_id unreadable from sysfs; synthesizing singleton core entry \
using cpu_id as the core id — SMT sibling grouping unavailable for this CPU"
);
cpu_id
});
numa_nodes.insert(node_id);
llc_cache_sizes
.entry(llc_id)
.or_insert_with(|| read_llc_cache_size(cpu_id));
llc_map
.entry(llc_id)
.and_modify(|info| {
info.cpus.push(cpu_id);
info.cores.entry(core_id).or_default().push(cpu_id);
})
.or_insert_with(|| {
let mut cores = BTreeMap::new();
cores.insert(core_id, vec![cpu_id]);
LlcInfo {
cpus: vec![cpu_id],
numa_node: node_id,
cache_size_kb: llc_cache_sizes.get(&llc_id).copied().flatten(),
cores,
}
});
}
for info in llc_map.values_mut() {
info.cpus.sort();
for siblings in info.cores.values_mut() {
siblings.sort();
}
}
if let Ok(entries) = fs::read_dir("/sys/devices/system/node") {
for entry in entries.flatten() {
let name = entry.file_name();
let name = name.to_string_lossy();
if let Some(id_str) = name.strip_prefix("node")
&& let Ok(id) = id_str.parse::<usize>()
{
numa_nodes.insert(id);
}
}
}
let n = numa_nodes.len();
let node_ids: Vec<usize> = numa_nodes.iter().copied().collect();
let mut node_mem = BTreeMap::new();
for &nid in &node_ids {
if let Some(mi) = read_node_meminfo(nid) {
node_mem.insert(nid, mi);
}
}
let mut memory_only_nodes = BTreeSet::new();
for &nid in &node_ids {
if is_node_memory_only(nid) {
memory_only_nodes.insert(nid);
}
}
let numa_distances = {
let mut matrix = Vec::with_capacity(n * n);
let mut fallback_reason: Option<String> = None;
for &nid in &node_ids {
match read_node_distances(nid) {
Some(row) if row.len() == n => matrix.extend_from_slice(&row),
Some(row) => {
fallback_reason = Some(format!(
"node{nid}/distance has {} entries, expected {n}",
row.len()
));
break;
}
None => {
fallback_reason =
Some(format!("node{nid}/distance missing or unparseable"));
break;
}
}
}
if fallback_reason.is_some() || matrix.len() != n * n {
let reason = fallback_reason.unwrap_or_else(|| {
format!("distance matrix length {} != {}", matrix.len(), n * n)
});
tracing::warn!(
reason = %reason,
numa_nodes = n,
"NUMA distance matrix unavailable from /sys/devices/system/node/*/distance; \
falling back to 10 (intra-node) / 20 (inter-node) — \
NUMA-aware placement decisions will use uniform distances"
);
matrix.clear();
matrix.resize(n * n, 0);
for i in 0..n {
for j in 0..n {
matrix[i * n + j] = if i == j { 10 } else { 20 };
}
}
}
matrix
};
let llcs: Vec<LlcInfo> = llc_map.into_values().collect();
let llcs = if llcs.is_empty() {
let fallback_cpus: Vec<usize> = cpus.iter().copied().collect();
let fallback_node = *numa_nodes.iter().next().unwrap_or(&0);
tracing::warn!(
cpu_count = fallback_cpus.len(),
fallback_numa_node = fallback_node,
"LLC discovery empty from /sys/devices/system/cpu/*/cache/; \
synthesizing a single fallback LLC covering all online CPUs — \
LlcAligned affinity will pin to the entire machine"
);
vec![synthesize_fallback_llc(&fallback_cpus, fallback_node)]
} else {
llcs
};
let numa_nodes = if numa_nodes.is_empty() {
tracing::warn!(
"NUMA node set empty after sysfs discovery (no nodeN entries and \
no per-CPU node ids); synthesizing a fallback {{0}} — \
NUMA-aware placement will treat the machine as single-node"
);
let mut s = BTreeSet::new();
s.insert(0);
s
} else {
numa_nodes
};
Ok(Self {
cpus: cpus.into_iter().collect(),
llcs,
numa_nodes,
numa_distances,
node_mem,
memory_only_nodes,
})
}
pub fn total_cpus(&self) -> usize {
self.cpus.len()
}
pub fn num_llcs(&self) -> usize {
self.llcs.len()
}
pub fn num_numa_nodes(&self) -> usize {
self.numa_nodes.len()
}
pub fn numa_node_ids(&self) -> &BTreeSet<usize> {
&self.numa_nodes
}
pub fn llcs(&self) -> &[LlcInfo] {
&self.llcs
}
pub fn all_cpus(&self) -> &[usize] {
&self.cpus
}
pub fn all_cpuset(&self) -> BTreeSet<usize> {
self.cpus.iter().copied().collect()
}
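/// CPUs tests may use: with more than two CPUs online, the last one is held
/// back; smaller machines get every CPU.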
pub fn usable_cpus(&self) -> &[usize] {
if self.cpus.len() > 2 {
&self.cpus[..self.cpus.len() - 1]
} else {
&self.cpus
}
}
pub fn usable_cpuset(&self) -> BTreeSet<usize> {
self.usable_cpus().iter().copied().collect()
}
pub fn cpus_in_llc(&self, idx: usize) -> &[usize] {
match self.llcs.get(idx) {
Some(llc) => &llc.cpus,
None => &[],
}
}
pub fn llc_aligned_cpuset(&self, idx: usize) -> BTreeSet<usize> {
match self.llcs.get(idx) {
Some(llc) => llc.cpus.iter().copied().collect(),
None => BTreeSet::new(),
}
}
pub fn numa_aligned_cpuset(&self, node: usize) -> BTreeSet<usize> {
self.llcs
.iter()
.filter(|llc| llc.numa_node() == node)
.flat_map(|llc| llc.cpus())
.copied()
.collect()
}
pub fn numa_nodes_for_cpuset(&self, cpus: &BTreeSet<usize>) -> BTreeSet<usize> {
self.llcs
.iter()
.filter(|llc| llc.cpus.iter().any(|c| cpus.contains(c)))
.map(|llc| llc.numa_node)
.collect()
}
pub fn node_meminfo(&self, node_id: usize) -> Option<&NodeMemInfo> {
self.node_mem.get(&node_id)
}
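/// Distance between two nodes from the flattened row-major matrix; unknown
/// node ids map to 255 (farthest).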
pub fn numa_distance(&self, from: usize, to: usize) -> u8 {
let n = self.numa_nodes.len();
let Some(from_idx) = self.numa_nodes.iter().position(|&id| id == from) else {
return 255;
};
let Some(to_idx) = self.numa_nodes.iter().position(|&id| id == to) else {
return 255;
};
self.numa_distances[from_idx * n + to_idx]
}
pub fn is_memory_only(&self, node_id: usize) -> bool {
self.memory_only_nodes.contains(&node_id)
}
pub fn split_by_llc(&self) -> Vec<BTreeSet<usize>> {
self.llcs
.iter()
.map(|l| l.cpus.iter().copied().collect())
.collect()
}
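/// Produces `n` cpusets of roughly `total / n` CPUs each, with start
/// offsets staggered so adjacent sets share about `overlap_frac` of their
/// CPUs (wrapping modulo the CPU count). E.g. 8 CPUs, n = 2,
/// overlap_frac = 0.5 yields {0-3} and {2-5}.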
pub fn overlapping_cpusets(&self, n: usize, overlap_frac: f64) -> Vec<BTreeSet<usize>> {
let total = self.cpus.len();
if n == 0 || total == 0 {
return vec![];
}
let base = total / n;
let overlap = ((base as f64) * overlap_frac).ceil() as usize;
let stride = if base > overlap { base - overlap } else { 1 };
(0..n)
.map(|i| {
let start = (i * stride) % total;
(0..base.max(1))
.map(|j| self.cpus[(start + j) % total])
.collect()
})
.collect()
}
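/// Formats a cpuset in kernel list syntax, collapsing consecutive CPUs into
/// ranges: {0, 1, 3, 5, 6, 7} -> "0-1,3,5-7".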
pub fn cpuset_string(cpus: &BTreeSet<usize>) -> String {
if cpus.is_empty() {
return String::new();
}
let sorted: Vec<usize> = cpus.iter().copied().collect();
let mut ranges = Vec::new();
// Render one maximal run of consecutive CPUs.
let fmt_range = |start: usize, end: usize| {
if start == end {
format!("{start}")
} else {
format!("{start}-{end}")
}
};
let (mut start, mut end) = (sorted[0], sorted[0]);
for &cpu in &sorted[1..] {
if cpu == end + 1 {
end = cpu;
} else {
ranges.push(fmt_range(start, end));
start = cpu;
end = cpu;
}
}
ranges.push(fmt_range(start, end));
ranges.join(",")
}
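/// Builds a deterministic topology from a VM topology description: CPUs are
/// numbered contiguously LLC by LLC, with SMT siblings adjacent.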
pub fn from_vm_topology(topo: &crate::vmm::topology::Topology) -> Self {
Self::from_vm_topology_with_memory(topo, None)
}
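/// Like [`from_vm_topology`], but when the VM topology carries no explicit
/// per-node memory, optionally splits `total_memory_mb` evenly across
/// nodes, giving the remainder to the last node.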
pub fn from_vm_topology_with_memory(
topo: &crate::vmm::topology::Topology,
total_memory_mb: Option<u32>,
) -> Self {
assert!(
topo.llcs > 0 && topo.cores_per_llc > 0 && topo.threads_per_core > 0,
"TestTopology requires non-zero llcs/cores/threads; got llcs={}, cores={}, threads={}",
topo.llcs,
topo.cores_per_llc,
topo.threads_per_core,
);
assert!(
topo.numa_nodes > 0,
"TestTopology requires at least one NUMA node; got {}",
topo.numa_nodes,
);
let llcs = topo.llcs;
let cores = topo.cores_per_llc;
let threads = topo.threads_per_core;
let numa_nodes = topo.numa_nodes;
let total = (llcs * cores * threads) as usize;
let cpus_per_llc = (cores * threads) as usize;
let cpus: Vec<usize> = (0..total).collect();
let llc_infos: Vec<LlcInfo> = (0..llcs as usize)
.map(|l| {
let start = l * cpus_per_llc;
let end = start + cpus_per_llc;
let mut core_map = BTreeMap::new();
for c in 0..cores as usize {
let base = start + c * threads as usize;
let siblings: Vec<usize> = (base..base + threads as usize).collect();
core_map.insert(c, siblings);
}
LlcInfo {
cpus: (start..end).collect(),
numa_node: topo.numa_node_of(l as u32) as usize,
cache_size_kb: None,
cores: core_map,
}
})
.collect();
let n = numa_nodes as usize;
let numa_node_set: BTreeSet<usize> = (0..n).collect();
let mut distances = vec![0u8; n * n];
for i in 0..n {
for j in 0..n {
distances[i * n + j] = topo.distance(i as u32, j as u32);
}
}
let mut node_mem = BTreeMap::new();
let mut memory_only_nodes = BTreeSet::new();
match &topo.nodes {
Some(nodes) => {
for (i, node) in nodes.iter().enumerate() {
if node.memory_mb > 0 {
node_mem.insert(
i,
NodeMemInfo {
total_kb: (node.memory_mb as u64) * 1024,
free_kb: (node.memory_mb as u64) * 1024,
},
);
}
if node.is_memory_only() {
memory_only_nodes.insert(i);
}
}
}
None => {
if let Some(total_mb) = total_memory_mb {
let per_node_mb = total_mb / numa_nodes;
for i in 0..n {
let mb = if i == n - 1 {
total_mb - per_node_mb * (numa_nodes - 1)
} else {
per_node_mb
};
node_mem.insert(
i,
NodeMemInfo {
total_kb: (mb as u64) * 1024,
free_kb: (mb as u64) * 1024,
},
);
}
}
}
}
Self {
cpus,
llcs: llc_infos,
numa_nodes: numa_node_set,
numa_distances: distances,
node_mem,
memory_only_nodes,
}
}
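/// Test-only constructor: `num_cpus` split evenly across `num_llcs` LLCs
/// (the last LLC absorbs any remainder), one NUMA node per LLC, a uniform
/// 10/20 distance matrix, and no core or memory information.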
#[cfg(test)]
pub fn synthetic(num_cpus: usize, num_llcs: usize) -> Self {
assert!(
num_llcs > 0,
"TestTopology::synthetic requires num_llcs > 0; got 0"
);
assert!(
num_cpus > 0,
"TestTopology::synthetic requires num_cpus > 0; got 0"
);
assert!(
num_cpus >= num_llcs,
"TestTopology::synthetic requires num_cpus ({num_cpus}) >= num_llcs ({num_llcs})",
);
let cpus: Vec<usize> = (0..num_cpus).collect();
let per_llc = num_cpus / num_llcs;
let llcs: Vec<LlcInfo> = (0..num_llcs)
.map(|i| {
let start = i * per_llc;
let end = if i == num_llcs - 1 {
num_cpus
} else {
(i + 1) * per_llc
};
LlcInfo {
cpus: (start..end).collect(),
numa_node: i,
cache_size_kb: None,
cores: BTreeMap::new(),
}
})
.collect();
let n = num_llcs;
let numa_nodes: BTreeSet<usize> = (0..n).collect();
let mut distances = vec![0u8; n * n];
for i in 0..n {
for j in 0..n {
distances[i * n + j] = if i == j { 10 } else { 20 };
}
}
Self {
cpus,
llcs,
numa_nodes,
numa_distances: distances,
node_mem: BTreeMap::new(),
memory_only_nodes: BTreeSet::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use proptest::prop_assert;
#[test]
fn cpuset_string_empty() {
assert_eq!(TestTopology::cpuset_string(&BTreeSet::new()), "");
}
#[test]
fn cpuset_string_single() {
assert_eq!(TestTopology::cpuset_string(&[3].into_iter().collect()), "3");
}
#[test]
fn cpuset_string_range() {
assert_eq!(
TestTopology::cpuset_string(&[0, 1, 2, 3].into_iter().collect()),
"0-3"
);
}
#[test]
fn cpuset_string_gaps() {
assert_eq!(
TestTopology::cpuset_string(&[0, 1, 3, 5, 6, 7].into_iter().collect()),
"0-1,3,5-7"
);
}
#[test]
fn synthetic_topology() {
let t = TestTopology::synthetic(8, 2);
assert_eq!(t.total_cpus(), 8);
assert_eq!(t.num_llcs(), 2);
assert_eq!(t.cpus_in_llc(0), &[0, 1, 2, 3]);
assert_eq!(t.cpus_in_llc(1), &[4, 5, 6, 7]);
}
#[test]
fn overlapping_cpusets_basic() {
let t = TestTopology::synthetic(8, 1);
let sets = t.overlapping_cpusets(2, 0.5);
assert_eq!(sets.len(), 2);
for s in &sets {
assert_eq!(s.len(), 4);
}
let overlap: BTreeSet<usize> = sets[0].intersection(&sets[1]).copied().collect();
assert!(!overlap.is_empty());
}
#[test]
fn overlapping_cpusets_no_overlap() {
let t = TestTopology::synthetic(8, 1);
let sets = t.overlapping_cpusets(2, 0.0);
assert_eq!(sets.len(), 2);
let overlap: BTreeSet<usize> = sets[0].intersection(&sets[1]).copied().collect();
assert!(overlap.is_empty());
}
#[test]
fn split_by_llc() {
let t = TestTopology::synthetic(8, 2);
let splits = t.split_by_llc();
assert_eq!(splits.len(), 2);
assert_eq!(splits[0], [0, 1, 2, 3].into_iter().collect());
assert_eq!(splits[1], [4, 5, 6, 7].into_iter().collect());
}
#[test]
fn llc_aligned_cpuset() {
let t = TestTopology::synthetic(8, 2);
assert_eq!(t.llc_aligned_cpuset(0), [0, 1, 2, 3].into_iter().collect());
assert_eq!(t.llc_aligned_cpuset(1), [4, 5, 6, 7].into_iter().collect());
}
#[test]
fn from_vm_topology_single_llc() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 1, 4, 2));
assert_eq!(t.total_cpus(), 8);
assert_eq!(t.num_llcs(), 1);
assert_eq!(t.num_numa_nodes(), 1);
assert_eq!(t.all_cpus(), &[0, 1, 2, 3, 4, 5, 6, 7]);
assert_eq!(t.cpus_in_llc(0), &[0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn from_vm_topology_multi_llc() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 2));
assert_eq!(t.total_cpus(), 16);
assert_eq!(t.num_llcs(), 2);
assert_eq!(t.num_numa_nodes(), 1);
assert_eq!(t.cpus_in_llc(0), &[0, 1, 2, 3, 4, 5, 6, 7]);
assert_eq!(t.cpus_in_llc(1), &[8, 9, 10, 11, 12, 13, 14, 15]);
}
#[test]
fn from_vm_topology_no_smt() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 2, 1));
assert_eq!(t.total_cpus(), 4);
assert_eq!(t.num_llcs(), 2);
assert_eq!(t.cpus_in_llc(0), &[0, 1]);
assert_eq!(t.cpus_in_llc(1), &[2, 3]);
}
#[test]
fn from_vm_topology_minimal() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 1, 1, 1));
assert_eq!(t.total_cpus(), 1);
assert_eq!(t.num_llcs(), 1);
assert_eq!(t.all_cpus(), &[0]);
}
#[test]
fn from_vm_topology_multi_numa() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 2));
assert_eq!(t.total_cpus(), 32);
assert_eq!(t.num_llcs(), 4);
assert_eq!(t.num_numa_nodes(), 2);
assert_eq!(t.llcs()[0].numa_node(), 0);
assert_eq!(t.llcs()[1].numa_node(), 0);
assert_eq!(t.llcs()[2].numa_node(), 1);
assert_eq!(t.llcs()[3].numa_node(), 1);
}
#[test]
fn overlapping_cpusets_zero_n() {
let t = TestTopology::synthetic(8, 1);
assert!(t.overlapping_cpusets(0, 0.5).is_empty());
}
#[test]
fn synthetic_single_llc() {
let t = TestTopology::synthetic(4, 1);
assert_eq!(t.num_llcs(), 1);
assert_eq!(t.total_cpus(), 4);
assert_eq!(t.num_numa_nodes(), 1);
assert_eq!(t.all_cpus(), &[0, 1, 2, 3]);
}
#[test]
fn synthetic_many_llcs() {
let t = TestTopology::synthetic(16, 4);
assert_eq!(t.num_llcs(), 4);
for i in 0..4 {
assert_eq!(t.cpus_in_llc(i).len(), 4);
}
}
#[test]
fn cpuset_string_two_ranges() {
assert_eq!(
TestTopology::cpuset_string(&[0, 1, 2, 5, 6, 7].into_iter().collect()),
"0-2,5-7"
);
}
#[test]
fn cpuset_string_all_isolated() {
assert_eq!(
TestTopology::cpuset_string(&[1, 3, 5].into_iter().collect()),
"1,3,5"
);
}
#[test]
fn cpuset_string_large_range() {
let cpus: BTreeSet<usize> = (0..128).collect();
assert_eq!(TestTopology::cpuset_string(&cpus), "0-127");
}
#[test]
fn overlapping_cpusets_single_set() {
let t = TestTopology::synthetic(8, 1);
let sets = t.overlapping_cpusets(1, 0.5);
assert_eq!(sets.len(), 1);
assert_eq!(sets[0].len(), 8);
}
#[test]
fn split_by_llc_single() {
let t = TestTopology::synthetic(4, 1);
let splits = t.split_by_llc();
assert_eq!(splits.len(), 1);
assert_eq!(splits[0].len(), 4);
}
#[test]
fn split_by_llc_two_llc_regression() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 1));
assert_eq!(t.total_cpus(), 8);
assert_eq!(t.num_llcs(), 2);
let splits = t.split_by_llc();
assert_eq!(splits.len(), 2, "2-LLC topology must produce 2 LLC sets");
let overlap: BTreeSet<usize> = splits[0].intersection(&splits[1]).copied().collect();
assert!(
overlap.is_empty(),
"LLC sets must be disjoint: overlap={overlap:?}"
);
let union: BTreeSet<usize> = splits[0].union(&splits[1]).copied().collect();
assert_eq!(union, t.all_cpuset(), "LLC sets must cover all CPUs");
assert_eq!(splits[0].len(), 4);
assert_eq!(splits[1].len(), 4);
assert_eq!(splits[0], [0, 1, 2, 3].into_iter().collect());
assert_eq!(splits[1], [4, 5, 6, 7].into_iter().collect());
}
#[test]
fn usable_cpus_reserves_last() {
let t = TestTopology::synthetic(8, 2);
assert_eq!(t.usable_cpus().len(), 7);
assert!(!t.usable_cpus().contains(&7));
}
#[test]
fn usable_cpus_small_no_reserve() {
let t = TestTopology::synthetic(2, 1);
assert_eq!(t.usable_cpus().len(), 2);
}
#[test]
fn usable_cpus_single_cpu() {
let t = TestTopology::synthetic(1, 1);
assert_eq!(t.usable_cpus().len(), 1);
}
#[test]
fn parse_cpu_list_simple() {
assert_eq!(parse_cpu_list("0,1,2,3").unwrap(), vec![0, 1, 2, 3]);
}
#[test]
fn parse_cpu_list_range() {
assert_eq!(parse_cpu_list("0-3").unwrap(), vec![0, 1, 2, 3]);
}
#[test]
fn parse_cpu_list_mixed() {
assert_eq!(
parse_cpu_list("0-2,5,7-9").unwrap(),
vec![0, 1, 2, 5, 7, 8, 9]
);
}
#[test]
fn parse_cpu_list_empty() {
assert!(parse_cpu_list("").unwrap().is_empty());
}
#[test]
fn parse_cpu_list_whitespace() {
assert_eq!(parse_cpu_list(" 0 , 1 , 2 ").unwrap(), vec![0, 1, 2]);
}
#[test]
fn from_vm_topology_large() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 4, 8, 2));
assert_eq!(t.total_cpus(), 64);
assert_eq!(t.num_llcs(), 4);
assert_eq!(t.num_numa_nodes(), 1);
}
#[test]
fn llc_info_accessors() {
let t = TestTopology::synthetic(8, 2);
let llcs = t.llcs();
assert_eq!(llcs.len(), 2);
assert_eq!(llcs[0].cpus(), &[0, 1, 2, 3]);
assert_eq!(llcs[0].numa_node(), 0);
assert_eq!(llcs[1].cpus(), &[4, 5, 6, 7]);
assert_eq!(llcs[1].numa_node(), 1);
}
#[test]
fn from_vm_topology_cores_populated() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 2));
let llc0 = &t.llcs()[0];
assert_eq!(llc0.num_cores(), 4);
assert_eq!(llc0.cores().len(), 4);
assert_eq!(llc0.cores()[&0], vec![0, 1]);
assert_eq!(llc0.cores()[&1], vec![2, 3]);
assert_eq!(llc0.cores()[&2], vec![4, 5]);
assert_eq!(llc0.cores()[&3], vec![6, 7]);
let llc1 = &t.llcs()[1];
assert_eq!(llc1.cores()[&0], vec![8, 9]);
}
#[test]
fn from_vm_topology_no_smt_cores() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 1, 4, 1));
let llc = &t.llcs()[0];
assert_eq!(llc.num_cores(), 4);
assert_eq!(llc.cores()[&0], vec![0]);
assert_eq!(llc.cores()[&3], vec![3]);
}
#[test]
fn parse_cache_size_formats() {
assert_eq!(parse_cache_size("32768K"), Some(32768));
assert_eq!(parse_cache_size("32M"), Some(32768));
assert_eq!(parse_cache_size("65536"), Some(64));
assert_eq!(parse_cache_size("500"), Some(1));
assert_eq!(parse_cache_size("1"), Some(1));
assert_eq!(parse_cache_size("1023"), Some(1));
assert_eq!(parse_cache_size("1025"), Some(2));
assert_eq!(parse_cache_size("0"), Some(0));
}
#[test]
fn num_cores_from_cores_map() {
let llc = LlcInfo {
cpus: vec![0, 1, 2, 3],
numa_node: 0,
cache_size_kb: None,
cores: BTreeMap::from([(0, vec![0, 1]), (1, vec![2, 3])]),
};
assert_eq!(llc.num_cores(), 2);
}
#[test]
fn num_cores_fallback_to_cpus() {
let llc = LlcInfo {
cpus: vec![0, 1, 2, 3],
numa_node: 0,
cache_size_kb: None,
cores: BTreeMap::new(),
};
assert_eq!(llc.num_cores(), 4);
}
#[test]
fn parse_cpu_list_lenient_simple() {
assert_eq!(parse_cpu_list_lenient("0,1,2,3"), vec![0, 1, 2, 3]);
}
#[test]
fn parse_cpu_list_lenient_range() {
assert_eq!(parse_cpu_list_lenient("0-3"), vec![0, 1, 2, 3]);
}
#[test]
fn parse_cpu_list_lenient_mixed() {
assert_eq!(
parse_cpu_list_lenient("0-2,5,7-9"),
vec![0, 1, 2, 5, 7, 8, 9]
);
}
#[test]
fn parse_cpu_list_lenient_empty() {
assert!(parse_cpu_list_lenient("").is_empty());
}
#[test]
fn parse_cpu_list_lenient_skips_garbage() {
assert_eq!(parse_cpu_list_lenient("0,abc,2,xyz-3,4"), vec![0, 2, 4]);
}
#[test]
fn parse_cpu_list_lenient_whitespace() {
assert_eq!(parse_cpu_list_lenient(" 0 , 1 , 2 "), vec![0, 1, 2]);
}
#[test]
fn cache_size_bare_number() {
assert_eq!(parse_cache_size("1024"), Some(1));
}
#[test]
fn cache_size_empty_string() {
assert_eq!(parse_cache_size(""), None);
}
#[test]
fn cache_size_whitespace_only() {
assert_eq!(parse_cache_size(" "), None);
}
#[test]
fn numa_aligned_cpuset_two_nodes() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert_eq!(t.total_cpus(), 16);
assert_eq!(t.num_numa_nodes(), 2);
assert_eq!(t.num_llcs(), 4);
let node0: BTreeSet<usize> = t.numa_aligned_cpuset(0);
let node1: BTreeSet<usize> = t.numa_aligned_cpuset(1);
let expected0: BTreeSet<usize> = (0..8).collect();
assert_eq!(node0, expected0);
let expected1: BTreeSet<usize> = (8..16).collect();
assert_eq!(node1, expected1);
}
proptest::proptest! {
#[test]
fn prop_parse_cpu_list_never_panics(s in "\\PC{0,120}") {
if let Ok(cpus) = parse_cpu_list(&s) {
for w in cpus.windows(2) {
prop_assert!(w[0] <= w[1], "parse_cpu_list not sorted: {cpus:?}");
}
}
}
#[test]
fn prop_parse_cpu_list_single_cpu(cpu in 0usize..256) {
let result = parse_cpu_list(&cpu.to_string()).unwrap();
assert_eq!(result, vec![cpu]);
}
#[test]
fn prop_parse_cpu_list_range_sorted(lo in 0usize..128, span in 1usize..64) {
let hi = lo + span;
let result = parse_cpu_list(&format!("{lo}-{hi}")).unwrap();
assert_eq!(result.len(), span + 1);
assert_eq!(*result.first().unwrap(), lo);
assert_eq!(*result.last().unwrap(), hi);
for w in result.windows(2) {
assert!(w[0] <= w[1]);
}
}
#[test]
fn prop_parse_cpu_list_lenient_never_panics(s in "\\PC{0,120}") {
let cpus = parse_cpu_list_lenient(&s);
for w in cpus.windows(2) {
prop_assert!(w[0] <= w[1], "parse_cpu_list_lenient not sorted: {cpus:?}");
}
}
#[test]
fn prop_parse_cpu_list_lenient_superset_of_strict(
lo in 0usize..64,
hi in 64usize..128,
) {
let s = format!("{lo}-{hi}");
let strict = parse_cpu_list(&s).unwrap();
let lenient = parse_cpu_list_lenient(&s);
assert_eq!(strict, lenient);
}
#[test]
fn prop_parse_cpu_list_roundtrip(
cpus in proptest::collection::btree_set(0usize..256, 1..16),
) {
let s: String = cpus.iter().map(|c| c.to_string()).collect::<Vec<_>>().join(",");
let parsed = parse_cpu_list(&s).unwrap();
let roundtrip: std::collections::BTreeSet<usize> = parsed.into_iter().collect();
assert_eq!(cpus, roundtrip);
}
}
#[test]
fn numa_node_ids_synthetic() {
let t = TestTopology::synthetic(8, 2);
assert_eq!(*t.numa_node_ids(), [0, 1].into_iter().collect());
}
#[test]
fn numa_nodes_for_cpuset_single_node() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
let cpuset: BTreeSet<usize> = (0..4).collect();
assert_eq!(t.numa_nodes_for_cpuset(&cpuset), [0].into_iter().collect());
}
#[test]
fn numa_nodes_for_cpuset_both_nodes() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
let cpuset: BTreeSet<usize> = [0, 8].into_iter().collect();
assert_eq!(
t.numa_nodes_for_cpuset(&cpuset),
[0, 1].into_iter().collect()
);
}
#[test]
fn numa_nodes_for_cpuset_empty() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert!(t.numa_nodes_for_cpuset(&BTreeSet::new()).is_empty());
}
#[test]
fn from_vm_topology_numa_distance_local() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert_eq!(t.numa_distance(0, 0), 10);
assert_eq!(t.numa_distance(1, 1), 10);
}
#[test]
fn from_vm_topology_numa_distance_remote() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert_eq!(t.numa_distance(0, 1), 20);
assert_eq!(t.numa_distance(1, 0), 20);
}
#[test]
fn from_vm_topology_numa_distance_single_node() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 1));
assert_eq!(t.numa_distance(0, 0), 10);
}
#[test]
fn numa_distance_invalid_node() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert_eq!(t.numa_distance(0, 99), 255);
assert_eq!(t.numa_distance(99, 0), 255);
}
#[test]
fn synthetic_distances_default() {
let t = TestTopology::synthetic(8, 2);
assert_eq!(t.numa_distance(0, 0), 10);
assert_eq!(t.numa_distance(0, 1), 20);
assert_eq!(t.numa_distance(1, 0), 20);
}
#[test]
fn node_meminfo_used_kb() {
let mi = NodeMemInfo {
total_kb: 1024,
free_kb: 256,
};
assert_eq!(mi.used_kb(), 768);
}
#[test]
fn node_meminfo_used_kb_saturates() {
let mi = NodeMemInfo {
total_kb: 0,
free_kb: 100,
};
assert_eq!(mi.used_kb(), 0);
}
#[test]
fn from_vm_topology_no_meminfo() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert!(t.node_meminfo(0).is_none());
assert!(t.node_meminfo(1).is_none());
}
#[test]
fn synthetic_no_meminfo() {
let t = TestTopology::synthetic(8, 2);
assert!(t.node_meminfo(0).is_none());
}
#[test]
fn from_vm_topology_not_memory_only() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert!(!t.is_memory_only(0));
assert!(!t.is_memory_only(1));
}
#[test]
fn is_memory_only_nonexistent_node() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(2, 4, 4, 1));
assert!(!t.is_memory_only(99));
}
#[test]
fn llc_aligned_cpuset_out_of_range_returns_empty() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 1));
assert_eq!(t.num_llcs(), 2);
let empty = t.llc_aligned_cpuset(99);
assert!(
empty.is_empty(),
"out-of-range LLC idx must return empty, got {empty:?}"
);
}
#[test]
fn cpus_in_llc_out_of_range_returns_empty_slice() {
let t = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 1));
assert_eq!(t.cpus_in_llc(99), &[] as &[usize]);
}
#[test]
#[should_panic(expected = "non-zero llcs")]
fn from_vm_topology_rejects_zero_llcs() {
let bad = crate::vmm::topology::Topology {
llcs: 0,
cores_per_llc: 2,
threads_per_core: 1,
numa_nodes: 1,
nodes: None,
distances: None,
};
let _ = TestTopology::from_vm_topology(&bad);
}
#[test]
#[should_panic(expected = "num_llcs > 0")]
fn synthetic_rejects_zero_llcs() {
let _ = TestTopology::synthetic(4, 0);
}
#[test]
#[should_panic(expected = "num_cpus > 0")]
fn synthetic_rejects_zero_cpus() {
let _ = TestTopology::synthetic(0, 1);
}
#[test]
#[should_panic(expected = ">= num_llcs")]
fn synthetic_rejects_more_llcs_than_cpus() {
let _ = TestTopology::synthetic(2, 4);
}
#[test]
fn every_constructor_produces_nonzero_llcs() {
let a = TestTopology::synthetic(8, 2);
assert!(a.num_llcs() >= 1);
let b = TestTopology::from_vm_topology(&crate::vmm::topology::Topology::new(1, 2, 4, 1));
assert!(b.num_llcs() >= 1);
if let Ok(c) = TestTopology::from_system() {
assert!(
c.num_llcs() >= 1,
"from_system must always yield at least one LLC",
);
}
}
#[test]
fn synthesize_fallback_llc_populates_cpus_node_and_cores() {
let cpus = [0, 1, 3, 7];
let llc = synthesize_fallback_llc(&cpus, 2);
assert_eq!(llc.cpus(), &cpus);
assert_eq!(llc.numa_node(), 2);
assert!(llc.cache_size_kb().is_none());
assert_eq!(llc.cores().len(), cpus.len());
for &c in &cpus {
assert_eq!(
llc.cores().get(&c).map(|v| v.as_slice()),
Some(&[c][..]),
"each CPU must appear as its own single-sibling core",
);
}
assert_eq!(llc.num_cores(), cpus.len());
}
#[test]
fn synthesize_fallback_llc_empty_cpus_returns_empty_llc() {
let llc = synthesize_fallback_llc(&[], 0);
assert!(llc.cpus().is_empty());
assert_eq!(llc.numa_node(), 0);
assert!(llc.cores().is_empty());
assert_eq!(llc.num_cores(), 0);
}
}