use numalloc::NumaAlloc;
use std::alloc::{GlobalAlloc, Layout};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex, mpsc};
use std::thread;
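// Basic sanity check: a single 64-byte allocation can be written to and freed.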
#[test]
fn small_alloc_dealloc() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xAB, 64);
ALLOC.dealloc(ptr, layout);
}
}
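// Every small size class from 8 B to 16 KiB can be allocated, written, and freed.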
#[test]
fn all_size_classes() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
for &size in &[8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384] {
let layout = Layout::from_size_align(size, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null(), "failed for size {size}");
std::ptr::write_bytes(ptr, 0xCD, size);
ALLOC.dealloc(ptr, layout);
}
}
}
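// A 1 MiB allocation with 4 KiB alignment succeeds and comes back page-aligned.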
#[test]
fn large_alloc_dealloc() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let size = 1024 * 1024;
let layout = Layout::from_size_align(size, 4096).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
assert_eq!(ptr as usize % 4096, 0);
std::ptr::write_bytes(ptr, 0xEF, size);
ALLOC.dealloc(ptr, layout);
}
}
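// Requested power-of-two alignments from 8 up to 4096 are honored.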
#[test]
fn alignment_power_of_two() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
for align_shift in 3..=12 {
let align = 1usize << align_shift;
let layout = Layout::from_size_align(align, align).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
assert_eq!(ptr as usize % align, 0, "misaligned for align={align}");
ALLOC.dealloc(ptr, layout);
}
}
}
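// Alloc/free the same layout 100 times; 100 distinct addresses would mean freed blocks are never recycled.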
#[test]
fn reuse_after_free() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(128, 8).unwrap();
let mut seen = std::collections::HashSet::new();
for _ in 0..100 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
ALLOC.dealloc(ptr, layout);
seen.insert(ptr as usize);
}
assert!(
seen.len() < 100,
"expected reuse, got {} unique ptrs",
seen.len()
);
}
}
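// 10,000 simultaneously live allocations must all have distinct addresses.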
#[test]
fn many_allocs() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let mut ptrs: Vec<*mut u8> = Vec::new();
for _ in 0..10_000 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0x42, 64);
ptrs.push(ptr);
}
let mut sorted = ptrs.clone();
sorted.sort();
sorted.dedup();
assert_eq!(sorted.len(), ptrs.len(), "duplicate pointers detected");
for ptr in ptrs {
ALLOC.dealloc(ptr, layout);
}
}
}
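// Eight threads each allocate and free 2,000 blocks concurrently, with no cross-thread frees.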
#[test]
fn multithreaded_alloc_dealloc() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let handles: Vec<_> = (0..8)
.map(|_| {
thread::spawn(|| unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let mut ptrs = Vec::new();
for _ in 0..2_000 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0x55, 64);
ptrs.push(ptr);
}
for ptr in ptrs {
ALLOC.dealloc(ptr, layout);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
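// Blocks allocated on one thread are freed on a different thread, with addresses handed over via a channel.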
#[test]
fn cross_thread_dealloc() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let (tx, rx) = mpsc::channel();
let producer = thread::spawn(move || unsafe {
let layout = Layout::from_size_align(256, 8).unwrap();
let mut addrs = Vec::new();
for _ in 0..200 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xAA, 256);
addrs.push(ptr as usize);
}
tx.send(addrs).unwrap();
});
producer.join().unwrap();
let addrs = rx.recv().unwrap();
let consumer = thread::spawn(move || unsafe {
let layout = Layout::from_size_align(256, 8).unwrap();
for addr in addrs {
ALLOC.dealloc(addr as *mut u8, layout);
}
});
consumer.join().unwrap();
}
#[test]
fn mixed_sizes() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let sizes: &[usize] = &[8, 17, 33, 100, 500, 1024, 4000, 8192, 16384, 32768, 100_000];
let mut ptrs = Vec::new();
for &size in sizes {
let layout = Layout::from_size_align(size, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null(), "failed for size {size}");
std::ptr::write_bytes(ptr, 0xBB, size);
ptrs.push((ptr, layout));
}
for (ptr, layout) in ptrs {
ALLOC.dealloc(ptr, layout);
}
}
}
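// alloc_zeroed must return zeroed memory even if it reuses the block that was just dirtied and freed.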
#[test]
fn alloc_zeroed_small() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(256, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xFF, 256);
ALLOC.dealloc(ptr, layout);
let ptr2 = ALLOC.alloc_zeroed(layout);
assert!(!ptr2.is_null());
let slice = std::slice::from_raw_parts(ptr2, 256);
assert!(
slice.iter().all(|&b| b == 0),
"alloc_zeroed returned non-zero memory"
);
ALLOC.dealloc(ptr2, layout);
}
}
#[test]
fn alloc_zeroed_large() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let size = 128 * 1024;
let layout = Layout::from_size_align(size, 8).unwrap();
let ptr = ALLOC.alloc_zeroed(layout);
assert!(!ptr.is_null());
let slice = std::slice::from_raw_parts(ptr, size);
assert!(
slice.iter().all(|&b| b == 0),
"large alloc_zeroed not zeroed"
);
ALLOC.dealloc(ptr, layout);
}
}
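// Growing 60 -> 63 bytes is expected to stay within one size class and hand back the original pointer.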
#[test]
fn realloc_same_size_class() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(60, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xAA, 60);
let ptr2 = ALLOC.realloc(ptr, layout, 63);
assert_eq!(ptr, ptr2, "same size class should return same pointer");
ALLOC.dealloc(ptr2, Layout::from_size_align(63, 8).unwrap());
}
}
#[test]
fn realloc_grow() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let old_layout = Layout::from_size_align(32, 8).unwrap();
let ptr = ALLOC.alloc(old_layout);
assert!(!ptr.is_null());
for i in 0..32u8 {
*ptr.add(i as usize) = i;
}
let new_ptr = ALLOC.realloc(ptr, old_layout, 256);
assert!(!new_ptr.is_null());
let slice = std::slice::from_raw_parts(new_ptr, 32);
for (i, &b) in slice.iter().enumerate() {
assert_eq!(b, i as u8, "data not preserved at byte {i}");
}
ALLOC.dealloc(new_ptr, Layout::from_size_align(256, 8).unwrap());
}
}
#[test]
fn realloc_shrink() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let old_layout = Layout::from_size_align(4096, 8).unwrap();
let ptr = ALLOC.alloc(old_layout);
assert!(!ptr.is_null());
for i in 0..64u8 {
*ptr.add(i as usize) = i;
}
let new_ptr = ALLOC.realloc(ptr, old_layout, 64);
assert!(!new_ptr.is_null());
let slice = std::slice::from_raw_parts(new_ptr, 64);
for (i, &b) in slice.iter().enumerate() {
assert_eq!(b, i as u8, "data not preserved at byte {i}");
}
ALLOC.dealloc(new_ptr, Layout::from_size_align(64, 8).unwrap());
}
}
#[test]
fn realloc_small_to_large() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let old_layout = Layout::from_size_align(128, 8).unwrap();
let ptr = ALLOC.alloc(old_layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xBB, 128);
let big = 256 * 1024;
let new_ptr = ALLOC.realloc(ptr, old_layout, big);
assert!(!new_ptr.is_null());
let slice = std::slice::from_raw_parts(new_ptr, 128);
assert!(slice.iter().all(|&b| b == 0xBB));
ALLOC.dealloc(new_ptr, Layout::from_size_align(big, 8).unwrap());
}
}
#[test]
fn realloc_large_to_small() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let big = 128 * 1024;
let old_layout = Layout::from_size_align(big, 8).unwrap();
let ptr = ALLOC.alloc(old_layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xCC, 64);
let new_ptr = ALLOC.realloc(ptr, old_layout, 64);
assert!(!new_ptr.is_null());
let slice = std::slice::from_raw_parts(new_ptr, 64);
assert!(slice.iter().all(|&b| b == 0xCC));
ALLOC.dealloc(new_ptr, Layout::from_size_align(64, 8).unwrap());
}
}
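// Four threads interleave several allocation sizes with out-of-order frees (swap_remove) once enough blocks are live.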
#[test]
fn stress_concurrent_mixed() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let handles: Vec<_> = (0..4)
.map(|tid| {
thread::spawn(move || unsafe {
let mut ptrs: Vec<(*mut u8, Layout)> = Vec::new();
for i in 0..5_000 {
let size = match (tid + i) % 5 {
0 => 16,
1 => 128,
2 => 1024,
3 => 8192,
_ => 64,
};
let layout = Layout::from_size_align(size, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xDD, size);
ptrs.push((ptr, layout));
if ptrs.len() > 20 && i % 3 == 0 {
let (p, l) = ptrs.swap_remove(0);
ALLOC.dealloc(p, l);
}
}
for (p, l) in ptrs {
ALLOC.dealloc(p, l);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
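// One wave of threads allocates; a second wave frees those batches, so every free happens on a thread that did not allocate the block.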
#[test]
fn cross_thread_dealloc_many_to_many() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 8;
const ALLOCS_PER_THREAD: usize = 500;
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let collected: Arc<Mutex<Vec<Vec<usize>>>> = Arc::new(Mutex::new(Vec::new()));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|_| {
let barrier = Arc::clone(&barrier);
let collected = Arc::clone(&collected);
thread::spawn(move || {
barrier.wait();
let mut addrs = Vec::with_capacity(ALLOCS_PER_THREAD);
for _ in 0..ALLOCS_PER_THREAD {
unsafe {
let layout = Layout::from_size_align(128, 8).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xAA, 128);
addrs.push(ptr as usize);
}
}
collected.lock().unwrap().push(addrs);
})
})
.collect();
for h in handles {
h.join().unwrap();
}
let mut all = collected.lock().unwrap();
let batches: Vec<Vec<usize>> = all.drain(..).collect();
drop(all);
let handles: Vec<_> = batches
.into_iter()
.map(|addrs| {
thread::spawn(move || {
let layout = Layout::from_size_align(128, 8).unwrap();
for addr in addrs {
unsafe {
ALLOC.dealloc(addr as *mut u8, layout);
}
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
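// Each thread grows a block through increasing sizes and then shrinks it back down, checking that the preserved prefix keeps its fill pattern at every step.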
#[test]
fn concurrent_realloc() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let handles: Vec<_> = (0..8)
.map(|_| {
thread::spawn(|| unsafe {
let sizes = [16, 64, 256, 1024, 4096, 16384];
let mut current_layout = Layout::from_size_align(8, 8).unwrap();
let mut ptr = ALLOC.alloc(current_layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0x11, current_layout.size());
for &new_size in &sizes {
let new_ptr = ALLOC.realloc(ptr, current_layout, new_size);
assert!(!new_ptr.is_null());
let check_len = current_layout.size().min(new_size);
let slice = std::slice::from_raw_parts(new_ptr, check_len);
assert!(
slice.iter().all(|&b| b == 0x11),
"data corruption during realloc grow"
);
std::ptr::write_bytes(new_ptr, 0x11, new_size);
ptr = new_ptr;
current_layout = Layout::from_size_align(new_size, 8).unwrap();
}
for &new_size in sizes.iter().rev().skip(1) {
let new_ptr = ALLOC.realloc(ptr, current_layout, new_size);
assert!(!new_ptr.is_null());
let check_len = new_size;
let slice = std::slice::from_raw_parts(new_ptr, check_len);
assert!(
slice.iter().all(|&b| b == 0x11),
"data corruption during realloc shrink"
);
ptr = new_ptr;
current_layout = Layout::from_size_align(new_size, 8).unwrap();
}
ALLOC.dealloc(ptr, current_layout);
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
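// Threads alternate dirtying-and-freeing a block with alloc_zeroed of the same layout, so reused memory must still come back zeroed.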
#[test]
fn concurrent_alloc_zeroed() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let handles: Vec<_> = (0..8)
.map(|_| {
thread::spawn(|| unsafe {
let layout = Layout::from_size_align(512, 8).unwrap();
for _ in 0..500 {
let dirty = ALLOC.alloc(layout);
assert!(!dirty.is_null());
std::ptr::write_bytes(dirty, 0xFF, 512);
ALLOC.dealloc(dirty, layout);
let clean = ALLOC.alloc_zeroed(layout);
assert!(!clean.is_null());
let slice = std::slice::from_raw_parts(clean, 512);
assert!(
slice.iter().all(|&b| b == 0),
"alloc_zeroed returned non-zero memory under concurrency"
);
ALLOC.dealloc(clean, layout);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
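// Sixteen threads hammer a single 64-byte size class; each thread frees in reverse (LIFO) order.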
#[test]
fn high_contention_single_size_class() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 16;
const OPS: usize = 2_000;
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|_| {
let barrier = Arc::clone(&barrier);
thread::spawn(move || {
barrier.wait();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let mut ptrs = Vec::with_capacity(OPS);
for _ in 0..OPS {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0x77, 64);
ptrs.push(ptr);
}
for ptr in ptrs.into_iter().rev() {
ALLOC.dealloc(ptr, layout);
}
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
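// 64 short-lived threads each do a small burst of allocations, exercising repeated thread creation and teardown.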
#[test]
fn rapid_thread_churn() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let mut handles = Vec::new();
for _ in 0..64 {
handles.push(thread::spawn(|| unsafe {
let layout = Layout::from_size_align(256, 8).unwrap();
let mut ptrs = Vec::new();
for _ in 0..50 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xCC, 256);
ptrs.push(ptr);
}
for ptr in ptrs {
ALLOC.dealloc(ptr, layout);
}
}));
}
for h in handles {
h.join().unwrap();
}
}
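// Producers push allocated addresses into a shared queue while consumers concurrently drain and free them; the done flag is set only after every producer has joined.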
#[test]
fn producer_consumer_concurrent() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const PRODUCERS: usize = 4;
const CONSUMERS: usize = 4;
const ITEMS_PER_PRODUCER: usize = 2_000;
let queue: Arc<Mutex<Vec<usize>>> = Arc::new(Mutex::new(Vec::new()));
let done = Arc::new(AtomicBool::new(false));
let prod_handles: Vec<_> = (0..PRODUCERS)
.map(|_| {
let queue = Arc::clone(&queue);
thread::spawn(move || unsafe {
let layout = Layout::from_size_align(128, 8).unwrap();
for _ in 0..ITEMS_PER_PRODUCER {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0xEE, 128);
queue.lock().unwrap().push(ptr as usize);
}
})
})
.collect();
let layout = Layout::from_size_align(128, 8).unwrap();
let consumer_handles: Vec<_> = (0..CONSUMERS)
.map(|_| {
let queue = Arc::clone(&queue);
let done = Arc::clone(&done);
thread::spawn(move || {
loop {
let batch: Vec<usize> = {
let mut q = queue.lock().unwrap();
q.drain(..).collect()
};
if batch.is_empty() {
if done.load(Ordering::Acquire) {
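// Drain once more: items may have been pushed between this thread's empty drain and observing the done flag.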
let final_batch: Vec<usize> = {
let mut q = queue.lock().unwrap();
q.drain(..).collect()
};
for addr in final_batch {
unsafe { ALLOC.dealloc(addr as *mut u8, layout) };
}
break;
}
thread::yield_now();
continue;
}
for addr in batch {
unsafe { ALLOC.dealloc(addr as *mut u8, layout) };
}
}
})
})
.collect();
for h in prod_handles {
h.join().unwrap();
}
done.store(true, Ordering::Release);
for h in consumer_handles {
h.join().unwrap();
}
}
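// Each thread repeatedly allocates 100 blocks and then frees them all, cycling fill/drain ten times under concurrency.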
#[test]
fn concurrent_drain_overflow() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 8;
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|_| {
let barrier = Arc::clone(&barrier);
thread::spawn(move || {
barrier.wait();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let mut ptrs = Vec::new();
for _ in 0..10 {
for _ in 0..100 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
std::ptr::write_bytes(ptr, 0x33, 64);
ptrs.push(ptr);
}
for ptr in ptrs.drain(..) {
ALLOC.dealloc(ptr, layout);
}
}
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
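// Threads mix page-aligned 64 KiB allocations with small sizes, freeing some blocks out of order.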
#[test]
fn concurrent_large_and_small() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let handles: Vec<_> = (0..8)
.map(|tid| {
thread::spawn(move || unsafe {
let mut ptrs: Vec<(*mut u8, Layout)> = Vec::new();
for i in 0..500 {
let (size, align) = if (tid + i) % 7 == 0 {
(64 * 1024, 4096)
} else {
let s = match i % 4 {
0 => 32,
1 => 256,
2 => 2048,
_ => 8192,
};
(s, 8)
};
let layout = Layout::from_size_align(size, align).unwrap();
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
if align > 1 {
assert_eq!(ptr as usize % align, 0, "misaligned at size={size}");
}
std::ptr::write_bytes(ptr, 0x99, size);
ptrs.push((ptr, layout));
if ptrs.len() > 30 && i % 5 == 0 {
let (p, l) = ptrs.swap_remove(0);
ALLOC.dealloc(p, l);
}
}
for (p, l) in ptrs {
ALLOC.dealloc(p, l);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
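// With all allocations held live until every thread has finished allocating, any duplicate address across threads is a genuine double-handout.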
#[test]
fn no_duplicate_pointers_concurrent() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 8;
const ALLOCS: usize = 1_000;
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let all_ptrs: Arc<Mutex<Vec<usize>>> = Arc::new(Mutex::new(Vec::new()));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|_| {
let barrier = Arc::clone(&barrier);
let all_ptrs = Arc::clone(&all_ptrs);
thread::spawn(move || {
barrier.wait();
let layout = Layout::from_size_align(64, 8).unwrap();
let mut local_ptrs = Vec::with_capacity(ALLOCS);
unsafe {
for _ in 0..ALLOCS {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
local_ptrs.push(ptr as usize);
}
}
all_ptrs.lock().unwrap().extend_from_slice(&local_ptrs);
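// Wait until every thread has finished allocating before any frees, so addresses cannot be legitimately reused yet.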
barrier.wait();
unsafe {
for addr in &local_ptrs {
ALLOC.dealloc(*addr as *mut u8, layout);
}
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
let ptrs = all_ptrs.lock().unwrap();
let mut sorted = ptrs.clone();
sorted.sort();
sorted.dedup();
assert_eq!(
sorted.len(),
ptrs.len(),
"duplicate pointers detected across threads: {} unique out of {}",
sorted.len(),
ptrs.len()
);
}
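// Leak heuristic: after freeing one batch, a second batch with the same layout should land on at least one recycled address.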
#[test]
fn leak_check_small_reuse_after_free() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let count = 200;
let mut first_addrs = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
first_addrs.push(ptr as usize);
}
for &addr in &first_addrs {
ALLOC.dealloc(addr as *mut u8, layout);
}
let mut second_addrs = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
second_addrs.push(ptr as usize);
}
let first_set: std::collections::HashSet<usize> = first_addrs.iter().copied().collect();
let reused = second_addrs
.iter()
.filter(|a| first_set.contains(a))
.count();
assert!(
reused > 0,
"expected some reuse, got 0/{count} — possible leak"
);
for &addr in &second_addrs {
ALLOC.dealloc(addr as *mut u8, layout);
}
}
}
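// The same reuse-after-free heuristic, applied to every small size class.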
#[test]
fn leak_check_all_size_classes_reuse() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let sizes: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384];
for &size in sizes {
unsafe {
let layout = Layout::from_size_align(size, 8).unwrap();
let count = 100;
let mut first = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
first.push(ptr as usize);
}
for &addr in &first {
ALLOC.dealloc(addr as *mut u8, layout);
}
let mut second = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
second.push(ptr as usize);
}
let first_set: std::collections::HashSet<usize> = first.iter().copied().collect();
let reused = second.iter().filter(|a| first_set.contains(a)).count();
assert!(
reused > 0,
"size class {size}: 0/{count} reused — possible leak"
);
for &addr in &second {
ALLOC.dealloc(addr as *mut u8, layout);
}
}
}
}
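// After a moving realloc the old block must return to the allocator; probe allocations look for the old address to reappear.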
#[test]
fn leak_check_realloc_frees_old() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let old_layout = Layout::from_size_align(64, 8).unwrap();
let ptr = ALLOC.alloc(old_layout);
assert!(!ptr.is_null());
let old_addr = ptr as usize;
std::ptr::write_bytes(ptr, 0xAA, 64);
let new_ptr = ALLOC.realloc(ptr, old_layout, 512);
assert!(!new_ptr.is_null());
assert_ne!(new_ptr as usize, old_addr);
let mut found = false;
let mut probes = Vec::new();
for _ in 0..100 {
let p = ALLOC.alloc(old_layout);
assert!(!p.is_null());
probes.push(p);
if p as usize == old_addr {
found = true;
break;
}
}
assert!(
found,
"old address 0x{old_addr:x} never reappeared — realloc may have leaked it"
);
for p in probes {
ALLOC.dealloc(p, old_layout);
}
ALLOC.dealloc(new_ptr, Layout::from_size_align(512, 8).unwrap());
}
}
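// Blocks freed on a thread other than the one that allocated them must still become reusable.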
#[test]
fn leak_check_cross_thread_dealloc_returns() {
static ALLOC: NumaAlloc = NumaAlloc::new();
let count = 200;
let layout = Layout::from_size_align(128, 8).unwrap();
let (tx_addrs, rx_addrs) = mpsc::channel::<Vec<usize>>();
let producer = thread::spawn(move || unsafe {
let mut addrs = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
addrs.push(ptr as usize);
}
tx_addrs.send(addrs).unwrap();
});
producer.join().unwrap();
let consumer = thread::spawn(move || unsafe {
let addrs = rx_addrs.recv().unwrap();
let first_set: std::collections::HashSet<usize> = addrs.iter().copied().collect();
for &addr in &addrs {
ALLOC.dealloc(addr as *mut u8, layout);
}
let mut second = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
second.push(ptr as usize);
}
let reused = second.iter().filter(|a| first_set.contains(a)).count();
assert!(
reused > 0,
"cross-thread free: 0/{count} reused — dealloc may leak"
);
for &addr in &second {
ALLOC.dealloc(addr as *mut u8, layout);
}
});
consumer.join().unwrap();
}
#[test]
fn leak_check_drain_path() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(64, 8).unwrap();
let count = 150;
let mut first = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
first.push(ptr as usize);
}
for &addr in &first {
ALLOC.dealloc(addr as *mut u8, layout);
}
let mut second = Vec::with_capacity(count);
for _ in 0..count {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
second.push(ptr as usize);
}
let first_set: std::collections::HashSet<usize> = first.iter().copied().collect();
let reused = second.iter().filter(|a| first_set.contains(a)).count();
assert!(
reused > 0,
"drain path: 0/{count} reused — drained blocks may have leaked"
);
for &addr in &second {
ALLOC.dealloc(addr as *mut u8, layout);
}
}
}
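// Across 50 alloc/free cycles the set of distinct addresses should stay well below batch * cycles; unbounded growth would indicate a leak.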
#[test]
fn leak_check_repeated_cycles_no_growth() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let layout = Layout::from_size_align(256, 8).unwrap();
let batch = 100;
let cycles = 50;
let mut all_addrs = std::collections::HashSet::new();
for _ in 0..cycles {
let mut ptrs = Vec::with_capacity(batch);
for _ in 0..batch {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
all_addrs.insert(ptr as usize);
ptrs.push(ptr);
}
for ptr in ptrs {
ALLOC.dealloc(ptr, layout);
}
}
let max_expected = batch * 10;
assert!(
all_addrs.len() <= max_expected,
"unique addrs {} exceeds {max_expected} — suggests leak (expected ~{batch} with reuse)",
all_addrs.len()
);
}
}
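// 200 back-to-back 1 MiB alloc/free cycles; if freed large objects were never unmapped this would accumulate roughly 200 MiB of mappings.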
#[test]
fn leak_check_large_object_munmap() {
static ALLOC: NumaAlloc = NumaAlloc::new();
unsafe {
let size = 1024 * 1024;
let layout = Layout::from_size_align(size, 4096).unwrap();
for _ in 0..200 {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
assert_eq!(ptr as usize % 4096, 0);
*ptr = 0xAA;
*ptr.add(size - 1) = 0xBB;
ALLOC.dealloc(ptr, layout);
}
}
}
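// Each thread runs its own bounded-growth reuse check (30 cycles of 80 allocations) while the others do the same.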
#[test]
fn leak_check_concurrent_reuse() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 8;
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|_| {
let barrier = Arc::clone(&barrier);
thread::spawn(move || {
barrier.wait();
unsafe {
let layout = Layout::from_size_align(128, 8).unwrap();
let batch = 80;
let cycles = 30;
let mut unique = std::collections::HashSet::new();
for _ in 0..cycles {
let mut ptrs = Vec::with_capacity(batch);
for _ in 0..batch {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
unique.insert(ptr as usize);
ptrs.push(ptr);
}
for ptr in ptrs {
ALLOC.dealloc(ptr, layout);
}
}
let max_expected = batch * 12;
assert!(
unique.len() <= max_expected,
"thread saw {} unique addrs (max {max_expected}) — possible leak",
unique.len()
);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}
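// Each thread frees a batch that a different thread allocated, then checks that those blocks become available to its own subsequent allocations.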
#[test]
fn leak_check_cross_thread_round_trip() {
static ALLOC: NumaAlloc = NumaAlloc::new();
const NUM_THREADS: usize = 4;
const BATCH: usize = 32;
let layout = Layout::from_size_align(32768, 8).unwrap();
let barrier = Arc::new(Barrier::new(NUM_THREADS));
let deposit: Arc<Mutex<Vec<Vec<usize>>>> =
Arc::new(Mutex::new((0..NUM_THREADS).map(|_| Vec::new()).collect()));
let handles: Vec<_> = (0..NUM_THREADS)
.map(|tid| {
let barrier = Arc::clone(&barrier);
let deposit = Arc::clone(&deposit);
thread::spawn(move || unsafe {
let mut addrs = Vec::with_capacity(BATCH);
for _ in 0..BATCH {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
addrs.push(ptr as usize);
}
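// Hand this batch to the next thread; each thread will later free a batch it did not allocate.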
let target = (tid + 1) % NUM_THREADS;
deposit.lock().unwrap()[target] = addrs;
barrier.wait();
let to_free: Vec<usize> = {
let mut d = deposit.lock().unwrap();
std::mem::take(&mut d[tid])
};
let freed_set: std::collections::HashSet<usize> = to_free.iter().copied().collect();
for addr in &to_free {
ALLOC.dealloc(*addr as *mut u8, layout);
}
barrier.wait();
let mut second = Vec::with_capacity(BATCH);
for _ in 0..BATCH {
let ptr = ALLOC.alloc(layout);
assert!(!ptr.is_null());
second.push(ptr as usize);
}
let reused = second.iter().filter(|a| freed_set.contains(a)).count();
assert!(
reused > 0,
"thread {tid}: zero reuse out of {BATCH} — freed blocks may be leaked"
);
for &addr in &second {
ALLOC.dealloc(addr as *mut u8, layout);
}
})
})
.collect();
for h in handles {
h.join().unwrap();
}
}