use super::lifecycle::{DEFAULT_STACK_SIZE, parse_stack_size};
use super::spawn::free_stack;
use super::yield_ops::TAIL_CALL_COUNTER;
use super::*;
use crate::stack::{Stack, push};
use crate::value::Value;
use std::sync::atomic::{AtomicU32, Ordering};
#[test]
fn test_spawn_strand() {
    // Spawn 100 strands that each bump a shared counter once, then verify
    // every strand ran exactly once.
    unsafe {
        static COUNTER: AtomicU32 = AtomicU32::new(0);
        // Strand entry point: record one execution, return no stack.
        extern "C" fn test_entry(_stack: Stack) -> Stack {
            COUNTER.fetch_add(1, Ordering::SeqCst);
            std::ptr::null_mut()
        }
        for _ in 0..100 {
            spawn_strand(test_entry);
        }
        // Deterministic barrier instead of the previous fixed 200ms sleep,
        // which was both slow and flaky on a loaded machine. The same
        // barrier is already used after spawn_strand elsewhere in this file.
        wait_all_strands();
        assert_eq!(COUNTER.load(Ordering::SeqCst), 100);
    }
}
#[test]
fn test_scheduler_init_idempotent() {
    // Initializing the scheduler repeatedly must be a harmless no-op after
    // the first call; a panic or hang here fails the test.
    unsafe {
        for _ in 0..3 {
            scheduler_init();
        }
    }
}
#[test]
fn test_free_stack_null() {
    // Freeing a null stack pointer must be accepted as a harmless no-op.
    free_stack(std::ptr::null_mut());
}
#[test]
fn test_free_stack_valid() {
    // Allocate a test stack, push a single value onto it, and verify that
    // releasing the populated stack completes without crashing.
    unsafe {
        let populated = push(crate::stack::alloc_test_stack(), Value::Int(42));
        free_stack(populated);
    }
}
#[test]
fn test_strand_spawn_with_stack() {
    // A strand spawned with an initial stack must run exactly once and
    // receive that stack as its argument.
    unsafe {
        static COUNTER: AtomicU32 = AtomicU32::new(0);
        // Strand entry point: record the run and hand the stack back.
        extern "C" fn test_entry(stack: Stack) -> Stack {
            COUNTER.fetch_add(1, Ordering::SeqCst);
            stack
        }
        let initial_stack = push(crate::stack::alloc_test_stack(), Value::Int(99));
        strand_spawn(test_entry, initial_stack);
        // Deterministic barrier instead of the previous fixed 200ms sleep,
        // which was both slow and flaky on a loaded machine.
        wait_all_strands();
        assert_eq!(COUNTER.load(Ordering::SeqCst), 1);
    }
}
#[test]
fn test_scheduler_shutdown() {
    // Shutting the scheduler down right after init must return cleanly.
    unsafe {
        scheduler_init();
        scheduler_shutdown();
    }
}
#[test]
fn test_many_strands_stress() {
    // Stress test: 1000 strands each bump a shared counter once; after the
    // global barrier the counter must account for every strand.
    unsafe {
        static COUNTER: AtomicU32 = AtomicU32::new(0);
        extern "C" fn increment(_stack: Stack) -> Stack {
            COUNTER.fetch_add(1, Ordering::SeqCst);
            std::ptr::null_mut()
        }
        COUNTER.store(0, Ordering::SeqCst);
        (0..1000).for_each(|_| {
            strand_spawn(increment, std::ptr::null_mut());
        });
        wait_all_strands();
        assert_eq!(COUNTER.load(Ordering::SeqCst), 1000);
    }
}
#[test]
fn test_strand_ids_are_unique() {
    // Every spawn must hand back a distinct, positive strand id.
    unsafe {
        use std::collections::HashSet;
        extern "C" fn noop(_stack: Stack) -> Stack {
            std::ptr::null_mut()
        }
        // Collect the id returned by each of 100 spawns.
        let ids: Vec<_> = (0..100)
            .map(|_| strand_spawn(noop, std::ptr::null_mut()))
            .collect();
        wait_all_strands();
        let distinct: HashSet<_> = ids.iter().collect();
        assert_eq!(distinct.len(), 100, "All strand IDs should be unique");
        assert!(
            ids.iter().all(|&id| id > 0),
            "All strand IDs should be positive"
        );
    }
}
#[test]
fn test_arena_reset_with_strands() {
    // A strand that fills the arena with short-lived strings must leave the
    // arena fully reset (zero allocated bytes) once it has exited.
    unsafe {
        use crate::arena;
        use crate::seqstring::arena_string;
        // Strand body: allocate a burst of temporary arena strings and
        // confirm the arena reports live allocations while running.
        extern "C" fn create_temp_strings(stack: Stack) -> Stack {
            for i in 0..100 {
                let temp = arena_string(&format!("temporary string {}", i));
                assert!(!temp.as_str().is_empty());
            }
            let stats = arena::arena_stats();
            assert!(stats.allocated_bytes > 0, "Arena should have allocations");
            stack
        }
        // Start from a clean arena so the post-condition is unambiguous.
        arena::arena_reset();
        strand_spawn(create_temp_strings, std::ptr::null_mut());
        wait_all_strands();
        let stats_after = arena::arena_stats();
        assert_eq!(
            stats_after.allocated_bytes, 0,
            "Arena should be reset after strand exits"
        );
    }
}
#[test]
fn test_arena_with_channel_send() {
    // End-to-end check that an arena-allocated string survives a channel
    // send from one strand and a receive in another.
    //
    // Refcount choreography (order matters): extern "C" strand entries can't
    // capture, so the channel Arc is smuggled to them through a static raw
    // pointer. Each strand does `Arc::from_raw` + `Arc::clone` + `forget`,
    // which borrows the pointer without net refcount change; the two
    // `mem::forget(channel.clone())` calls below donate one strong count per
    // strand to keep the channel alive while they run.
    unsafe {
        use crate::channel::{close_channel, make_channel, receive, send};
        use crate::stack::{pop, push};
        use crate::value::Value;
        use std::sync::Arc;
        use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
        // How many messages the receiver strand successfully consumed.
        static RECEIVED_COUNT: AtomicU32 = AtomicU32::new(0);
        // Raw channel pointer handed to the strands (0 = not yet published).
        static CHANNEL_PTR: AtomicI64 = AtomicI64::new(0);
        let stack = crate::stack::alloc_test_stack();
        let stack = make_channel(stack);
        let (stack, chan_val) = pop(stack);
        let channel = match chan_val {
            Value::Channel(ch) => ch,
            _ => panic!("Expected Channel"),
        };
        // Publish the raw pointer; Release pairs with the Acquire loads in
        // the strand entry points.
        let ch_ptr = Arc::as_ptr(&channel) as i64;
        CHANNEL_PTR.store(ch_ptr, Ordering::Release);
        // Donate one strong count each to the sender and receiver strands so
        // the channel outlives this scope while they use the raw pointer.
        std::mem::forget(channel.clone());
        std::mem::forget(channel.clone());
        extern "C" fn sender(_stack: Stack) -> Stack {
            use crate::seqstring::arena_string;
            use crate::value::ChannelData;
            use std::sync::Arc;
            unsafe {
                let ch_ptr = CHANNEL_PTR.load(Ordering::Acquire) as *const ChannelData;
                // from_raw + clone + forget: borrow the published Arc without
                // changing the refcount; only the clone is consumed by send().
                let channel = Arc::from_raw(ch_ptr);
                let channel_clone = Arc::clone(&channel);
                std::mem::forget(channel);
                // The payload lives in the arena — this is the allocation
                // whose survival across the channel hop is under test.
                let msg = arena_string("Hello from sender!");
                let stack = push(crate::stack::alloc_test_stack(), Value::String(msg));
                let stack = push(stack, Value::Channel(channel_clone));
                send(stack)
            }
        }
        extern "C" fn receiver(_stack: Stack) -> Stack {
            use crate::value::ChannelData;
            use std::sync::Arc;
            use std::sync::atomic::Ordering;
            unsafe {
                let ch_ptr = CHANNEL_PTR.load(Ordering::Acquire) as *const ChannelData;
                // Same borrow-without-owning dance as in sender().
                let channel = Arc::from_raw(ch_ptr);
                let channel_clone = Arc::clone(&channel);
                std::mem::forget(channel);
                let stack = push(
                    crate::stack::alloc_test_stack(),
                    Value::Channel(channel_clone),
                );
                let stack = receive(stack);
                // receive() leaves a success flag above the message value.
                let (stack, _success) = pop(stack);
                let (_stack, msg_val) = pop(stack);
                match msg_val {
                    Value::String(s) => {
                        assert_eq!(s.as_str(), "Hello from sender!");
                        RECEIVED_COUNT.fetch_add(1, Ordering::SeqCst);
                    }
                    _ => panic!("Expected String"),
                }
                std::ptr::null_mut()
            }
        }
        spawn_strand(sender);
        spawn_strand(receiver);
        wait_all_strands();
        assert_eq!(
            RECEIVED_COUNT.load(Ordering::SeqCst),
            1,
            "Receiver should have received message"
        );
        // Consume the original Arc by closing the channel.
        let stack = push(stack, Value::Channel(channel));
        close_channel(stack);
    }
}
#[test]
fn test_no_memory_leak_over_many_iterations() {
    // Regression guard against arena growth: run 10k spawn/reset cycles and
    // require the arena to report zero allocated bytes throughout.
    unsafe {
        use crate::arena;
        use crate::seqstring::arena_string;
        // Strand body: allocate a handful of arena strings and return.
        extern "C" fn allocate_strings_and_exit(stack: Stack) -> Stack {
            for i in 0..50 {
                let temp = arena_string(&format!("request header {}", i));
                assert!(!temp.as_str().is_empty());
            }
            stack
        }
        let iterations = 10_000;
        for round in 0..iterations {
            arena::arena_reset();
            strand_spawn(allocate_strings_and_exit, std::ptr::null_mut());
            wait_all_strands();
            // Spot-check every 1000 rounds rather than every round.
            if round % 1000 == 0 {
                let stats = arena::arena_stats();
                assert_eq!(
                    stats.allocated_bytes, 0,
                    "Arena not reset after iteration {} (leaked {} bytes)",
                    round, stats.allocated_bytes
                );
            }
        }
        let final_stats = arena::arena_stats();
        assert_eq!(
            final_stats.allocated_bytes, 0,
            "Arena leaked memory after {} iterations ({} bytes)",
            iterations, final_stats.allocated_bytes
        );
        println!(
            "✓ Memory leak test passed: {} iterations with no growth",
            iterations
        );
    }
}
#[test]
fn test_parse_stack_size_valid() {
    // Well-formed positive integers parse to their exact value.
    for (input, expected) in [("2097152", 2097152), ("1", 1), ("999999999", 999999999)] {
        assert_eq!(parse_stack_size(Some(input.to_string())), expected);
    }
}
#[test]
fn test_parse_stack_size_none() {
    // Absent configuration falls back to the compiled-in default.
    assert_eq!(parse_stack_size(None), DEFAULT_STACK_SIZE);
}
#[test]
fn test_parse_stack_size_zero() {
    // A zero stack size is rejected and replaced by the default.
    assert_eq!(parse_stack_size(Some("0".to_string())), DEFAULT_STACK_SIZE);
}
#[test]
fn test_parse_stack_size_invalid() {
    // Anything that is not a positive integer falls back to the default:
    // non-numeric text, negatives, the empty string, and fractional values.
    for bad in ["invalid", "-100", "", "1.5"] {
        assert_eq!(parse_stack_size(Some(bad.to_string())), DEFAULT_STACK_SIZE);
    }
}
#[test]
#[cfg(feature = "diagnostics")]
fn test_strand_registry_basic() {
    // Registration hands out sequential slot indices; unregister frees one
    // and unknown ids are rejected.
    let registry = StrandRegistry::new(10);
    assert_eq!(registry.register(1), Some(0));
    assert_eq!(registry.register(2), Some(1));
    assert_eq!(registry.register(3), Some(2));
    let active: Vec<_> = registry.active_strands().collect();
    assert_eq!(active.len(), 3);
    assert!(registry.unregister(2));
    let active: Vec<_> = registry.active_strands().collect();
    assert_eq!(active.len(), 2);
    // Unregistering an id that was never registered reports failure.
    assert!(!registry.unregister(999));
}
#[test]
#[cfg(feature = "diagnostics")]
fn test_strand_registry_overflow() {
    // With capacity 3, the first three registrations succeed; later ones
    // are rejected and each rejection bumps overflow_count.
    let registry = StrandRegistry::new(3);
    for id in 1..=3 {
        assert!(registry.register(id).is_some());
    }
    assert!(registry.register(4).is_none());
    assert_eq!(registry.overflow_count.load(Ordering::Relaxed), 1);
    assert!(registry.register(5).is_none());
    assert_eq!(registry.overflow_count.load(Ordering::Relaxed), 2);
}
#[test]
#[cfg(feature = "diagnostics")]
fn test_strand_registry_slot_reuse() {
    // A slot vacated by unregister must be reusable by a later register.
    let registry = StrandRegistry::new(3);
    for id in [1, 2, 3] {
        registry.register(id);
    }
    registry.unregister(2);
    assert!(registry.register(4).is_some());
    assert_eq!(registry.active_strands().count(), 3);
}
#[test]
#[cfg(feature = "diagnostics")]
fn test_strand_registry_concurrent_stress() {
    use std::sync::Arc;
    use std::thread;
    // 100 OS threads contend for 50 slots: each registers its own id,
    // yields, then unregisters. The registry must end empty, with no
    // panics or lost slots.
    let registry = Arc::new(StrandRegistry::new(50));
    let workers: Vec<_> = (1..=100u64)
        .map(|id| {
            let reg = Arc::clone(&registry);
            thread::spawn(move || {
                // Registration may fail (over capacity) — that's fine here.
                let _ = reg.register(id);
                thread::yield_now();
                reg.unregister(id);
            })
        })
        .collect();
    for worker in workers {
        worker.join().unwrap();
    }
    assert_eq!(registry.active_strands().count(), 0);
}
#[test]
fn test_strand_lifecycle_counters() {
    // Snapshot the global lifecycle counters, run ten strands, then check
    // both counters advanced by at least ten (">=" because other tests may
    // spawn strands concurrently).
    unsafe {
        let initial_spawned = TOTAL_SPAWNED.load(Ordering::Relaxed);
        let initial_completed = TOTAL_COMPLETED.load(Ordering::Relaxed);
        static COUNTER: AtomicU32 = AtomicU32::new(0);
        extern "C" fn simple_work(_stack: Stack) -> Stack {
            COUNTER.fetch_add(1, Ordering::SeqCst);
            std::ptr::null_mut()
        }
        COUNTER.store(0, Ordering::SeqCst);
        (0..10).for_each(|_| {
            strand_spawn(simple_work, std::ptr::null_mut());
        });
        wait_all_strands();
        assert!(
            TOTAL_SPAWNED.load(Ordering::Relaxed) >= initial_spawned + 10,
            "TOTAL_SPAWNED should have increased by at least 10"
        );
        assert!(
            TOTAL_COMPLETED.load(Ordering::Relaxed) >= initial_completed + 10,
            "TOTAL_COMPLETED should have increased by at least 10"
        );
        assert_eq!(COUNTER.load(Ordering::SeqCst), 10);
    }
}
#[test]
fn test_maybe_yield_disabled_by_default() {
    // Hammering the yield hook must be a cheap no-op — no panic, no hang.
    (0..1000).for_each(|_| patch_seq_maybe_yield());
}
#[test]
fn test_tail_call_counter_increments() {
    // Smoke test: drive the yield hook a few times and make sure the
    // thread-local tail-call counter machinery doesn't panic.
    // NOTE(review): despite the name, nothing is asserted about the counter
    // value — presumably increments are gated on yielding being enabled
    // (see test_maybe_yield_disabled_by_default); confirm whether
    // `counter.get()` should be asserted to have advanced here.
    TAIL_CALL_COUNTER.with(|counter| {
        let initial = counter.get();
        patch_seq_maybe_yield();
        patch_seq_maybe_yield();
        patch_seq_maybe_yield();
        let _ = counter.get();
        // Restore the thread-local so later tests on this thread see an
        // unmodified counter.
        counter.set(initial);
    });
}
#[test]
fn test_counter_overflow_safety() {
    // Park the tail-call counter just below u64::MAX and drive the hook
    // past the wrap point; the test passes if nothing panics (i.e. the
    // counter presumably uses wrapping arithmetic — confirm in yield_ops).
    TAIL_CALL_COUNTER.with(|counter| {
        let initial = counter.get();
        counter.set(u64::MAX - 1);
        patch_seq_maybe_yield();
        patch_seq_maybe_yield();
        patch_seq_maybe_yield();
        // Restore the thread-local for subsequent tests on this thread.
        counter.set(initial);
    });
}