seq_runtime/scheduler.rs

//! Scheduler - Green Thread Management with May
//!
//! CSP-style concurrency for Seq using May coroutines.
//! Each strand is a lightweight green thread that can communicate via channels.
//!
//! ## Non-Blocking Guarantee
//!
//! Channel operations (`send`, `receive`) use May's cooperative blocking and NEVER
//! block OS threads. However, I/O operations (`write_line`, `read_line` in io.rs)
//! currently use blocking syscalls. Future work will make all I/O non-blocking.
//!
//! ## Panic Behavior
//!
//! Functions panic on invalid input (null stacks, negative IDs, closed channels).
//! In a production system, consider implementing error channels or Result-based
//! error handling instead of panicking.
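//!
//! ## Typical Call Sequence
//!
//! A minimal sketch of how a host or generated code is expected to drive the
//! scheduler (`my_entry` here is a hypothetical `extern "C" fn(Stack) -> Stack`):
//!
//! ```ignore
//! unsafe {
//!     patch_seq_scheduler_init();                              // idempotent
//!     patch_seq_strand_spawn(my_entry, std::ptr::null_mut());  // returns the strand ID
//!     let _ = patch_seq_scheduler_run();                       // blocks until all strands exit
//! }
//! ```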

use crate::stack::Stack;
use crate::tagged_stack::StackValue;
use may::coroutine;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Condvar, Mutex, Once};

static SCHEDULER_INIT: Once = Once::new();

// Strand lifecycle tracking
//
// Design rationale:
// - ACTIVE_STRANDS: Lock-free atomic counter for the hot path (spawn/complete)
//   Every strand increments on spawn, decrements on complete. This is extremely
//   fast (lock-free atomic ops) and suitable for high-frequency operations.
//
// - SHUTDOWN_CONDVAR/MUTEX: Event-driven synchronization for the cold path (shutdown wait)
//   Used only when waiting for all strands to complete (program shutdown).
//   Condvar provides event-driven wakeup instead of polling, which is critical
//   for a systems language - no CPU waste, proper OS-level blocking.
//
// Why not track JoinHandles?
// Strands are like Erlang processes - potentially hundreds of thousands of concurrent
// entities with independent lifecycles. Storing handles would require global mutable
// state with synchronization overhead on the hot path. The counter + condvar approach
// keeps the hot path lock-free while providing proper shutdown synchronization.
pub static ACTIVE_STRANDS: AtomicUsize = AtomicUsize::new(0);
static SHUTDOWN_CONDVAR: Condvar = Condvar::new();
static SHUTDOWN_MUTEX: Mutex<()> = Mutex::new(());
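
// Shutdown handshake between the counter and the condvar, schematically:
//
//   waiter (scheduler_run / wait_all_strands)    last strand to complete
//   -----------------------------------------    --------------------------------
//   lock SHUTDOWN_MUTEX                          ACTIVE_STRANDS.fetch_sub(1) == 1
//   while ACTIVE_STRANDS > 0:                    lock SHUTDOWN_MUTEX
//       wait on SHUTDOWN_CONDVAR                 SHUTDOWN_CONDVAR.notify_all()
//
// Holding the mutex on both sides is what prevents a missed wakeup between the
// waiter's load of ACTIVE_STRANDS and its call to wait().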

// Strand lifecycle statistics (for diagnostics)
//
// These counters provide observability into the strand lifecycle without any locking.
// All operations are lock-free atomic increments/loads.
//
// - TOTAL_SPAWNED: Monotonically increasing count of all strands ever spawned
// - TOTAL_COMPLETED: Monotonically increasing count of all strands that completed
// - PEAK_STRANDS: High-water mark of concurrent strands (helps detect strand leaks)
//
// Useful diagnostics:
// - Currently running: ACTIVE_STRANDS
// - Completed successfully: TOTAL_COMPLETED
// - Potential leaks: TOTAL_SPAWNED - TOTAL_COMPLETED - ACTIVE_STRANDS > 0 (strands lost)
// - Peak concurrency: PEAK_STRANDS
pub static TOTAL_SPAWNED: AtomicU64 = AtomicU64::new(0);
pub static TOTAL_COMPLETED: AtomicU64 = AtomicU64::new(0);
pub static PEAK_STRANDS: AtomicUsize = AtomicUsize::new(0);
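
// For illustration, a diagnostics read over these counters might look like
// this (a sketch, not part of the runtime API; the cast assumes usize fits in u64):
//
//     let spawned   = TOTAL_SPAWNED.load(Ordering::Relaxed);
//     let completed = TOTAL_COMPLETED.load(Ordering::Relaxed);
//     let active    = ACTIVE_STRANDS.load(Ordering::Relaxed) as u64;
//     let lost      = spawned.saturating_sub(completed + active); // > 0 suggests leaked strands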

// Unique strand ID generation
static NEXT_STRAND_ID: AtomicU64 = AtomicU64::new(1);

// =============================================================================
// Lock-Free Strand Registry (only when diagnostics feature is enabled)
// =============================================================================
//
// A fixed-size array of slots for tracking active strands without locks.
// Each slot stores a strand ID (0 = free) and spawn timestamp.
//
// Design principles:
// - Fixed size: No dynamic allocation, predictable memory footprint
// - Lock-free: All operations use atomic CAS, no mutex contention
// - Bounded: If registry is full, strands still run but aren't tracked
// - Zero cost when not querying: Only diagnostics reads the registry
//
// Slot encoding:
// - strand_id == 0: slot is free
// - strand_id > 0: slot contains an active strand
//
// The registry size can be configured via the SEQ_STRAND_REGISTRY_SIZE env var.
// Default is 1024 slots, which is sufficient for most applications.
//
// When the "diagnostics" feature is disabled, the registry is not compiled,
// eliminating the SystemTime::now() syscall and O(n) scans on every spawn.

#[cfg(feature = "diagnostics")]
/// Default strand registry size (number of trackable concurrent strands)
const DEFAULT_REGISTRY_SIZE: usize = 1024;

#[cfg(feature = "diagnostics")]
/// A slot in the strand registry
///
/// Uses two atomics to store strand info without locks.
/// A slot is free when strand_id == 0.
pub struct StrandSlot {
    /// Strand ID (0 = free, >0 = active strand)
    pub strand_id: AtomicU64,
    /// Spawn timestamp (seconds since UNIX epoch, for detecting stuck strands)
    pub spawn_time: AtomicU64,
}

#[cfg(feature = "diagnostics")]
impl StrandSlot {
    const fn new() -> Self {
        Self {
            strand_id: AtomicU64::new(0),
            spawn_time: AtomicU64::new(0),
        }
    }
}

#[cfg(feature = "diagnostics")]
/// Lock-free strand registry
///
/// Provides O(n) registration (scan for free slot) and O(n) unregistration.
/// This is acceptable because:
/// 1. N is bounded (default 1024)
/// 2. Registration/unregistration are infrequent compared to strand work
/// 3. No locks means no contention, just atomic ops
pub struct StrandRegistry {
    slots: Box<[StrandSlot]>,
    /// Number of strands that couldn't be registered (registry full)
    pub overflow_count: AtomicU64,
}

#[cfg(feature = "diagnostics")]
impl StrandRegistry {
    /// Create a new registry with the given capacity
    fn new(capacity: usize) -> Self {
        let mut slots = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            slots.push(StrandSlot::new());
        }
        Self {
            slots: slots.into_boxed_slice(),
            overflow_count: AtomicU64::new(0),
        }
    }

    /// Register a strand, returning the slot index if successful
    ///
    /// Uses CAS to atomically claim a free slot.
    /// Returns None if the registry is full (strand still runs, just not tracked).
    pub fn register(&self, strand_id: u64) -> Option<usize> {
        let spawn_time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);

        // Scan for a free slot
        for (idx, slot) in self.slots.iter().enumerate() {
            // Skip occupied slots without touching them; unconditionally storing
            // spawn_time here would clobber the timestamp of another active strand.
            if slot.strand_id.load(Ordering::Relaxed) != 0 {
                continue;
            }

            // Set spawn time first, before claiming the slot.
            // This prevents a race where a reader sees strand_id != 0 but
            // spawn_time == 0. If two registrants race on the same free slot,
            // the loser's store is harmless: both timestamps were taken at
            // registration time, and the CAS below picks a single owner.
            slot.spawn_time.store(spawn_time, Ordering::Relaxed);

            // Try to claim this slot (CAS from 0 to strand_id).
            // AcqRel ensures the spawn_time write above is visible before
            // strand_id becomes non-zero.
            if slot
                .strand_id
                .compare_exchange(0, strand_id, Ordering::AcqRel, Ordering::Relaxed)
                .is_ok()
            {
                return Some(idx);
            }
        }

        // Registry full - track overflow but strand still runs
        self.overflow_count.fetch_add(1, Ordering::Relaxed);
        None
    }

    /// Unregister a strand by ID
    ///
    /// Scans for the slot containing this strand ID and clears it.
    /// Returns true if found and cleared, false if not found.
    ///
    /// Note: ABA problem is not a concern here because strand IDs are monotonically
    /// increasing u64 values. ID reuse would require 2^64 spawns, which is practically
    /// impossible (at 1 billion spawns/sec, it would take ~584 years).
    pub fn unregister(&self, strand_id: u64) -> bool {
        for slot in self.slots.iter() {
            // Check if this slot contains our strand
            if slot
                .strand_id
                .compare_exchange(strand_id, 0, Ordering::AcqRel, Ordering::Relaxed)
                .is_ok()
            {
                // Successfully cleared the slot
                slot.spawn_time.store(0, Ordering::Release);
                return true;
            }
        }
        false
    }

    /// Iterate over active strands (for diagnostics)
    ///
    /// Returns an iterator of (strand_id, spawn_time) for non-empty slots.
    /// Note: This is a snapshot and may be slightly inconsistent due to concurrent updates.
    pub fn active_strands(&self) -> impl Iterator<Item = (u64, u64)> + '_ {
        self.slots.iter().filter_map(|slot| {
            // Acquire on strand_id synchronizes with the Release in register()
            let id = slot.strand_id.load(Ordering::Acquire);
            if id > 0 {
                // Relaxed is sufficient here - we've already synchronized via strand_id Acquire
                // and spawn_time is written before strand_id in register()
                let time = slot.spawn_time.load(Ordering::Relaxed);
                Some((id, time))
            } else {
                None
            }
        })
    }

    /// Get the registry capacity
    pub fn capacity(&self) -> usize {
        self.slots.len()
    }
}

// Global strand registry (lazy initialized)
#[cfg(feature = "diagnostics")]
static STRAND_REGISTRY: std::sync::OnceLock<StrandRegistry> = std::sync::OnceLock::new();

/// Get or initialize the global strand registry
#[cfg(feature = "diagnostics")]
pub fn strand_registry() -> &'static StrandRegistry {
    STRAND_REGISTRY.get_or_init(|| {
        let size = std::env::var("SEQ_STRAND_REGISTRY_SIZE")
            .ok()
            .and_then(|s| s.parse().ok())
            .filter(|&v| v > 0) // guard against a useless zero-slot registry
            .unwrap_or(DEFAULT_REGISTRY_SIZE);
        StrandRegistry::new(size)
    })
}
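
// Example (diagnostics builds): flag strands running longer than 60 seconds.
// A sketch, where `now_secs` is a hypothetical Unix-epoch clock matching the
// spawn_time encoding above:
//
//     for (id, spawned_at) in strand_registry().active_strands() {
//         if now_secs.saturating_sub(spawned_at) > 60 {
//             eprintln!("strand {} has been running for {}s", id, now_secs - spawned_at);
//         }
//     }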

/// Default coroutine stack size: 128KB (0x20000 bytes)
/// Reduced from 1MB for better spawn performance (~16% faster in benchmarks).
/// Can be overridden via SEQ_STACK_SIZE environment variable.
const DEFAULT_STACK_SIZE: usize = 0x20000;

/// Parse stack size from an optional string value.
/// Returns the parsed size, or DEFAULT_STACK_SIZE if the value is missing, zero, or invalid.
/// Prints a warning to stderr for invalid values.
fn parse_stack_size(env_value: Option<String>) -> usize {
    match env_value {
        Some(val) => match val.parse::<usize>() {
            Ok(0) => {
                eprintln!(
                    "Warning: SEQ_STACK_SIZE=0 is invalid, using default {}",
                    DEFAULT_STACK_SIZE
                );
                DEFAULT_STACK_SIZE
            }
            Ok(size) => size,
            Err(_) => {
                eprintln!(
                    "Warning: SEQ_STACK_SIZE='{}' is not a valid number, using default {}",
                    val, DEFAULT_STACK_SIZE
                );
                DEFAULT_STACK_SIZE
            }
        },
        None => DEFAULT_STACK_SIZE,
    }
}
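
// For example: parse_stack_size(Some("2097152".into())) yields 2_097_152, while
// parse_stack_size(Some("0".into())) and parse_stack_size(None) both fall back
// to DEFAULT_STACK_SIZE (see the unit tests at the bottom of this file).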

/// Default coroutine pool capacity.
/// May reuses completed coroutine stacks from this pool to avoid allocations.
/// May's own default of 1000 is often too small for spawn-heavy workloads.
const DEFAULT_POOL_CAPACITY: usize = 10000;

/// Initialize the scheduler.
///
/// # Safety
/// Safe to call multiple times (idempotent via Once).
/// Configures May coroutines with appropriate stack size for LLVM-generated code.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_scheduler_init() {
    SCHEDULER_INIT.call_once(|| {
        // Configure stack size for coroutines.
        // Default is 128KB, reduced from 1MB for better spawn performance.
        // Can be overridden via the SEQ_STACK_SIZE environment variable (in bytes),
        // e.g. SEQ_STACK_SIZE=2097152 for 2MB.
        // Invalid values (non-numeric, zero) trigger a warning and fall back to the default.
        let stack_size = parse_stack_size(std::env::var("SEQ_STACK_SIZE").ok());

        // Configure coroutine pool capacity.
        // May reuses coroutine stacks from this pool to reduce allocation overhead.
        // The default of 10000 is 10x May's default (1000), better for spawn-heavy workloads.
        // Can be overridden via the SEQ_POOL_CAPACITY environment variable.
        let pool_capacity = std::env::var("SEQ_POOL_CAPACITY")
            .ok()
            .and_then(|s| s.parse().ok())
            .filter(|&v| v > 0)
            .unwrap_or(DEFAULT_POOL_CAPACITY);

        may::config()
            .set_stack_size(stack_size)
            .set_pool_capacity(pool_capacity);

        // Install SIGQUIT handler for runtime diagnostics (kill -3)
        #[cfg(feature = "diagnostics")]
        crate::diagnostics::install_signal_handler();

        // Install watchdog timer (if enabled via SEQ_WATCHDOG_SECS)
        #[cfg(feature = "diagnostics")]
        crate::watchdog::install_watchdog();
    });
}

/// Run the scheduler and wait for all coroutines to complete
///
/// # Safety
/// Returns the final stack (always null for now since May handles all scheduling).
/// This function blocks until all spawned strands have completed.
///
/// Uses a condition variable for event-driven shutdown synchronization rather than
/// polling. The mutex is only held during the wait protocol, not during strand
/// execution, so there's no contention on the hot path.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_scheduler_run() -> Stack {
    let mut guard = SHUTDOWN_MUTEX.lock().expect(
        "scheduler_run: shutdown mutex poisoned - strand panicked during shutdown synchronization",
    );

    // Wait for all strands to complete
    // The condition variable will be notified when the last strand exits
    while ACTIVE_STRANDS.load(Ordering::Acquire) > 0 {
        guard = SHUTDOWN_CONDVAR
            .wait(guard)
            .expect("scheduler_run: condvar wait failed - strand panicked during shutdown wait");
    }

    // All strands have completed
    std::ptr::null_mut()
}

/// Shutdown the scheduler
///
/// # Safety
/// Safe to call. May doesn't require explicit shutdown, so this is a no-op.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_scheduler_shutdown() {
    // May doesn't require explicit shutdown
    // This function exists for API symmetry with init
}

/// Spawn a strand (coroutine) with initial stack
///
/// # Safety
/// - `entry` must be a valid function pointer that can safely execute on any thread
/// - `initial_stack` must be either null or a valid pointer to a `StackValue` that:
///   - Was heap-allocated (e.g., via Box)
///   - Has a 'static lifetime or lives longer than the coroutine
///   - Is safe to access from the spawned thread
/// - The caller transfers ownership of `initial_stack` to the coroutine
/// - Returns a unique strand ID (positive integer)
///
/// # Memory Management
/// The spawned coroutine takes ownership of `initial_stack` and will automatically
/// free the final stack returned by `entry` upon completion.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_strand_spawn(
    entry: extern "C" fn(Stack) -> Stack,
    initial_stack: Stack,
) -> i64 {
    // For backwards compatibility, use null base (won't support nested spawns)
    unsafe { patch_seq_strand_spawn_with_base(entry, initial_stack, std::ptr::null_mut()) }
}

/// Spawn a strand (coroutine) with initial stack and explicit stack base
///
/// This variant allows setting the STACK_BASE for the spawned strand, which is
/// required for the child to perform operations like clone_stack (nested spawn).
///
/// # Safety
/// - `entry` must be a valid function pointer that can safely execute on any thread
/// - `initial_stack` must be a valid pointer to a `StackValue` array
/// - `stack_base` must be the base of the stack (or null to skip setting STACK_BASE)
/// - The caller transfers ownership of `initial_stack` to the coroutine
/// - Returns a unique strand ID (positive integer)
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_strand_spawn_with_base(
    entry: extern "C" fn(Stack) -> Stack,
    initial_stack: Stack,
    stack_base: Stack,
) -> i64 {
    // Generate unique strand ID
    let strand_id = NEXT_STRAND_ID.fetch_add(1, Ordering::Relaxed);

    // Increment active strand counter and track total spawned
    let new_count = ACTIVE_STRANDS.fetch_add(1, Ordering::Release) + 1;
    TOTAL_SPAWNED.fetch_add(1, Ordering::Relaxed);

    // Update peak strands if this is a new high-water mark
    // Uses a CAS loop to safely update the maximum without locks
    // Uses Acquire/Release ordering for proper synchronization with diagnostics reads
    let mut peak = PEAK_STRANDS.load(Ordering::Acquire);
    while new_count > peak {
        match PEAK_STRANDS.compare_exchange_weak(
            peak,
            new_count,
            Ordering::Release,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,
            Err(current) => peak = current,
        }
    }

    // Register strand in the registry (for diagnostics visibility)
    // If registry is full, strand still runs but isn't tracked
    #[cfg(feature = "diagnostics")]
    let _ = strand_registry().register(strand_id);

    // Function pointers are already Send, no wrapper needed
    let entry_fn = entry;

    // Convert pointers to usize (which is Send)
    // This is necessary because *mut T is !Send, but the caller guarantees thread safety
    let stack_addr = initial_stack as usize;
    let base_addr = stack_base as usize;

    unsafe {
        coroutine::spawn(move || {
            // Reconstruct pointers from addresses
            let stack_ptr = stack_addr as *mut StackValue;
            let base_ptr = base_addr as *mut StackValue;

            // Debug assertion: validate stack pointer alignment and reasonable address
            debug_assert!(
                stack_ptr.is_null()
                    || stack_addr.is_multiple_of(std::mem::align_of::<StackValue>()),
                "Stack pointer must be null or properly aligned"
            );
            debug_assert!(
                stack_ptr.is_null() || stack_addr > 0x1000,
                "Stack pointer appears to be in invalid memory region (< 0x1000)"
            );

            // Set STACK_BASE for this strand if provided
            // This enables nested spawns and other operations that need clone_stack
            if !base_ptr.is_null() {
                crate::stack::patch_seq_set_stack_base(base_ptr);
            }

            // Execute the entry function
            let final_stack = entry_fn(stack_ptr);

            // Clean up the final stack to prevent memory leak
            free_stack(final_stack);

            // Unregister strand from registry (uses captured strand_id)
            #[cfg(feature = "diagnostics")]
            strand_registry().unregister(strand_id);

            // Decrement the active strand counter first, then track completion.
            // This ordering keeps TOTAL_SPAWNED >= TOTAL_COMPLETED + ACTIVE_STRANDS
            // for any external observer: a transient gap reads as a strand still
            // in flight, never as a negative "lost" count.
            // AcqRel establishes both acquire and release barriers.
            let prev_count = ACTIVE_STRANDS.fetch_sub(1, Ordering::AcqRel);

            // Track completion after decrementing active count
            TOTAL_COMPLETED.fetch_add(1, Ordering::Release);
            if prev_count == 1 {
                // We were the last strand - acquire mutex and signal shutdown
                // The mutex must be held when calling notify to prevent missed wakeups
                let _guard = SHUTDOWN_MUTEX.lock()
                    .expect("strand_spawn: shutdown mutex poisoned - strand panicked during shutdown notification");
                SHUTDOWN_CONDVAR.notify_all();
            }
        });
    }

    strand_id as i64
}

/// Free a stack allocated by the runtime
///
/// With the tagged stack implementation, stack cleanup is handled differently.
/// The contiguous array is freed when the TaggedStack is dropped.
/// This function just resets the thread-local arena.
///
/// # Safety
/// Stack pointer must be valid or null.
fn free_stack(_stack: Stack) {
    // With tagged stack, the array is freed when TaggedStack is dropped.
    // We just need to reset the arena for thread-local strings.

    // Reset the thread-local arena to free all arena-allocated strings
    // This is safe because:
    // - Any arena strings in Values have already been dropped by this point
    // - Global strings are unaffected (they have their own allocations)
    // - Channel sends clone to global, so no cross-strand arena pointers
    crate::arena::arena_reset();
}

/// Legacy spawn_strand function (kept for compatibility)
///
/// # Safety
/// `entry` must be a valid function pointer that can safely execute on any thread.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_spawn_strand(entry: extern "C" fn(Stack) -> Stack) {
    unsafe {
        patch_seq_strand_spawn(entry, std::ptr::null_mut());
    }
}

/// Yield execution to allow other coroutines to run
///
/// # Safety
/// Always safe to call from within a May coroutine.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_yield_strand(stack: Stack) -> Stack {
    coroutine::yield_now();
    stack
}

// =============================================================================
// Cooperative Yield Safety Valve
// =============================================================================
//
// Prevents tight TCO loops from starving other strands and making the process
// unresponsive. When enabled via SEQ_YIELD_INTERVAL, yields after N tail calls.
//
// Configuration:
//   SEQ_YIELD_INTERVAL=10000  - Yield every 10,000 tail calls (default: 0 = disabled)
//
// Scope:
//   - Covers: User-defined word tail calls (musttail) and quotation tail calls
//   - Does NOT cover: Closure calls (they use regular calls, bounded by stack)
//   - Does NOT cover: Non-tail recursive calls (bounded by stack)
//   This is intentional: the safety valve targets unbounded TCO loops.
//
// Design:
//   - Zero overhead when disabled (threshold=0 short-circuits immediately)
//   - Thread-local counter avoids synchronization overhead
//   - Called before every musttail in generated code
//   - Threshold is cached on first access via OnceLock
//
// Thread-Local Counter Behavior:
//   The counter is per-OS-thread, not per-coroutine. Multiple coroutines on the
//   same OS thread share the counter, which may cause yields slightly more
//   frequently than the configured interval. This is intentional:
//   - Avoids coroutine-local storage overhead
//   - Still achieves the goal of preventing starvation
//   - Actual yield frequency is still bounded by the threshold

use std::cell::Cell;
use std::sync::OnceLock;

/// Cached yield interval threshold (0 = disabled)
static YIELD_THRESHOLD: OnceLock<u64> = OnceLock::new();

thread_local! {
    /// Per-thread tail call counter
    static TAIL_CALL_COUNTER: Cell<u64> = const { Cell::new(0) };
}

/// Get the yield threshold from environment (cached)
///
/// Returns 0 (disabled) if SEQ_YIELD_INTERVAL is not set or invalid.
/// Prints a warning to stderr if the value is set but invalid.
fn get_yield_threshold() -> u64 {
    *YIELD_THRESHOLD.get_or_init(|| {
        match std::env::var("SEQ_YIELD_INTERVAL") {
            Ok(s) if s.is_empty() => 0,
            Ok(s) => match s.parse::<u64>() {
                Ok(n) => n,
                Err(_) => {
                    eprintln!(
                        "Warning: SEQ_YIELD_INTERVAL='{}' is not a valid positive integer, yield safety valve disabled",
                        s
                    );
                    0
                }
            },
            Err(_) => 0,
        }
    })
}

/// Maybe yield to other coroutines based on tail call count
///
/// Called before every tail call in generated code. When SEQ_YIELD_INTERVAL
/// is set, yields after that many tail calls to prevent starvation.
///
/// # Performance
/// - Disabled (default): Single branch on cached threshold (< 1ns)
/// - Enabled: Increment + compare + occasional yield (~10-20ns average)
///
/// # Safety
/// Always safe to call. No-op when not in a May coroutine context.
#[unsafe(no_mangle)]
pub extern "C" fn patch_seq_maybe_yield() {
    let threshold = get_yield_threshold();

    // Fast path: disabled
    if threshold == 0 {
        return;
    }

    TAIL_CALL_COUNTER.with(|counter| {
        let count = counter.get().wrapping_add(1);
        counter.set(count);

        if count >= threshold {
            counter.set(0);
            coroutine::yield_now();
        }
    });
}
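
// Schematically, generated code calls the valve before each tail call.
// Illustrative Rust standing in for the emitted LLVM IR (`word_body` is a
// hypothetical compiled word):
//
//     loop {
//         patch_seq_maybe_yield(); // inserted before every musttail
//         stack = word_body(stack);
//     }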

/// Wait for all strands to complete
///
/// # Safety
/// Always safe to call. Blocks until all spawned strands have completed.
///
/// Uses event-driven synchronization via condition variable - no polling overhead.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn patch_seq_wait_all_strands() {
    let mut guard = SHUTDOWN_MUTEX.lock()
        .expect("wait_all_strands: shutdown mutex poisoned - strand panicked during shutdown synchronization");

    // Wait for all strands to complete
    // The condition variable will be notified when the last strand exits
    while ACTIVE_STRANDS.load(Ordering::Acquire) > 0 {
        guard = SHUTDOWN_CONDVAR
            .wait(guard)
            .expect("wait_all_strands: condvar wait failed - strand panicked during shutdown wait");
    }
}

// Public re-exports with short names for internal use
pub use patch_seq_maybe_yield as maybe_yield;
pub use patch_seq_scheduler_init as scheduler_init;
pub use patch_seq_scheduler_run as scheduler_run;
pub use patch_seq_scheduler_shutdown as scheduler_shutdown;
pub use patch_seq_spawn_strand as spawn_strand;
pub use patch_seq_strand_spawn as strand_spawn;
pub use patch_seq_wait_all_strands as wait_all_strands;
pub use patch_seq_yield_strand as yield_strand;

#[cfg(test)]
mod tests {
    use super::*;
    use crate::stack::push;
    use crate::value::Value;
    use std::sync::atomic::{AtomicU32, Ordering};

    #[test]
    fn test_spawn_strand() {
        unsafe {
            static COUNTER: AtomicU32 = AtomicU32::new(0);

            extern "C" fn test_entry(_stack: Stack) -> Stack {
                COUNTER.fetch_add(1, Ordering::SeqCst);
                std::ptr::null_mut()
            }

            for _ in 0..100 {
                spawn_strand(test_entry);
            }

            std::thread::sleep(std::time::Duration::from_millis(200));
            assert_eq!(COUNTER.load(Ordering::SeqCst), 100);
        }
    }

    #[test]
    fn test_scheduler_init_idempotent() {
        unsafe {
            // Should be safe to call multiple times
            scheduler_init();
            scheduler_init();
            scheduler_init();
        }
    }

    #[test]
    fn test_free_stack_null() {
        // Freeing null should be a no-op
        free_stack(std::ptr::null_mut());
    }

    #[test]
    fn test_free_stack_valid() {
        unsafe {
            // Create a stack, then free it
            let stack = push(crate::stack::alloc_test_stack(), Value::Int(42));
            free_stack(stack);
            // If we get here without crashing, test passed
        }
    }

    #[test]
    fn test_strand_spawn_with_stack() {
        unsafe {
            static COUNTER: AtomicU32 = AtomicU32::new(0);

            extern "C" fn test_entry(stack: Stack) -> Stack {
                COUNTER.fetch_add(1, Ordering::SeqCst);
                // Return the stack as-is (the spawn wrapper frees it)
                stack
            }

            let initial_stack = push(crate::stack::alloc_test_stack(), Value::Int(99));
            strand_spawn(test_entry, initial_stack);

            std::thread::sleep(std::time::Duration::from_millis(200));
            assert_eq!(COUNTER.load(Ordering::SeqCst), 1);
        }
    }

    #[test]
    fn test_scheduler_shutdown() {
        unsafe {
            scheduler_init();
            scheduler_shutdown();
            // Should not crash
        }
    }

    #[test]
    fn test_many_strands_stress() {
        unsafe {
            static COUNTER: AtomicU32 = AtomicU32::new(0);

            extern "C" fn increment(_stack: Stack) -> Stack {
                COUNTER.fetch_add(1, Ordering::SeqCst);
                std::ptr::null_mut()
            }

            // Reset counter for this test
            COUNTER.store(0, Ordering::SeqCst);

            // Spawn many strands to stress test synchronization
            for _ in 0..1000 {
                strand_spawn(increment, std::ptr::null_mut());
            }

            // Wait for all to complete
            wait_all_strands();

            // Verify all strands executed
            assert_eq!(COUNTER.load(Ordering::SeqCst), 1000);
        }
    }

    #[test]
    fn test_strand_ids_are_unique() {
        unsafe {
            use std::collections::HashSet;

            extern "C" fn noop(_stack: Stack) -> Stack {
                std::ptr::null_mut()
            }

            // Spawn strands and collect their IDs
            let mut ids = Vec::new();
            for _ in 0..100 {
                let id = strand_spawn(noop, std::ptr::null_mut());
                ids.push(id);
            }

            // Wait for completion
            wait_all_strands();

            // Verify all IDs are unique
            let unique_ids: HashSet<_> = ids.iter().collect();
            assert_eq!(unique_ids.len(), 100, "All strand IDs should be unique");

            // Verify all IDs are positive
            assert!(
                ids.iter().all(|&id| id > 0),
                "All strand IDs should be positive"
            );
        }
    }

    #[test]
    fn test_arena_reset_with_strands() {
        unsafe {
            use crate::arena;
            use crate::seqstring::arena_string;

            extern "C" fn create_temp_strings(stack: Stack) -> Stack {
                // Create many temporary arena strings (simulating request parsing)
                for i in 0..100 {
                    let temp = arena_string(&format!("temporary string {}", i));
                    // Use the string temporarily
                    assert!(!temp.as_str().is_empty());
                    // String is dropped, but memory stays in arena
                }

                // Arena should have allocated memory
                let stats = arena::arena_stats();
                assert!(stats.allocated_bytes > 0, "Arena should have allocations");

                stack // Return empty stack
            }

            // Reset arena before test
            arena::arena_reset();

            // Spawn strand that creates many temp strings
            strand_spawn(create_temp_strings, std::ptr::null_mut());

            // Wait for strand to complete (which calls free_stack -> arena_reset)
            wait_all_strands();

            // After strand exits, arena should be reset
            let stats_after = arena::arena_stats();
            assert_eq!(
                stats_after.allocated_bytes, 0,
                "Arena should be reset after strand exits"
            );
        }
    }

    #[test]
    fn test_arena_with_channel_send() {
        unsafe {
            use crate::channel::{close_channel, make_channel, receive, send};
            use crate::stack::{pop, push};
            use crate::value::Value;
            use std::sync::Arc;
            use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};

            static RECEIVED_COUNT: AtomicU32 = AtomicU32::new(0);
            static CHANNEL_PTR: AtomicI64 = AtomicI64::new(0);

            // Create channel
            let stack = crate::stack::alloc_test_stack();
            let stack = make_channel(stack);
            let (stack, chan_val) = pop(stack);
            let channel = match chan_val {
                Value::Channel(ch) => ch,
                _ => panic!("Expected Channel"),
            };

            // Store channel pointer for strands
            let ch_ptr = Arc::as_ptr(&channel) as i64;
            CHANNEL_PTR.store(ch_ptr, Ordering::Release);

            // Keep Arc alive
            std::mem::forget(channel.clone());
            std::mem::forget(channel.clone());

            // Sender strand: creates arena string, sends through channel
            extern "C" fn sender(_stack: Stack) -> Stack {
                use crate::seqstring::arena_string;
                use crate::value::ChannelData;
                use std::sync::Arc;

                unsafe {
                    let ch_ptr = CHANNEL_PTR.load(Ordering::Acquire) as *const ChannelData;
                    let channel = Arc::from_raw(ch_ptr);
                    let channel_clone = Arc::clone(&channel);
                    std::mem::forget(channel); // Don't drop

                    // Create arena string
                    let msg = arena_string("Hello from sender!");

                    // Push string and channel for send
                    let stack = push(crate::stack::alloc_test_stack(), Value::String(msg));
                    let stack = push(stack, Value::Channel(channel_clone));

                    // Send (will clone to global)
                    send(stack)
                }
            }

            // Receiver strand: receives string from channel
            extern "C" fn receiver(_stack: Stack) -> Stack {
                use crate::value::ChannelData;
                use std::sync::Arc;
                use std::sync::atomic::Ordering;

                unsafe {
                    let ch_ptr = CHANNEL_PTR.load(Ordering::Acquire) as *const ChannelData;
                    let channel = Arc::from_raw(ch_ptr);
                    let channel_clone = Arc::clone(&channel);
                    std::mem::forget(channel); // Don't drop

                    // Push channel for receive
                    let stack = push(
                        crate::stack::alloc_test_stack(),
                        Value::Channel(channel_clone),
                    );

                    // Receive message
                    let stack = receive(stack);

                    // Pop and verify message
                    let (_stack, msg_val) = pop(stack);
                    match msg_val {
                        Value::String(s) => {
                            assert_eq!(s.as_str(), "Hello from sender!");
                            RECEIVED_COUNT.fetch_add(1, Ordering::SeqCst);
                        }
                        _ => panic!("Expected String"),
                    }

                    std::ptr::null_mut()
                }
            }

            // Spawn sender and receiver
            spawn_strand(sender);
            spawn_strand(receiver);

            // Wait for both strands
            wait_all_strands();

            // Verify message was received
            assert_eq!(
                RECEIVED_COUNT.load(Ordering::SeqCst),
                1,
                "Receiver should have received message"
            );

            // Clean up channel
            let stack = push(stack, Value::Channel(channel));
            close_channel(stack);
        }
    }

    #[test]
    fn test_no_memory_leak_over_many_iterations() {
        // PR #11 feedback: Verify 10K+ strand iterations don't cause memory growth
        unsafe {
            use crate::arena;
            use crate::seqstring::arena_string;

            extern "C" fn allocate_strings_and_exit(stack: Stack) -> Stack {
                // Simulate request processing: many temp allocations
                for i in 0..50 {
                    let temp = arena_string(&format!("request header {}", i));
                    assert!(!temp.as_str().is_empty());
                    // Strings dropped here but arena memory stays allocated
                }
                stack
            }

            // Run many iterations to detect leaks
            let iterations = 10_000;

            for i in 0..iterations {
                // Reset arena before each iteration to start fresh
                arena::arena_reset();

                // Spawn strand, let it allocate strings, then exit
                strand_spawn(allocate_strings_and_exit, std::ptr::null_mut());

                // Wait for completion (triggers arena reset)
                wait_all_strands();

                // Every 1000 iterations, verify arena is actually reset
                if i % 1000 == 0 {
                    let stats = arena::arena_stats();
                    assert_eq!(
                        stats.allocated_bytes, 0,
                        "Arena not reset after iteration {} (leaked {} bytes)",
                        i, stats.allocated_bytes
                    );
                }
            }

            // Final verification: arena should be empty
            let final_stats = arena::arena_stats();
            assert_eq!(
                final_stats.allocated_bytes, 0,
                "Arena leaked memory after {} iterations ({} bytes)",
                iterations, final_stats.allocated_bytes
            );

            println!(
                "✓ Memory leak test passed: {} iterations with no growth",
                iterations
            );
        }
    }

    #[test]
    fn test_parse_stack_size_valid() {
        assert_eq!(parse_stack_size(Some("2097152".to_string())), 2097152);
        assert_eq!(parse_stack_size(Some("1".to_string())), 1);
        assert_eq!(parse_stack_size(Some("999999999".to_string())), 999999999);
    }

    #[test]
    fn test_parse_stack_size_none() {
        assert_eq!(parse_stack_size(None), DEFAULT_STACK_SIZE);
    }

    #[test]
    fn test_parse_stack_size_zero() {
        // Zero should fall back to default (with warning printed to stderr)
        assert_eq!(parse_stack_size(Some("0".to_string())), DEFAULT_STACK_SIZE);
    }

    #[test]
    fn test_parse_stack_size_invalid() {
        // Non-numeric should fall back to default (with warning printed to stderr)
        assert_eq!(
            parse_stack_size(Some("invalid".to_string())),
            DEFAULT_STACK_SIZE
        );
        assert_eq!(
            parse_stack_size(Some("-100".to_string())),
            DEFAULT_STACK_SIZE
        );
        assert_eq!(parse_stack_size(Some("".to_string())), DEFAULT_STACK_SIZE);
        assert_eq!(
            parse_stack_size(Some("1.5".to_string())),
            DEFAULT_STACK_SIZE
        );
    }

    #[test]
    #[cfg(feature = "diagnostics")]
    fn test_strand_registry_basic() {
        let registry = StrandRegistry::new(10);

        // Register some strands
        assert_eq!(registry.register(1), Some(0)); // First slot
        assert_eq!(registry.register(2), Some(1)); // Second slot
        assert_eq!(registry.register(3), Some(2)); // Third slot

        // Verify active strands
        let active: Vec<_> = registry.active_strands().collect();
        assert_eq!(active.len(), 3);

        // Unregister one
        assert!(registry.unregister(2));
        let active: Vec<_> = registry.active_strands().collect();
        assert_eq!(active.len(), 2);

        // Unregister non-existent should return false
        assert!(!registry.unregister(999));
    }

    #[test]
    #[cfg(feature = "diagnostics")]
    fn test_strand_registry_overflow() {
        let registry = StrandRegistry::new(3); // Small capacity

        // Fill it up
        assert!(registry.register(1).is_some());
        assert!(registry.register(2).is_some());
        assert!(registry.register(3).is_some());

        // Next should overflow
        assert!(registry.register(4).is_none());
        assert_eq!(registry.overflow_count.load(Ordering::Relaxed), 1);

        // Another overflow
        assert!(registry.register(5).is_none());
        assert_eq!(registry.overflow_count.load(Ordering::Relaxed), 2);
    }

    #[test]
    #[cfg(feature = "diagnostics")]
    fn test_strand_registry_slot_reuse() {
        let registry = StrandRegistry::new(3);

        // Fill it up
        registry.register(1);
        registry.register(2);
        registry.register(3);

        // Unregister middle one
        registry.unregister(2);

        // New registration should reuse the slot
        assert!(registry.register(4).is_some());
        assert_eq!(registry.active_strands().count(), 3);
    }

    #[test]
    #[cfg(feature = "diagnostics")]
    fn test_strand_registry_concurrent_stress() {
        use std::sync::Arc;
        use std::thread;

        let registry = Arc::new(StrandRegistry::new(50)); // Moderate capacity

        let handles: Vec<_> = (0..100)
            .map(|i| {
                let reg = Arc::clone(&registry);
                thread::spawn(move || {
                    let id = (i + 1) as u64;
                    // Register
                    let _ = reg.register(id);
                    // Brief work
                    thread::yield_now();
                    // Unregister
                    reg.unregister(id);
                })
            })
            .collect();

        for h in handles {
            h.join().unwrap();
        }

        // All slots should be free after all threads complete
        assert_eq!(registry.active_strands().count(), 0);
    }

    #[test]
    fn test_strand_lifecycle_counters() {
        unsafe {
            // Snapshot the counters for test isolation (not perfect, but helps)
            let initial_spawned = TOTAL_SPAWNED.load(Ordering::Relaxed);
            let initial_completed = TOTAL_COMPLETED.load(Ordering::Relaxed);

            static COUNTER: AtomicU32 = AtomicU32::new(0);

            extern "C" fn simple_work(_stack: Stack) -> Stack {
                COUNTER.fetch_add(1, Ordering::SeqCst);
                std::ptr::null_mut()
            }

            COUNTER.store(0, Ordering::SeqCst);

            // Spawn some strands
            for _ in 0..10 {
                strand_spawn(simple_work, std::ptr::null_mut());
            }

            wait_all_strands();

            // Verify counters incremented
            let final_spawned = TOTAL_SPAWNED.load(Ordering::Relaxed);
            let final_completed = TOTAL_COMPLETED.load(Ordering::Relaxed);

            assert!(
                final_spawned >= initial_spawned + 10,
                "TOTAL_SPAWNED should have increased by at least 10"
            );
            assert!(
                final_completed >= initial_completed + 10,
                "TOTAL_COMPLETED should have increased by at least 10"
            );
            assert_eq!(COUNTER.load(Ordering::SeqCst), 10);
        }
    }

    // =========================================================================
    // Yield Safety Valve Tests
    // =========================================================================

    #[test]
    fn test_maybe_yield_disabled_by_default() {
        // When SEQ_YIELD_INTERVAL is not set (or 0), maybe_yield should be a no-op
        // This test verifies it doesn't panic and returns quickly
        for _ in 0..1000 {
            patch_seq_maybe_yield();
        }
    }

    #[test]
    fn test_tail_call_counter_increments() {
        // Verify the thread-local counter increments correctly
        TAIL_CALL_COUNTER.with(|counter| {
            let initial = counter.get();
            patch_seq_maybe_yield();
            patch_seq_maybe_yield();
            patch_seq_maybe_yield();
            // Counter should have incremented (if threshold > 0) or stayed same (if disabled)
            // Either way, it shouldn't panic
            let _ = counter.get();
            // Reset to avoid affecting other tests
            counter.set(initial);
        });
    }

    #[test]
    fn test_counter_overflow_safety() {
        // Verify wrapping_add prevents overflow panic
        TAIL_CALL_COUNTER.with(|counter| {
            let initial = counter.get();
            // Set counter near max to test overflow behavior
            counter.set(u64::MAX - 1);
            // These calls should not panic due to overflow
            patch_seq_maybe_yield();
            patch_seq_maybe_yield();
            patch_seq_maybe_yield();
            // Reset
            counter.set(initial);
        });
    }
}
1202}