// seq_runtime/scheduler.rs
1//! Scheduler - Green Thread Management with May
2//!
3//! CSP-style concurrency for Seq using May coroutines.
4//! Each strand is a lightweight green thread that can communicate via channels.
5//!
6//! ## Non-Blocking Guarantee
7//!
8//! Channel operations (`send`, `receive`) use May's cooperative blocking and NEVER
9//! block OS threads. However, I/O operations (`write_line`, `read_line` in io.rs)
10//! currently use blocking syscalls. Future work will make all I/O non-blocking.
11//!
12//! ## Panic Behavior
13//!
14//! Functions panic on invalid input (null stacks, negative IDs, closed channels).
15//! In a production system, consider implementing error channels or Result-based
16//! error handling instead of panicking.
17//!
18//! ## Module Layout
19//!
20//! Per-concern sub-modules:
21//! - `lifecycle` — init / run / shutdown / wait_all_strands / scheduler_elapsed
22//! - `spawn` — strand_spawn (+ with_base) / spawn_strand (legacy) / free_stack
23//! - `yield_ops` — yield_strand (explicit) / maybe_yield (TCO safety valve)
24//! - `registry` — lock-free strand registry (diagnostics feature only)
25//!
26//! Shared lifecycle state (`ACTIVE_STRANDS`, `SHUTDOWN_*`, `TOTAL_*`,
27//! `PEAK_STRANDS`) lives on this aggregator so all sub-modules and
28//! consumers (`weave`, `diagnostics`, `report`) reference one source of truth.
29
30use std::sync::atomic::{AtomicU64, AtomicUsize};
31use std::sync::{Condvar, Mutex};
32
// Strand lifecycle tracking
//
// Design rationale:
// - ACTIVE_STRANDS: Lock-free atomic counter for the hot path (spawn/complete)
//   Every strand increments on spawn, decrements on complete. This is extremely
//   fast (lock-free atomic ops) and suitable for high-frequency operations.
//
// - SHUTDOWN_CONDVAR/MUTEX: Event-driven synchronization for the cold path (shutdown wait)
//   Used only when waiting for all strands to complete (program shutdown).
//   Condvar provides event-driven wakeup instead of polling, which is critical
//   for a systems language - no CPU waste, proper OS-level blocking.
//
// Why not track JoinHandles?
// Strands are like Erlang processes - potentially hundreds of thousands of concurrent
// entities with independent lifecycles. Storing handles would require global mutable
// state with synchronization overhead on the hot path. The counter + condvar approach
// keeps the hot path lock-free while providing proper shutdown synchronization.

/// Number of strands currently alive: incremented on spawn, decremented on
/// completion (hot path — lock-free atomic ops only).
pub static ACTIVE_STRANDS: AtomicUsize = AtomicUsize::new(0);
/// Notified when a strand completes so shutdown waiters can re-check
/// [`ACTIVE_STRANDS`] instead of polling (cold path).
pub(crate) static SHUTDOWN_CONDVAR: Condvar = Condvar::new();
/// Companion mutex for [`SHUTDOWN_CONDVAR`]; it guards no data of its own and
/// exists only to satisfy the condvar wait protocol.
pub(crate) static SHUTDOWN_MUTEX: Mutex<()> = Mutex::new(());
53
// Strand lifecycle statistics (for diagnostics)
//
// These counters provide observability into strand lifecycle without any locking.
// All operations are lock-free atomic increments/loads.
//
// Useful diagnostics:
// - Currently running: ACTIVE_STRANDS
// - Completed successfully: TOTAL_COMPLETED
// - Potential leaks: TOTAL_SPAWNED - TOTAL_COMPLETED - ACTIVE_STRANDS > 0 (strands lost)
// - Peak concurrency: PEAK_STRANDS

/// Monotonically increasing count of all strands ever spawned (never reset).
pub static TOTAL_SPAWNED: AtomicU64 = AtomicU64::new(0);
/// Monotonically increasing count of all strands that ran to completion.
pub static TOTAL_COMPLETED: AtomicU64 = AtomicU64::new(0);
/// High-water mark of concurrently live strands; comparing against
/// TOTAL_SPAWNED/TOTAL_COMPLETED helps detect strand leaks.
pub static PEAK_STRANDS: AtomicUsize = AtomicUsize::new(0);
71
// Per-concern sub-modules; see the module docs at the top of this file for
// each one's responsibilities.
mod lifecycle;
mod spawn;
mod yield_ops;

// The strand registry is diagnostics-only; it is compiled out of normal builds.
#[cfg(feature = "diagnostics")]
mod registry;

// Flat re-exports of the sub-modules' public entry points.
// NOTE(review): the `patch_seq_` prefix presumably marks the runtime's
// externally-visible entry points — confirm against the FFI/linkage layer.
pub use lifecycle::{
    patch_seq_scheduler_init, patch_seq_scheduler_run, patch_seq_scheduler_shutdown,
    patch_seq_wait_all_strands, scheduler_elapsed,
};
pub use spawn::{patch_seq_spawn_strand, patch_seq_strand_spawn, patch_seq_strand_spawn_with_base};
pub use yield_ops::{patch_seq_maybe_yield, patch_seq_yield_strand};

#[cfg(feature = "diagnostics")]
pub use registry::{StrandRegistry, StrandSlot, strand_registry};
88
89// Public re-exports with short names for internal use
90pub use patch_seq_maybe_yield as maybe_yield;
91pub use patch_seq_scheduler_init as scheduler_init;
92pub use patch_seq_scheduler_run as scheduler_run;
93pub use patch_seq_scheduler_shutdown as scheduler_shutdown;
94pub use patch_seq_spawn_strand as spawn_strand;
95pub use patch_seq_strand_spawn as strand_spawn;
96pub use patch_seq_wait_all_strands as wait_all_strands;
97pub use patch_seq_yield_strand as yield_strand;
98
99#[cfg(test)]
100mod tests;