Skip to main content

oxilean_runtime/scheduler/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use crate::object::RtObject;
6use std::collections::{HashMap, VecDeque};
7use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
8use std::sync::{Arc, Mutex};
9
10/// A message sent to an actor.
11#[allow(dead_code)]
12#[derive(Clone, Debug)]
13pub struct ActorMessage {
14    /// Sender actor.
15    pub from: ActorId,
16    /// Recipient actor.
17    pub to: ActorId,
18    /// The message payload.
19    pub payload: RtObject,
20    /// Sequence number.
21    pub seq: u64,
22}
23#[allow(dead_code)]
24impl ActorMessage {
25    /// Create a new message.
26    pub fn new(from: ActorId, to: ActorId, payload: RtObject, seq: u64) -> Self {
27        ActorMessage {
28            from,
29            to,
30            payload,
31            seq,
32        }
33    }
34}
/// Simple round-robin token for fair scheduling.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct RoundRobinToken {
    slots: usize,
    current: usize,
}
#[allow(dead_code)]
impl RoundRobinToken {
    /// Create a token that cycles over `slots` slots, starting at slot 0.
    ///
    /// # Panics
    /// Panics if `slots == 0`.
    pub fn new(slots: usize) -> Self {
        assert!(slots > 0, "slots must be > 0");
        RoundRobinToken { slots, current: 0 }
    }
    /// Advance to the next slot and return it.
    pub fn next(&mut self) -> usize {
        let issued = self.current;
        // Wrap back to 0 after the last slot.
        self.current = (issued + 1) % self.slots;
        issued
    }
    /// Peek at the current slot without advancing.
    pub fn peek(&self) -> usize {
        self.current
    }
    /// Reset back to slot 0.
    pub fn reset(&mut self) {
        self.current = 0;
    }
}
63/// A deterministic harness for testing the scheduler without real threads.
64#[allow(dead_code)]
65pub struct SchedulerTestHarness {
66    /// Tasks submitted.
67    pub tasks: Vec<(TaskId, RtObject)>,
68    /// Execution order recorded.
69    pub execution_order: Vec<TaskId>,
70    /// Results produced.
71    pub results: HashMap<TaskId, RtObject>,
72    /// Next task id.
73    next_id: u64,
74}
75#[allow(dead_code)]
76impl SchedulerTestHarness {
77    /// Create a new harness.
78    pub fn new() -> Self {
79        SchedulerTestHarness {
80            tasks: Vec::new(),
81            execution_order: Vec::new(),
82            results: HashMap::new(),
83            next_id: 0,
84        }
85    }
86    /// Submit a task.
87    pub fn submit(&mut self, action: RtObject) -> TaskId {
88        let id = TaskId::new(self.next_id);
89        self.next_id += 1;
90        self.tasks.push((id, action));
91        id
92    }
93    /// Run all submitted tasks with a given handler.
94    pub fn run_all<F: FnMut(&RtObject) -> RtObject>(&mut self, mut f: F) {
95        let tasks = std::mem::take(&mut self.tasks);
96        for (id, action) in tasks {
97            let result = f(&action);
98            self.execution_order.push(id);
99            self.results.insert(id, result);
100        }
101    }
102    /// Get the result of a task.
103    pub fn get_result(&self, id: TaskId) -> Option<&RtObject> {
104        self.results.get(&id)
105    }
106    /// Number of tasks completed.
107    pub fn completed(&self) -> usize {
108        self.results.len()
109    }
110    /// Reset the harness.
111    pub fn reset(&mut self) {
112        self.tasks.clear();
113        self.execution_order.clear();
114        self.results.clear();
115        self.next_id = 0;
116    }
117}
/// Affinity specification for a task.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum TaskAffinity {
    /// No affinity — any worker can run this task.
    Any,
    /// Pinned to a specific worker index.
    Worker(usize),
    /// Prefer a worker but allow stealing.
    Prefer(usize),
    /// Run only on the main thread (worker 0).
    MainThread,
}
#[allow(dead_code)]
impl TaskAffinity {
    /// Check if a given worker satisfies this affinity.
    ///
    /// NOTE(review): `Prefer` matches only the preferred worker here, even
    /// though `allows_steal` says stealing is permitted — confirm callers
    /// treat `Prefer` as a soft hint rather than a hard constraint.
    pub fn allows(&self, worker: usize) -> bool {
        match *self {
            TaskAffinity::Any => true,
            TaskAffinity::Worker(w) | TaskAffinity::Prefer(w) => w == worker,
            TaskAffinity::MainThread => worker == 0,
        }
    }
    /// Whether this is a hard pin (only one worker may run it).
    pub fn is_pinned(&self) -> bool {
        match self {
            TaskAffinity::Worker(_) | TaskAffinity::MainThread => true,
            TaskAffinity::Any | TaskAffinity::Prefer(_) => false,
        }
    }
    /// Whether stealing is allowed for this affinity.
    pub fn allows_steal(&self) -> bool {
        // Exactly the non-pinned variants may be stolen.
        !self.is_pinned()
    }
}
151/// A task queue that maintains separate buckets per priority level.
152#[allow(dead_code)]
153pub struct PriorityTaskQueue {
154    /// Buckets indexed by priority value (0..=4).
155    buckets: [VecDeque<TaskId>; 5],
156    /// Total tasks across all buckets.
157    total: usize,
158}
159#[allow(dead_code)]
160impl PriorityTaskQueue {
161    /// Create an empty priority queue.
162    pub fn new() -> Self {
163        PriorityTaskQueue {
164            buckets: [
165                VecDeque::new(),
166                VecDeque::new(),
167                VecDeque::new(),
168                VecDeque::new(),
169                VecDeque::new(),
170            ],
171            total: 0,
172        }
173    }
174    /// Push a task with a given priority.
175    pub fn push(&mut self, id: TaskId, priority: TaskPriority) {
176        self.buckets[priority.value() as usize].push_back(id);
177        self.total += 1;
178    }
179    /// Pop the highest-priority pending task.
180    pub fn pop(&mut self) -> Option<(TaskId, TaskPriority)> {
181        for level in (0..5).rev() {
182            if let Some(id) = self.buckets[level].pop_front() {
183                self.total -= 1;
184                return Some((id, TaskPriority::from_u8(level as u8)));
185            }
186        }
187        None
188    }
189    /// Total number of tasks.
190    pub fn len(&self) -> usize {
191        self.total
192    }
193    /// Whether the queue is empty.
194    pub fn is_empty(&self) -> bool {
195        self.total == 0
196    }
197    /// Number of tasks at a given priority.
198    pub fn count_at(&self, priority: TaskPriority) -> usize {
199        self.buckets[priority.value() as usize].len()
200    }
201    /// Clear all tasks.
202    pub fn clear(&mut self) {
203        for bucket in &mut self.buckets {
204            bucket.clear();
205        }
206        self.total = 0;
207    }
208}
209/// A task represents a unit of work to be executed.
210#[derive(Clone, Debug)]
211pub struct Task {
212    /// Unique task identifier.
213    pub id: TaskId,
214    /// Human-readable name (for debugging).
215    pub name: Option<String>,
216    /// Priority.
217    pub priority: TaskPriority,
218    /// Current state.
219    pub state: TaskState,
220    /// The closure to execute (represented as an RtObject).
221    pub action: RtObject,
222    /// Dependencies (tasks that must complete before this one).
223    pub dependencies: Vec<TaskId>,
224    /// Tasks that depend on this one (notified on completion).
225    pub dependents: Vec<TaskId>,
226    /// Creation timestamp (nanoseconds).
227    pub created_at: u64,
228    /// Completion timestamp (nanoseconds).
229    pub completed_at: Option<u64>,
230}
231impl Task {
232    /// Create a new task.
233    pub fn new(id: TaskId, action: RtObject) -> Self {
234        Task {
235            id,
236            name: None,
237            priority: TaskPriority::Normal,
238            state: TaskState::Created,
239            action,
240            dependencies: Vec::new(),
241            dependents: Vec::new(),
242            created_at: 0,
243            completed_at: None,
244        }
245    }
246    /// Create a named task.
247    pub fn named(id: TaskId, name: String, action: RtObject) -> Self {
248        Task {
249            id,
250            name: Some(name),
251            priority: TaskPriority::Normal,
252            state: TaskState::Created,
253            action,
254            dependencies: Vec::new(),
255            dependents: Vec::new(),
256            created_at: 0,
257            completed_at: None,
258        }
259    }
260    /// Set the priority.
261    pub fn with_priority(mut self, priority: TaskPriority) -> Self {
262        self.priority = priority;
263        self
264    }
265    /// Add a dependency.
266    pub fn depends_on(mut self, dep: TaskId) -> Self {
267        self.dependencies.push(dep);
268        self
269    }
270    /// Check if all dependencies are satisfied.
271    pub fn dependencies_satisfied(&self, completed: &[TaskId]) -> bool {
272        self.dependencies.iter().all(|dep| completed.contains(dep))
273    }
274    /// Mark as completed.
275    pub fn complete(&mut self, result: RtObject) {
276        self.state = TaskState::Completed { result };
277    }
278    /// Mark as failed.
279    pub fn fail(&mut self, error: String) {
280        self.state = TaskState::Failed { error };
281    }
282    /// Mark as cancelled.
283    pub fn cancel(&mut self) {
284        self.state = TaskState::Cancelled;
285    }
286    /// Get the result if completed.
287    pub fn result(&self) -> Option<&RtObject> {
288        if let TaskState::Completed { ref result } = self.state {
289            Some(result)
290        } else {
291            None
292        }
293    }
294    /// Get the error if failed.
295    pub fn error(&self) -> Option<&str> {
296        if let TaskState::Failed { ref error } = self.state {
297            Some(error)
298        } else {
299            None
300        }
301    }
302}
/// A unique actor identifier (newtype over a `u64`).
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ActorId(pub u64);
#[allow(dead_code)]
impl ActorId {
    /// Wrap a raw `u64` as an actor ID.
    pub fn new(id: u64) -> Self {
        Self(id)
    }
    /// Unwrap back to the raw `u64`.
    pub fn raw(self) -> u64 {
        let ActorId(inner) = self;
        inner
    }
}
318/// Represents a worker in the scheduler.
319#[derive(Debug)]
320pub struct Worker {
321    /// Worker ID.
322    pub id: usize,
323    /// The worker's local task deque.
324    pub deque: WorkStealingDeque,
325    /// Number of tasks completed by this worker.
326    pub tasks_completed: u64,
327    /// Number of tasks stolen from this worker.
328    pub tasks_stolen_from: u64,
329    /// Number of tasks stolen by this worker.
330    pub tasks_stolen: u64,
331    /// Whether this worker is currently idle.
332    pub idle: bool,
333    /// Current task being executed (if any).
334    pub current_task: Option<TaskId>,
335}
336impl Worker {
337    /// Create a new worker.
338    pub fn new(id: usize, deque_capacity: usize) -> Self {
339        Worker {
340            id,
341            deque: WorkStealingDeque::new(deque_capacity),
342            tasks_completed: 0,
343            tasks_stolen_from: 0,
344            tasks_stolen: 0,
345            idle: true,
346            current_task: None,
347        }
348    }
349    /// Push a task to this worker's deque.
350    pub fn push_task(&mut self, task_id: TaskId) -> bool {
351        self.deque.push(task_id)
352    }
353    /// Pop a task from this worker's deque.
354    pub fn pop_task(&mut self) -> Option<TaskId> {
355        self.deque.pop()
356    }
357    /// Start executing a task.
358    pub fn start_task(&mut self, task_id: TaskId) {
359        self.current_task = Some(task_id);
360        self.idle = false;
361    }
362    /// Finish executing the current task.
363    pub fn finish_task(&mut self) {
364        self.current_task = None;
365        self.idle = true;
366        self.tasks_completed += 1;
367    }
368    /// Load of this worker (number of queued + current tasks).
369    pub fn load(&self) -> usize {
370        self.deque.len() + if self.current_task.is_some() { 1 } else { 0 }
371    }
372}
/// Load balancing strategies.
///
/// Interpreted by `LoadBalancer::select_worker` when picking a worker
/// for a new task.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LoadBalanceStrategy {
    /// Round-robin assignment (cycles through workers in order).
    RoundRobin,
    /// Assign to the least loaded worker.
    LeastLoaded,
    /// Random assignment.
    Random,
    /// Work stealing (tasks start local, idle workers steal).
    WorkStealing,
}
/// Worker statistics summary.
///
/// A plain snapshot of one worker's counters (mirrors the counter
/// fields on `Worker`), suitable for reporting.
#[derive(Clone, Debug)]
pub struct WorkerStats {
    /// Worker ID.
    pub id: usize,
    /// Tasks completed.
    pub tasks_completed: u64,
    /// Tasks stolen by this worker (from others).
    pub tasks_stolen: u64,
    /// Tasks stolen from this worker (by others).
    pub tasks_stolen_from: u64,
    /// Current queue length.
    pub queue_length: usize,
    /// Whether the worker was idle at snapshot time.
    pub idle: bool,
}
/// A handle for requesting a yield from outside the task.
///
/// Cloning the handle shares the same underlying flag, so a clone held
/// by the scheduler can signal the clone held by the task.
#[allow(dead_code)]
#[derive(Clone)]
pub struct YieldHandle {
    requested: Arc<AtomicBool>,
}
#[allow(dead_code)]
impl YieldHandle {
    /// Create a handle with no yield pending.
    ///
    /// (The field is private and no constructor existed, which made the
    /// type unconstructible outside its own module.)
    pub fn new() -> Self {
        YieldHandle {
            requested: Arc::new(AtomicBool::new(false)),
        }
    }
    /// Request the task to yield at its next safe point.
    pub fn request(&self) {
        self.requested.store(true, Ordering::Release);
    }
    /// Check if a yield is pending.
    pub fn is_pending(&self) -> bool {
        self.requested.load(Ordering::Acquire)
    }
    /// Clear a pending request (e.g. after the task has yielded).
    pub fn clear(&self) {
        self.requested.store(false, Ordering::Release);
    }
}
impl Default for YieldHandle {
    /// Same as [`YieldHandle::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Extended scheduler statistics.
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ExtSchedulerStats {
    /// Total tasks created.
    pub tasks_created: u64,
    /// Total tasks completed.
    pub tasks_completed: u64,
    /// Total tasks cancelled.
    pub tasks_cancelled: u64,
    /// Total tasks stolen from other workers.
    pub tasks_stolen: u64,
    /// Total worker-idle samples.
    pub idle_samples: u64,
    /// Total worker-busy samples.
    pub busy_samples: u64,
    /// Cumulative task latency in "ticks".
    pub total_latency_ticks: u64,
    /// Maximum observed task latency.
    pub max_latency_ticks: u64,
    /// Total tasks exceeding latency threshold.
    pub latency_violations: u64,
    /// Latency violation threshold (0 disables violation tracking).
    pub latency_threshold_ticks: u64,
}
#[allow(dead_code)]
impl ExtSchedulerStats {
    /// Create a zeroed stats instance.
    pub fn new() -> Self {
        Self::default()
    }
    /// Record task creation.
    pub fn record_created(&mut self) {
        self.tasks_created += 1;
    }
    /// Record task completion with its latency; updates totals, the
    /// running maximum, and the violation count when a threshold is set.
    pub fn record_completed(&mut self, latency_ticks: u64) {
        self.tasks_completed += 1;
        self.total_latency_ticks += latency_ticks;
        self.max_latency_ticks = self.max_latency_ticks.max(latency_ticks);
        let threshold = self.latency_threshold_ticks;
        if threshold > 0 && latency_ticks > threshold {
            self.latency_violations += 1;
        }
    }
    /// Record a task cancellation.
    pub fn record_cancelled(&mut self) {
        self.tasks_cancelled += 1;
    }
    /// Record a work-steal event.
    pub fn record_steal(&mut self) {
        self.tasks_stolen += 1;
    }
    /// Record one worker state sample (busy or idle).
    pub fn record_sample(&mut self, busy: bool) {
        let counter = if busy {
            &mut self.busy_samples
        } else {
            &mut self.idle_samples
        };
        *counter += 1;
    }
    /// Worker utilization (0.0 – 1.0); 0.0 when no samples recorded.
    pub fn utilization(&self) -> f64 {
        match self.busy_samples + self.idle_samples {
            0 => 0.0,
            total => self.busy_samples as f64 / total as f64,
        }
    }
    /// Average latency in ticks; 0.0 when nothing has completed.
    pub fn avg_latency(&self) -> f64 {
        match self.tasks_completed {
            0 => 0.0,
            n => self.total_latency_ticks as f64 / n as f64,
        }
    }
    /// Merge another stats instance into this one (sums counters,
    /// keeps the larger maximum latency).
    pub fn merge(&mut self, other: &ExtSchedulerStats) {
        self.tasks_created += other.tasks_created;
        self.tasks_completed += other.tasks_completed;
        self.tasks_cancelled += other.tasks_cancelled;
        self.tasks_stolen += other.tasks_stolen;
        self.idle_samples += other.idle_samples;
        self.busy_samples += other.busy_samples;
        self.total_latency_ticks += other.total_latency_ticks;
        self.max_latency_ticks = self.max_latency_ticks.max(other.max_latency_ticks);
        self.latency_violations += other.latency_violations;
    }
    /// Reset all statistics to zero.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Priority level for task scheduling.
///
/// Higher numeric values are scheduled first; `Normal` is the default.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum TaskPriority {
    /// Background (lowest): GC, cleanup.
    Background = 0,
    /// Low priority (background tasks).
    Low = 1,
    /// Normal priority (default).
    #[default]
    Normal = 2,
    /// High priority (user-facing tasks).
    High = 3,
    /// Critical priority (system tasks).
    Critical = 4,
}
impl TaskPriority {
    /// Create from a numeric value; anything above 4 maps to `Critical`.
    pub fn from_u8(v: u8) -> Self {
        match v {
            0 => Self::Background,
            1 => Self::Low,
            2 => Self::Normal,
            3 => Self::High,
            _ => Self::Critical,
        }
    }
    /// Numeric value of the priority (0..=4).
    pub fn value(self) -> u8 {
        self as u8
    }
    /// Whether this priority is above Normal.
    pub fn is_high(self) -> bool {
        matches!(self, Self::High | Self::Critical)
    }
    /// Whether this is a background task.
    pub fn is_background(self) -> bool {
        matches!(self, Self::Background)
    }
}
/// Tracks backpressure between producers and consumers.
///
/// NOTE(review): `new` does not validate `low_watermark < high_watermark`;
/// an inverted pair would make throttling release immediately — confirm
/// callers always pass sane watermarks.
#[allow(dead_code)]
pub struct BackpressureController {
    /// Maximum queue depth before producer is throttled.
    pub high_watermark: usize,
    /// Queue depth at which throttling is released.
    pub low_watermark: usize,
    /// Current queue depth.
    pub current_depth: usize,
    /// Whether the producer is currently throttled.
    throttled: bool,
    /// Total times throttling was applied.
    pub throttle_events: u64,
}
#[allow(dead_code)]
impl BackpressureController {
    /// Create a controller with the given watermarks, empty and unthrottled.
    pub fn new(high_watermark: usize, low_watermark: usize) -> Self {
        BackpressureController {
            high_watermark,
            low_watermark,
            current_depth: 0,
            throttled: false,
            throttle_events: 0,
        }
    }
    /// Record that one item was enqueued; engages throttling (once) when
    /// the depth reaches the high watermark.
    pub fn enqueue(&mut self) {
        self.current_depth += 1;
        let at_capacity = self.current_depth >= self.high_watermark;
        if at_capacity && !self.throttled {
            self.throttled = true;
            self.throttle_events += 1;
        }
    }
    /// Record that one item was dequeued; releases throttling once the
    /// depth falls to the low watermark.
    pub fn dequeue(&mut self) {
        self.current_depth = self.current_depth.saturating_sub(1);
        if self.current_depth <= self.low_watermark {
            self.throttled = false;
        }
    }
    /// Whether the producer should be throttled.
    pub fn is_throttled(&self) -> bool {
        self.throttled
    }
    /// Fill ratio (0.0 = empty, 1.0 = at high watermark), clamped to 1.0.
    pub fn fill_ratio(&self) -> f64 {
        match self.high_watermark {
            // Degenerate configuration: treat a zero watermark as full.
            0 => 1.0,
            hw => (self.current_depth as f64 / hw as f64).min(1.0),
        }
    }
    /// Reset depth and throttle state (event counter is preserved).
    pub fn reset(&mut self) {
        self.current_depth = 0;
        self.throttled = false;
    }
}
612/// Parallel evaluation primitives.
613pub struct ParallelEval;
614impl ParallelEval {
615    /// Evaluate multiple independent tasks in parallel and return results.
616    pub fn par_map(scheduler: &mut Scheduler, actions: Vec<RtObject>) -> Vec<TaskId> {
617        actions
618            .into_iter()
619            .map(|action| scheduler.spawn(action))
620            .collect()
621    }
622    /// Spawn two tasks and combine their results.
623    pub fn par_pair(
624        scheduler: &mut Scheduler,
625        action_a: RtObject,
626        action_b: RtObject,
627    ) -> (TaskId, TaskId) {
628        let a = scheduler.spawn(action_a);
629        let b = scheduler.spawn(action_b);
630        (a, b)
631    }
632    /// Create a task that depends on the completion of all given tasks.
633    pub fn when_all(
634        scheduler: &mut Scheduler,
635        deps: Vec<TaskId>,
636        continuation: RtObject,
637    ) -> TaskId {
638        scheduler.spawn_with_deps(continuation, deps)
639    }
640    /// Create a barrier: spawn a continuation after all deps complete.
641    pub fn barrier(
642        scheduler: &mut Scheduler,
643        dep_actions: Vec<RtObject>,
644        continuation: RtObject,
645    ) -> (Vec<TaskId>, TaskId) {
646        let dep_ids: Vec<TaskId> = dep_actions
647            .into_iter()
648            .map(|action| scheduler.spawn(action))
649            .collect();
650        let barrier_id = scheduler.spawn_with_deps(continuation, dep_ids.clone());
651        (dep_ids, barrier_id)
652    }
653}
654/// Profiling record for a single task.
655#[allow(dead_code)]
656#[derive(Clone, Debug)]
657pub struct TaskProfile {
658    /// Task id.
659    pub id: TaskId,
660    /// Task name (if any).
661    pub name: Option<String>,
662    /// Tick at creation.
663    pub created_at: u64,
664    /// Tick at start of execution.
665    pub started_at: Option<u64>,
666    /// Tick at completion.
667    pub completed_at: Option<u64>,
668    /// Number of times the task yielded.
669    pub yield_count: u32,
670    /// Number of times the task was stolen.
671    pub steal_count: u32,
672    /// Worker that ultimately completed the task.
673    pub completed_by: Option<usize>,
674}
675#[allow(dead_code)]
676impl TaskProfile {
677    /// Create a new profile at the given creation tick.
678    pub fn new(id: TaskId, created_at: u64) -> Self {
679        TaskProfile {
680            id,
681            name: None,
682            created_at,
683            started_at: None,
684            completed_at: None,
685            yield_count: 0,
686            steal_count: 0,
687            completed_by: None,
688        }
689    }
690    /// Record the start of execution.
691    pub fn start(&mut self, tick: u64) {
692        self.started_at = Some(tick);
693    }
694    /// Record the completion of the task.
695    pub fn complete(&mut self, tick: u64, worker: usize) {
696        self.completed_at = Some(tick);
697        self.completed_by = Some(worker);
698    }
699    /// Queuing latency (time from creation to start).
700    pub fn queue_latency(&self) -> Option<u64> {
701        self.started_at.map(|s| s - self.created_at)
702    }
703    /// Execution time (time from start to completion).
704    pub fn execution_time(&self) -> Option<u64> {
705        match (self.started_at, self.completed_at) {
706            (Some(s), Some(c)) => Some(c - s),
707            _ => None,
708        }
709    }
710    /// Total latency (creation to completion).
711    pub fn total_latency(&self) -> Option<u64> {
712        self.completed_at.map(|c| c - self.created_at)
713    }
714}
715/// Load balancer for distributing tasks.
716pub struct LoadBalancer {
717    /// Strategy to use.
718    strategy: LoadBalanceStrategy,
719    /// Round-robin counter.
720    rr_counter: usize,
721    /// Number of workers.
722    num_workers: usize,
723}
724impl LoadBalancer {
725    /// Create a new load balancer.
726    pub fn new(strategy: LoadBalanceStrategy, num_workers: usize) -> Self {
727        LoadBalancer {
728            strategy,
729            rr_counter: 0,
730            num_workers,
731        }
732    }
733    /// Select a worker for a task.
734    pub fn select_worker(&mut self, worker_loads: &[usize]) -> usize {
735        match self.strategy {
736            LoadBalanceStrategy::RoundRobin => {
737                let worker = self.rr_counter % self.num_workers;
738                self.rr_counter += 1;
739                worker
740            }
741            LoadBalanceStrategy::LeastLoaded => worker_loads
742                .iter()
743                .enumerate()
744                .min_by_key(|(_, load)| *load)
745                .map(|(i, _)| i)
746                .unwrap_or(0),
747            LoadBalanceStrategy::Random => {
748                self.rr_counter = self
749                    .rr_counter
750                    .wrapping_mul(6364136223846793005)
751                    .wrapping_add(1);
752                (self.rr_counter >> 16) % self.num_workers
753            }
754            LoadBalanceStrategy::WorkStealing => 0,
755        }
756    }
757}
758/// Thread-safe shared state for the scheduler.
759///
760/// In a real multi-threaded implementation, this would be accessed
761/// by multiple worker threads.
762pub struct SharedState {
763    /// Whether the scheduler should stop.
764    pub shutdown: Arc<AtomicBool>,
765    /// Global task counter.
766    pub task_counter: Arc<AtomicU64>,
767    /// Shared global queue.
768    pub global_queue: Arc<Mutex<VecDeque<TaskId>>>,
769    /// Shared task results.
770    pub results: Arc<Mutex<HashMap<TaskId, RtObject>>>,
771}
772impl SharedState {
773    /// Create new shared state.
774    pub fn new() -> Self {
775        SharedState {
776            shutdown: Arc::new(AtomicBool::new(false)),
777            task_counter: Arc::new(AtomicU64::new(0)),
778            global_queue: Arc::new(Mutex::new(VecDeque::new())),
779            results: Arc::new(Mutex::new(HashMap::new())),
780        }
781    }
782    /// Request shutdown.
783    pub fn request_shutdown(&self) {
784        self.shutdown.store(true, Ordering::Release);
785    }
786    /// Check if shutdown was requested.
787    pub fn should_shutdown(&self) -> bool {
788        self.shutdown.load(Ordering::Acquire)
789    }
790    /// Generate a new task ID.
791    pub fn next_task_id(&self) -> TaskId {
792        let id = self.task_counter.fetch_add(1, Ordering::Relaxed);
793        TaskId::new(id)
794    }
795    /// Push a task to the global queue.
796    pub fn push_task(&self, task_id: TaskId) {
797        if let Ok(mut queue) = self.global_queue.lock() {
798            queue.push_back(task_id);
799        }
800    }
801    /// Pop a task from the global queue.
802    pub fn pop_task(&self) -> Option<TaskId> {
803        self.global_queue.lock().ok()?.pop_front()
804    }
805    /// Store a task result.
806    pub fn store_result(&self, task_id: TaskId, result: RtObject) {
807        if let Ok(mut results) = self.results.lock() {
808            results.insert(task_id, result);
809        }
810    }
811    /// Get a task result.
812    pub fn get_result(&self, task_id: TaskId) -> Option<RtObject> {
813        self.results.lock().ok()?.get(&task_id).cloned()
814    }
815}
816/// Simulates preemptive scheduling by tracking time slices.
817#[allow(dead_code)]
818#[derive(Clone, Debug)]
819pub struct PreemptionSimulator {
820    /// Time slice in ticks per task before forced preemption.
821    pub time_slice: u64,
822    /// Current tick within the slice for the active task.
823    pub ticks_used: u64,
824    /// Total preemptions performed.
825    pub preemptions: u64,
826    /// Currently active task.
827    pub active_task: Option<TaskId>,
828}
829#[allow(dead_code)]
830impl PreemptionSimulator {
831    /// Create a simulator with given time slice.
832    pub fn new(time_slice: u64) -> Self {
833        PreemptionSimulator {
834            time_slice,
835            ticks_used: 0,
836            preemptions: 0,
837            active_task: None,
838        }
839    }
840    /// Assign the active task.
841    pub fn set_active(&mut self, id: TaskId) {
842        self.active_task = Some(id);
843        self.ticks_used = 0;
844    }
845    /// Tick the clock. Returns `true` if the task should be preempted.
846    pub fn tick(&mut self) -> bool {
847        self.ticks_used += 1;
848        if self.ticks_used >= self.time_slice {
849            self.preemptions += 1;
850            self.ticks_used = 0;
851            self.active_task = None;
852            true
853        } else {
854            false
855        }
856    }
857    /// Remaining ticks in the current slice.
858    pub fn remaining(&self) -> u64 {
859        self.time_slice.saturating_sub(self.ticks_used)
860    }
861}
/// Configuration for the task scheduler.
#[derive(Clone, Debug)]
pub struct SchedulerConfig {
    /// Number of workers.
    pub num_workers: usize,
    /// Capacity of each worker's deque.
    pub deque_capacity: usize,
    /// Maximum number of tasks that can be active.
    pub max_tasks: usize,
    /// Whether work stealing is enabled.
    pub work_stealing: bool,
    /// Steal batch size (number of tasks to steal at once).
    pub steal_batch_size: usize,
    /// Whether to use priority scheduling.
    pub priority_scheduling: bool,
    /// Maximum number of retries for failed tasks.
    pub max_retries: u32,
}
impl Default for SchedulerConfig {
    /// Same as [`SchedulerConfig::new`] (added so the type satisfies the
    /// standard `Default` convention for parameterless constructors).
    fn default() -> Self {
        Self::new()
    }
}
impl SchedulerConfig {
    /// Create default configuration: 4 workers with work stealing and
    /// priority scheduling enabled.
    pub fn new() -> Self {
        SchedulerConfig {
            num_workers: 4,
            deque_capacity: 1024,
            max_tasks: 100_000,
            work_stealing: true,
            steal_batch_size: 4,
            priority_scheduling: true,
            max_retries: 3,
        }
    }
    /// Create configuration for single-threaded execution: one worker,
    /// no stealing, no priority scheduling, no retries.
    pub fn single_threaded() -> Self {
        SchedulerConfig {
            num_workers: 1,
            deque_capacity: 1024,
            max_tasks: 100_000,
            work_stealing: false,
            steal_batch_size: 1,
            priority_scheduling: false,
            max_retries: 0,
        }
    }
    /// Set the number of workers (clamped to at least 1).
    pub fn with_workers(mut self, n: usize) -> Self {
        self.num_workers = n.max(1);
        self
    }
    /// Set the deque capacity.
    pub fn with_deque_capacity(mut self, cap: usize) -> Self {
        self.deque_capacity = cap;
        self
    }
    /// Set the maximum number of tasks.
    pub fn with_max_tasks(mut self, max: usize) -> Self {
        self.max_tasks = max;
        self
    }
    /// Enable or disable work stealing.
    pub fn with_work_stealing(mut self, enabled: bool) -> Self {
        self.work_stealing = enabled;
        self
    }
}
/// The main task scheduler.
///
/// Manages workers, task queues, and coordinates execution.
pub struct Scheduler {
    /// Configuration (worker count, deque capacity, stealing flags, …).
    config: SchedulerConfig,
    /// All workers; created as ids `0..config.num_workers`.
    pub(super) workers: Vec<Worker>,
    /// All tasks, indexed by ID.
    tasks: HashMap<TaskId, Task>,
    /// Global task queue (for tasks not assigned to a worker).
    pub(super) global_queue: VecDeque<TaskId>,
    /// Completed task IDs.
    pub(super) completed: Vec<TaskId>,
    /// Next task ID to hand out; incremented on every spawn.
    next_task_id: u64,
    /// Whether the scheduler is running.
    running: bool,
    /// Statistics (counters such as tasks_created, peak_active_tasks).
    stats: SchedulerStats,
}
947impl Scheduler {
948    /// Create a new scheduler with the given configuration.
949    pub fn new(config: SchedulerConfig) -> Self {
950        let workers: Vec<Worker> = (0..config.num_workers)
951            .map(|id| Worker::new(id, config.deque_capacity))
952            .collect();
953        Scheduler {
954            config,
955            workers,
956            tasks: HashMap::new(),
957            global_queue: VecDeque::new(),
958            completed: Vec::new(),
959            next_task_id: 0,
960            running: false,
961            stats: SchedulerStats::default(),
962        }
963    }
964    /// Create a single-threaded scheduler.
965    pub fn single_threaded() -> Self {
966        Scheduler::new(SchedulerConfig::single_threaded())
967    }
968    /// Spawn a new task and return its ID.
969    pub fn spawn(&mut self, action: RtObject) -> TaskId {
970        let id = TaskId::new(self.next_task_id);
971        self.next_task_id += 1;
972        let task = Task::new(id, action);
973        self.tasks.insert(id, task);
974        self.global_queue.push_back(id);
975        self.stats.tasks_created += 1;
976        let active = self.active_task_count() as u64;
977        if active > self.stats.peak_active_tasks {
978            self.stats.peak_active_tasks = active;
979        }
980        id
981    }
982    /// Spawn a named task.
983    pub fn spawn_named(&mut self, name: String, action: RtObject) -> TaskId {
984        let id = TaskId::new(self.next_task_id);
985        self.next_task_id += 1;
986        let task = Task::named(id, name, action);
987        self.tasks.insert(id, task);
988        self.global_queue.push_back(id);
989        self.stats.tasks_created += 1;
990        id
991    }
992    /// Spawn a task with a priority.
993    pub fn spawn_with_priority(&mut self, action: RtObject, priority: TaskPriority) -> TaskId {
994        let id = TaskId::new(self.next_task_id);
995        self.next_task_id += 1;
996        let task = Task::new(id, action).with_priority(priority);
997        self.tasks.insert(id, task);
998        if self.config.priority_scheduling && priority >= TaskPriority::High {
999            self.global_queue.push_front(id);
1000        } else {
1001            self.global_queue.push_back(id);
1002        }
1003        self.stats.tasks_created += 1;
1004        id
1005    }
1006    /// Spawn a task with dependencies.
1007    pub fn spawn_with_deps(&mut self, action: RtObject, deps: Vec<TaskId>) -> TaskId {
1008        let id = TaskId::new(self.next_task_id);
1009        self.next_task_id += 1;
1010        let mut task = Task::new(id, action);
1011        task.dependencies = deps.clone();
1012        let all_satisfied = deps.iter().all(|dep| self.completed.contains(dep));
1013        if all_satisfied {
1014            task.state = TaskState::Created;
1015            self.global_queue.push_back(id);
1016        } else {
1017            task.state = TaskState::Suspended {
1018                waiting_on: deps.clone(),
1019            };
1020        }
1021        for dep in &deps {
1022            if let Some(dep_task) = self.tasks.get_mut(dep) {
1023                dep_task.dependents.push(id);
1024            }
1025        }
1026        self.tasks.insert(id, task);
1027        self.stats.tasks_created += 1;
1028        id
1029    }
1030    /// Get a task by ID.
1031    pub fn get_task(&self, id: TaskId) -> Option<&Task> {
1032        self.tasks.get(&id)
1033    }
1034    /// Get a task mutably by ID.
1035    pub fn get_task_mut(&mut self, id: TaskId) -> Option<&mut Task> {
1036        self.tasks.get_mut(&id)
1037    }
1038    /// Cancel a task.
1039    pub fn cancel(&mut self, id: TaskId) -> bool {
1040        if let Some(task) = self.tasks.get_mut(&id) {
1041            if !task.state.is_terminal() {
1042                task.cancel();
1043                self.stats.tasks_cancelled += 1;
1044                return true;
1045            }
1046        }
1047        false
1048    }
1049    /// Check if a task has completed.
1050    pub fn is_complete(&self, id: TaskId) -> bool {
1051        self.tasks
1052            .get(&id)
1053            .map(|t| t.state.is_terminal())
1054            .unwrap_or(false)
1055    }
1056    /// Get the result of a completed task.
1057    pub fn get_result(&self, id: TaskId) -> Option<&RtObject> {
1058        self.tasks.get(&id).and_then(|t| t.result())
1059    }
1060    /// Run one scheduling step.
1061    ///
1062    /// This distributes tasks from the global queue to workers,
1063    /// performs work stealing, and returns the next task to execute.
1064    pub fn schedule_step(&mut self) -> Option<(usize, TaskId)> {
1065        self.stats.scheduling_rounds += 1;
1066        while let Some(task_id) = self.global_queue.pop_front() {
1067            let target_worker = self.find_least_loaded_worker();
1068            if !self.workers[target_worker].push_task(task_id) {
1069                self.global_queue.push_front(task_id);
1070                break;
1071            }
1072            if let Some(task) = self.tasks.get_mut(&task_id) {
1073                task.state = TaskState::Queued;
1074            }
1075        }
1076        for worker_id in 0..self.workers.len() {
1077            if let Some(task_id) = self.workers[worker_id].pop_task() {
1078                self.workers[worker_id].start_task(task_id);
1079                if let Some(task) = self.tasks.get_mut(&task_id) {
1080                    task.state = TaskState::Running { worker_id };
1081                }
1082                return Some((worker_id, task_id));
1083            }
1084        }
1085        if self.config.work_stealing {
1086            if let Some((worker_id, task_id)) = self.try_steal() {
1087                self.workers[worker_id].start_task(task_id);
1088                if let Some(task) = self.tasks.get_mut(&task_id) {
1089                    task.state = TaskState::Running { worker_id };
1090                }
1091                return Some((worker_id, task_id));
1092            }
1093        }
1094        self.stats.idle_cycles += 1;
1095        None
1096    }
1097    /// Complete a task with a result.
1098    pub fn complete_task(&mut self, task_id: TaskId, result: RtObject) {
1099        let dependents = self
1100            .tasks
1101            .get(&task_id)
1102            .map(|t| t.dependents.clone())
1103            .unwrap_or_default();
1104        if let Some(task) = self.tasks.get_mut(&task_id) {
1105            task.complete(result);
1106            self.completed.push(task_id);
1107            self.stats.tasks_completed += 1;
1108        }
1109        for worker in &mut self.workers {
1110            if worker.current_task == Some(task_id) {
1111                worker.finish_task();
1112                break;
1113            }
1114        }
1115        for dep_id in &dependents {
1116            self.try_wake_task(*dep_id);
1117        }
1118    }
1119    /// Fail a task with an error.
1120    pub fn fail_task(&mut self, task_id: TaskId, error: String) {
1121        if let Some(task) = self.tasks.get_mut(&task_id) {
1122            task.fail(error);
1123            self.stats.tasks_failed += 1;
1124        }
1125        for worker in &mut self.workers {
1126            if worker.current_task == Some(task_id) {
1127                worker.finish_task();
1128                break;
1129            }
1130        }
1131    }
1132    /// Try to wake a suspended task.
1133    fn try_wake_task(&mut self, task_id: TaskId) {
1134        let should_wake = if let Some(task) = self.tasks.get(&task_id) {
1135            if let TaskState::Suspended { ref waiting_on } = task.state {
1136                waiting_on.iter().all(|dep| self.completed.contains(dep))
1137            } else {
1138                false
1139            }
1140        } else {
1141            false
1142        };
1143        if should_wake {
1144            if let Some(task) = self.tasks.get_mut(&task_id) {
1145                task.state = TaskState::Queued;
1146            }
1147            self.global_queue.push_back(task_id);
1148        }
1149    }
1150    /// Find the worker with the least load.
1151    fn find_least_loaded_worker(&self) -> usize {
1152        self.workers
1153            .iter()
1154            .enumerate()
1155            .min_by_key(|(_, w)| w.load())
1156            .map(|(i, _)| i)
1157            .unwrap_or(0)
1158    }
1159    /// Try to steal a task from another worker.
1160    fn try_steal(&mut self) -> Option<(usize, TaskId)> {
1161        self.stats.steal_attempts += 1;
1162        let idle_worker = self.workers.iter().position(|w| w.idle)?;
1163        let busy_worker = self
1164            .workers
1165            .iter()
1166            .enumerate()
1167            .filter(|(i, _)| *i != idle_worker)
1168            .max_by_key(|(_, w)| w.deque.len())?
1169            .0;
1170        if self.workers[busy_worker].deque.is_empty() {
1171            return None;
1172        }
1173        let stolen = self.workers[busy_worker].deque.steal()?;
1174        self.workers[busy_worker].tasks_stolen_from += 1;
1175        self.workers[idle_worker].tasks_stolen += 1;
1176        self.stats.total_steals += 1;
1177        Some((idle_worker, stolen))
1178    }
1179    /// Number of active (non-terminal) tasks.
1180    pub fn active_task_count(&self) -> usize {
1181        self.tasks
1182            .values()
1183            .filter(|t| !t.state.is_terminal())
1184            .count()
1185    }
1186    /// Number of completed tasks.
1187    pub fn completed_count(&self) -> usize {
1188        self.completed.len()
1189    }
1190    /// Number of workers.
1191    pub fn num_workers(&self) -> usize {
1192        self.workers.len()
1193    }
1194    /// Get worker statistics.
1195    pub fn worker_stats(&self) -> Vec<WorkerStats> {
1196        self.workers
1197            .iter()
1198            .map(|w| WorkerStats {
1199                id: w.id,
1200                tasks_completed: w.tasks_completed,
1201                tasks_stolen: w.tasks_stolen,
1202                tasks_stolen_from: w.tasks_stolen_from,
1203                queue_length: w.deque.len(),
1204                idle: w.idle,
1205            })
1206            .collect()
1207    }
1208    /// Get the scheduler statistics.
1209    pub fn stats(&self) -> &SchedulerStats {
1210        &self.stats
1211    }
1212    /// Get the configuration.
1213    pub fn config(&self) -> &SchedulerConfig {
1214        &self.config
1215    }
1216    /// Check if the scheduler is running.
1217    pub fn is_running(&self) -> bool {
1218        self.running
1219    }
1220    /// Start the scheduler.
1221    pub fn start(&mut self) {
1222        self.running = true;
1223    }
1224    /// Stop the scheduler.
1225    pub fn stop(&mut self) {
1226        self.running = false;
1227    }
1228    /// Reset the scheduler.
1229    pub fn reset(&mut self) {
1230        self.tasks.clear();
1231        self.global_queue.clear();
1232        self.completed.clear();
1233        self.next_task_id = 0;
1234        self.stats = SchedulerStats::default();
1235        for worker in &mut self.workers {
1236            worker.deque.clear();
1237            worker.tasks_completed = 0;
1238            worker.tasks_stolen = 0;
1239            worker.tasks_stolen_from = 0;
1240            worker.idle = true;
1241            worker.current_task = None;
1242        }
1243    }
1244    /// Run all tasks to completion (single-threaded simulation).
1245    pub fn run_all(&mut self, mut executor: impl FnMut(&Task) -> Result<RtObject, String>) {
1246        self.start();
1247        while self.active_task_count() > 0 {
1248            if let Some((_worker_id, task_id)) = self.schedule_step() {
1249                let result = {
1250                    let task = self
1251                        .tasks
1252                        .get(&task_id)
1253                        .expect("task_id returned by schedule_step must exist in the tasks map");
1254                    executor(task)
1255                };
1256                match result {
1257                    Ok(value) => self.complete_task(task_id, value),
1258                    Err(error) => self.fail_task(task_id, error),
1259                }
1260            } else {
1261                let has_suspended = self.tasks.values().any(|t| t.state.is_suspended());
1262                if has_suspended && self.global_queue.is_empty() {
1263                    let suspended: Vec<TaskId> = self
1264                        .tasks
1265                        .iter()
1266                        .filter(|(_, t)| t.state.is_suspended())
1267                        .map(|(id, _)| *id)
1268                        .collect();
1269                    for id in suspended {
1270                        self.fail_task(id, "deadlock detected".to_string());
1271                    }
1272                }
1273                break;
1274            }
1275        }
1276        self.stop();
1277    }
1278}
/// A work-stealing deque for a single worker.
///
/// The owner pushes/pops from the bottom (back); thieves steal from the
/// top (front). This implementation is a plain bounded `VecDeque` with
/// no internal synchronization — callers must provide any locking (a
/// production implementation would use a lock-free deque).
pub struct WorkStealingDeque {
    /// The deque storage; back is the owner's end, front the thieves'.
    pub(super) deque: VecDeque<TaskId>,
    /// Maximum number of tasks `push` will accept.
    pub(super) capacity: usize,
}
1290impl WorkStealingDeque {
1291    /// Create a new empty deque.
1292    pub fn new(capacity: usize) -> Self {
1293        WorkStealingDeque {
1294            deque: VecDeque::with_capacity(capacity),
1295            capacity,
1296        }
1297    }
1298    /// Push a task to the bottom (owner's end).
1299    pub fn push(&mut self, task_id: TaskId) -> bool {
1300        if self.deque.len() >= self.capacity {
1301            return false;
1302        }
1303        self.deque.push_back(task_id);
1304        true
1305    }
1306    /// Pop a task from the bottom (owner's end).
1307    pub fn pop(&mut self) -> Option<TaskId> {
1308        self.deque.pop_back()
1309    }
1310    /// Steal a task from the top (thief's end).
1311    pub fn steal(&mut self) -> Option<TaskId> {
1312        self.deque.pop_front()
1313    }
1314    /// Number of tasks in the deque.
1315    pub fn len(&self) -> usize {
1316        self.deque.len()
1317    }
1318    /// Check if the deque is empty.
1319    pub fn is_empty(&self) -> bool {
1320        self.deque.is_empty()
1321    }
1322    /// Check if the deque is full.
1323    pub fn is_full(&self) -> bool {
1324        self.deque.len() >= self.capacity
1325    }
1326    /// Clear all tasks.
1327    pub fn clear(&mut self) {
1328        self.deque.clear();
1329    }
1330    /// Peek at the bottom task without removing it.
1331    pub fn peek(&self) -> Option<&TaskId> {
1332        self.deque.back()
1333    }
1334    /// Steal up to n tasks.
1335    pub fn steal_batch(&mut self, n: usize) -> Vec<TaskId> {
1336        let count = n.min(self.deque.len() / 2).max(1).min(self.deque.len());
1337        let mut stolen = Vec::with_capacity(count);
1338        for _ in 0..count {
1339            if let Some(task_id) = self.deque.pop_front() {
1340                stolen.push(task_id);
1341            } else {
1342                break;
1343            }
1344        }
1345        stolen
1346    }
1347}
/// Statistics for the scheduler.
///
/// All counters are cumulative since construction (or the last reset).
#[derive(Clone, Debug, Default)]
pub struct SchedulerStats {
    /// Total tasks created (all spawn variants).
    pub tasks_created: u64,
    /// Total tasks completed successfully.
    pub tasks_completed: u64,
    /// Total tasks that failed with an error.
    pub tasks_failed: u64,
    /// Total tasks cancelled before reaching a terminal state.
    pub tasks_cancelled: u64,
    /// Total successful steals performed.
    pub total_steals: u64,
    /// Total steal attempts (including failures).
    pub steal_attempts: u64,
    /// Total scheduling rounds that found no task to run.
    pub idle_cycles: u64,
    /// Peak number of simultaneously active (non-terminal) tasks.
    pub peak_active_tasks: u64,
    /// Total scheduling rounds (calls to `schedule_step`).
    pub scheduling_rounds: u64,
}
/// An actor mailbox (a FIFO queue of messages).
#[allow(dead_code)]
pub struct ActorMailbox {
    /// ID of the actor that owns this mailbox.
    pub id: ActorId,
    /// Pending messages, oldest first.
    messages: VecDeque<ActorMessage>,
    /// Total messages ever enqueued via `send`.
    pub total_received: u64,
    /// Total messages successfully dequeued via `receive`.
    pub total_processed: u64,
}
1382#[allow(dead_code)]
1383impl ActorMailbox {
1384    /// Create a new mailbox.
1385    pub fn new(id: ActorId) -> Self {
1386        ActorMailbox {
1387            id,
1388            messages: VecDeque::new(),
1389            total_received: 0,
1390            total_processed: 0,
1391        }
1392    }
1393    /// Enqueue a message.
1394    pub fn send(&mut self, msg: ActorMessage) {
1395        self.messages.push_back(msg);
1396        self.total_received += 1;
1397    }
1398    /// Dequeue the next message.
1399    pub fn receive(&mut self) -> Option<ActorMessage> {
1400        let msg = self.messages.pop_front();
1401        if msg.is_some() {
1402            self.total_processed += 1;
1403        }
1404        msg
1405    }
1406    /// Number of pending messages.
1407    pub fn pending(&self) -> usize {
1408        self.messages.len()
1409    }
1410    /// Whether the mailbox is empty.
1411    pub fn is_empty(&self) -> bool {
1412        self.messages.is_empty()
1413    }
1414}
/// A unique identifier for a task.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct TaskId(pub u64);
impl TaskId {
    /// Wrap a raw `u64` as a task ID.
    pub fn new(id: u64) -> Self {
        Self(id)
    }
    /// Unwrap the underlying `u64` value.
    pub fn raw(self) -> u64 {
        self.0
    }
}
/// The state of a task in its lifecycle.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum TaskState {
    /// Task has been created but not yet scheduled.
    Created,
    /// Task is in a queue (global or per-worker) waiting to execute.
    Queued,
    /// Task is currently running on a worker.
    Running {
        /// Index of the worker executing this task.
        worker_id: usize,
    },
    /// Task is suspended until its dependencies complete.
    Suspended {
        /// Task IDs this task is waiting on.
        waiting_on: Vec<TaskId>,
    },
    /// Task has completed successfully (terminal state).
    Completed {
        /// The result value.
        result: RtObject,
    },
    /// Task has failed with an error (terminal state).
    Failed {
        /// The error message.
        error: String,
    },
    /// Task has been cancelled (terminal state).
    Cancelled,
}
1458impl TaskState {
1459    /// Check if the task is in a terminal state.
1460    pub fn is_terminal(&self) -> bool {
1461        matches!(
1462            self,
1463            TaskState::Completed { .. } | TaskState::Failed { .. } | TaskState::Cancelled
1464        )
1465    }
1466    /// Check if the task is runnable.
1467    pub fn is_runnable(&self) -> bool {
1468        matches!(self, TaskState::Created | TaskState::Queued)
1469    }
1470    /// Check if the task is running.
1471    pub fn is_running(&self) -> bool {
1472        matches!(self, TaskState::Running { .. })
1473    }
1474    /// Check if the task is suspended.
1475    pub fn is_suspended(&self) -> bool {
1476        matches!(self, TaskState::Suspended { .. })
1477    }
1478}
/// A cooperative yield mechanism for long-running tasks.
#[allow(dead_code)]
pub struct YieldPoint {
    /// Whether a yield has been requested; shared with the handles
    /// returned by `handle()`.
    requested: Arc<AtomicBool>,
    /// How many times `should_yield` has been called.
    pub check_count: u64,
    /// How many times the task actually yielded.
    pub yield_count: u64,
    /// Intended instructions between checks.
    /// NOTE(review): not consulted by `should_yield` itself — presumably
    /// the caller enforces the cadence; confirm.
    pub check_interval: u64,
}
1491#[allow(dead_code)]
1492impl YieldPoint {
1493    /// Create a new yield point.
1494    pub fn new() -> Self {
1495        YieldPoint {
1496            requested: Arc::new(AtomicBool::new(false)),
1497            check_count: 0,
1498            yield_count: 0,
1499            check_interval: 100,
1500        }
1501    }
1502    /// Create with a custom check interval.
1503    pub fn with_interval(check_interval: u64) -> Self {
1504        YieldPoint {
1505            requested: Arc::new(AtomicBool::new(false)),
1506            check_count: 0,
1507            yield_count: 0,
1508            check_interval,
1509        }
1510    }
1511    /// Request a yield (called from scheduler).
1512    pub fn request_yield(&self) {
1513        self.requested.store(true, Ordering::Release);
1514    }
1515    /// Clear the yield request (called after yielding).
1516    pub fn clear_request(&self) {
1517        self.requested.store(false, Ordering::Release);
1518    }
1519    /// Check if a yield should happen. Returns true if the task should yield.
1520    pub fn should_yield(&mut self) -> bool {
1521        self.check_count += 1;
1522        if self.requested.load(Ordering::Acquire) {
1523            self.yield_count += 1;
1524            self.clear_request();
1525            true
1526        } else {
1527            false
1528        }
1529    }
1530    /// Get a handle that the scheduler can use to request a yield.
1531    pub fn handle(&self) -> YieldHandle {
1532        YieldHandle {
1533            requested: Arc::clone(&self.requested),
1534        }
1535    }
1536}