// auralis_task/scope.rs
1//! Explicit [`TaskScope`] tree with iterative cancellation, parent
2//! back-references, callback-handle lifecycle management, and a context
3//! store for dependency injection.
4
5use std::any::{Any, TypeId};
6use std::cell::{Cell, RefCell};
7use std::collections::{HashMap, VecDeque};
8use std::future::Future;
9use std::pin::Pin;
10use std::rc::{Rc, Weak};
11
12use crate::executor;
13use crate::Priority;
14
type ScopeId = u64;
type TaskId = u64;

// ---------------------------------------------------------------------------
// Scope-id allocator
// ---------------------------------------------------------------------------

thread_local! {
    /// Per-thread monotonically increasing counter; the first id is 1.
    static NEXT_SCOPE_ID: Cell<ScopeId> = const { Cell::new(1) };
}

/// Hand out the next unique scope id for this thread.
fn alloc_scope_id() -> ScopeId {
    // `Cell::replace` stores the incremented counter and returns the
    // previous value, which is the id we hand out.
    NEXT_SCOPE_ID.with(|counter| counter.replace(counter.get() + 1))
}
33
34// ---------------------------------------------------------------------------
35// CallbackHandle
36// ---------------------------------------------------------------------------
37
/// Owns a resource that must be cleaned up when the owning [`TaskScope`]
/// is dropped.
///
/// Currently used for signal subscriptions registered by the `bind_*`
/// functions.  When the [`TaskScope`] drops, every registered
/// [`CallbackHandle`] is dropped, which calls the stored cleanup closure
/// to unsubscribe from the signal.
pub struct CallbackHandle {
    cleanup: Option<Box<dyn FnOnce() + 'static>>,
}

impl CallbackHandle {
    /// Create a handle from a cleanup closure.
    pub fn new(cleanup: impl FnOnce() + 'static) -> Self {
        let boxed: Box<dyn FnOnce() + 'static> = Box::new(cleanup);
        Self {
            cleanup: Some(boxed),
        }
    }

    /// Create a no-op handle that does nothing on drop.
    ///
    /// Useful as a placeholder when a [`CallbackHandle`] is required
    /// but no cleanup is needed.
    #[must_use]
    pub fn noop() -> Self {
        Self { cleanup: None }
    }
}

impl Drop for CallbackHandle {
    /// Run the stored cleanup exactly once (if any was provided).
    fn drop(&mut self) {
        if let Some(run_cleanup) = self.cleanup.take() {
            run_cleanup();
        }
    }
}
74
75// ---------------------------------------------------------------------------
76// Scope registry — maps ScopeId → live TaskScope for executor injection
77// ---------------------------------------------------------------------------
78//
79// # Why Weak references
80//
81// The registry stores `Weak<RefCell<TaskScopeInner>>` rather than
82// `Rc<...>`.  This prevents the registry from keeping scopes alive
83// after the application has dropped them — when the last strong
84// reference is gone, the Weak upgrade returns `None` and the executor
85// skips that scope.
86//
87// # Thread safety
88//
89// `SCOPE_REGISTRY` is a `thread_local!` because Auralis is
90// single-threaded by design (Wasm constraint).  For multi-task SSR
91// servers, each request uses an isolated [`Executor`] instance created
92// via [`Executor::new_instance`](crate::Executor::new_instance), and
93// the [`ScopeStore`] trait provides pluggable per-task storage.
94
/// Registry entry: weak handles to a scope's shared state and its
/// `suspended` flag.  Weak so the registry never keeps a scope alive.
type ScopeRegistryEntry = (Weak<RefCell<TaskScopeInner>>, Weak<Cell<bool>>);

thread_local! {
    // Maps ScopeId → live scope.  Entries are removed by
    // `unregister_scope`; dead Weaks are simply skipped on lookup
    // (see `find_scope`, which returns None when upgrade fails).
    static SCOPE_REGISTRY: RefCell<HashMap<ScopeId, ScopeRegistryEntry>> =
        RefCell::new(HashMap::new());
}
101
102/// Register a scope in the global registry so the executor can look it
103/// up by id and inject it as the current scope when polling tasks.
104fn register_scope(id: ScopeId, inner: &Rc<RefCell<TaskScopeInner>>, suspended: &Rc<Cell<bool>>) {
105    let _ = SCOPE_REGISTRY.try_with(|reg| {
106        if let Ok(mut r) = reg.try_borrow_mut() {
107            r.insert(id, (Rc::downgrade(inner), Rc::downgrade(suspended)));
108        }
109    });
110}
111
112fn unregister_scope(id: ScopeId) {
113    let _ = SCOPE_REGISTRY.try_with(|reg| {
114        if let Ok(mut r) = reg.try_borrow_mut() {
115            r.remove(&id);
116        }
117    });
118}
119
120/// Find a live [`TaskScope`] by its id.
121///
122/// Returns `None` if the scope has been dropped or the id is unknown.
123#[must_use]
124pub fn find_scope(scope_id: ScopeId) -> Option<TaskScope> {
125    SCOPE_REGISTRY
126        .try_with(|reg| {
127            if let Ok(r) = reg.try_borrow() {
128                r.get(&scope_id).and_then(|(inner_weak, suspended_weak)| {
129                    let inner = inner_weak.upgrade()?;
130                    let suspended = suspended_weak.upgrade()?;
131                    Some(TaskScope { inner, suspended })
132                })
133            } else {
134                None
135            }
136        })
137        .ok()
138        .flatten()
139}
140
/// Return the debug label for the scope with the given id, if any.
///
/// Only available with the `debug` feature.
#[cfg(feature = "debug")]
#[doc(hidden)]
#[must_use]
pub fn scope_debug_label(scope_id: ScopeId) -> Option<String> {
    let scope = find_scope(scope_id)?;
    scope.inner.borrow().debug_label.clone()
}
150
151/// Clear the scope registry.
152#[doc(hidden)]
153pub fn clear_scope_registry() {
154    let _ = SCOPE_REGISTRY.try_with(|reg| {
155        if let Ok(mut r) = reg.try_borrow_mut() {
156            r.clear();
157        }
158    });
159}
160
161// ---------------------------------------------------------------------------
162// Current-scope storage — injectable, defaults to thread-local
163// ---------------------------------------------------------------------------
164
/// Function signatures for scope store operations.
///
/// Using function pointers keeps the store `Send + Sync` even though
/// `TaskScope` itself is `!Send` — Rust function pointer types are
/// always `Send + Sync` regardless of parameter/return types.
type ScopeSetFn = fn(Option<TaskScope>);
type ScopeGetFn = fn() -> Option<TaskScope>;

/// A pluggable backend for per-task (or per-thread) scope storage.
///
/// The default implementation uses a thread-local cell, which is
/// sufficient for single-threaded Wasm environments.  For multi-task
/// SSR runtimes (e.g. tokio) the host application should inject a
/// task-local implementation via [`set_scope_store`].
///
/// Both fields are plain function pointers (not boxed closures), which
/// is what allows the store to live in a `static` `OnceLock`.
pub struct ScopeStore {
    /// Store a scope (or `None` to clear).
    pub set_fn: ScopeSetFn,
    /// Retrieve the current scope.
    pub get_fn: ScopeGetFn,
}
185
use std::sync::OnceLock;
// Written at most once: either explicitly by `set_scope_store`, or
// lazily with the thread-local default on first scope operation.
static SCOPE_STORE: OnceLock<ScopeStore> = OnceLock::new();

/// Return the installed [`ScopeStore`], initialising the thread-local
/// default implementation on first use.
fn ensure_default_store() -> &'static ScopeStore {
    SCOPE_STORE.get_or_init(|| ScopeStore {
        set_fn: thread_local_set,
        get_fn: thread_local_get,
    })
}
195
196/// Install a custom scope store.
197///
198/// Must be called before any scope operations.  On Wasm or in tests the
199/// default thread-local store is sufficient.
200///
201/// # Example (tokio SSR)
202///
203/// ```rust,ignore
204/// use auralis_task::ScopeStore;
205///
206/// auralis_task::set_scope_store(ScopeStore {
207///     set_fn: my_tokio_task_local_set,
208///     get_fn: my_tokio_task_local_get,
209/// });
210/// ```
211pub fn set_scope_store(store: ScopeStore) {
212    let _ = SCOPE_STORE.set(store);
213}
214
215// The `set_scope_store` API allows injecting a custom scope store.
216// For SSR in multi-threaded tokio runtimes, users should implement a
217// `ScopeStore` backed by `tokio::task::LocalKey` (available when the
218// `ssr-tokio` feature is enabled) or a similar per-task mechanism.
219//
220// Example with tokio (when `ssr-tokio` is enabled):
221//
222// ```rust,ignore
223// use auralis_task::{ScopeStore, set_scope_store};
224//
// tokio::task_local! {
//     static TK_SCOPE: std::cell::RefCell<Option<auralis_task::TaskScope>>;
// }
229//
230// set_scope_store(ScopeStore {
231//     set_fn: |s| TK_SCOPE.with(|c| *c.borrow_mut() = s),
232//     get_fn: || TK_SCOPE.with(|c| c.borrow().clone()),
233// });
234// ```
235//
236// For single-threaded tokio use (LocalSet / spawn_local), the default
237// thread-local store works correctly without any configuration.
238
239// ---- default thread-local implementation -------------------------------
240
241thread_local! {
242    static CURRENT_SCOPE: RefCell<Option<TaskScope>> = const { RefCell::new(None) };
243}
244
245fn thread_local_set(scope: Option<TaskScope>) {
246    CURRENT_SCOPE.with(|cell| {
247        cell.replace(scope);
248    });
249}
250
251fn thread_local_get() -> Option<TaskScope> {
252    CURRENT_SCOPE.with(|cell| cell.borrow().clone())
253}
254
255/// Directly set the current scope without save/restore.
256///
257/// Used by the executor to inject the owning scope before polling a
258/// task.  The caller must restore the previous scope after the poll.
259pub(crate) fn set_scope_direct(scope: Option<TaskScope>) {
260    let store = ensure_default_store();
261    (store.set_fn)(scope);
262}
263
264/// Directly get the current scope.
265pub(crate) fn get_scope_direct() -> Option<TaskScope> {
266    let store = ensure_default_store();
267    (store.get_fn)()
268}
269
270// ---- ssr-tokio integration ----------------------------------------------
271
/// Initialise the scope store for tokio-based SSR runtimes.
///
/// Uses `tokio::task::LocalKey` to store the current [`TaskScope`] per
/// tokio task, enabling true multi-request isolation.  Call this once
/// at process startup, before any scope operations.
///
/// Only available with the **`ssr-tokio`** feature (non-wasm).
///
/// # Example
///
/// ```rust,ignore
/// auralis_task::init_scope_store_tokio();
/// ```
#[cfg(feature = "ssr-tokio")]
pub fn init_scope_store_tokio() {
    // NOTE(review): a `tokio::task_local!` key only has a value inside
    // `TK_SCOPE.scope(...)`; outside such a scope, `try_with` returns
    // `Err`.  Nothing visible in this file ever calls `.scope()`, and
    // the key is declared inside this function so callers cannot enter
    // it either — as written, the installed set/get functions appear to
    // silently no-op.  Confirm against the runtime glue that drives
    // tokio tasks.
    tokio::task_local! {
        static TK_SCOPE: std::cell::RefCell<Option<TaskScope>>;
    }

    // Initialise the key.
    // NOTE(review): this runs outside any `TK_SCOPE.scope`, so the
    // `try_with` is expected to fail and do nothing — verify intent.
    let _ = TK_SCOPE.try_with(|cell| {
        cell.replace(None);
    });

    set_scope_store(ScopeStore {
        set_fn: |s| {
            let _ = TK_SCOPE.try_with(|cell| {
                cell.replace(s);
            });
        },
        get_fn: || {
            TK_SCOPE
                .try_with(|cell| cell.borrow().clone())
                .ok()
                .flatten()
        },
    });
}
310
311// ---- public API --------------------------------------------------------
312
313/// Set the current [`TaskScope`] for the duration of `f`.
314///
315/// Set `scope` as the current scope for the duration of `f`,
316/// restoring the previous scope afterward.
317///
318/// Used by framework glue code so that bind functions can discover the
319/// owning scope via [`current_scope`].
320pub fn with_current_scope<R>(scope: &TaskScope, f: impl FnOnce() -> R) -> R {
321    let store = ensure_default_store();
322    let prev = (store.get_fn)();
323    (store.set_fn)(Some(scope.clone_inner()));
324    let result = f();
325    (store.set_fn)(prev);
326    result
327}
328
329/// Get the currently active [`TaskScope`], if any.
330#[must_use]
331pub fn current_scope() -> Option<TaskScope> {
332    let store = ensure_default_store();
333    (store.get_fn)()
334}
335
336// ---------------------------------------------------------------------------
337// TaskScopeInner
338// ---------------------------------------------------------------------------
339
/// Shared state behind a [`TaskScope`] handle.
///
/// NOTE(review): `context` and `callbacks` are `RefCell`s nested inside
/// the outer `RefCell<TaskScopeInner>` — presumably so they can be
/// mutated while the outer cell is only borrowed immutably (see
/// `register_callback_handle`); confirm before flattening.
struct TaskScopeInner {
    /// Unique id from `alloc_scope_id`; key into the scope registry.
    id: ScopeId,
    /// Ids of tasks spawned in this scope, used for cancellation.
    task_ids: Vec<TaskId>,
    /// Strong references to child scopes; cancelled with this scope.
    children: Vec<TaskScope>,
    /// Weak back-reference to parent (set for child scopes).
    parent: Option<Weak<RefCell<TaskScopeInner>>>,
    /// Typed context store for dependency injection.
    context: RefCell<HashMap<TypeId, Rc<dyn Any>>>,
    /// Callback handles registered by bind_* functions.
    callbacks: RefCell<Vec<CallbackHandle>>,
    /// Optional label for `dump_task_tree` output (debug feature).
    #[cfg(feature = "debug")]
    debug_label: Option<String>,
    /// Set once on cancellation; a cancelled scope ignores new spawns
    /// and callback registrations.
    cancelled: bool,
    /// The executor that owns tasks spawned in this scope.
    /// Stored as `Rc` (strong reference) so the executor lives
    /// at least as long as the scope — essential for safe
    /// cancellation during drop.
    executor: executor::ExecutorRef,
}
360
361// ---------------------------------------------------------------------------
362// TaskScope
363// ---------------------------------------------------------------------------
364
/// A node in the scope tree that owns spawned tasks and carries a typed
/// context for dependency injection.
///
/// # Drop guarantee
///
/// When a [`TaskScope`] is dropped, all descendant scopes and their
/// tasks are cancelled **iteratively** using a work queue — recursion
/// is never used, so deeply nested UI trees (200+ levels) never
/// overflow the stack.
///
/// # Cloning
///
/// Cloning produces another handle to the same scope node; cancellation
/// only runs when the last handle is dropped (the `Drop` impl checks
/// the `Rc` strong count).
///
/// # Context
///
/// Use [`provide`](TaskScope::provide) / [`consume`](TaskScope::consume)
/// for lightweight dependency injection that walks up the scope tree.
///
/// # Callback lifecycle
///
/// [`CallbackHandle`]s registered via
/// [`register_callback_handle`](Self::register_callback_handle) are
/// dropped **before** spawned tasks are cancelled, ensuring that
/// signal subscriptions are removed before any task cleanup.
#[must_use]
pub struct TaskScope {
    // Shared scope node; every clone of this handle points at the same
    // `TaskScopeInner`.
    inner: Rc<RefCell<TaskScopeInner>>,
    /// Whether this scope is suspended.  Stored outside the `RefCell`
    /// so that [`is_suspended`](Self::is_suspended) can be checked
    /// without borrowing (avoids re-entrant borrow panics during
    /// synchronous flush in tests).
    suspended: Rc<Cell<bool>>,
}
395
impl TaskScope {
    /// Create a new root scope on the global thread-local executor.
    ///
    /// For explicit executor ownership use [`TaskScope::with_executor`].
    pub fn new() -> Self {
        Self::with_executor(&executor::current_executor_instance())
    }

    /// Create a new root scope on the given executor.
    ///
    /// All tasks spawned in this scope (and its descendants) run on
    /// `ex`.  The scope holds a strong reference, keeping the executor
    /// alive at least as long as the scope.
    pub fn with_executor(ex: &executor::ExecutorRef) -> Self {
        let inner = Rc::new(RefCell::new(TaskScopeInner {
            id: alloc_scope_id(),
            task_ids: Vec::new(),
            children: Vec::new(),
            parent: None,
            context: RefCell::new(HashMap::new()),
            callbacks: RefCell::new(Vec::new()),
            #[cfg(feature = "debug")]
            debug_label: None,
            cancelled: false,
            executor: Rc::clone(ex),
        }));
        let id = inner.borrow().id;
        let suspended = Rc::new(Cell::new(false));
        // Only a Weak goes into the registry; the handle returned below
        // holds the sole strong reference, so dropping it cancels the
        // scope (see the `Drop` impl).
        register_scope(id, &inner, &suspended);
        Self { inner, suspended }
    }

    /// Create a child scope that inherits the parent's executor.
    ///
    /// The parent keeps a strong clone of the child in `children`, so
    /// the child node stays alive (and is cancelled) with the parent
    /// even after the returned handle is dropped.
    pub fn new_child(parent: &Self) -> Self {
        let ex = parent.inner.borrow().executor.clone();
        let inner = Rc::new(RefCell::new(TaskScopeInner {
            id: alloc_scope_id(),
            task_ids: Vec::new(),
            children: Vec::new(),
            parent: Some(Rc::downgrade(&parent.inner)),
            context: RefCell::new(HashMap::new()),
            callbacks: RefCell::new(Vec::new()),
            #[cfg(feature = "debug")]
            debug_label: None,
            cancelled: false,
            executor: ex,
        }));
        let id = inner.borrow().id;
        let suspended = Rc::new(Cell::new(false));
        register_scope(id, &inner, &suspended);
        let child = Self { inner, suspended };
        parent.inner.borrow_mut().children.push(child.clone_inner());
        child
    }

    /// Spawn a future in this scope at low priority.
    pub fn spawn(&self, future: impl Future<Output = ()> + 'static) {
        self.spawn_with_priority(Priority::Low, future);
    }

    /// Spawn a future in this scope at the given priority.
    ///
    /// Silently ignored if the scope has already been cancelled.
    ///
    /// The current scope is set to `self` during the spawn so that any
    /// synchronous work inside the future constructor (e.g. `bind_text`)
    /// can discover the owning scope via [`current_scope`].
    pub fn spawn_with_priority(
        &self,
        priority: Priority,
        future: impl Future<Output = ()> + 'static,
    ) {
        let inner = self.inner.borrow();
        if inner.cancelled {
            return;
        }
        let ex = Rc::clone(&inner.executor);
        let task_id = executor::with_executor(&ex, || {
            with_current_scope(self, || {
                executor::spawn_scoped_on(&ex, priority, inner.id, future)
            })
        });
        // Release the shared borrow before taking the mutable one
        // below — a re-entrant `borrow_mut` would panic.
        drop(inner);
        self.inner.borrow_mut().task_ids.push(task_id);
    }

    // -- callback lifecycle ------------------------------------------------

    /// Register a [`CallbackHandle`] that will be dropped when this scope
    /// is dropped (or when `clear_callbacks` is called).
    ///
    /// Silently ignored (the handle is dropped immediately, running its
    /// cleanup) if the scope has already been cancelled.
    ///
    /// Used by `bind_*` functions to ensure signal subscriptions are
    /// cleaned up when the owning component is destroyed.
    pub fn register_callback_handle(&self, handle: CallbackHandle) {
        let inner = self.inner.borrow();
        if inner.cancelled {
            return;
        }
        // `callbacks` is its own RefCell, so a shared borrow of `inner`
        // is enough here.
        inner.callbacks.borrow_mut().push(handle);
    }

    // -- context -----------------------------------------------------------

    /// Store a value of type `T` in this scope.
    ///
    /// The value is wrapped in [`Rc`] so it can be shared.  A subsequent
    /// call to [`consume`](TaskScope::consume) on this scope (or any
    /// descendant) will discover it by walking up the parent chain.
    ///
    /// Providing a second value of the same type replaces the first
    /// (`HashMap::insert` semantics).
    pub fn provide<T: 'static>(&self, value: T) {
        self.inner
            .borrow()
            .context
            .borrow_mut()
            .insert(TypeId::of::<T>(), Rc::new(value));
    }

    /// Look up a value of type `T` by walking up the scope tree.
    ///
    /// Returns `None` if no ancestor (including `self`) has provided a
    /// value of this type.  The walk is iterative and stops at the
    /// first match (nearest provider wins).
    #[must_use]
    pub fn consume<T: 'static>(&self) -> Option<Rc<T>> {
        let mut current = Some(Rc::clone(&self.inner));

        while let Some(inner) = current {
            // Check local context.
            {
                let inner_ref = inner.borrow();
                let ctx = inner_ref.context.borrow();
                if let Some(val) = ctx.get(&TypeId::of::<T>()) {
                    if let Ok(downcast) = val.clone().downcast::<T>() {
                        return Some(downcast);
                    }
                }
            }

            // Walk up to parent.
            let parent = {
                let inner_ref = inner.borrow();
                inner_ref.parent.as_ref().and_then(Weak::upgrade)
            };
            current = parent;
        }

        None
    }

    /// Like [`consume`](TaskScope::consume) but panics if the value is
    /// not found.
    ///
    /// # Panics
    ///
    /// Panics if no ancestor scope has provided a value of type `T`.
    #[must_use]
    #[track_caller]
    pub fn expect_context<T: 'static>(&self) -> Rc<T> {
        self.consume::<T>()
            .unwrap_or_else(|| panic!("context not found: {}", std::any::type_name::<T>()))
    }

    /// Return `true` if this scope has been cancelled (dropped).
    ///
    /// A cancelled scope silently ignores [`spawn`](TaskScope::spawn) calls.
    #[must_use]
    pub fn is_cancelled(&self) -> bool {
        self.inner.borrow().cancelled
    }

    // -- debugging ----------------------------------------------------------

    /// Set a label for this scope, shown in [`dump_task_tree`] output.
    ///
    /// Only available with the `debug` feature.
    #[cfg(feature = "debug")]
    pub fn set_debug_label(&self, label: impl Into<String>) {
        self.inner.borrow_mut().debug_label = Some(label.into());
    }

    // -- testing -----------------------------------------------------------

    /// Return the number of spawned tasks in this scope (test-only).
    #[cfg(test)]
    #[must_use]
    pub fn task_count(&self) -> usize {
        self.inner.borrow().task_ids.len()
    }

    /// Return the number of child scopes (test-only).
    #[cfg(test)]
    #[must_use]
    pub fn child_count(&self) -> usize {
        self.inner.borrow().children.len()
    }

    // -- internals ---------------------------------------------------------

    /// Create another handle to the same scope node (bumps both `Rc`
    /// strong counts; see `Drop` for why that matters).
    fn clone_inner(&self) -> Self {
        Self {
            inner: Rc::clone(&self.inner),
            suspended: Rc::clone(&self.suspended),
        }
    }

    /// Run `f` with `self` set as the current scope for the thread.
    ///
    /// Used by framework glue code so bind functions can discover the
    /// owning scope via [`current_scope`].
    pub fn enter<R>(&self, f: impl FnOnce() -> R) -> R {
        with_current_scope(self, f)
    }

    /// Suspend all tasks owned by this scope and its descendants.
    ///
    /// Suspended tasks are skipped during executor polling.  Signal
    /// subscriptions remain registered but their callbacks are not
    /// invoked while the scope is suspended.  Use [`resume`](Self::resume)
    /// to restart execution.
    ///
    /// Idempotent: suspending an already-suspended scope is a no-op
    /// (which also stops the cascade from revisiting subtrees).
    ///
    /// Used by `if_async_cached` and `match_async_cached` to pause
    /// hidden branches.
    pub fn suspend(&self) {
        if self.suspended.get() {
            return;
        }
        self.suspended.set(true);
        // Cascading: suspend all descendants.
        let children: Vec<TaskScope> = {
            self.inner
                .borrow()
                .children
                .iter()
                .map(TaskScope::clone_inner)
                .collect()
        };
        for child in &children {
            child.suspend();
        }
    }

    /// Resume all tasks owned by this scope and its descendants.
    ///
    /// This reverses the effect of [`suspend`](Self::suspend).  Tasks
    /// become eligible for polling again on the next executor flush.
    ///
    /// Idempotent: resuming a non-suspended scope is a no-op.
    pub fn resume(&self) {
        if !self.suspended.get() {
            return;
        }
        self.suspended.set(false);

        let (scope_id, children) = {
            let inner = self.inner.borrow();
            let id = inner.id;
            let children: Vec<TaskScope> =
                inner.children.iter().map(TaskScope::clone_inner).collect();
            (id, children)
        };

        // Enqueue all tasks belonging to this scope.
        let ex = Rc::clone(&self.inner.borrow().executor);
        executor::enqueue_scope_tasks_on(&ex, scope_id);

        // Resume children (cascading).
        for child in &children {
            child.resume();
        }
    }

    /// Return `true` if this scope is currently suspended.
    #[must_use]
    pub fn is_suspended(&self) -> bool {
        self.suspended.get()
    }
}
667
668impl Default for TaskScope {
669    fn default() -> Self {
670        Self::new()
671    }
672}
673
674impl Clone for TaskScope {
675    fn clone(&self) -> Self {
676        self.clone_inner()
677    }
678}
679
680// Iterative cancellation: descendants are collected BFS, then
681// cancelled leaf→root, avoiding recursive drop that would overflow
682// the stack on deeply-nested trees (200+ levels).
683//
684// Callback handles are dropped BEFORE tasks, ensuring signal
685// subscriptions are removed before any task is cancelled.
impl Drop for TaskScope {
    fn drop(&mut self) {
        // Only cancel when this is the last reference to the inner.
        // Temporary clones (from find_scope during executor flush,
        // from with_current_scope during spawn) share the same inner
        // and must not cancel the scope when they go out of scope.
        if Rc::strong_count(&self.inner) > 1 {
            return;
        }

        let Ok(mut inner) = self.inner.try_borrow_mut() else {
            // Already borrowed — this is a re-entrant drop (e.g. a
            // callback held the last clone of this scope).  If this
            // was the last clone, resources will leak.
            #[cfg(debug_assertions)]
            {
                eprintln!(
                    "[auralis-task] WARNING: TaskScope::drop cannot borrow inner \
                     (already borrowed). If this was the last clone, tasks and \
                     callbacks will leak. Avoid dropping the last TaskScope clone \
                     inside a callback or during executor flush."
                );
            }
            return;
        };
        if inner.cancelled {
            return;
        }
        inner.cancelled = true;

        // ---- drop callback handles first ---------------------------------
        // NOTE(review): clearing runs each CallbackHandle's cleanup
        // closure while `inner` is mutably borrowed — cleanup closures
        // must not call back into this scope or drop will panic.
        inner.callbacks.borrow_mut().clear();

        // ---- collect descendants BFS ------------------------------------
        let mut descendants: Vec<Rc<RefCell<TaskScopeInner>>> = Vec::new();
        {
            let mut queue: VecDeque<Rc<RefCell<TaskScopeInner>>> = VecDeque::new();
            for child in &inner.children {
                queue.push_back(Rc::clone(&child.inner));
            }

            while let Some(scope_rc) = queue.pop_front() {
                let scope = scope_rc.borrow();
                for child in &scope.children {
                    queue.push_back(Rc::clone(&child.inner));
                }
                descendants.push(Rc::clone(&scope_rc));
            }
        }

        // ---- cancel leaves → root ---------------------------------------
        // `descendants` is in BFS (root→leaf) order, so `.rev()` walks
        // deepest scopes first.
        for scope_rc in descendants.iter().rev() {
            let mut scope = scope_rc.borrow_mut();
            if scope.cancelled {
                continue;
            }
            scope.cancelled = true;

            // Drop callbacks before tasks.
            scope.callbacks.borrow_mut().clear();

            if !scope.task_ids.is_empty() {
                let ex = Rc::clone(&scope.executor);
                let dropped_futures: Vec<Pin<Box<dyn Future<Output = ()>>>> =
                    executor::cancel_scope_tasks_on(&ex, scope.id);
                drop(dropped_futures);
            }
            scope.context.borrow_mut().clear();
            unregister_scope(scope.id);
        }

        // ---- cancel own tasks -------------------------------------------
        if !inner.task_ids.is_empty() {
            let ex = Rc::clone(&inner.executor);
            let dropped_futures = executor::cancel_scope_tasks_on(&ex, inner.id);
            drop(dropped_futures);
        }

        inner.context.borrow_mut().clear();
        // Dropping the child handles here is a no-op in their Drop:
        // `descendants` still holds strong refs to each child's inner,
        // so their strong counts stay > 1 — and they are already
        // cancelled above.  `descendants` itself holds plain Rcs (not
        // TaskScope handles), so releasing it at the end of this
        // function runs no Drop logic.
        inner.children.clear();

        // Remove from the global registry so stale lookups return None.
        unregister_scope(inner.id);
    }
}
771
772// ---------------------------------------------------------------------------
773// Convenience macros for context injection / retrieval
774// ---------------------------------------------------------------------------
775
/// Shorthand for `scope.provide(value)`.
///
/// Expands to a direct method call, so `$scope` may be any expression
/// with a `provide` method.
///
/// ```rust,ignore
/// provide_context!(scope, 42i32);
/// ```
#[macro_export]
macro_rules! provide_context {
    ($scope:expr, $value:expr) => {
        $scope.provide($value)
    };
}
787
/// Shorthand for `scope.consume::<T>()`.
///
/// Expands to a direct method call with the given type as the turbofish
/// argument.
///
/// ```rust,ignore
/// let theme: Option<Rc<Theme>> = consume_context!(scope, Theme);
/// ```
#[macro_export]
macro_rules! consume_context {
    ($scope:expr, $ty:ty) => {
        $scope.consume::<$ty>()
    };
}
799
800// ---------------------------------------------------------------------------
801// Tests
802// ---------------------------------------------------------------------------
803
804#[cfg(test)]
805#[allow(clippy::items_after_statements)]
806mod tests {
807    use super::*;
808    use crate::executor::{self, init_flush_scheduler, reset_executor_for_test, TestScheduleFlush};
809    use crate::{init_time_source, ScheduleFlush, TestTimeSource, TimeSource};
810    use auralis_signal::Signal;
811    use std::cell::{Cell, RefCell};
812    use std::rc::Rc;
813    use std::time::Duration;
814
815    fn init() {
816        reset_executor_for_test();
817        init_flush_scheduler(Rc::new(TestScheduleFlush));
818    }
819
820    // -- scope ------------------------------------------------------------
821
822    #[test]
823    fn new_scope_has_zero_tasks() {
824        let scope = TaskScope::new();
825        assert_eq!(scope.task_count(), 0);
826        assert_eq!(scope.child_count(), 0);
827    }
828
829    #[test]
830    fn new_child_attaches_to_parent() {
831        let parent = TaskScope::new();
832        let _child = TaskScope::new_child(&parent);
833        assert_eq!(parent.child_count(), 1);
834    }
835
836    #[test]
837    fn spawn_adds_task() {
838        init();
839        let scope = TaskScope::new();
840        scope.spawn(async {});
841        assert_eq!(scope.task_count(), 1);
842    }
843
844    #[test]
845    fn spawn_and_complete() {
846        init();
847        let done = Rc::new(Cell::new(false));
848        let done2 = Rc::clone(&done);
849        spawn_global(async move {
850            done2.set(true);
851        });
852        assert!(done.get());
853    }
854
855    #[test]
856    fn scope_spawn_and_cancel() {
857        init();
858        let dropped = Rc::new(Cell::new(false));
859        {
860            let scope = TaskScope::new();
861            let d = Rc::clone(&dropped);
862            struct DropCheck(Rc<Cell<bool>>);
863            impl Drop for DropCheck {
864                fn drop(&mut self) {
865                    self.0.set(true);
866                }
867            }
868            scope.spawn(async move {
869                let _guard = DropCheck(d);
870                std::future::pending::<()>().await;
871            });
872            assert_eq!(executor::debug_task_count(), 1);
873        }
874        assert!(dropped.get());
875        assert_eq!(executor::debug_task_count(), 0);
876    }
877
878    #[test]
879    fn nested_scope_child_cancel_with_parent() {
880        init();
881        let dropped_child = Rc::new(Cell::new(false));
882        {
883            let parent = TaskScope::new();
884            let child = TaskScope::new_child(&parent);
885            let d = Rc::clone(&dropped_child);
886            struct DropCheck(Rc<Cell<bool>>);
887            impl Drop for DropCheck {
888                fn drop(&mut self) {
889                    self.0.set(true);
890                }
891            }
892            child.spawn(async move {
893                let _guard = DropCheck(d);
894                std::future::pending::<()>().await;
895            });
896            assert_eq!(executor::debug_task_count(), 1);
897        }
898        assert!(dropped_child.get());
899        assert_eq!(executor::debug_task_count(), 0);
900    }
901
    #[test]
    fn deeply_nested_scope_drop_no_stack_overflow() {
        init();
        // Regression test: tearing down a very deep scope chain must use
        // the iterative cancellation path — a recursive Drop would blow
        // the stack at ~200 levels of nesting.
        let root = TaskScope::new();
        {
            // Only the innermost handle is held in `current`; the
            // intermediate local handles drop as it is reassigned.
            // NOTE(review): this assumes the tree reachable from `root`
            // keeps intermediate scopes alive after their local handles
            // drop — confirm against TaskScope's ownership model.
            let mut current = TaskScope::new_child(&root);
            for _ in 0..199 {
                current = TaskScope::new_child(&current);
            }
        }
        // Dropping the root cascades cancellation through the chain.
        drop(root);
        assert_eq!(executor::debug_task_count(), 0);
    }
915
916    #[test]
917    fn scope_child_explicit_tree() {
918        let root = TaskScope::new();
919        let a = TaskScope::new_child(&root);
920        let b = TaskScope::new_child(&root);
921        let _a1 = TaskScope::new_child(&a);
922        let _a2 = TaskScope::new_child(&a);
923        assert_eq!(root.child_count(), 2);
924        assert_eq!(a.child_count(), 2);
925        assert_eq!(b.child_count(), 0);
926    }
927
928    // -- callbacks -------------------------------------------------------
929
930    #[test]
931    fn callback_handle_dropped_before_tasks() {
932        init();
933        let dropped_order: Rc<RefCell<Vec<String>>> = Rc::new(RefCell::new(Vec::new()));
934        {
935            let scope = TaskScope::new();
936            let order1 = Rc::clone(&dropped_order);
937            scope.register_callback_handle(CallbackHandle::new(move || {
938                order1.borrow_mut().push("callback".to_string());
939            }));
940            let order2 = Rc::clone(&dropped_order);
941            struct DropCheck {
942                order: Rc<RefCell<Vec<String>>>,
943                label: String,
944            }
945            impl Drop for DropCheck {
946                fn drop(&mut self) {
947                    self.order.borrow_mut().push(self.label.clone());
948                }
949            }
950            scope.spawn(async move {
951                let _guard = DropCheck {
952                    order: order2,
953                    label: "task".to_string(),
954                };
955                std::future::pending::<()>().await;
956            });
957        }
958        let order = dropped_order.borrow().clone();
959        assert_eq!(order, vec!["callback", "task"]);
960    }
961
962    #[test]
963    fn callback_handle_cleaned_up_on_child_scope_drop() {
964        init();
965        let called = Rc::new(Cell::new(false));
966        {
967            let parent = TaskScope::new();
968            let child = TaskScope::new_child(&parent);
969            let c = Rc::clone(&called);
970            child.register_callback_handle(CallbackHandle::new(move || {
971                c.set(true);
972            }));
973            // Child dropped here.
974        }
975        assert!(called.get());
976    }
977
978    // -- context ----------------------------------------------------------
979
980    #[test]
981    fn context_provide_and_consume_in_same_scope() {
982        let scope = TaskScope::new();
983        scope.provide(42i32);
984        assert_eq!(*scope.consume::<i32>().unwrap(), 42);
985    }
986
987    #[test]
988    fn context_consume_walks_up_to_parent() {
989        let parent = TaskScope::new();
990        parent.provide("hello".to_string());
991        let child = TaskScope::new_child(&parent);
992        assert_eq!(*child.consume::<String>().unwrap(), "hello");
993    }
994
995    #[test]
996    fn context_consume_not_found() {
997        let scope = TaskScope::new();
998        assert!(scope.consume::<i32>().is_none());
999    }
1000
    #[test]
    fn context_removed_on_scope_drop() {
        // NOTE(review): despite the name, removal of a child's context
        // is not directly observable once the child scope is gone (no
        // value is ever provided on the child here); what this actually
        // verifies is that dropping a child leaves the parent's own
        // context store intact.
        let parent = TaskScope::new();
        parent.provide(99u32);
        {
            let _child = TaskScope::new_child(&parent);
            // Child can consume from parent.
        }
        // Parent still has the context.
        assert_eq!(*parent.consume::<u32>().unwrap(), 99);
    }
1012
1013    #[test]
1014    fn context_shadowing() {
1015        let parent = TaskScope::new();
1016        parent.provide(1i32);
1017        let child = TaskScope::new_child(&parent);
1018        child.provide(2i32);
1019        // Child's own value shadows parent's.
1020        assert_eq!(*child.consume::<i32>().unwrap(), 2);
1021        // Parent still has its own.
1022        assert_eq!(*parent.consume::<i32>().unwrap(), 1);
1023    }
1024
1025    #[test]
1026    #[should_panic(expected = "context not found")]
1027    fn expect_context_panics_when_missing() {
1028        let scope = TaskScope::new();
1029        let _ = scope.expect_context::<String>();
1030    }
1031
1032    // -- existing tests continue to pass -----------------------------------
1033
1034    #[test]
1035    fn executor_priority_ordering() {
1036        init();
1037        let order = Rc::new(RefCell::new(Vec::new()));
1038        let o1 = Rc::clone(&order);
1039        executor::spawn_no_auto_flush(Priority::Low, async move {
1040            o1.borrow_mut().push("low");
1041        });
1042        let o2 = Rc::clone(&order);
1043        executor::spawn_no_auto_flush(Priority::High, async move {
1044            o2.borrow_mut().push("high");
1045        });
1046        executor::flush_all();
1047        let result = order.borrow().clone();
1048        assert_eq!(result, vec!["high", "low"]);
1049    }
1050
1051    #[test]
1052    fn executor_batch() {
1053        init();
1054        let counter = Rc::new(Cell::new(0u32));
1055        for _ in 0..10 {
1056            let c = Rc::clone(&counter);
1057            spawn_global(async move {
1058                c.set(c.get() + 1);
1059            });
1060        }
1061        assert_eq!(counter.get(), 10);
1062        assert_eq!(executor::debug_task_count(), 0);
1063    }
1064
1065    #[test]
1066    fn no_leak_on_cancel() {
1067        init();
1068        for _ in 0..50 {
1069            let scope = TaskScope::new();
1070            for _ in 0..5 {
1071                scope.spawn(std::future::pending::<()>());
1072            }
1073        }
1074        assert_eq!(executor::debug_task_count(), 0);
1075    }
1076
1077    #[test]
1078    fn set_deferred_triggers_after_flush() {
1079        use auralis_signal::Signal;
1080        init();
1081        let sig = Signal::new(0);
1082        let observed = Rc::new(Cell::new(0));
1083        set_deferred(&sig, 42);
1084        assert_eq!(sig.read(), 42);
1085        let ob1 = Rc::clone(&observed);
1086        spawn_global(async move {
1087            ob1.set(sig.read());
1088        });
1089        assert_eq!(observed.get(), 42);
1090    }
1091
1092    #[test]
1093    fn set_deferred_in_drop_safe() {
1094        use auralis_signal::Signal;
1095        init();
1096        let sig = Signal::new(0);
1097        struct SetOnDrop {
1098            sig: Signal<i32>,
1099            val: i32,
1100        }
1101        impl Drop for SetOnDrop {
1102            fn drop(&mut self) {
1103                set_deferred(&self.sig, self.val);
1104            }
1105        }
1106        let guard = SetOnDrop {
1107            sig: sig.clone(),
1108            val: 99,
1109        };
1110        drop(guard);
1111        assert_eq!(sig.read(), 99);
1112    }
1113
1114    #[test]
1115    fn set_deferred_from_drop_guard_during_scope_cancel() {
1116        use auralis_signal::Signal;
1117        init();
1118
1119        let sig = Signal::new(0i32);
1120
1121        // A drop guard that calls set_deferred — simulating a
1122        // component that resets shared state when its task is
1123        // cancelled.
1124        struct ResetOnDrop {
1125            sig: Signal<i32>,
1126        }
1127        impl Drop for ResetOnDrop {
1128            fn drop(&mut self) {
1129                set_deferred(&self.sig, 42);
1130            }
1131        }
1132
1133        {
1134            let scope = TaskScope::new();
1135            let s = sig.clone();
1136            scope.spawn(async move {
1137                let _guard = ResetOnDrop { sig: s };
1138                // The guard's Drop will call set_deferred when this
1139                // future is cancelled by the scope dropping.
1140                std::future::pending::<()>().await;
1141            });
1142            // Scope dropped here — task cancelled, guard fires.
1143        }
1144
1145        // After scope drop, the deferred op should have executed.
1146        assert_eq!(
1147            sig.read(),
1148            42,
1149            "set_deferred should have fired after scope cancel"
1150        );
1151    }
1152
1153    #[test]
1154    fn yield_now_gives_other_tasks_a_turn() {
1155        init();
1156        let order = Rc::new(RefCell::new(Vec::new()));
1157        let o1 = Rc::clone(&order);
1158        executor::spawn_no_auto_flush(Priority::Low, async move {
1159            o1.borrow_mut().push("a1");
1160            executor::yield_now().await;
1161            o1.borrow_mut().push("a2");
1162        });
1163        let o2 = Rc::clone(&order);
1164        executor::spawn_no_auto_flush(Priority::Low, async move {
1165            o2.borrow_mut().push("b1");
1166            o2.borrow_mut().push("b2");
1167        });
1168        executor::flush_all();
1169        let r = order.borrow().clone();
1170        assert_eq!(&r[0..3], &["a1", "b1", "b2"][..]);
1171        assert!(r.contains(&"a2"));
1172    }
1173
1174    #[test]
1175    fn panic_in_task_is_isolated() {
1176        init();
1177        let survived = Rc::new(Cell::new(false));
1178        let s = Rc::clone(&survived);
1179        spawn_global(async move {
1180            panic!("intentional test panic");
1181        });
1182        spawn_global(async move {
1183            s.set(true);
1184        });
1185        assert!(survived.get());
1186        assert_eq!(executor::debug_task_count(), 0);
1187    }
1188
1189    // -- time budget -------------------------------------------------------
1190
1191    #[test]
1192    fn time_budget_with_test_time_source() {
1193        init();
1194        let ts = Rc::new(TestTimeSource::new(0));
1195        init_time_source(ts.clone());
1196
1197        let polled = Rc::new(Cell::new(0u32));
1198
1199        // Spawn 50 tasks without auto-flush.  Each task increments the
1200        // counter and advances simulated time by 1 ms.
1201        for _ in 0..50 {
1202            let pc = Rc::clone(&polled);
1203            let ts_c = Rc::clone(&ts);
1204            executor::spawn_no_auto_flush(Priority::Low, async move {
1205                pc.set(pc.get() + 1);
1206                ts_c.advance(1);
1207            });
1208        }
1209
1210        // With TestScheduleFlush the next-flush callback fires
1211        // synchronously, so budget breaks re-enter flush immediately.
1212        // All tasks eventually complete.
1213        executor::flush_all();
1214
1215        assert_eq!(polled.get(), 50);
1216        assert_eq!(executor::debug_task_count(), 0);
1217    }
1218
    #[test]
    fn time_budget_honoured_with_split() {
        // Use a flush-scheduler that records calls instead of
        // re-entering, so we can observe that the budget actually
        // triggered a split.
        //
        // NOTE(review): unlike most tests here this does not call
        // init() — presumably because it installs its own scheduler and
        // time source directly; confirm other executor state does not
        // need resetting.
        let schedule_count = Rc::new(Cell::new(0u32));
        struct NoopScheduleFlush(Rc<Cell<u32>>);
        impl ScheduleFlush for NoopScheduleFlush {
            fn schedule(&self, _callback: Box<dyn FnOnce()>) {
                self.0.set(self.0.get() + 1);
                // Intentionally do NOT call callback() — we want to
                // observe the break without re-entering.
            }
        }
        init_flush_scheduler(Rc::new(NoopScheduleFlush(Rc::clone(&schedule_count))));

        let ts = Rc::new(TestTimeSource::new(0));
        init_time_source(ts.clone());

        let polled = Rc::new(RefCell::new(Vec::new()));

        // Each task consumes 1 ms of simulated time, so the flush
        // budget is exhausted partway through the 50 tasks.
        for i in 0..50u32 {
            let pc = Rc::clone(&polled);
            let ts_c = Rc::clone(&ts);
            executor::spawn_no_auto_flush(Priority::Low, async move {
                pc.borrow_mut().push(i);
                ts_c.advance(1);
            });
        }

        executor::flush_all();

        let completed = polled.borrow().len();
        assert!(
            completed < 50,
            "budget should split before all tasks run (only {completed} of 50)"
        );
        assert!(
            completed >= 7,
            "at least 7 tasks should run before budget expires ({completed})"
        );
        assert_eq!(
            schedule_count.get(),
            1,
            "next flush should have been scheduled exactly once"
        );

        // Clean up: schedule remaining tasks to finish.
        // Re-register TestScheduleFlush and flush again.
        init_flush_scheduler(Rc::new(TestScheduleFlush));
        executor::flush_all();
        assert_eq!(executor::debug_task_count(), 0);
    }
1272
1273    // -- macros -----------------------------------------------------------
1274
1275    #[test]
1276    fn provide_context_macro_works() {
1277        let scope = TaskScope::new();
1278        provide_context!(scope, 42i32);
1279        assert_eq!(*scope.consume::<i32>().unwrap(), 42);
1280    }
1281
1282    #[test]
1283    fn consume_context_macro_works() {
1284        let scope = TaskScope::new();
1285        scope.provide(99u32);
1286        let val: Option<Rc<u32>> = consume_context!(scope, u32);
1287        assert_eq!(*val.unwrap(), 99);
1288    }
1289
1290    #[test]
1291    fn consume_context_macro_not_found() {
1292        let scope = TaskScope::new();
1293        let val: Option<Rc<String>> = consume_context!(scope, String);
1294        assert!(val.is_none());
1295    }
1296
1297    // -- dump_task_tree ---------------------------------------------------
1298
1299    #[cfg(feature = "debug")]
1300    #[test]
1301    fn dump_task_tree_returns_string() {
1302        init();
1303        let scope = TaskScope::new();
1304        scope.spawn(async { std::future::pending::<()>().await });
1305
1306        let output = crate::dump_task_tree();
1307        assert!(output.contains("Auralis Task Tree"));
1308        assert!(output.contains("Total active tasks: 1"));
1309        assert!(output.contains("Scope"));
1310    }
1311
1312    #[cfg(feature = "debug")]
1313    #[test]
1314    fn dump_task_tree_empty() {
1315        init();
1316        let output = crate::dump_task_tree();
1317        assert!(output.contains("(no active tasks)"));
1318    }
1319
1320    use crate::{set_deferred, spawn_global};
1321
1322    // -- suspend / resume ---------------------------------------------------
1323
1324    #[test]
1325    fn suspend_prevents_task_execution() {
1326        init();
1327        let scope = TaskScope::new();
1328        let executed = Rc::new(Cell::new(false));
1329        let ex = Rc::clone(&executed);
1330        scope.spawn(async move {
1331            ex.set(true);
1332        });
1333        // Task runs immediately with TestScheduleFlush.
1334        assert!(executed.get());
1335        executed.set(false);
1336
1337        scope.suspend();
1338        let ex2 = Rc::clone(&executed);
1339        scope.spawn(async move {
1340            ex2.set(true);
1341        });
1342        // Task should NOT execute while suspended.
1343        assert!(!executed.get());
1344    }
1345
1346    #[test]
1347    fn resume_allows_task_execution() {
1348        init();
1349        let scope = TaskScope::new();
1350        scope.suspend();
1351        let executed = Rc::new(Cell::new(false));
1352        let ex = Rc::clone(&executed);
1353        scope.spawn(async move {
1354            ex.set(true);
1355        });
1356        assert!(!executed.get());
1357
1358        scope.resume();
1359        // After resume, the task should execute.
1360        assert!(executed.get());
1361    }
1362
1363    #[test]
1364    fn suspend_cascades_to_children() {
1365        init();
1366        let parent = TaskScope::new();
1367        let child = TaskScope::new_child(&parent);
1368        assert!(!child.is_suspended());
1369
1370        parent.suspend();
1371        assert!(parent.is_suspended());
1372        assert!(child.is_suspended());
1373    }
1374
1375    #[test]
1376    fn resume_cascades_to_children() {
1377        init();
1378        let parent = TaskScope::new();
1379        let child = TaskScope::new_child(&parent);
1380        parent.suspend();
1381        assert!(child.is_suspended());
1382
1383        parent.resume();
1384        assert!(!parent.is_suspended());
1385        assert!(!child.is_suspended());
1386    }
1387
1388    #[test]
1389    fn multiple_suspend_resume_no_leak() {
1390        init();
1391        let scope = TaskScope::new();
1392        for _ in 0..50 {
1393            scope.suspend();
1394            assert!(scope.is_suspended());
1395            scope.resume();
1396            assert!(!scope.is_suspended());
1397        }
1398        // No panic, no leak.
1399    }
1400
1401    #[test]
1402    fn suspended_scope_drops_without_panic() {
1403        init();
1404        {
1405            let scope = TaskScope::new();
1406            scope.suspend();
1407            let d = Rc::new(Cell::new(false));
1408            struct DropCheck(Rc<Cell<bool>>);
1409            impl Drop for DropCheck {
1410                fn drop(&mut self) {
1411                    self.0.set(true);
1412                }
1413            }
1414            scope.spawn(async move {
1415                let _guard = DropCheck(d);
1416                std::future::pending::<()>().await;
1417            });
1418            // Scope dropped with tasks and in suspended state.
1419            // Tasks should be cancelled without panic.
1420        }
1421        // After scope drop, all tasks should be cleaned up.
1422        assert_eq!(executor::debug_task_count(), 0);
1423    }
1424
1425    #[test]
1426    fn siblings_not_affected_by_suspend() {
1427        init();
1428        let parent = TaskScope::new();
1429        let child_a = TaskScope::new_child(&parent);
1430        let child_b = TaskScope::new_child(&parent);
1431
1432        child_a.suspend();
1433        assert!(child_a.is_suspended());
1434        assert!(!child_b.is_suspended());
1435        assert!(!parent.is_suspended());
1436    }
1437
1438    // -- instance executor tests ------------------------------------------
1439
1440    use crate::Executor;
1441
1442    #[test]
1443    fn flush_instance_panicking_task_is_isolated() {
1444        init();
1445        let ex = Executor::new_instance();
1446        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
1447
1448        let survived = Rc::new(Cell::new(false));
1449        let s = Rc::clone(&survived);
1450
1451        Executor::spawn(&ex, async move {
1452            panic!("intentional test panic in instance executor");
1453        });
1454        Executor::spawn(&ex, async move {
1455            s.set(true);
1456        });
1457        Executor::flush_instance(&ex);
1458
1459        assert!(survived.get());
1460    }
1461
1462    #[test]
1463    fn flush_instance_spawn_and_complete() {
1464        init();
1465        let ex = Executor::new_instance();
1466        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
1467
1468        let counter = Rc::new(Cell::new(0u32));
1469        for _ in 0..20 {
1470            let c = Rc::clone(&counter);
1471            Executor::spawn(&ex, async move {
1472                c.set(c.get() + 1);
1473            });
1474        }
1475        Executor::flush_instance(&ex);
1476        assert_eq!(counter.get(), 20);
1477    }
1478
1479    // -- timer tests -------------------------------------------------------
1480
1481    use crate::timer;
1482
1483    #[test]
1484    fn timer_zero_duration_completes_immediately() {
1485        init();
1486        let done = Rc::new(Cell::new(false));
1487        let d = Rc::clone(&done);
1488        spawn_global(async move {
1489            timer::sleep(Duration::ZERO).await;
1490            d.set(true);
1491        });
1492        // With TestScheduleFlush, the task completes synchronously.
1493        assert!(done.get());
1494    }
1495
1496    #[test]
1497    fn timer_normal_delay_fires_after_time_advances() {
1498        init();
1499        let ts = Rc::new(TestTimeSource::new(0));
1500        init_time_source(Rc::clone(&ts) as Rc<dyn TimeSource>);
1501
1502        let done = Rc::new(Cell::new(false));
1503        let d = Rc::clone(&done);
1504        spawn_global(async move {
1505            timer::sleep(Duration::from_millis(100)).await;
1506            d.set(true);
1507        });
1508        // Timer registered but not yet expired — the task is sleeping.
1509        assert!(!done.get());
1510
1511        // Advance time past the deadline, then flush to process the
1512        // expired timer and re-poll the task.
1513        ts.advance(150);
1514        crate::executor::flush_all();
1515        assert!(done.get());
1516    }
1517
1518    #[test]
1519    fn timer_across_multiple_flushes() {
1520        init();
1521        let ts = Rc::new(TestTimeSource::new(0));
1522        init_time_source(Rc::clone(&ts) as Rc<dyn TimeSource>);
1523
1524        let counter = Rc::new(Cell::new(0u32));
1525        let c = Rc::clone(&counter);
1526        spawn_global(async move {
1527            for _ in 0..3 {
1528                timer::sleep(Duration::from_millis(100)).await;
1529                c.set(c.get() + 1);
1530            }
1531        });
1532        assert_eq!(counter.get(), 0);
1533
1534        ts.advance(100);
1535        crate::executor::flush_all();
1536        assert_eq!(counter.get(), 1);
1537
1538        ts.advance(100);
1539        crate::executor::flush_all();
1540        assert_eq!(counter.get(), 2);
1541
1542        ts.advance(100);
1543        crate::executor::flush_all();
1544        assert_eq!(counter.get(), 3);
1545    }
1546
1547    #[test]
1548    fn timer_cancelled_by_scope_drop() {
1549        init();
1550        let executed = Rc::new(Cell::new(false));
1551        let ex = Rc::clone(&executed);
1552        {
1553            let scope = TaskScope::new();
1554            scope.spawn(async move {
1555                timer::sleep(Duration::from_millis(500)).await;
1556                ex.set(true);
1557            });
1558        }
1559        // Scope dropped → task cancelled → timer cleaned up.
1560        // The task should NOT execute.
1561        assert!(!executed.get());
1562        assert_eq!(executor::debug_task_count(), 0);
1563    }
1564
1565    #[test]
1566    fn reentrant_flush_is_noop() {
1567        init();
1568        // flush_instance re-entrancy guard: calling flush inside a
1569        // deferred callback (which runs during flush step 2) should
1570        // be a no-op and leave state intact.
1571        //
1572        // With TestScheduleFlush, signal callbacks fire synchronously
1573        // and a re-entrant flush() inside a callback is simply a no-op.
1574        let reentered = Rc::new(Cell::new(false));
1575        let r = Rc::clone(&reentered);
1576        let sig = Signal::new(0);
1577        auralis_signal::subscribe(&sig, Rc::new(move || r.set(true)));
1578        // This set triggers the callback synchronously (TestScheduleFlush).
1579        // The callback does not call flush itself, but we verify the
1580        // guard by calling flush() inside the deferred callback drain.
1581        sig.set(1);
1582        assert!(reentered.get());
1583    }
1584
1585    #[test]
1586    fn instance_executor_timer() {
1587        init();
1588        let ex = Executor::new_instance();
1589        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
1590        let ts = Rc::new(TestTimeSource::new(0));
1591        Executor::install_time_source(&ex, Rc::clone(&ts) as Rc<dyn TimeSource>);
1592
1593        let done = Rc::new(Cell::new(false));
1594        let d = Rc::clone(&done);
1595        Executor::spawn(&ex, async move {
1596            timer::sleep(Duration::from_millis(50)).await;
1597            d.set(true);
1598        });
1599        assert!(!done.get());
1600
1601        // Timer should fire on the instance executor's flush.
1602        ts.advance(60);
1603        Executor::flush_instance(&ex);
1604        assert!(done.get());
1605    }
1606
1607    #[test]
1608    fn set_deferred_routes_to_instance_executor() {
1609        init();
1610        let ex = Executor::new_instance();
1611        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
1612
1613        let sig = Signal::new(0);
1614        let s = sig.clone();
1615
1616        // Spawn a task on the instance executor that uses set_deferred.
1617        Executor::spawn(&ex, async move {
1618            crate::set_deferred(&s, 42);
1619        });
1620
1621        // Flush the instance executor — set_deferred should route here.
1622        Executor::flush_instance(&ex);
1623        // The deferred set should have been processed.
1624        assert_eq!(sig.read(), 42);
1625    }
1626
1627    // -- defensive / API coverage --------------------------------------
1628
1629    #[test]
1630    fn panic_hook_is_invoked_on_task_panic() {
1631        init();
1632        let hook_called = Rc::new(Cell::new(false));
1633        let hc = Rc::clone(&hook_called);
1634
1635        crate::set_panic_hook(Rc::new(move |_info| {
1636            hc.set(true);
1637        }));
1638
1639        let scope = TaskScope::new();
1640        scope.spawn(async move { panic!("intentional") });
1641
1642        // The panic hook should have been called.
1643        assert!(hook_called.get());
1644    }
1645
1646    #[test]
1647    fn current_scope_available_in_spawned_task() {
1648        init();
1649        let scope = TaskScope::new();
1650        let found = Rc::new(Cell::new(false));
1651        let f = Rc::clone(&found);
1652        scope.spawn(async move {
1653            f.set(crate::current_scope().is_some());
1654        });
1655        assert!(found.get());
1656    }
1657
1658    #[test]
1659    fn callback_handle_noop_does_not_panic() {
1660        let _h = crate::CallbackHandle::noop();
1661        // Dropping should not panic.
1662    }
1663
1664    #[test]
1665    fn sync_callback_fallback_without_schedule_hook() {
1666        // When no ScheduleFlush hook is installed, signal callbacks
1667        // fire synchronously inside set() (the executor_schedule fallback).
1668        crate::reset_executor_for_test();
1669        // No init_flush_scheduler call — hook is absent.
1670
1671        let sig = Signal::new(0);
1672        let called = Rc::new(Cell::new(false));
1673        let c = Rc::clone(&called);
1674        auralis_signal::subscribe(&sig, Rc::new(move || c.set(true)));
1675
1676        sig.set(1);
1677        // Without a hook, the callback fires synchronously.
1678        assert!(called.get());
1679    }
1680
    #[test]
    fn set_deferred_isolated_to_instance_executor() {
        init();
        // Two independent instance executors; the deferred set routed
        // via with_executor(&ex1, …) is processed by ex1's flush.
        let ex1 = Executor::new_instance();
        Executor::install_flush_scheduler(&ex1, Rc::new(TestScheduleFlush));
        let ex2 = Executor::new_instance();
        Executor::install_flush_scheduler(&ex2, Rc::new(TestScheduleFlush));

        let sig1 = Signal::new(0);
        let sig2 = Signal::new(0);
        let s1 = sig1.clone();

        // Spawn on ex1: use set_deferred via with_executor.
        crate::with_executor(&ex1, || {
            crate::set_deferred(&s1, 42);
        });
        Executor::flush_instance(&ex1);
        assert_eq!(sig1.read(), 42);
        // sig2 must be unaffected — set_deferred was on ex1.
        // NOTE(review): sig2 is never written anywhere, so this holds
        // trivially; flushing ex2 and/or subscribing to sig2 would make
        // the isolation claim stronger — confirm intended coverage.
        assert_eq!(sig2.read(), 0);
    }
1702
    #[test]
    fn notify_signal_state_follow_up_handles_reentrant_dirty() {
        // When a signal subscriber callback calls set() on the same
        // signal, the follow-up notification must fire correctly.
        //
        // NOTE(review): no init() here — callbacks are presumably
        // delivered via the synchronous fallback (or prior scheduler
        // state); confirm this is intentional.
        let sig = Signal::new(0);
        let sig2 = sig.clone();
        let count = Rc::new(Cell::new(0u32));
        let c = Rc::clone(&count);

        auralis_signal::subscribe(
            &sig,
            Rc::new(move || {
                // The count guard below limits the re-entrant set to the
                // first invocation, preventing infinite recursion.
                c.set(c.get() + 1);
                // Re-entrant set: should be picked up by follow-up.
                if c.get() == 1 {
                    sig2.set(2);
                }
            }),
        );

        sig.set(1);
        // First callback (set 1): count=1, triggers re-entrant set(2).
        // Follow-up notification fires second callback: count=2.
        assert_eq!(sig.read(), 2);
        assert_eq!(count.get(), 2);
    }
1729}