Skip to main content

auralis_task/
scope.rs

1//! Explicit [`TaskScope`] tree with iterative cancellation, parent
2//! back-references, callback-handle lifecycle management, and a context
3//! store for dependency injection.
4
5use std::any::{Any, TypeId};
6use std::cell::{Cell, RefCell};
7use std::collections::{HashMap, VecDeque};
8use std::future::Future;
9use std::pin::Pin;
10use std::rc::{Rc, Weak};
11
12use crate::executor;
13use crate::Priority;
14
/// Identifier of a [`TaskScope`] node.
type ScopeId = u64;
/// Identifier of a task spawned on the executor.
type TaskId = u64;

// ---------------------------------------------------------------------------
// Scope-id allocator
// ---------------------------------------------------------------------------

thread_local! {
    /// Per-thread monotonically increasing counter backing [`alloc_scope_id`].
    static NEXT_SCOPE_ID: Cell<ScopeId> = const { Cell::new(1) };
}

/// Hand out the next scope id for this thread (1, 2, 3, …).
fn alloc_scope_id() -> ScopeId {
    NEXT_SCOPE_ID.with(|counter| {
        let current = counter.get();
        counter.set(current + 1);
        current
    })
}
33
34// ---------------------------------------------------------------------------
35// CallbackHandle
36// ---------------------------------------------------------------------------
37
/// Owns a resource that must be cleaned up when the owning [`TaskScope`]
/// is dropped.
///
/// Currently used for signal subscriptions registered by the `bind_*`
/// functions.  When the [`TaskScope`] drops, every registered
/// [`CallbackHandle`] is dropped, which calls the stored cleanup closure
/// to unsubscribe from the signal.
pub struct CallbackHandle {
    cleanup: Option<Box<dyn FnOnce() + 'static>>,
}

impl CallbackHandle {
    /// Create a handle from a cleanup closure.
    pub fn new(cleanup: impl FnOnce() + 'static) -> Self {
        let boxed: Box<dyn FnOnce() + 'static> = Box::new(cleanup);
        Self {
            cleanup: Some(boxed),
        }
    }

    /// Create a no-op handle that does nothing on drop.
    ///
    /// Useful as a placeholder when a [`CallbackHandle`] is required
    /// but no cleanup is needed.
    #[must_use]
    pub fn noop() -> Self {
        Self { cleanup: None }
    }
}

impl Drop for CallbackHandle {
    fn drop(&mut self) {
        // `take` guarantees the closure runs at most once.
        let Some(run) = self.cleanup.take() else {
            return;
        };
        run();
    }
}
74
75// ---------------------------------------------------------------------------
76// Scope registry — maps ScopeId → live TaskScope for executor injection
77// ---------------------------------------------------------------------------
78//
79// # Why Weak references
80//
81// The registry stores `Weak<RefCell<TaskScopeInner>>` rather than
82// `Rc<...>`.  This prevents the registry from keeping scopes alive
83// after the application has dropped them — when the last strong
84// reference is gone, the Weak upgrade returns `None` and the executor
85// skips that scope.
86//
87// # Thread safety
88//
89// `SCOPE_REGISTRY` is a `thread_local!` because Auralis is
90// single-threaded by design (Wasm constraint).  For multi-task SSR
91// servers, each request uses an isolated [`Executor`] instance created
92// via [`Executor::new_instance`](crate::Executor::new_instance), and
93// the [`ScopeStore`] trait provides pluggable per-task storage.
94
/// Registry entry: weak handles to a scope's shared state (inner node
/// and suspended flag).  Weak, so the registry never keeps a dropped
/// scope alive — see the module comment above.
type ScopeRegistryEntry = (Weak<RefCell<TaskScopeInner>>, Weak<Cell<bool>>);

thread_local! {
    /// ScopeId → live-scope lookup used by the executor to inject the
    /// owning scope before polling a task.
    static SCOPE_REGISTRY: RefCell<HashMap<ScopeId, ScopeRegistryEntry>> =
        RefCell::new(HashMap::new());
}
101
102/// Register a scope in the global registry so the executor can look it
103/// up by id and inject it as the current scope when polling tasks.
104fn register_scope(id: ScopeId, inner: &Rc<RefCell<TaskScopeInner>>, suspended: &Rc<Cell<bool>>) {
105    let _ = SCOPE_REGISTRY.try_with(|reg| {
106        if let Ok(mut r) = reg.try_borrow_mut() {
107            r.insert(id, (Rc::downgrade(inner), Rc::downgrade(suspended)));
108        }
109    });
110}
111
112fn unregister_scope(id: ScopeId) {
113    let _ = SCOPE_REGISTRY.try_with(|reg| {
114        if let Ok(mut r) = reg.try_borrow_mut() {
115            r.remove(&id);
116        }
117    });
118}
119
120/// Find a live [`TaskScope`] by its id.
121///
122/// Returns `None` if the scope has been dropped or the id is unknown.
123#[must_use]
124pub fn find_scope(scope_id: ScopeId) -> Option<TaskScope> {
125    SCOPE_REGISTRY
126        .try_with(|reg| {
127            if let Ok(r) = reg.try_borrow() {
128                r.get(&scope_id).and_then(|(inner_weak, suspended_weak)| {
129                    let inner = inner_weak.upgrade()?;
130                    let suspended = suspended_weak.upgrade()?;
131                    Some(TaskScope { inner, suspended })
132                })
133            } else {
134                None
135            }
136        })
137        .ok()
138        .flatten()
139}
140
/// Return the debug label for the scope with the given id, if any.
///
/// Only available with the `debug` feature.
#[cfg(feature = "debug")]
#[doc(hidden)]
#[must_use]
pub fn scope_debug_label(scope_id: ScopeId) -> Option<String> {
    let scope = find_scope(scope_id)?;
    let inner = scope.inner.borrow();
    inner.debug_label.clone()
}
150
151/// Clear the scope registry.
152#[doc(hidden)]
153pub fn clear_scope_registry() {
154    let _ = SCOPE_REGISTRY.try_with(|reg| {
155        if let Ok(mut r) = reg.try_borrow_mut() {
156            r.clear();
157        }
158    });
159}
160
161// ---------------------------------------------------------------------------
162// Current-scope storage — injectable, defaults to thread-local
163// ---------------------------------------------------------------------------
164
/// Function signatures for scope store operations.
///
/// Using function pointers keeps the store `Send + Sync` even though
/// `TaskScope` itself is `!Send` — Rust function pointer types are
/// always `Send + Sync` regardless of parameter/return types.
type ScopeSetFn = fn(Option<TaskScope>);
type ScopeGetFn = fn() -> Option<TaskScope>;

/// A pluggable backend for per-task (or per-thread) scope storage.
///
/// The default implementation uses a thread-local cell, which is
/// sufficient for single-threaded Wasm environments.  For multi-task
/// SSR runtimes (e.g. tokio) the host application should inject a
/// task-local implementation via [`set_scope_store`].
pub struct ScopeStore {
    /// Store a scope (or `None` to clear).  Replaces whatever was
    /// previously stored.
    pub set_fn: ScopeSetFn,
    /// Retrieve the current scope.
    pub get_fn: ScopeGetFn,
}
185
186use std::sync::OnceLock;
187static SCOPE_STORE: OnceLock<ScopeStore> = OnceLock::new();
188
189fn ensure_default_store() -> &'static ScopeStore {
190    SCOPE_STORE.get_or_init(|| ScopeStore {
191        set_fn: thread_local_set,
192        get_fn: thread_local_get,
193    })
194}
195
196/// Install a custom scope store.
197///
198/// Must be called before any scope operations.  On Wasm or in tests the
199/// default thread-local store is sufficient.
200///
201/// # Example (tokio SSR)
202///
203/// ```rust,ignore
204/// use auralis_task::ScopeStore;
205///
206/// auralis_task::set_scope_store(ScopeStore {
207///     set_fn: my_tokio_task_local_set,
208///     get_fn: my_tokio_task_local_get,
209/// });
210/// ```
211pub fn set_scope_store(store: ScopeStore) {
212    let _ = SCOPE_STORE.set(store);
213}
214
215// The `set_scope_store` API allows injecting a custom scope store.
216// For SSR in multi-threaded tokio runtimes, users should implement a
217// `ScopeStore` backed by `tokio::task::LocalKey` (available when the
218// `ssr-tokio` feature is enabled) or a similar per-task mechanism.
219//
220// Example with tokio (when `ssr-tokio` is enabled):
221//
222// ```rust,ignore
223// use auralis_task::{ScopeStore, set_scope_store};
224//
// tokio::task_local! {
//     static TK_SCOPE: std::cell::RefCell<Option<auralis_task::TaskScope>>;
// }
229//
230// set_scope_store(ScopeStore {
231//     set_fn: |s| TK_SCOPE.with(|c| *c.borrow_mut() = s),
232//     get_fn: || TK_SCOPE.with(|c| c.borrow().clone()),
233// });
234// ```
235//
236// For single-threaded tokio use (LocalSet / spawn_local), the default
237// thread-local store works correctly without any configuration.
238
239// ---- default thread-local implementation -------------------------------
240
241thread_local! {
242    static CURRENT_SCOPE: RefCell<Option<TaskScope>> = const { RefCell::new(None) };
243}
244
245fn thread_local_set(scope: Option<TaskScope>) {
246    CURRENT_SCOPE.with(|cell| {
247        cell.replace(scope);
248    });
249}
250
251fn thread_local_get() -> Option<TaskScope> {
252    CURRENT_SCOPE.with(|cell| cell.borrow().clone())
253}
254
255/// Directly set the current scope without save/restore.
256///
257/// Used by the executor to inject the owning scope before polling a
258/// task.  The caller must restore the previous scope after the poll.
259pub(crate) fn set_scope_direct(scope: Option<TaskScope>) {
260    let store = ensure_default_store();
261    (store.set_fn)(scope);
262}
263
264/// Directly get the current scope.
265pub(crate) fn get_scope_direct() -> Option<TaskScope> {
266    let store = ensure_default_store();
267    (store.get_fn)()
268}
269
270// ---- ssr-tokio integration ----------------------------------------------
271
/// Initialise the scope store for tokio-based SSR runtimes.
///
/// Uses `tokio::task_local!` (a `tokio::task::LocalKey`) to store the
/// current [`TaskScope`] per tokio task, enabling true multi-request
/// isolation.  Call this once at process startup, before any scope
/// operations.
///
/// Only available with the **`ssr-tokio`** feature (non-wasm).
///
/// # Example
///
/// ```rust,ignore
/// auralis_task::init_scope_store_tokio();
/// ```
#[cfg(feature = "ssr-tokio")]
pub fn init_scope_store_tokio() {
    tokio::task_local! {
        static TK_SCOPE: std::cell::RefCell<Option<TaskScope>>;
    }

    // Initialise the key.
    //
    // NOTE(review): task-local keys are only populated inside a
    // `LocalKey::scope(..)` future; outside one, `try_with` returns Err
    // and every call below is a silent no-op.  Confirm the host runtime
    // actually wraps request handling in `TK_SCOPE.scope(..)`.
    let _ = TK_SCOPE.try_with(|cell| {
        cell.replace(None);
    });

    set_scope_store(ScopeStore {
        set_fn: |s| {
            let _ = TK_SCOPE.try_with(|cell| {
                cell.replace(s);
            });
        },
        get_fn: || {
            TK_SCOPE
                .try_with(|cell| cell.borrow().clone())
                .ok()
                .flatten()
        },
    });
}
310
311// ---- public API --------------------------------------------------------
312
313/// Set the current [`TaskScope`] for the duration of `f`.
314///
315/// Set `scope` as the current scope for the duration of `f`,
316/// restoring the previous scope afterward.
317///
318/// Used by framework glue code so that bind functions can discover the
319/// owning scope via [`current_scope`].
320pub fn with_current_scope<R>(scope: &TaskScope, f: impl FnOnce() -> R) -> R {
321    let store = ensure_default_store();
322    let prev = (store.get_fn)();
323    (store.set_fn)(Some(scope.clone_inner()));
324    let result = f();
325    (store.set_fn)(prev);
326    result
327}
328
329/// Get the currently active [`TaskScope`], if any.
330#[must_use]
331pub fn current_scope() -> Option<TaskScope> {
332    let store = ensure_default_store();
333    (store.get_fn)()
334}
335
336// ---------------------------------------------------------------------------
337// TaskScopeInner
338// ---------------------------------------------------------------------------
339
/// Shared state behind every [`TaskScope`] handle.
struct TaskScopeInner {
    /// Unique id allocated from the per-thread counter.
    id: ScopeId,
    /// Ids of tasks spawned in this scope (the futures live in the executor).
    task_ids: Vec<TaskId>,
    /// Strong handles to child scopes; cleared during cancellation.
    children: Vec<TaskScope>,
    /// Weak back-reference to parent (set for child scopes).
    parent: Option<Weak<RefCell<TaskScopeInner>>>,
    /// Typed context store for dependency injection.
    context: RefCell<HashMap<TypeId, Rc<dyn Any>>>,
    /// Callback handles registered by bind_* functions.
    callbacks: RefCell<Vec<CallbackHandle>>,
    /// Optional label for `dump_task_tree` output (debug feature).
    #[cfg(feature = "debug")]
    debug_label: Option<String>,
    /// Set once cancellation has run; makes later drops/cancels no-ops.
    cancelled: bool,
}
355
356// ---------------------------------------------------------------------------
357// TaskScope
358// ---------------------------------------------------------------------------
359
/// A node in the scope tree that owns spawned tasks and carries a typed
/// context for dependency injection.
///
/// # Drop guarantee
///
/// When a [`TaskScope`] is dropped, all descendant scopes and their
/// tasks are cancelled **iteratively** using a work queue — recursion
/// is never used, so deeply nested UI trees (200+ levels) never
/// overflow the stack.
///
/// NOTE(review): the `Drop` impl runs cancellation on the *first*
/// `TaskScope` value dropped for a given scope, even when other clones
/// still exist — clones alias one scope, they are not refcounted
/// keep-alive handles.  Verify this is intended at call sites that
/// create short-lived clones.
///
/// # Context
///
/// Use [`provide`](TaskScope::provide) / [`consume`](TaskScope::consume)
/// for lightweight dependency injection that walks up the scope tree.
///
/// # Callback lifecycle
///
/// [`CallbackHandle`]s registered via
/// [`register_callback_handle`](Self::register_callback_handle) are
/// dropped **before** spawned tasks are cancelled, ensuring that
/// signal subscriptions are removed before any task cleanup.
#[must_use]
pub struct TaskScope {
    inner: Rc<RefCell<TaskScopeInner>>,
    /// Whether this scope is suspended.  Stored outside the `RefCell`
    /// so that [`is_suspended`](Self::is_suspended) can be checked
    /// without borrowing (avoids re-entrant borrow panics during
    /// synchronous flush in tests).
    suspended: Rc<Cell<bool>>,
}
390
391impl TaskScope {
392    /// Create a new root scope (no parent).
393    pub fn new() -> Self {
394        let inner = Rc::new(RefCell::new(TaskScopeInner {
395            id: alloc_scope_id(),
396            task_ids: Vec::new(),
397            children: Vec::new(),
398            parent: None,
399            context: RefCell::new(HashMap::new()),
400            callbacks: RefCell::new(Vec::new()),
401            #[cfg(feature = "debug")]
402            debug_label: None,
403            cancelled: false,
404        }));
405        let id = inner.borrow().id;
406        let suspended = Rc::new(Cell::new(false));
407        register_scope(id, &inner, &suspended);
408        Self { inner, suspended }
409    }
410
411    /// Create a child scope attached to `self`.
412    ///
413    /// A weak back-reference to the parent is stored so that
414    /// [`consume`](TaskScope::consume) can walk up the tree.
415    pub fn new_child(parent: &Self) -> Self {
416        let inner = Rc::new(RefCell::new(TaskScopeInner {
417            id: alloc_scope_id(),
418            task_ids: Vec::new(),
419            children: Vec::new(),
420            parent: Some(Rc::downgrade(&parent.inner)),
421            context: RefCell::new(HashMap::new()),
422            callbacks: RefCell::new(Vec::new()),
423            #[cfg(feature = "debug")]
424            debug_label: None,
425            cancelled: false,
426        }));
427        let id = inner.borrow().id;
428        let suspended = Rc::new(Cell::new(false));
429        register_scope(id, &inner, &suspended);
430        let child = Self { inner, suspended };
431        parent.inner.borrow_mut().children.push(child.clone_inner());
432        child
433    }
434
435    /// Spawn a future in this scope at low priority.
436    pub fn spawn(&self, future: impl Future<Output = ()> + 'static) {
437        self.spawn_with_priority(Priority::Low, future);
438    }
439
    /// Spawn a future in this scope at the given priority.
    ///
    /// The current scope is set to `self` during the spawn so that any
    /// synchronous work inside the future constructor (e.g. `bind_text`)
    /// can discover the owning scope via [`current_scope`].
    ///
    /// Spawning on a cancelled scope is a silent no-op (the future is
    /// dropped unpolled).
    pub fn spawn_with_priority(
        &self,
        priority: Priority,
        future: impl Future<Output = ()> + 'static,
    ) {
        // NOTE(review): `inner` stays mutably borrowed across the
        // `with_current_scope` call.  The temporary clone that
        // `with_current_scope` stores is dropped during its restore step;
        // that clone's `Drop` then fails `try_borrow_mut` and bails out
        // early instead of cancelling this scope.  The borrow appears
        // load-bearing — do not shrink its scope without auditing `Drop`;
        // verify this interaction is intended.
        let mut inner = self.inner.borrow_mut();
        if inner.cancelled {
            return;
        }
        let task_id =
            with_current_scope(self, || executor::spawn_scoped(priority, inner.id, future));
        inner.task_ids.push(task_id);
    }
458
459    // -- callback lifecycle ------------------------------------------------
460
461    /// Register a [`CallbackHandle`] that will be dropped when this scope
462    /// is dropped (or when `clear_callbacks` is called).
463    ///
464    /// Used by `bind_*` functions to ensure signal subscriptions are
465    /// cleaned up when the owning component is destroyed.
466    pub fn register_callback_handle(&self, handle: CallbackHandle) {
467        let inner = self.inner.borrow();
468        if inner.cancelled {
469            return;
470        }
471        inner.callbacks.borrow_mut().push(handle);
472    }
473
474    // -- context -----------------------------------------------------------
475
476    /// Store a value of type `T` in this scope.
477    ///
478    /// The value is wrapped in [`Rc`] so it can be shared.  A subsequent
479    /// call to [`consume`](TaskScope::consume) on this scope (or any
480    /// descendant) will discover it by walking up the parent chain.
481    pub fn provide<T: 'static>(&self, value: T) {
482        self.inner
483            .borrow()
484            .context
485            .borrow_mut()
486            .insert(TypeId::of::<T>(), Rc::new(value));
487    }
488
489    /// Look up a value of type `T` by walking up the scope tree.
490    ///
491    /// Returns `None` if no ancestor (including `self`) has provided a
492    /// value of this type.
493    #[must_use]
494    pub fn consume<T: 'static>(&self) -> Option<Rc<T>> {
495        let mut current = Some(Rc::clone(&self.inner));
496
497        while let Some(inner) = current {
498            // Check local context.
499            {
500                let inner_ref = inner.borrow();
501                let ctx = inner_ref.context.borrow();
502                if let Some(val) = ctx.get(&TypeId::of::<T>()) {
503                    if let Ok(downcast) = val.clone().downcast::<T>() {
504                        return Some(downcast);
505                    }
506                }
507            }
508
509            // Walk up to parent.
510            let parent = {
511                let inner_ref = inner.borrow();
512                inner_ref.parent.as_ref().and_then(Weak::upgrade)
513            };
514            current = parent;
515        }
516
517        None
518    }
519
520    /// Like [`consume`](TaskScope::consume) but panics if the value is
521    /// not found.
522    ///
523    /// # Panics
524    ///
525    /// Panics if no ancestor scope has provided a value of type `T`.
526    #[must_use]
527    #[track_caller]
528    pub fn expect_context<T: 'static>(&self) -> Rc<T> {
529        self.consume::<T>()
530            .unwrap_or_else(|| panic!("context not found: {}", std::any::type_name::<T>()))
531    }
532
533    // -- debugging ----------------------------------------------------------
534
535    /// Set a label for this scope, shown in [`dump_task_tree`] output.
536    ///
537    /// Only available with the `debug` feature.
538    #[cfg(feature = "debug")]
539    pub fn set_debug_label(&self, label: impl Into<String>) {
540        self.inner.borrow_mut().debug_label = Some(label.into());
541    }
542
543    // -- testing -----------------------------------------------------------
544
545    /// Return the number of spawned tasks in this scope (test-only).
546    #[cfg(test)]
547    #[must_use]
548    pub fn task_count(&self) -> usize {
549        self.inner.borrow().task_ids.len()
550    }
551
552    /// Return the number of child scopes (test-only).
553    #[cfg(test)]
554    #[must_use]
555    pub fn child_count(&self) -> usize {
556        self.inner.borrow().children.len()
557    }
558
559    // -- internals ---------------------------------------------------------
560
561    fn clone_inner(&self) -> Self {
562        Self {
563            inner: Rc::clone(&self.inner),
564            suspended: Rc::clone(&self.suspended),
565        }
566    }
567
568    /// Run `f` with `self` set as the current scope for the thread.
569    ///
570    /// Used by framework glue code so bind functions can discover the
571    /// owning scope via [`current_scope`].
572    pub fn enter<R>(&self, f: impl FnOnce() -> R) -> R {
573        with_current_scope(self, f)
574    }
575
576    /// Suspend all tasks owned by this scope and its descendants.
577    ///
578    /// Suspended tasks are skipped during executor polling.  Signal
579    /// subscriptions remain registered but their callbacks are not
580    /// invoked while the scope is suspended.  Use [`resume`](Self::resume)
581    /// to restart execution.
582    ///
583    /// Used by `if_async_cached` and `match_async_cached` to pause
584    /// hidden branches.
585    pub fn suspend(&self) {
586        if self.suspended.get() {
587            return;
588        }
589        self.suspended.set(true);
590        // Cascading: suspend all descendants.
591        let children: Vec<TaskScope> = {
592            self.inner
593                .borrow()
594                .children
595                .iter()
596                .map(TaskScope::clone_inner)
597                .collect()
598        };
599        for child in &children {
600            child.suspend();
601        }
602    }
603
604    /// Resume all tasks owned by this scope and its descendants.
605    ///
606    /// This reverses the effect of [`suspend`](Self::suspend).  Tasks
607    /// become eligible for polling again on the next executor flush.
608    pub fn resume(&self) {
609        if !self.suspended.get() {
610            return;
611        }
612        self.suspended.set(false);
613
614        let (scope_id, children) = {
615            let inner = self.inner.borrow();
616            let id = inner.id;
617            let children: Vec<TaskScope> =
618                inner.children.iter().map(TaskScope::clone_inner).collect();
619            (id, children)
620        };
621
622        // Enqueue all tasks belonging to this scope.
623        executor::enqueue_scope_tasks(scope_id);
624
625        // Resume children (cascading).
626        for child in &children {
627            child.resume();
628        }
629    }
630
    /// Return `true` if this scope is currently suspended.
    ///
    /// Reads the flag stored outside the `RefCell`, so this never
    /// borrows `inner` and is safe to call re-entrantly.
    #[must_use]
    pub fn is_suspended(&self) -> bool {
        self.suspended.get()
    }
636}
637
638impl Default for TaskScope {
639    fn default() -> Self {
640        Self::new()
641    }
642}
643
impl Clone for TaskScope {
    // NOTE(review): a clone aliases the same scope (shared `Rc`s).  The
    // `Drop` impl cancels on the first handle dropped regardless of other
    // live clones, so a clone is NOT a keep-alive handle — audit call
    // sites that create short-lived clones.
    fn clone(&self) -> Self {
        self.clone_inner()
    }
}
649
// Iterative cancellation: descendants are collected BFS, then
// cancelled leaf→root, avoiding recursive drop that would overflow
// the stack on deeply-nested trees (200+ levels).
//
// Callback handles are dropped BEFORE tasks, ensuring signal
// subscriptions are removed before any task is cancelled.
//
// NOTE(review): cancellation runs on the FIRST `TaskScope` value dropped
// for a scope — `cancelled` lives on the shared inner, so later drops of
// clones are no-ops, but dropping a temporary clone while the scope
// should stay live cancels it.
impl Drop for TaskScope {
    fn drop(&mut self) {
        let Ok(mut inner) = self.inner.try_borrow_mut() else {
            // Already borrowed — this is a re-entrant drop (e.g. a
            // callback held the last clone of this scope).  If this
            // was the last clone, resources will leak.
            #[cfg(debug_assertions)]
            {
                eprintln!(
                    "[auralis-task] WARNING: TaskScope::drop cannot borrow inner \
                     (already borrowed). If this was the last clone, tasks and \
                     callbacks will leak. Avoid dropping the last TaskScope clone \
                     inside a callback or during executor flush."
                );
            }
            return;
        };
        if inner.cancelled {
            // Another handle already ran cancellation; nothing to do.
            return;
        }
        inner.cancelled = true;

        // ---- drop callback handles first ---------------------------------
        // Dropping each CallbackHandle runs its cleanup closure (signal
        // unsubscription) before any task teardown below.
        inner.callbacks.borrow_mut().clear();

        // ---- collect descendants BFS ------------------------------------
        let mut descendants: Vec<Rc<RefCell<TaskScopeInner>>> = Vec::new();
        {
            let mut queue: VecDeque<Rc<RefCell<TaskScopeInner>>> = VecDeque::new();
            for child in &inner.children {
                queue.push_back(Rc::clone(&child.inner));
            }

            while let Some(scope_rc) = queue.pop_front() {
                let scope = scope_rc.borrow();
                for child in &scope.children {
                    queue.push_back(Rc::clone(&child.inner));
                }
                descendants.push(Rc::clone(&scope_rc));
            }
        }

        // ---- cancel leaves → root ---------------------------------------
        // Reverse BFS order guarantees every scope is cancelled after all
        // of its descendants.
        for scope_rc in descendants.iter().rev() {
            let mut scope = scope_rc.borrow_mut();
            if scope.cancelled {
                continue;
            }
            scope.cancelled = true;

            // Drop callbacks before tasks.
            scope.callbacks.borrow_mut().clear();

            if !scope.task_ids.is_empty() {
                let dropped_futures: Vec<Pin<Box<dyn Future<Output = ()>>>> =
                    executor::cancel_scope_tasks(scope.id);
                drop(dropped_futures);
            }
            // Clear context to release Rc references.
            scope.context.borrow_mut().clear();

            // Remove from the global registry.  This scope's Drop will run
            // later (when the parent's children Vec is cleared) but will
            // see cancelled=true and return immediately, so we must
            // unregister now.
            unregister_scope(scope.id);
        }

        // ---- cancel own tasks -------------------------------------------
        if !inner.task_ids.is_empty() {
            let dropped_futures = executor::cancel_scope_tasks(inner.id);
            drop(dropped_futures);
        }

        inner.context.borrow_mut().clear();
        // Dropping the child TaskScope clones here is safe: each sees
        // cancelled=true (set in the loop above) and returns immediately.
        inner.children.clear();

        // Remove from the global registry so stale lookups return None.
        unregister_scope(inner.id);
    }
}
737
738// ---------------------------------------------------------------------------
739// Convenience macros for context injection / retrieval
740// ---------------------------------------------------------------------------
741
/// Shorthand for `scope.provide(value)`.
///
/// Both `$scope` and `$value` are evaluated exactly once.
///
/// ```rust,ignore
/// provide_context!(scope, 42i32);
/// ```
#[macro_export]
macro_rules! provide_context {
    ($scope:expr, $value:expr) => {
        $scope.provide($value)
    };
}

/// Shorthand for `scope.consume::<T>()`.
///
/// ```rust,ignore
/// let theme: Option<Rc<Theme>> = consume_context!(scope, Theme);
/// ```
#[macro_export]
macro_rules! consume_context {
    ($scope:expr, $ty:ty) => {
        $scope.consume::<$ty>()
    };
}
765
766// ---------------------------------------------------------------------------
767// Tests
768// ---------------------------------------------------------------------------
769
770#[cfg(test)]
771#[allow(clippy::items_after_statements)]
772mod tests {
773    use super::*;
774    use crate::executor::{self, init_flush_scheduler, reset_executor_for_test, TestScheduleFlush};
775    use crate::{init_time_source, ScheduleFlush, TestTimeSource, TimeSource};
776    use auralis_signal::Signal;
777    use std::cell::{Cell, RefCell};
778    use std::rc::Rc;
779    use std::time::Duration;
780
781    fn init() {
782        reset_executor_for_test();
783        init_flush_scheduler(Rc::new(TestScheduleFlush));
784    }
785
786    // -- scope ------------------------------------------------------------
787
788    #[test]
789    fn new_scope_has_zero_tasks() {
790        let scope = TaskScope::new();
791        assert_eq!(scope.task_count(), 0);
792        assert_eq!(scope.child_count(), 0);
793    }
794
795    #[test]
796    fn new_child_attaches_to_parent() {
797        let parent = TaskScope::new();
798        let _child = TaskScope::new_child(&parent);
799        assert_eq!(parent.child_count(), 1);
800    }
801
802    #[test]
803    fn spawn_adds_task() {
804        init();
805        let scope = TaskScope::new();
806        scope.spawn(async {});
807        assert_eq!(scope.task_count(), 1);
808    }
809
810    #[test]
811    fn spawn_and_complete() {
812        init();
813        let done = Rc::new(Cell::new(false));
814        let done2 = Rc::clone(&done);
815        spawn_global(async move {
816            done2.set(true);
817        });
818        assert!(done.get());
819    }
820
821    #[test]
822    fn scope_spawn_and_cancel() {
823        init();
824        let dropped = Rc::new(Cell::new(false));
825        {
826            let scope = TaskScope::new();
827            let d = Rc::clone(&dropped);
828            struct DropCheck(Rc<Cell<bool>>);
829            impl Drop for DropCheck {
830                fn drop(&mut self) {
831                    self.0.set(true);
832                }
833            }
834            scope.spawn(async move {
835                let _guard = DropCheck(d);
836                std::future::pending::<()>().await;
837            });
838            assert_eq!(executor::debug_task_count(), 1);
839        }
840        assert!(dropped.get());
841        assert_eq!(executor::debug_task_count(), 0);
842    }
843
844    #[test]
845    fn nested_scope_child_cancel_with_parent() {
846        init();
847        let dropped_child = Rc::new(Cell::new(false));
848        {
849            let parent = TaskScope::new();
850            let child = TaskScope::new_child(&parent);
851            let d = Rc::clone(&dropped_child);
852            struct DropCheck(Rc<Cell<bool>>);
853            impl Drop for DropCheck {
854                fn drop(&mut self) {
855                    self.0.set(true);
856                }
857            }
858            child.spawn(async move {
859                let _guard = DropCheck(d);
860                std::future::pending::<()>().await;
861            });
862            assert_eq!(executor::debug_task_count(), 1);
863        }
864        assert!(dropped_child.get());
865        assert_eq!(executor::debug_task_count(), 0);
866    }
867
868    #[test]
869    fn deeply_nested_scope_drop_no_stack_overflow() {
870        init();
871        let root = TaskScope::new();
872        {
873            let mut current = TaskScope::new_child(&root);
874            for _ in 0..199 {
875                current = TaskScope::new_child(&current);
876            }
877        }
878        drop(root);
879        assert_eq!(executor::debug_task_count(), 0);
880    }
881
882    #[test]
883    fn scope_child_explicit_tree() {
884        let root = TaskScope::new();
885        let a = TaskScope::new_child(&root);
886        let b = TaskScope::new_child(&root);
887        let _a1 = TaskScope::new_child(&a);
888        let _a2 = TaskScope::new_child(&a);
889        assert_eq!(root.child_count(), 2);
890        assert_eq!(a.child_count(), 2);
891        assert_eq!(b.child_count(), 0);
892    }
893
894    // -- callbacks -------------------------------------------------------
895
    // Teardown ordering: when a scope drops, its registered callback
    // handles are released before its tasks are cancelled, so the
    // drop log must read ["callback", "task"].
    #[test]
    fn callback_handle_dropped_before_tasks() {
        init();
        let dropped_order: Rc<RefCell<Vec<String>>> = Rc::new(RefCell::new(Vec::new()));
        {
            let scope = TaskScope::new();
            let order1 = Rc::clone(&dropped_order);
            // Cleanup closure records when the handle is released.
            scope.register_callback_handle(CallbackHandle::new(move || {
                order1.borrow_mut().push("callback".to_string());
            }));
            let order2 = Rc::clone(&dropped_order);
            // Guard records when the cancelled task's future is dropped.
            struct DropCheck {
                order: Rc<RefCell<Vec<String>>>,
                label: String,
            }
            impl Drop for DropCheck {
                fn drop(&mut self) {
                    self.order.borrow_mut().push(self.label.clone());
                }
            }
            scope.spawn(async move {
                let _guard = DropCheck {
                    order: order2,
                    label: "task".to_string(),
                };
                std::future::pending::<()>().await;
            });
            // Scope dropped here: handle cleanup first, then task cancel.
        }
        let order = dropped_order.borrow().clone();
        assert_eq!(order, vec!["callback", "task"]);
    }
927
928    #[test]
929    fn callback_handle_cleaned_up_on_child_scope_drop() {
930        init();
931        let called = Rc::new(Cell::new(false));
932        {
933            let parent = TaskScope::new();
934            let child = TaskScope::new_child(&parent);
935            let c = Rc::clone(&called);
936            child.register_callback_handle(CallbackHandle::new(move || {
937                c.set(true);
938            }));
939            // Child dropped here.
940        }
941        assert!(called.get());
942    }
943
944    // -- context ----------------------------------------------------------
945
946    #[test]
947    fn context_provide_and_consume_in_same_scope() {
948        let scope = TaskScope::new();
949        scope.provide(42i32);
950        assert_eq!(*scope.consume::<i32>().unwrap(), 42);
951    }
952
953    #[test]
954    fn context_consume_walks_up_to_parent() {
955        let parent = TaskScope::new();
956        parent.provide("hello".to_string());
957        let child = TaskScope::new_child(&parent);
958        assert_eq!(*child.consume::<String>().unwrap(), "hello");
959    }
960
961    #[test]
962    fn context_consume_not_found() {
963        let scope = TaskScope::new();
964        assert!(scope.consume::<i32>().is_none());
965    }
966
967    #[test]
968    fn context_removed_on_scope_drop() {
969        let parent = TaskScope::new();
970        parent.provide(99u32);
971        {
972            let _child = TaskScope::new_child(&parent);
973            // Child can consume from parent.
974        }
975        // Parent still has the context.
976        assert_eq!(*parent.consume::<u32>().unwrap(), 99);
977    }
978
979    #[test]
980    fn context_shadowing() {
981        let parent = TaskScope::new();
982        parent.provide(1i32);
983        let child = TaskScope::new_child(&parent);
984        child.provide(2i32);
985        // Child's own value shadows parent's.
986        assert_eq!(*child.consume::<i32>().unwrap(), 2);
987        // Parent still has its own.
988        assert_eq!(*parent.consume::<i32>().unwrap(), 1);
989    }
990
991    #[test]
992    #[should_panic(expected = "context not found")]
993    fn expect_context_panics_when_missing() {
994        let scope = TaskScope::new();
995        let _ = scope.expect_context::<String>();
996    }
997
998    // -- existing tests continue to pass -----------------------------------
999
1000    #[test]
1001    fn executor_priority_ordering() {
1002        init();
1003        let order = Rc::new(RefCell::new(Vec::new()));
1004        let o1 = Rc::clone(&order);
1005        executor::spawn_no_auto_flush(Priority::Low, async move {
1006            o1.borrow_mut().push("low");
1007        });
1008        let o2 = Rc::clone(&order);
1009        executor::spawn_no_auto_flush(Priority::High, async move {
1010            o2.borrow_mut().push("high");
1011        });
1012        executor::flush_all();
1013        let result = order.borrow().clone();
1014        assert_eq!(result, vec!["high", "low"]);
1015    }
1016
1017    #[test]
1018    fn executor_batch() {
1019        init();
1020        let counter = Rc::new(Cell::new(0u32));
1021        for _ in 0..10 {
1022            let c = Rc::clone(&counter);
1023            spawn_global(async move {
1024                c.set(c.get() + 1);
1025            });
1026        }
1027        assert_eq!(counter.get(), 10);
1028        assert_eq!(executor::debug_task_count(), 0);
1029    }
1030
1031    #[test]
1032    fn no_leak_on_cancel() {
1033        init();
1034        for _ in 0..50 {
1035            let scope = TaskScope::new();
1036            for _ in 0..5 {
1037                scope.spawn(std::future::pending::<()>());
1038            }
1039        }
1040        assert_eq!(executor::debug_task_count(), 0);
1041    }
1042
    // The value written by set_deferred is already visible on the
    // next read (with the synchronous TestScheduleFlush installed by
    // init()), both directly and from a subsequently spawned task.
    #[test]
    fn set_deferred_triggers_after_flush() {
        use auralis_signal::Signal;
        init();
        let sig = Signal::new(0);
        let observed = Rc::new(Cell::new(0));
        set_deferred(&sig, 42);
        // Deferred write has been applied already.
        assert_eq!(sig.read(), 42);
        let ob1 = Rc::clone(&observed);
        spawn_global(async move {
            ob1.set(sig.read());
        });
        // The auto-flushed task observed the updated value too.
        assert_eq!(observed.get(), 42);
    }
1057
    // set_deferred must be safe to call from inside a Drop impl
    // (i.e. potentially outside any task/flush context) without
    // panicking, and the write must still land.
    #[test]
    fn set_deferred_in_drop_safe() {
        use auralis_signal::Signal;
        init();
        let sig = Signal::new(0);
        // Guard that performs the deferred set from its destructor.
        struct SetOnDrop {
            sig: Signal<i32>,
            val: i32,
        }
        impl Drop for SetOnDrop {
            fn drop(&mut self) {
                set_deferred(&self.sig, self.val);
            }
        }
        let guard = SetOnDrop {
            sig: sig.clone(),
            val: 99,
        };
        drop(guard);
        assert_eq!(sig.read(), 99);
    }
1079
    // set_deferred called from a guard that is dropped while the
    // scope is cancelling its tasks must still execute the write.
    #[test]
    fn set_deferred_from_drop_guard_during_scope_cancel() {
        use auralis_signal::Signal;
        init();

        let sig = Signal::new(0i32);

        // A drop guard that calls set_deferred — simulating a
        // component that resets shared state when its task is
        // cancelled.
        struct ResetOnDrop {
            sig: Signal<i32>,
        }
        impl Drop for ResetOnDrop {
            fn drop(&mut self) {
                set_deferred(&self.sig, 42);
            }
        }

        {
            let scope = TaskScope::new();
            let s = sig.clone();
            scope.spawn(async move {
                let _guard = ResetOnDrop { sig: s };
                // The guard's Drop will call set_deferred when this
                // future is cancelled by the scope dropping.
                std::future::pending::<()>().await;
            });
            // Scope dropped here — task cancelled, guard fires.
        }

        // After scope drop, the deferred op should have executed.
        assert_eq!(
            sig.read(),
            42,
            "set_deferred should have fired after scope cancel"
        );
    }
1118
    // yield_now must suspend the current task long enough for other
    // ready tasks to run: "a1" runs, the yield lets "b1"/"b2"
    // through, and "a2" completes on a later resumption.
    #[test]
    fn yield_now_gives_other_tasks_a_turn() {
        init();
        let order = Rc::new(RefCell::new(Vec::new()));
        let o1 = Rc::clone(&order);
        executor::spawn_no_auto_flush(Priority::Low, async move {
            o1.borrow_mut().push("a1");
            executor::yield_now().await;
            o1.borrow_mut().push("a2");
        });
        let o2 = Rc::clone(&order);
        executor::spawn_no_auto_flush(Priority::Low, async move {
            o2.borrow_mut().push("b1");
            o2.borrow_mut().push("b2");
        });
        executor::flush_all();
        let r = order.borrow().clone();
        // Only the prefix and eventual completion of "a2" are
        // pinned; its exact position is scheduling-dependent.
        assert_eq!(&r[0..3], &["a1", "b1", "b2"][..]);
        assert!(r.contains(&"a2"));
    }
1139
    // A panicking task must not take down the executor or prevent a
    // subsequently spawned task from running; the panicked task is
    // reaped like any other.
    #[test]
    fn panic_in_task_is_isolated() {
        init();
        let survived = Rc::new(Cell::new(false));
        let s = Rc::clone(&survived);
        spawn_global(async move {
            panic!("intentional test panic");
        });
        spawn_global(async move {
            s.set(true);
        });
        // Second task ran; nothing leaked.
        assert!(survived.get());
        assert_eq!(executor::debug_task_count(), 0);
    }
1154
1155    // -- time budget -------------------------------------------------------
1156
    // With a simulated clock driving the flush time budget, repeated
    // budget splits must still let every queued task complete.
    #[test]
    fn time_budget_with_test_time_source() {
        init();
        let ts = Rc::new(TestTimeSource::new(0));
        init_time_source(ts.clone());

        let polled = Rc::new(Cell::new(0u32));

        // Spawn 50 tasks without auto-flush.  Each task increments the
        // counter and advances simulated time by 1 ms.
        for _ in 0..50 {
            let pc = Rc::clone(&polled);
            let ts_c = Rc::clone(&ts);
            executor::spawn_no_auto_flush(Priority::Low, async move {
                pc.set(pc.get() + 1);
                ts_c.advance(1);
            });
        }

        // With TestScheduleFlush the next-flush callback fires
        // synchronously, so budget breaks re-enter flush immediately.
        // All tasks eventually complete.
        executor::flush_all();

        assert_eq!(polled.get(), 50);
        assert_eq!(executor::debug_task_count(), 0);
    }
1184
    // Verifies that the flush loop actually stops when the time
    // budget is exceeded and schedules exactly one follow-up flush.
    #[test]
    fn time_budget_honoured_with_split() {
        // Use a flush-scheduler that records calls instead of
        // re-entering, so we can observe that the budget actually
        // triggered a split.
        let schedule_count = Rc::new(Cell::new(0u32));
        struct NoopScheduleFlush(Rc<Cell<u32>>);
        impl ScheduleFlush for NoopScheduleFlush {
            fn schedule(&self, _callback: Box<dyn FnOnce()>) {
                self.0.set(self.0.get() + 1);
                // Intentionally do NOT call callback() — we want to
                // observe the break without re-entering.
            }
        }
        init_flush_scheduler(Rc::new(NoopScheduleFlush(Rc::clone(&schedule_count))));

        let ts = Rc::new(TestTimeSource::new(0));
        init_time_source(ts.clone());

        let polled = Rc::new(RefCell::new(Vec::new()));

        // Each task advances the simulated clock by 1 ms, so the
        // budget is guaranteed to run out mid-queue.
        for i in 0..50u32 {
            let pc = Rc::clone(&polled);
            let ts_c = Rc::clone(&ts);
            executor::spawn_no_auto_flush(Priority::Low, async move {
                pc.borrow_mut().push(i);
                ts_c.advance(1);
            });
        }

        executor::flush_all();

        let completed = polled.borrow().len();
        assert!(
            completed < 50,
            "budget should split before all tasks run (only {completed} of 50)"
        );
        assert!(
            completed >= 7,
            "at least 7 tasks should run before budget expires ({completed})"
        );
        assert_eq!(
            schedule_count.get(),
            1,
            "next flush should have been scheduled exactly once"
        );

        // Clean up: schedule remaining tasks to finish.
        // Re-register TestScheduleFlush and flush again.
        init_flush_scheduler(Rc::new(TestScheduleFlush));
        executor::flush_all();
        assert_eq!(executor::debug_task_count(), 0);
    }
1238
1239    // -- macros -----------------------------------------------------------
1240
1241    #[test]
1242    fn provide_context_macro_works() {
1243        let scope = TaskScope::new();
1244        provide_context!(scope, 42i32);
1245        assert_eq!(*scope.consume::<i32>().unwrap(), 42);
1246    }
1247
1248    #[test]
1249    fn consume_context_macro_works() {
1250        let scope = TaskScope::new();
1251        scope.provide(99u32);
1252        let val: Option<Rc<u32>> = consume_context!(scope, u32);
1253        assert_eq!(*val.unwrap(), 99);
1254    }
1255
1256    #[test]
1257    fn consume_context_macro_not_found() {
1258        let scope = TaskScope::new();
1259        let val: Option<Rc<String>> = consume_context!(scope, String);
1260        assert!(val.is_none());
1261    }
1262
1263    // -- dump_task_tree ---------------------------------------------------
1264
    // dump_task_tree output must contain the header, the live-task
    // count, and at least one scope entry.
    #[cfg(feature = "debug")]
    #[test]
    fn dump_task_tree_returns_string() {
        init();
        let scope = TaskScope::new();
        // One forever-pending task keeps the tree non-empty.
        scope.spawn(async { std::future::pending::<()>().await });

        let output = crate::dump_task_tree();
        assert!(output.contains("Auralis Task Tree"));
        assert!(output.contains("Total active tasks: 1"));
        assert!(output.contains("Scope"));
    }
1277
    // With no live tasks the dump reports the empty-tree marker.
    #[cfg(feature = "debug")]
    #[test]
    fn dump_task_tree_empty() {
        init();
        let output = crate::dump_task_tree();
        assert!(output.contains("(no active tasks)"));
    }
1285
1286    use crate::{set_deferred, spawn_global};
1287
1288    // -- suspend / resume ---------------------------------------------------
1289
    // Spawning on a suspended scope parks the task instead of
    // running it; the same spawn on a live scope completes
    // synchronously (TestScheduleFlush), making the contrast
    // directly observable.
    #[test]
    fn suspend_prevents_task_execution() {
        init();
        let scope = TaskScope::new();
        let executed = Rc::new(Cell::new(false));
        let ex = Rc::clone(&executed);
        scope.spawn(async move {
            ex.set(true);
        });
        // Task runs immediately with TestScheduleFlush.
        assert!(executed.get());
        executed.set(false);

        scope.suspend();
        let ex2 = Rc::clone(&executed);
        scope.spawn(async move {
            ex2.set(true);
        });
        // Task should NOT execute while suspended.
        assert!(!executed.get());
    }
1311
1312    #[test]
1313    fn resume_allows_task_execution() {
1314        init();
1315        let scope = TaskScope::new();
1316        scope.suspend();
1317        let executed = Rc::new(Cell::new(false));
1318        let ex = Rc::clone(&executed);
1319        scope.spawn(async move {
1320            ex.set(true);
1321        });
1322        assert!(!executed.get());
1323
1324        scope.resume();
1325        // After resume, the task should execute.
1326        assert!(executed.get());
1327    }
1328
1329    #[test]
1330    fn suspend_cascades_to_children() {
1331        init();
1332        let parent = TaskScope::new();
1333        let child = TaskScope::new_child(&parent);
1334        assert!(!child.is_suspended());
1335
1336        parent.suspend();
1337        assert!(parent.is_suspended());
1338        assert!(child.is_suspended());
1339    }
1340
1341    #[test]
1342    fn resume_cascades_to_children() {
1343        init();
1344        let parent = TaskScope::new();
1345        let child = TaskScope::new_child(&parent);
1346        parent.suspend();
1347        assert!(child.is_suspended());
1348
1349        parent.resume();
1350        assert!(!parent.is_suspended());
1351        assert!(!child.is_suspended());
1352    }
1353
1354    #[test]
1355    fn multiple_suspend_resume_no_leak() {
1356        init();
1357        let scope = TaskScope::new();
1358        for _ in 0..50 {
1359            scope.suspend();
1360            assert!(scope.is_suspended());
1361            scope.resume();
1362            assert!(!scope.is_suspended());
1363        }
1364        // No panic, no leak.
1365    }
1366
1367    #[test]
1368    fn suspended_scope_drops_without_panic() {
1369        init();
1370        {
1371            let scope = TaskScope::new();
1372            scope.suspend();
1373            let d = Rc::new(Cell::new(false));
1374            struct DropCheck(Rc<Cell<bool>>);
1375            impl Drop for DropCheck {
1376                fn drop(&mut self) {
1377                    self.0.set(true);
1378                }
1379            }
1380            scope.spawn(async move {
1381                let _guard = DropCheck(d);
1382                std::future::pending::<()>().await;
1383            });
1384            // Scope dropped with tasks and in suspended state.
1385            // Tasks should be cancelled without panic.
1386        }
1387        // After scope drop, all tasks should be cleaned up.
1388        assert_eq!(executor::debug_task_count(), 0);
1389    }
1390
1391    #[test]
1392    fn siblings_not_affected_by_suspend() {
1393        init();
1394        let parent = TaskScope::new();
1395        let child_a = TaskScope::new_child(&parent);
1396        let child_b = TaskScope::new_child(&parent);
1397
1398        child_a.suspend();
1399        assert!(child_a.is_suspended());
1400        assert!(!child_b.is_suspended());
1401        assert!(!parent.is_suspended());
1402    }
1403
1404    // -- instance executor tests ------------------------------------------
1405
1406    use crate::Executor;
1407
    // Panic isolation must also hold on per-instance executors: a
    // panicking task must not stop later tasks from running in the
    // same flush.
    #[test]
    fn flush_instance_panicking_task_is_isolated() {
        init();
        let ex = Executor::new_instance();
        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));

        let survived = Rc::new(Cell::new(false));
        let s = Rc::clone(&survived);

        Executor::spawn(&ex, async move {
            panic!("intentional test panic in instance executor");
        });
        Executor::spawn(&ex, async move {
            s.set(true);
        });
        Executor::flush_instance(&ex);

        assert!(survived.get());
    }
1427
1428    #[test]
1429    fn flush_instance_spawn_and_complete() {
1430        init();
1431        let ex = Executor::new_instance();
1432        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
1433
1434        let counter = Rc::new(Cell::new(0u32));
1435        for _ in 0..20 {
1436            let c = Rc::clone(&counter);
1437            Executor::spawn(&ex, async move {
1438                c.set(c.get() + 1);
1439            });
1440        }
1441        Executor::flush_instance(&ex);
1442        assert_eq!(counter.get(), 20);
1443    }
1444
1445    // -- timer tests -------------------------------------------------------
1446
1447    use crate::timer;
1448
    // sleep(Duration::ZERO) must resolve without waiting for a
    // clock tick.
    #[test]
    fn timer_zero_duration_completes_immediately() {
        init();
        let done = Rc::new(Cell::new(false));
        let d = Rc::clone(&done);
        spawn_global(async move {
            timer::sleep(Duration::ZERO).await;
            d.set(true);
        });
        // With TestScheduleFlush, the task completes synchronously.
        assert!(done.get());
    }
1461
    // A non-zero sleep must park the task until the (simulated)
    // clock passes its deadline and a flush re-polls it.
    #[test]
    fn timer_normal_delay_fires_after_time_advances() {
        init();
        let ts = Rc::new(TestTimeSource::new(0));
        init_time_source(Rc::clone(&ts) as Rc<dyn TimeSource>);

        let done = Rc::new(Cell::new(false));
        let d = Rc::clone(&done);
        spawn_global(async move {
            timer::sleep(Duration::from_millis(100)).await;
            d.set(true);
        });
        // Timer registered but not yet expired — the task is sleeping.
        assert!(!done.get());

        // Advance time past the deadline, then flush to process the
        // expired timer and re-poll the task.
        ts.advance(150);
        crate::executor::flush_all();
        assert!(done.get());
    }
1483
    // Sequential sleeps inside one task: each 100 ms advance plus
    // flush completes exactly one sleep and bumps the counter once.
    #[test]
    fn timer_across_multiple_flushes() {
        init();
        let ts = Rc::new(TestTimeSource::new(0));
        init_time_source(Rc::clone(&ts) as Rc<dyn TimeSource>);

        let counter = Rc::new(Cell::new(0u32));
        let c = Rc::clone(&counter);
        spawn_global(async move {
            for _ in 0..3 {
                timer::sleep(Duration::from_millis(100)).await;
                c.set(c.get() + 1);
            }
        });
        assert_eq!(counter.get(), 0);

        ts.advance(100);
        crate::executor::flush_all();
        assert_eq!(counter.get(), 1);

        ts.advance(100);
        crate::executor::flush_all();
        assert_eq!(counter.get(), 2);

        ts.advance(100);
        crate::executor::flush_all();
        assert_eq!(counter.get(), 3);
    }
1512
1513    #[test]
1514    fn timer_cancelled_by_scope_drop() {
1515        init();
1516        let executed = Rc::new(Cell::new(false));
1517        let ex = Rc::clone(&executed);
1518        {
1519            let scope = TaskScope::new();
1520            scope.spawn(async move {
1521                timer::sleep(Duration::from_millis(500)).await;
1522                ex.set(true);
1523            });
1524        }
1525        // Scope dropped → task cancelled → timer cleaned up.
1526        // The task should NOT execute.
1527        assert!(!executed.get());
1528        assert_eq!(executor::debug_task_count(), 0);
1529    }
1530
1531    #[test]
1532    fn reentrant_flush_is_noop() {
1533        init();
1534        // flush_instance re-entrancy guard: calling flush inside a
1535        // deferred callback (which runs during flush step 2) should
1536        // be a no-op and leave state intact.
1537        //
1538        // With TestScheduleFlush, signal callbacks fire synchronously
1539        // and a re-entrant flush() inside a callback is simply a no-op.
1540        let reentered = Rc::new(Cell::new(false));
1541        let r = Rc::clone(&reentered);
1542        let sig = Signal::new(0);
1543        auralis_signal::subscribe(&sig, Rc::new(move || r.set(true)));
1544        // This set triggers the callback synchronously (TestScheduleFlush).
1545        // The callback does not call flush itself, but we verify the
1546        // guard by calling flush() inside the deferred callback drain.
1547        sig.set(1);
1548        assert!(reentered.get());
1549    }
1550
    // Timers registered by tasks on an instance executor are driven
    // by that instance's own time source and flush cycle.
    #[test]
    fn instance_executor_timer() {
        init();
        let ex = Executor::new_instance();
        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));
        let ts = Rc::new(TestTimeSource::new(0));
        Executor::install_time_source(&ex, Rc::clone(&ts) as Rc<dyn TimeSource>);

        let done = Rc::new(Cell::new(false));
        let d = Rc::clone(&done);
        Executor::spawn(&ex, async move {
            timer::sleep(Duration::from_millis(50)).await;
            d.set(true);
        });
        assert!(!done.get());

        // Timer should fire on the instance executor's flush.
        ts.advance(60);
        Executor::flush_instance(&ex);
        assert!(done.get());
    }
1572
    // A set_deferred issued from a task running on an instance
    // executor must be queued on that instance and drained by its
    // flush_instance.
    #[test]
    fn set_deferred_routes_to_instance_executor() {
        init();
        let ex = Executor::new_instance();
        Executor::install_flush_scheduler(&ex, Rc::new(TestScheduleFlush));

        let sig = Signal::new(0);
        let s = sig.clone();

        // Spawn a task on the instance executor that uses set_deferred.
        Executor::spawn(&ex, async move {
            crate::set_deferred(&s, 42);
        });

        // Flush the instance executor — set_deferred should route here.
        Executor::flush_instance(&ex);
        // The deferred set should have been processed.
        assert_eq!(sig.read(), 42);
    }
1592
1593    // -- defensive / API coverage --------------------------------------
1594
1595    #[test]
1596    fn panic_hook_is_invoked_on_task_panic() {
1597        init();
1598        let hook_called = Rc::new(Cell::new(false));
1599        let hc = Rc::clone(&hook_called);
1600
1601        crate::set_panic_hook(Rc::new(move |_info| {
1602            hc.set(true);
1603        }));
1604
1605        let scope = TaskScope::new();
1606        scope.spawn(async move { panic!("intentional") });
1607
1608        // The panic hook should have been called.
1609        assert!(hook_called.get());
1610    }
1611
1612    #[test]
1613    fn current_scope_available_in_spawned_task() {
1614        init();
1615        let scope = TaskScope::new();
1616        let found = Rc::new(Cell::new(false));
1617        let f = Rc::clone(&found);
1618        scope.spawn(async move {
1619            f.set(crate::current_scope().is_some());
1620        });
1621        assert!(found.get());
1622    }
1623
1624    #[test]
1625    fn callback_handle_noop_does_not_panic() {
1626        let _h = crate::CallbackHandle::noop();
1627        // Dropping should not panic.
1628    }
1629
    // Fallback path: with no ScheduleFlush hook installed, signal
    // notification degrades to a synchronous callback inside set().
    #[test]
    fn sync_callback_fallback_without_schedule_hook() {
        // When no ScheduleFlush hook is installed, signal callbacks
        // fire synchronously inside set() (the executor_schedule fallback).
        crate::reset_executor_for_test();
        // No init_flush_scheduler call — hook is absent.

        let sig = Signal::new(0);
        let called = Rc::new(Cell::new(false));
        let c = Rc::clone(&called);
        auralis_signal::subscribe(&sig, Rc::new(move || c.set(true)));

        sig.set(1);
        // Without a hook, the callback fires synchronously.
        assert!(called.get());
    }
1646
1647    #[test]
1648    fn set_deferred_isolated_to_instance_executor() {
1649        init();
1650        let ex1 = Executor::new_instance();
1651        Executor::install_flush_scheduler(&ex1, Rc::new(TestScheduleFlush));
1652        let ex2 = Executor::new_instance();
1653        Executor::install_flush_scheduler(&ex2, Rc::new(TestScheduleFlush));
1654
1655        let sig1 = Signal::new(0);
1656        let sig2 = Signal::new(0);
1657        let s1 = sig1.clone();
1658
1659        // Spawn on ex1: use set_deferred via with_executor.
1660        crate::with_executor(&ex1, || {
1661            crate::set_deferred(&s1, 42);
1662        });
1663        Executor::flush_instance(&ex1);
1664        assert_eq!(sig1.read(), 42);
1665        // sig2 must be unaffected — set_deferred was on ex1.
1666        assert_eq!(sig2.read(), 0);
1667    }
1668
    // A subscriber that calls set() on the same signal from its own
    // callback relies on the follow-up notification pass: both the
    // original set and the re-entrant one must be delivered.
    #[test]
    fn notify_signal_state_follow_up_handles_reentrant_dirty() {
        // When a signal subscriber callback calls set() on the same
        // signal, the follow-up notification must fire correctly.
        let sig = Signal::new(0);
        let sig2 = sig.clone();
        let count = Rc::new(Cell::new(0u32));
        let c = Rc::clone(&count);

        auralis_signal::subscribe(
            &sig,
            Rc::new(move || {
                c.set(c.get() + 1);
                // Re-entrant set: should be picked up by follow-up.
                if c.get() == 1 {
                    sig2.set(2);
                }
            }),
        );

        sig.set(1);
        // First callback (set 1): count=1, triggers re-entrant set(2).
        // Follow-up notification fires second callback: count=2.
        assert_eq!(sig.read(), 2);
        assert_eq!(count.get(), 2);
    }
1695}