use crate::{
    innerlude::{LocalTask, SchedulerMsg},
    scope_context::Scope,
    scopes::ScopeId,
    Task,
};
use std::{
    cell::{Cell, Ref, RefCell},
    rc::Rc,
    sync::Arc,
};
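// The stack of runtimes installed on this thread. `RuntimeGuard` pushes and
// pops this stack, so a nested runtime (e.g. one owned by a component from a
// dynamic library) shadows the outer one until the guard is dropped.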
thread_local! {
    static RUNTIMES: RefCell<Vec<Rc<Runtime>>> = const { RefCell::new(vec![]) };
}
/// A global runtime, shared across all scopes, that provides the async runtime and the context API
pub struct Runtime {
    pub(crate) scope_states: RefCell<Vec<Option<Scope>>>,
    // We use this to track the current scope
    pub(crate) scope_stack: RefCell<Vec<ScopeId>>,
    // We use this to track the current task
    pub(crate) current_task: Cell<Option<Task>>,
    pub(crate) rendering: Cell<bool>,
    /// Tasks created with `spawn`
    pub(crate) tasks: RefCell<slab::Slab<Rc<LocalTask>>>,
    pub(crate) sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
    // The VirtualDom will hold this lock while it's doing synchronous work.
    // When the lock is released, tasks waiting on it are able to run.
    pub(crate) flush_mutex: Arc<futures_util::lock::Mutex<()>>,
    pub(crate) flush_lock: Cell<Option<futures_util::lock::OwnedMutexGuard<()>>>,
}
impl Runtime {
    pub(crate) fn new(sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>) -> Rc<Self> {
        Rc::new(Self {
            sender,
            flush_mutex: Default::default(),
            flush_lock: Default::default(),
            rendering: Cell::new(true),
            scope_states: Default::default(),
            scope_stack: Default::default(),
            current_task: Default::default(),
            tasks: Default::default(),
        })
    }
    /// Get the current runtime
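    ///
    /// A minimal sketch, assuming the `dioxus` prelude (as the example on
    /// `RuntimeGuard` below does): inside a running `VirtualDom` a runtime is
    /// always installed, while outside of one (and outside of a `RuntimeGuard`)
    /// this returns `None`.
    ///
    /// ```rust,no_run
    /// use dioxus::prelude::*;
    ///
    /// fn app() -> Element {
    ///     let runtime = Runtime::current().expect("components always run inside a runtime");
    ///     rsx! { div {} }
    /// }
    /// ```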
    pub fn current() -> Option<Rc<Self>> {
        RUNTIMES.with(|stack| stack.borrow().last().cloned())
    }
    /// Create a scope context. This list is kept in sync with the VirtualDom's scope slab.
    pub(crate) fn create_scope(&self, context: Scope) {
        let id = context.id;
        let mut scopes = self.scope_states.borrow_mut();
        if scopes.len() <= id.0 {
            scopes.resize_with(id.0 + 1, Default::default);
        }
        scopes[id.0] = Some(context);
    }
    pub(crate) fn remove_scope(self: &Rc<Self>, id: ScopeId) {
        {
            let borrow = self.scope_states.borrow();
            if let Some(scope) = &borrow[id.0] {
                let _runtime_guard = RuntimeGuard::new(self.clone());
                // Manually drop tasks, hooks, and contexts inside of the runtime
                self.on_scope(id, || {
                    // Drop all spawned tasks - order doesn't matter since tasks don't rely on each other
                    // In theory, nested tasks might not like this
                    for id in scope.spawned_tasks.take() {
                        self.remove_task(id);
                    }
                    // Drop all hooks in reverse order in case a hook depends on another hook.
                    for hook in scope.hooks.take().drain(..).rev() {
                        drop(hook);
                    }
                    // Drop all contexts
                    scope.shared_contexts.take();
                });
            }
        }
        self.scope_states.borrow_mut()[id.0].take();
    }
    /// Get the current scope id
    pub(crate) fn current_scope_id(&self) -> Option<ScopeId> {
        self.scope_stack.borrow().last().copied()
    }
    /// Run the given closure with the current scope set to the given scope
    ///
    /// Useful in a limited number of scenarios
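    ///
    /// A hedged sketch, assuming a runtime is already installed (e.g. by a
    /// running `VirtualDom`); `ScopeId::ROOT` is just a convenient scope to
    /// demonstrate with:
    ///
    /// ```rust,no_run
    /// use dioxus::prelude::*;
    ///
    /// let runtime = Runtime::current().unwrap();
    /// let value = runtime.on_scope(ScopeId::ROOT, || {
    ///     // Scope-sensitive APIs called in here resolve against the root scope
    ///     42
    /// });
    /// assert_eq!(value, 42);
    /// ```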
    pub fn on_scope<O>(&self, id: ScopeId, f: impl FnOnce() -> O) -> O {
        {
            self.scope_stack.borrow_mut().push(id);
        }
        let o = f();
        {
            self.scope_stack.borrow_mut().pop();
        }
        o
    }
    /// Get the state for any scope given its ID
    ///
    /// This is useful for inserting or removing contexts from a scope, or rendering out its root node
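    ///
    /// An illustrative sketch (internal API, so the fence is `ignore`):
    ///
    /// ```rust,ignore
    /// // The returned `Ref` keeps `scope_states` borrowed, so keep it short-lived
    /// if let Some(scope) = runtime.get_state(ScopeId::ROOT) {
    ///     // ... read contexts or spawned task ids from `scope` ...
    /// }
    /// ```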
    pub(crate) fn get_state(&self, id: ScopeId) -> Option<Ref<'_, Scope>> {
        Ref::filter_map(self.scope_states.borrow(), |contexts| {
            contexts.get(id.0).and_then(|f| f.as_ref())
        })
        .ok()
    }
    /// Pushes a new scope onto the stack
    pub(crate) fn push(runtime: Rc<Runtime>) {
        RUNTIMES.with(|stack| stack.borrow_mut().push(runtime));
    }
    /// Pops a scope off the stack
    pub(crate) fn pop() {
        RUNTIMES.with(|stack| stack.borrow_mut().pop());
    }
    /// Runs a function with the current runtime
    pub(crate) fn with<R>(f: impl FnOnce(&Runtime) -> R) -> Option<R> {
        RUNTIMES.with(|stack| stack.borrow().last().map(|r| f(r)))
    }
    /// Runs a function with the current scope
    pub(crate) fn with_current_scope<R>(f: impl FnOnce(&Scope) -> R) -> Option<R> {
        Self::with(|rt| {
            rt.current_scope_id()
                .and_then(|scope| rt.get_state(scope).map(|sc| f(&sc)))
        })
        .flatten()
    }
    /// Runs a function with the current scope
    pub(crate) fn with_scope<R>(scope: ScopeId, f: impl FnOnce(&Scope) -> R) -> Option<R> {
        Self::with(|rt| rt.get_state(scope).map(|sc| f(&sc))).flatten()
    }
    /// Acquire the flush lock and store it internally
    ///
    /// This means the VirtualDom is currently doing synchronous work
    /// The lock will be held until `release_flush_lock` is called - and then the owned guard will be dropped
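    ///
    /// An illustrative sketch (internal API, fenced as `ignore`) of the intended
    /// pairing inside the VirtualDom's synchronous work loop:
    ///
    /// ```rust,ignore
    /// runtime.acquire_flush_lock();   // tasks awaiting the flush are parked
    /// // ... do synchronous diffing / rendering work ...
    /// runtime.release_flush_lock();   // guard drops; waiting tasks wake up
    /// ```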
    pub(crate) fn acquire_flush_lock(&self) {
        // The flush lock might already be held...
        if let Some(lock) = self.flush_mutex.try_lock_owned() {
            self.flush_lock.set(Some(lock));
        }
    }
    /// Release the flush lock
    ///
    /// On the drop of the flush lock, all tasks waiting on `flush_sync` will spring to life via their wakers.
    /// You can now freely poll those tasks and they can progress
    pub(crate) fn release_flush_lock(&self) {
        self.flush_lock.take();
    }
}
/// A guard for a new runtime. This must be used to override the current runtime when importing components from a dynamic library that has its own runtime.
///
/// ```rust
/// use dioxus::prelude::*;
///
/// fn main() {
///     let virtual_dom = VirtualDom::new(app);
/// }
///
/// fn app() -> Element {
///     rsx! { Component { runtime: Runtime::current().unwrap() } }
/// }
///
/// // In a dynamic library
/// #[derive(Props, Clone)]
/// struct ComponentProps {
///     runtime: std::rc::Rc<Runtime>,
/// }
///
/// impl PartialEq for ComponentProps {
///     fn eq(&self, _other: &Self) -> bool {
///         true
///     }
/// }
///
/// fn Component(props: ComponentProps) -> Element {
///     use_hook(|| {
///         let _guard = RuntimeGuard::new(props.runtime.clone());
///     });
///
///     rsx! { div {} }
/// }
/// ```
pub struct RuntimeGuard(());
impl RuntimeGuard {
    /// Create a new runtime guard that sets the current Dioxus runtime. The runtime will be reset when the guard is dropped
    pub fn new(runtime: Rc<Runtime>) -> Self {
        Runtime::push(runtime);
        Self(())
    }
}
impl Drop for RuntimeGuard {
    fn drop(&mut self) {
        Runtime::pop();
    }
}