Skip to main content

cranpose_core/
lib.rs

1#![doc = r"Core runtime pieces for the Cranpose experiment."]
2
3pub extern crate self as cranpose_core;
4
5mod callbacks;
6mod composer;
7pub mod composer_context;
8mod composition;
9mod composition_locals;
10mod debug_trace;
11mod emit;
12#[cfg(any(feature = "internal", test))]
13mod frame_clock;
14mod hooks;
15mod launched_effect;
16pub mod owned;
17pub mod platform;
18mod recompose;
19pub mod runtime;
20mod slot_storage;
21pub mod slot_table;
22pub mod snapshot_double_index_heap;
23pub mod snapshot_id_set;
24pub mod snapshot_pinning;
25pub mod snapshot_state_observer;
26pub mod snapshot_v2;
27mod snapshot_weak_set;
28mod state;
29pub mod subcompose;
30
31#[cfg(feature = "internal")]
32#[doc(hidden)]
33pub mod internal {
34    pub use crate::frame_clock::{FrameCallbackRegistration, FrameClock};
35}
36pub use callbacks::{CallbackHolder, CallbackHolder1, ParamSlot, ParamState, ReturnSlot};
37pub use composer::Composer;
38pub(crate) use composer::{ComposerCore, EmittedNode, ParentAttachMode, ParentFrame};
39pub use composition::{Composition, ROOT_RENDER_REPLAY_LIMIT};
40pub use composition_locals::{
41    compositionLocalOf, compositionLocalOfWithPolicy, staticCompositionLocalOf, CompositionLocal,
42    CompositionLocalProvider, ProvidedValue, StaticCompositionLocal,
43};
44pub(crate) use composition_locals::{LocalStateEntry, StaticLocalEntry};
45#[cfg(test)]
46pub(crate) use debug_trace::set_debug_scope_tracking_override_for_tests;
47#[doc(hidden)]
48pub use debug_trace::{
49    debug_label_current_scope, debug_live_recompose_scope_count,
50    debug_recompose_scope_registry_stats, debug_scope_invalidation_sources, debug_scope_label,
51};
52pub use hooks::{
53    derivedStateOf, mutableStateList, mutableStateListOf, mutableStateMap, mutableStateMapOf,
54    mutableStateOf, ownedMutableStateOf, remember, rememberUpdatedState, try_mutableStateOf,
55    useState,
56};
57#[cfg(feature = "internal")]
58#[doc(hidden)]
59pub use hooks::{withFrameMillis, withFrameNanos};
60pub use launched_effect::{
61    __launched_effect_async_impl, __launched_effect_impl, CancelToken, LaunchedEffectScope,
62};
63pub use owned::Owned;
64pub use platform::{Clock, RuntimeScheduler};
65#[doc(hidden)]
66pub use runtime::{
67    current_runtime_handle, schedule_frame, schedule_node_update, DefaultScheduler, Runtime,
68    RuntimeHandle, StateId, TaskHandle,
69};
70pub use slot_storage::{GroupId, StartGroup};
71pub use slot_table::{SlotTable, SlotTableDebugStats, SlotValueTypeDebugStat};
72#[doc(hidden)]
73pub use snapshot_state_observer::SnapshotStateObserver;
74
75/// Runs the provided closure inside a mutable snapshot and applies the result.
76///
77/// Use this function when you need to update `MutableState` from outside the
78/// composition or layout phase, typically in event handlers or async tasks.
79///
80/// # Why is this needed?
81/// Cranpose uses a snapshot system (MVCC) to isolate state changes. Modifications
82/// made to `MutableState` are only visible to the current thread's active snapshot.
83/// To make changes visible to the rest of the system (and trigger recomposition),
84/// they must be "applied" by committing the snapshot. This helper handles that
85/// lifecycle for you.
86///
87/// # Example
88///
89/// ```ignore
90/// // Inside a button click handler
91/// run_in_mutable_snapshot(|| {
92///     count.set(count.value() + 1);
93/// });
94/// ```
95///
96/// # Important
97/// ALL UI event handlers (keyboard, mouse, touch, animations) that modify state
98/// MUST use this function or [`dispatch_ui_event`].
99pub fn run_in_mutable_snapshot<T>(block: impl FnOnce() -> T) -> Result<T, &'static str> {
100    let snapshot = snapshot_v2::take_mutable_snapshot(None, None);
101
102    // Mark that we're in an applied snapshot context
103    IN_APPLIED_SNAPSHOT.with(|c| c.set(true));
104    let value = snapshot.enter(block);
105    IN_APPLIED_SNAPSHOT.with(|c| c.set(false));
106
107    match snapshot.apply() {
108        snapshot_v2::SnapshotApplyResult::Success => Ok(value),
109        snapshot_v2::SnapshotApplyResult::Failure => Err("Snapshot apply failed"),
110    }
111}
112
113/// Dispatches a UI event in a proper snapshot context.
114///
115/// This is a convenience wrapper around [`run_in_mutable_snapshot`] that returns
116/// `Option<T>` instead of `Result<T, &str>`.
117///
118/// # Example
119/// ```ignore
120/// // In a keyboard event handler:
121/// dispatch_ui_event(|| {
122///     text_field_state.edit(|buffer| {
123///         buffer.insert("a");
124///     });
125/// });
126/// ```
127pub fn dispatch_ui_event<T>(block: impl FnOnce() -> T) -> Option<T> {
128    run_in_mutable_snapshot(block).ok()
129}
130
// ─── Event Handler Context Tracking ─────────────────────────────────────────
//
// These thread-locals track whether code is running in an event handler context
// and whether it's properly wrapped in run_in_mutable_snapshot. This allows
// debug-mode warnings when state is modified without proper snapshot handling.

thread_local! {
    /// Tracks if we're in a UI event handler context (keyboard, mouse, etc.)
    /// Toggled by `enter_event_handler` / `exit_event_handler`.
    pub(crate) static IN_EVENT_HANDLER: Cell<bool> = const { Cell::new(false) };
    /// Tracks if we're in a properly-applied mutable snapshot
    /// (set for the duration of `run_in_mutable_snapshot`'s block).
    pub(crate) static IN_APPLIED_SNAPSHOT: Cell<bool> = const { Cell::new(false) };
}
143
/// Point-in-time length/capacity statistics for one composition pass.
///
/// Each `*_len`/`*_cap` pair reports the length and capacity of an internal
/// composer buffer, letting tests and tooling watch for unexpected growth or
/// allocation churn between passes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct CompositionPassDebugStats {
    /// Number of queued commands.
    pub commands_len: usize,
    /// Capacity of the command queue.
    pub commands_cap: usize,
    /// Bytes of command payload currently stored.
    pub command_payload_len_bytes: usize,
    /// Bytes of command payload capacity reserved.
    pub command_payload_cap_bytes: usize,
    /// Entries in the child-sync buffer.
    pub sync_children_len: usize,
    /// Capacity of the child-sync buffer.
    pub sync_children_cap: usize,
    /// Entries in the child-id scratch buffer.
    pub sync_child_ids_len: usize,
    /// Capacity of the child-id scratch buffer.
    pub sync_child_ids_cap: usize,
    /// Number of pending side effects.
    pub side_effects_len: usize,
    /// Capacity of the side-effect queue.
    pub side_effects_cap: usize,
}
157
158/// Marks the start of an event handler context.
159/// Call this at the start of keyboard/mouse/touch event handling.
160/// Use `run_in_mutable_snapshot` or `dispatch_ui_event` inside to ensure proper snapshot handling.
161pub fn enter_event_handler() {
162    IN_EVENT_HANDLER.with(|c| c.set(true));
163}
164
165/// Marks the end of an event handler context.
166pub fn exit_event_handler() {
167    IN_EVENT_HANDLER.with(|c| c.set(false));
168}
169
170/// Returns true if currently in an event handler context.
171pub fn in_event_handler() -> bool {
172    IN_EVENT_HANDLER.with(|c| c.get())
173}
174
175/// Returns true if currently in an applied snapshot context.
176pub fn in_applied_snapshot() -> bool {
177    IN_APPLIED_SNAPSHOT.with(|c| c.get())
178}
179
180#[cfg(test)]
181pub use runtime::{TestRuntime, TestScheduler};
182
183use crate::collections::map::{HashMap, HashSet};
184use smallvec::SmallVec;
185use std::any::{Any, TypeId};
186use std::cell::{Cell, Ref, RefCell, RefMut};
187use std::cmp::Reverse;
188use std::collections::BinaryHeap;
189use std::hash::{Hash, Hasher};
190use std::ops::{Deref, DerefMut};
191use std::rc::{Rc, Weak};
192use std::sync::atomic::{AtomicUsize, Ordering};
193
/// Hash key identifying a call site (or user key) within a composition.
pub type Key = u64;
/// Index of a node in the applier's node storage.
pub type NodeId = usize;

/// Derives a `Key` from a `file!()`/`line!()`/`column!()` triple.
///
/// Mixes the address of the file string (stable for a given `&str` value
/// within one process run) with the line and column so distinct call sites
/// map to distinct keys without hashing the file path's bytes.
pub fn location_key(file: &str, line: u32, column: u32) -> Key {
    let file_bits = (file.as_ptr() as u64).wrapping_mul(0x9E37_79B9_7F4A_7C15);
    let line_bits = (line as u64) << 32;
    file_bits ^ line_bits ^ (column as u64)
}
201
/// Stable identifier for a slot in the slot table.
///
/// Anchors provide positional stability: they maintain their identity even when
/// the slot table is reorganized (e.g., during conditional rendering or group moves).
/// This prevents effect states from being prematurely removed during recomposition.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
pub struct AnchorId(usize);

impl AnchorId {
    /// Sentinel anchor (zero) meaning "no anchor".
    pub(crate) const INVALID: AnchorId = AnchorId(0);

    /// True for every anchor except the [`Self::INVALID`] sentinel.
    pub fn is_valid(&self) -> bool {
        *self != Self::INVALID
    }
}
219
/// Identifier for a recompose scope; allocated from `NEXT_SCOPE_ID`.
pub(crate) type ScopeId = usize;
/// Identifier for a composition-local slot; allocated from `NEXT_LOCAL_KEY`.
type LocalKey = usize;
/// Identifier for a registered frame callback.
pub(crate) type FrameCallbackId = u64;
/// Cheaply clonable (Rc) snapshot of the composer's local-context stack.
type LocalStackSnapshot = Rc<Vec<composer::LocalContext>>;
224
thread_local! {
    /// Shared empty local-context stack so fresh scopes avoid a per-scope allocation.
    static EMPTY_LOCAL_STACK: LocalStackSnapshot = Rc::new(Vec::new());
    /// Debug-only map from scope id to a human-readable label.
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_LABELS: RefCell<HashMap<usize, &'static str>> = RefCell::new(HashMap::default());
    /// Debug-only map from scope id to the set of sources that invalidated it.
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_INVALIDATION_SOURCES: RefCell<HashMap<usize, HashSet<String>>> =
        RefCell::new(HashMap::default());
    /// Test-only override for scope tracking (`None` = default behaviour).
    #[cfg(all(test, debug_assertions))]
    static DEBUG_SCOPE_TRACKING_OVERRIDE: Cell<Option<bool>> = const { Cell::new(None) };
}
235
/// Monotonic source of `ScopeId`s; starts at 1 so id 0 is never handed out.
static NEXT_SCOPE_ID: AtomicUsize = AtomicUsize::new(1);
/// Monotonic source of `LocalKey`s for composition locals.
static NEXT_LOCAL_KEY: AtomicUsize = AtomicUsize::new(1);
/// Count of currently-alive `RecomposeScopeInner`s, for debug statistics.
static LIVE_RECOMPOSE_SCOPE_COUNT: AtomicUsize = AtomicUsize::new(0);
239
/// Allocates the next unique scope id (relaxed ordering: only uniqueness matters).
fn next_scope_id() -> ScopeId {
    NEXT_SCOPE_ID.fetch_add(1, Ordering::Relaxed)
}

/// Allocates the next unique composition-local key.
fn next_local_key() -> LocalKey {
    NEXT_LOCAL_KEY.fetch_add(1, Ordering::Relaxed)
}

/// Returns the shared empty local stack without allocating a new one.
fn empty_local_stack() -> LocalStackSnapshot {
    EMPTY_LOCAL_STACK.with(Rc::clone)
}
251
/// Callback invoked to re-run a scope's content during recomposition.
enum RecomposeCallback {
    /// Plain function pointer; avoids a heap allocation.
    Static(fn(&Composer)),
    /// Boxed closure carrying captured state.
    Dynamic(Box<dyn FnMut(&Composer) + 'static>),
}
256
/// Shared state behind a [`RecomposeScope`] handle.
///
/// Lives in an `Rc`; weak references are handed to the runtime's invalid-scope
/// registry so the registry never keeps a dead scope alive.
pub(crate) struct RecomposeScopeInner {
    /// Unique id, allocated once at construction.
    id: ScopeId,
    /// Runtime this scope registers invalidations with.
    runtime: RuntimeHandle,
    /// True when the scope's content is out of date.
    invalid: Cell<bool>,
    /// True while this scope sits in the runtime's invalid-scope queue.
    enqueued: Cell<bool>,
    /// False while deactivated; inactive scopes defer re-enqueueing.
    active: Cell<bool>,
    /// Set once the scope's content has composed at least once.
    composed_once: Cell<bool>,
    /// Set when another recompose should follow the current one.
    pending_recompose: Cell<bool>,
    /// One-shot flag: next `should_recompose` answers "reuse".
    force_reuse: Cell<bool>,
    /// One-shot flag: next `should_recompose` answers "recompose".
    force_recompose: Cell<bool>,
    /// Slot-table anchor of the group this scope owns (INVALID when unset).
    group_anchor: Cell<AnchorId>,
    /// Hint at the parent node for this scope's content, if known.
    parent_hint: Cell<Option<NodeId>>,
    /// Callback that re-runs the scope's content (taken while running).
    recompose: RefCell<Option<RecomposeCallback>>,
    /// Weak link to the enclosing scope, used for callback promotion.
    parent_scope: RefCell<Option<Weak<RecomposeScopeInner>>>,
    /// Composition-local stack captured when the scope last composed.
    local_stack: RefCell<LocalStackSnapshot>,
    /// Slot-storage host this scope composes into, if attached.
    slots_host: RefCell<Option<Weak<SlotsHost>>>,
    /// State ids this scope subscribes to; unregistered on drop.
    state_subscriptions: RefCell<HashSet<StateId>>,
}
275
impl RecomposeScopeInner {
    /// Builds a fresh, active, not-yet-composed scope and bumps the live count.
    fn new(runtime: RuntimeHandle) -> Self {
        // Balanced by the fetch_sub in Drop; used only for debug statistics.
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_add(1, Ordering::Relaxed);
        Self {
            id: next_scope_id(),
            runtime,
            invalid: Cell::new(false),
            enqueued: Cell::new(false),
            active: Cell::new(true),
            composed_once: Cell::new(false),
            pending_recompose: Cell::new(false),
            force_reuse: Cell::new(false),
            force_recompose: Cell::new(false),
            group_anchor: Cell::new(AnchorId::INVALID),
            parent_hint: Cell::new(None),
            recompose: RefCell::new(None),
            parent_scope: RefCell::new(None),
            local_stack: RefCell::new(empty_local_stack()),
            slots_host: RefCell::new(None),
            state_subscriptions: RefCell::new(HashSet::default()),
        }
    }
}
299
impl Drop for RecomposeScopeInner {
    /// Tears down runtime registrations so the runtime holds no dangling scope ids.
    fn drop(&mut self) {
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_sub(1, Ordering::Relaxed);
        // Detach all state subscriptions; `take` avoids holding a borrow in the loop.
        let subscriptions = std::mem::take(self.state_subscriptions.get_mut());
        for state_id in subscriptions {
            self.runtime.unregister_state_scope(state_id, self.id);
        }
        #[cfg(debug_assertions)]
        {
            // try_with: the thread-local may already be destroyed during thread teardown.
            let _ = DEBUG_SCOPE_LABELS.try_with(|labels| {
                labels.borrow_mut().remove(&self.id);
            });
            let _ = DEBUG_SCOPE_INVALIDATION_SOURCES.try_with(|sources| {
                sources.borrow_mut().remove(&self.id);
            });
        }
        // If we were still queued for recomposition, tell the runtime we're done.
        if self.enqueued.replace(false) {
            self.runtime.mark_scope_recomposed(self.id);
        }
    }
}
321
/// Debug statistics for the runtime's recompose-scope registry.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct RecomposeScopeRegistryDebugStats {
    /// Number of registered scopes.
    pub len: usize,
    /// Capacity of the registry's backing storage.
    pub capacity: usize,
}
327
/// Cloneable handle to the recomposition state of one region of the composition.
///
/// Clones share the same inner state (`Rc`); equality is identity-based.
#[derive(Clone)]
pub struct RecomposeScope {
    inner: Rc<RecomposeScopeInner>,
}
332
// Identity semantics: two handles are equal iff they share the same inner Rc.
impl PartialEq for RecomposeScope {
    fn eq(&self, other: &Self) -> bool {
        Rc::ptr_eq(&self.inner, &other.inner)
    }
}

impl Eq for RecomposeScope {}

// Hash by scope id; ids are unique per inner, so this stays consistent with Eq.
impl Hash for RecomposeScope {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id().hash(state);
    }
}
346
impl RecomposeScope {
    /// Creates an active scope bound to `runtime` with a fresh unique id.
    fn new(runtime: RuntimeHandle) -> Self {
        Self {
            inner: Rc::new(RecomposeScopeInner::new(runtime)),
        }
    }

    /// Weak handle for registries that must not keep the scope alive.
    fn downgrade(&self) -> Weak<RecomposeScopeInner> {
        Rc::downgrade(&self.inner)
    }

    /// Unique id of this scope.
    pub fn id(&self) -> ScopeId {
        self.inner.id
    }

    /// True when the scope needs recomposition.
    pub fn is_invalid(&self) -> bool {
        self.inner.invalid.get()
    }

    /// True unless the scope has been deactivated.
    pub fn is_active(&self) -> bool {
        self.inner.active.get()
    }

    /// Remembers that this scope reads `state_id` (unsubscribed on drop).
    fn record_state_subscription(&self, state_id: StateId) {
        self.inner.state_subscriptions.borrow_mut().insert(state_id);
    }

    /// Marks the scope invalid and, if active and not already queued,
    /// registers it with the runtime for recomposition.
    fn invalidate(&self) {
        self.inner.invalid.set(true);
        if !self.inner.active.get() {
            // Inactive scopes stay invalid; reactivate() re-enqueues them.
            return;
        }
        if !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// Clears invalidation state after a recomposition.
    ///
    /// If another recompose was requested in the meantime
    /// (`pending_recompose`), the scope is immediately re-invalidated, or
    /// just marked invalid when inactive.
    fn mark_recomposed(&self) {
        self.inner.invalid.set(false);
        self.inner.force_reuse.set(false);
        self.inner.force_recompose.set(false);
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
        let pending = self.inner.pending_recompose.replace(false);
        if pending {
            if self.inner.active.get() {
                self.invalidate();
            } else {
                self.inner.invalid.set(true);
            }
        }
    }

    /// Installs a boxed recompose callback (may capture state).
    fn set_recompose(&self, callback: Box<dyn FnMut(&Composer) + 'static>) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Dynamic(callback));
    }

    /// Installs a plain function-pointer recompose callback (no allocation).
    fn set_recompose_fn(&self, callback: fn(&Composer)) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Static(callback));
    }

    /// Returns true if a callback was found and executed, false if no callback was registered.
    ///
    /// The callback is taken out of the slot before running, so a re-entrant
    /// invocation cannot run it twice.
    fn run_recompose(&self, composer: &Composer) -> bool {
        let callback = self.inner.recompose.borrow_mut().take();
        if let Some(callback) = callback {
            match callback {
                RecomposeCallback::Static(callback) => callback(composer),
                RecomposeCallback::Dynamic(mut callback) => callback(composer),
            }
            true
        } else {
            false
        }
    }

    /// Whether a recompose callback is currently installed.
    fn has_recompose_callback(&self) -> bool {
        self.inner.recompose.borrow().is_some()
    }

    /// Associates the scope with its slot-table group anchor.
    fn set_group_anchor(&self, anchor: AnchorId) {
        self.inner.group_anchor.set(anchor);
    }

    /// Anchor of the scope's group (`AnchorId::INVALID` when unset).
    fn group_anchor(&self) -> AnchorId {
        self.inner.group_anchor.get()
    }

    /// Captures the composition-local stack in effect for this scope.
    fn snapshot_locals(&self, stack: LocalStackSnapshot) {
        *self.inner.local_stack.borrow_mut() = stack;
    }

    /// The captured composition-local stack (cheap `Rc` clone).
    fn local_stack(&self) -> LocalStackSnapshot {
        self.inner.local_stack.borrow().clone()
    }

    /// Records the node this scope's content attaches under, if known.
    fn set_parent_hint(&self, parent: Option<NodeId>) {
        self.inner.parent_hint.set(parent);
    }

    /// Links this scope to its enclosing scope (held weakly).
    fn set_parent_scope(&self, parent: Option<RecomposeScope>) {
        *self.inner.parent_scope.borrow_mut() = parent.map(|scope| scope.downgrade());
    }

    /// Upgrades the weak parent link, if the parent is still alive.
    fn parent_scope(&self) -> Option<RecomposeScope> {
        self.inner
            .parent_scope
            .borrow()
            .as_ref()
            .and_then(Weak::upgrade)
            .map(|inner| RecomposeScope { inner })
    }

    /// Walks up the parent chain to the nearest ancestor that has a
    /// recompose callback installed, i.e. one that can actually recompose.
    fn callback_promotion_target(&self) -> Option<RecomposeScope> {
        let mut current = self.parent_scope();
        while let Some(scope) = current {
            if scope.has_recompose_callback() {
                return Some(scope);
            }
            current = scope.parent_scope();
        }
        None
    }

    /// The recorded parent-node hint, if any.
    fn parent_hint(&self) -> Option<NodeId> {
        self.inner.parent_hint.get()
    }

    /// Attaches the scope to the slot-storage host it composes into.
    fn set_slots_host(&self, host: Weak<SlotsHost>) {
        *self.inner.slots_host.borrow_mut() = Some(host);
    }

    /// Upgrades the slot-host link, if the host is still alive.
    fn slots_host(&self) -> Option<Rc<SlotsHost>> {
        self.inner
            .slots_host
            .borrow()
            .as_ref()
            .and_then(Weak::upgrade)
    }

    /// Deactivates the scope: a queued recomposition is withdrawn and future
    /// invalidations are deferred until `reactivate`.
    pub fn deactivate(&self) {
        if !self.inner.active.replace(false) {
            return; // already inactive
        }
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
    }

    /// Reactivates the scope, re-enqueueing it if it went invalid while inactive.
    pub fn reactivate(&self) {
        if self.inner.active.replace(true) {
            return; // already active
        }
        if self.inner.invalid.get() && !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// Forces the next `should_recompose` to answer "reuse", and queues a
    /// follow-up recompose via `pending_recompose`.
    pub fn force_reuse(&self) {
        self.inner.force_reuse.set(true);
        self.inner.force_recompose.set(false);
        self.inner.pending_recompose.set(true);
    }

    /// Forces the next `should_recompose` to answer "recompose".
    pub fn force_recompose(&self) {
        self.inner.force_recompose.set(true);
        self.inner.force_reuse.set(false);
        self.inner.pending_recompose.set(false);
    }

    /// Decides whether the scope must recompose now.
    ///
    /// Consumes the one-shot force flags: forced-recompose wins, then
    /// forced-reuse, then the ordinary invalid flag.
    pub fn should_recompose(&self) -> bool {
        if self.inner.force_recompose.replace(false) {
            self.inner.force_reuse.set(false);
            return true;
        }
        if self.inner.force_reuse.replace(false) {
            return false;
        }
        self.is_invalid()
    }

    /// True once the scope's content has composed at least once.
    pub fn has_composed_once(&self) -> bool {
        self.inner.composed_once.get()
    }

    /// Records the first successful composition.
    fn mark_composed_once(&self) {
        self.inner.composed_once.set(true);
    }
}
540
#[cfg(test)]
impl RecomposeScope {
    /// Test-only constructor exposing the private `new`.
    pub(crate) fn new_for_test(runtime: RuntimeHandle) -> Self {
        Self::new(runtime)
    }
}
547
/// Flags overriding a scope's normal invalidation check
/// (see `RecomposeScope::should_recompose`). The two flags are
/// mutually exclusive in practice.
#[derive(Debug, Clone, Copy, Default)]
pub struct RecomposeOptions {
    /// Skip recomposition even if the scope is invalid.
    pub force_reuse: bool,
    /// Recompose even if the scope is not invalid.
    pub force_recompose: bool,
}
553
/// Errors produced by node lookup/mutation operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NodeError {
    /// No node with this id exists.
    Missing { id: NodeId },
    /// The node exists but is not of the expected concrete type.
    TypeMismatch { id: NodeId, expected: &'static str },
    /// A context the node operation required was unavailable.
    MissingContext { id: NodeId, reason: &'static str },
    /// Insertion failed because the id is already taken.
    AlreadyExists { id: NodeId },
}
561
562impl std::fmt::Display for NodeError {
563    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
564        match self {
565            NodeError::Missing { id } => write!(f, "node {id} missing"),
566            NodeError::TypeMismatch { id, expected } => {
567                write!(f, "node {id} type mismatch; expected {expected}")
568            }
569            NodeError::MissingContext { id, reason } => {
570                write!(f, "missing context for node {id}: {reason}")
571            }
572            NodeError::AlreadyExists { id } => {
573                write!(f, "node {id} already exists")
574            }
575        }
576    }
577}
578
579impl std::error::Error for NodeError {}
580
581pub use subcompose::{
582    ContentTypeReusePolicy, DefaultSlotReusePolicy, SlotId, SlotReusePolicy, SubcomposeState,
583};
584
/// Coarse phase of the UI pipeline a piece of work runs in.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Phase {
    /// Building or updating the composition.
    Compose,
    /// Measuring node sizes.
    Measure,
    /// Positioning nodes.
    Layout,
}
591
592pub use composer_context::with_composer as with_current_composer;
593
594#[allow(non_snake_case)]
595pub fn withCurrentComposer<R>(f: impl FnOnce(&Composer) -> R) -> R {
596    composer_context::with_composer(f)
597}
598
/// Runs `f` with the current composer if one is installed; `None` otherwise.
fn with_current_composer_opt<R>(f: impl FnOnce(&Composer) -> R) -> Option<R> {
    composer_context::try_with_composer(f)
}

/// Composes `content` inside a group keyed by `key`'s hash, giving items in
/// dynamic lists a stable identity (see `Composer::with_key`).
pub fn with_key<K: Hash>(key: &K, content: impl FnOnce()) {
    with_current_composer(|composer| composer.with_key(key, |_| content()));
}

/// Kotlin-style alias for [`with_key`].
#[allow(non_snake_case)]
pub fn withKey<K: Hash>(key: &K, content: impl FnOnce()) {
    with_key(key, content)
}
611
/// Per-slot bookkeeping for a `DisposableEffect!`.
#[derive(Default)]
struct DisposableEffectState {
    /// Hash of the keys the effect last ran with; `None` before the first run.
    key: Option<Key>,
    /// Cleanup registered by the last run; consumed on re-run or drop.
    cleanup: Option<Box<dyn FnOnce()>>,
}
617
618impl DisposableEffectState {
619    fn should_run(&self, key: Key) -> bool {
620        match self.key {
621            Some(current) => current != key,
622            None => true,
623        }
624    }
625
626    fn set_key(&mut self, key: Key) {
627        self.key = Some(key);
628    }
629
630    fn set_cleanup(&mut self, cleanup: Option<Box<dyn FnOnce()>>) {
631        self.cleanup = cleanup;
632    }
633
634    fn run_cleanup(&mut self) {
635        if let Some(cleanup) = self.cleanup.take() {
636            cleanup();
637        }
638    }
639}
640
impl Drop for DisposableEffectState {
    /// Ensures any pending cleanup runs when the slot is discarded.
    fn drop(&mut self) {
        self.run_cleanup();
    }
}
646
/// Receiver passed to `DisposableEffect!` bodies; offers only `on_dispose`.
#[derive(Clone, Copy, Debug, Default)]
pub struct DisposableEffectScope;

/// Result of a `DisposableEffect!` body, carrying an optional cleanup closure.
#[derive(Default)]
pub struct DisposableEffectResult {
    // Cleanup to run when the effect is disposed or its keys change.
    cleanup: Option<Box<dyn FnOnce()>>,
}

impl DisposableEffectScope {
    /// Registers `cleanup` to run when the effect leaves the composition
    /// or its keys change.
    pub fn on_dispose(&self, cleanup: impl FnOnce() + 'static) -> DisposableEffectResult {
        DisposableEffectResult::new(cleanup)
    }
}

impl DisposableEffectResult {
    /// Wraps `cleanup` so the effect machinery can store it.
    pub fn new(cleanup: impl FnOnce() + 'static) -> Self {
        Self {
            cleanup: Some(Box::new(cleanup)),
        }
    }

    /// Consumes the result, yielding the cleanup closure (if any).
    fn into_cleanup(self) -> Option<Box<dyn FnOnce()>> {
        self.cleanup
    }
}
672
/// Registers `effect` as a side effect on the current composer
/// (see `Composer::register_side_effect`). Unlike `DisposableEffect!`,
/// there are no keys and no cleanup.
#[allow(non_snake_case)]
pub fn SideEffect(effect: impl FnOnce() + 'static) {
    with_current_composer(|composer| composer.register_side_effect(effect));
}
677
/// Implementation backing the `DisposableEffect!` macro.
///
/// `group_key` is the call-site key ensuring a distinct slot-table group per
/// textual usage. `keys` is hashed; the effect re-runs (after running the
/// previous cleanup) whenever the hash differs from the one recorded in the
/// remembered slot state.
pub fn __disposable_effect_impl<K, F>(group_key: Key, keys: K, effect: F)
where
    K: Hash,
    F: FnOnce(DisposableEffectScope) -> DisposableEffectResult + 'static,
{
    // Create a group using the caller's location to ensure each DisposableEffect
    // gets its own slot table entry, even in conditional branches
    with_current_composer(|composer| {
        composer.with_group(group_key, |composer| {
            let key_hash = hash_key(&keys);
            let state = composer.remember(DisposableEffectState::default);
            if state.with(|state| state.should_run(key_hash)) {
                state.update(|state| {
                    // Previous cleanup runs before the new effect instance starts.
                    state.run_cleanup();
                    state.set_key(key_hash);
                });
                let state_for_effect = state.clone();
                // Option dance: the side-effect slot wants FnMut, `effect` is FnOnce.
                let mut effect_opt = Some(effect);
                composer.register_side_effect(move || {
                    if let Some(effect) = effect_opt.take() {
                        let result = effect(DisposableEffectScope);
                        state_for_effect.update(|state| state.set_cleanup(result.into_cleanup()));
                    }
                });
            }
        });
    });
}
706
/// Runs an effect when `$keys` changes, with cleanup on key change or disposal.
///
/// Expands to [`__disposable_effect_impl`] keyed by the invocation site so
/// each textual use gets its own slot-table group.
#[macro_export]
macro_rules! DisposableEffect {
    ($keys:expr, $effect:expr) => {
        $crate::__disposable_effect_impl(
            $crate::location_key(file!(), line!(), column!()),
            $keys,
            $effect,
        )
    };
}
717
/// Clones captured values before moving them into a closure body.
///
/// `clone_captures!(a, b = expr; body)` rebinds `a = a.clone()` and
/// `b = (expr).clone()`, then evaluates `body` — shorthand for the common
/// clone-then-move pattern around `move` closures.
#[macro_export]
macro_rules! clone_captures {
    ($($alias:ident $(= $value:expr)?),+ $(,)?; $body:expr) => {{
        $(let $alias = $crate::clone_captures!(@clone $alias $(= $value)?);)+
        $body
    }};
    // Internal: clone an explicit expression into the alias.
    (@clone $alias:ident = $value:expr) => {
        ($value).clone()
    };
    // Internal: clone the variable named by the alias itself.
    (@clone $alias:ident) => {
        $alias.clone()
    };
}
731
/// Mutably accesses node `id` as concrete type `N` via the current composer.
///
/// Returns a `NodeError` when the node is missing or is not an `N`
/// (see `Composer::with_node_mut`).
pub fn with_node_mut<N: Node + 'static, R>(
    id: NodeId,
    f: impl FnOnce(&mut N) -> R,
) -> Result<R, NodeError> {
    with_current_composer(|composer| composer.with_node_mut(id, f))
}

/// Pushes `id` onto the current composer's parent stack (see `Composer::push_parent`).
pub fn push_parent(id: NodeId) {
    with_current_composer(|composer| composer.push_parent(id));
}

/// Pops the most recent `push_parent` entry from the current composer.
pub fn pop_parent() {
    with_current_composer(|composer| composer.pop_parent());
}
746
747// ═══════════════════════════════════════════════════════════════════════════
748// Public slot storage helper types
749// ═══════════════════════════════════════════════════════════════════════════
750
/// Behavior contract for nodes managed by the applier tree.
///
/// Every method has a default (usually no-op) implementation, so concrete
/// node types override only the lifecycle, child-management, dirty-tracking,
/// or recycling hooks they actually need.
pub trait Node: Any {
    /// Lifecycle hook invoked when the node is mounted.
    fn mount(&mut self) {}
    /// Lifecycle hook invoked when the node is updated in place.
    fn update(&mut self) {}
    /// Lifecycle hook invoked when the node is unmounted.
    fn unmount(&mut self) {}
    /// Registers `_child` as a child of this node.
    fn insert_child(&mut self, _child: NodeId) {}
    /// Removes `_child` from this node's children.
    fn remove_child(&mut self, _child: NodeId) {}
    /// Moves a child from position `_from` to position `_to`.
    fn move_child(&mut self, _from: usize, _to: usize) {}
    /// Replaces the child list wholesale.
    fn update_children(&mut self, _children: &[NodeId]) {}
    /// Returns this node's child ids (empty by default).
    fn children(&self) -> Vec<NodeId> {
        Vec::new()
    }
    /// Copies child IDs into the provided scratch buffer without allocating a
    /// fresh container for every traversal.
    fn collect_children_into(&self, out: &mut SmallVec<[NodeId; 8]>) {
        out.clear();
        out.extend(self.children());
    }
    /// Called after the node is created to record its own ID.
    /// Useful for nodes that need to store their ID for later operations.
    fn set_node_id(&mut self, _id: NodeId) {}
    /// Called when this node is attached to a parent.
    /// Nodes with parent tracking should set their parent reference here.
    fn on_attached_to_parent(&mut self, _parent: NodeId) {}
    /// Called when this node is removed from its parent.
    /// Nodes with parent tracking should clear their parent reference here.
    fn on_removed_from_parent(&mut self) {}
    /// Get this node's parent ID (for nodes that track parents).
    /// Returns None if node has no parent or doesn't track parents.
    fn parent(&self) -> Option<NodeId> {
        None
    }
    /// Mark this node as needing layout (for nodes with dirty flags).
    /// Called during bubbling to propagate dirtiness up the tree.
    fn mark_needs_layout(&self) {}
    /// Check if this node needs layout (for nodes with dirty flags).
    fn needs_layout(&self) -> bool {
        false
    }
    /// Mark this node as needing measure (size may have changed).
    /// Called during bubbling when children are added/removed.
    fn mark_needs_measure(&self) {}
    /// Check if this node needs measure (for nodes with dirty flags).
    fn needs_measure(&self) -> bool {
        false
    }
    /// Mark this node as needing semantics recomputation.
    fn mark_needs_semantics(&self) {}
    /// Check if this node needs semantics recomputation.
    fn needs_semantics(&self) -> bool {
        false
    }
    /// Set parent reference for dirty flag bubbling ONLY.
    /// This is a minimal version of on_attached_to_parent that doesn't trigger
    /// registry updates or other side effects. Used during measurement when we
    /// need to establish parent connections for bubble_measure_dirty without
    /// causing the full attachment lifecycle.
    ///
    /// Default: delegates to on_attached_to_parent for backward compatibility.
    fn set_parent_for_bubbling(&mut self, parent: NodeId) {
        self.on_attached_to_parent(parent);
    }

    /// Returns a recycle pool key when this node supports shell reuse.
    fn recycle_key(&self) -> Option<TypeId> {
        None
    }

    /// Bounds how many recyclable shells of this node type should be retained.
    fn recycle_pool_limit(&self) -> Option<usize> {
        None
    }

    /// Clears live attachments before the node shell enters a recycle pool.
    fn prepare_for_recycle(&mut self) {}

    /// Optionally provides a compact replacement box for this recycled shell.
    ///
    /// Returning `Some` lets nodes move pooled survivors onto fresh compact
    /// storage so the recycle pool does not pin large spike-era allocations.
    fn rehouse_for_recycle(&self) -> Option<Box<dyn Node>> {
        None
    }

    /// Optionally moves a live node onto a fresh box during applier compaction.
    ///
    /// This is used after large-majority teardowns where a small surviving live
    /// tree can otherwise pin allocator pages from a much larger spike-era node
    /// population. Implementations must preserve the node's live state.
    fn rehouse_for_live_compaction(&mut self) -> Option<Box<dyn Node>> {
        None
    }

    /// Returns the node-owned heap retained beyond the node's own box allocation.
    fn debug_heap_bytes(&self) -> usize {
        0
    }
}
848
/// Unified API for bubbling layout dirty flags from a node to the root (Applier context).
///
/// This is the canonical function for dirty bubbling during the apply phase (structural changes).
/// Call this after mutations like insert/remove/move that happen during apply.
///
/// # Behavior
/// 1. Marks the starting node as needing layout
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Stops when it reaches a node that's already dirty (O(1) optimization)
/// 4. Stops at the root (node with no parent)
///
/// # Performance
/// This function is O(height) in the worst case, but typically O(1) due to early exit
/// when encountering an already-dirty ancestor.
///
/// # Usage
/// - Call from composer mutations (insert/remove/move) during apply phase
/// - Call from applier-level operations that modify the tree structure
pub fn bubble_layout_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    // Delegates to the shared applier-side walker.
    bubble_layout_dirty_applier(applier, node_id);
}

/// Unified API for bubbling measure dirty flags from a node to the root (Applier context).
///
/// Call this when a node's size may have changed (children added/removed, modifier changed).
/// This ensures that measure_layout will increment the cache epoch and re-measure the subtree.
///
/// # Behavior
/// 1. Marks the starting node as needing measure
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Stops when it reaches a node that's already dirty (O(1) optimization)
/// 4. Stops at the root (node with no parent)
pub fn bubble_measure_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    // Delegates to the shared applier-side walker.
    bubble_measure_dirty_applier(applier, node_id);
}

/// Unified API for bubbling semantics dirty flags from a node to the root (Applier context).
///
/// This mirrors [`bubble_layout_dirty`] but toggles semantics-specific dirty
/// flags instead of layout ones, allowing semantics updates to propagate during
/// the apply phase without forcing layout work.
pub fn bubble_semantics_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    // Delegates to the shared applier-side walker.
    bubble_semantics_dirty_applier(applier, node_id);
}

/// Schedules semantics bubbling for a node using the active composer if present.
///
/// This defers the work to the apply phase where we can safely mutate the
/// applier tree without re-entrantly borrowing the composer during composition.
pub fn queue_semantics_invalidation(node_id: NodeId) {
    // Best-effort: `let _` makes this a silent no-op when no composer is active.
    let _ = composer_context::try_with_composer(|composer| {
        composer.enqueue_semantics_invalidation(node_id);
    });
}
903
/// Unified API for bubbling layout dirty flags from a node to the root (Composer context).
///
/// This is the canonical function for dirty bubbling during composition (property changes).
/// Call this after property changes that happen during composition via with_node_mut.
///
/// # Behavior
/// 1. Marks the starting node as needing layout
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Stops when it reaches a node that's already dirty (O(1) optimization)
/// 4. Stops at the root (node with no parent)
///
/// # Performance
/// This function is O(height) in the worst case, but typically O(1) due to early exit
/// when encountering an already-dirty ancestor.
///
/// # Type Requirements
/// The node type N must implement Node (which includes mark_needs_layout, parent, etc.).
/// Typically this will be LayoutNode or similar layout-aware node types.
///
/// # Usage
/// - Call from property setters during composition (e.g., set_modifier, set_measure_policy)
/// - Call from widget composition when layout-affecting state changes
pub fn bubble_layout_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    // Delegates to the composer-side walker, typed to the concrete node kind.
    bubble_layout_dirty_composer::<N>(node_id);
}

/// Unified API for bubbling measure dirty flags from a node to the root during composition.
///
/// This queues a dirty-bubble command on the active composer so measure invalidation
/// runs during the apply phase, avoiding re-entrant applier borrows while widgets are
/// mutating nodes via `with_node_mut`.
pub fn bubble_measure_dirty_in_composer(node_id: NodeId) {
    with_current_composer(|composer| {
        // Defer to the apply phase via a BubbleDirty command (measure-only).
        composer.commands_mut().push(Command::BubbleDirty {
            node_id,
            bubble: DirtyBubble {
                layout: false,
                measure: true,
                semantics: false,
            },
        });
    });
}
947
/// Unified API for bubbling semantics dirty flags from a node to the root (Composer context).
///
/// This mirrors [`bubble_layout_dirty_in_composer`] but routes through the semantics
/// dirty flag instead of the layout one. Modifier nodes can request semantics
/// invalidations without triggering measure/layout work, and the runtime can
/// query the root to determine whether the semantics tree needs rebuilding.
pub fn bubble_semantics_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    // Delegates to the composer-context walker, which accesses nodes via `with_node_mut`.
    bubble_semantics_dirty_composer::<N>(node_id);
}
957
958/// Internal implementation for applier-based bubbling.
959fn bubble_layout_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
960    // First, mark the starting node dirty (critical!)
961    // This ensures root gets marked even if it has no parent
962    if let Ok(node) = applier.get_mut(node_id) {
963        node.mark_needs_layout();
964    }
965
966    // Then bubble up to ancestors
967    loop {
968        // Get parent of current node
969        let parent_id = match applier.get_mut(node_id) {
970            Ok(node) => node.parent(),
971            Err(_) => None,
972        };
973
974        match parent_id {
975            Some(pid) => {
976                // Mark parent as needing layout
977                if let Ok(parent) = applier.get_mut(pid) {
978                    let parent_already_dirty = parent.needs_layout();
979                    if !parent_already_dirty {
980                        parent.mark_needs_layout();
981                    }
982                    node_id = pid;
983                } else {
984                    break;
985                }
986            }
987            None => break, // No parent, stop
988        }
989    }
990}
991
992/// Internal implementation for applier-based bubbling of measure dirtiness.
993fn bubble_measure_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
994    // First, mark the starting node as needing measure
995    if let Ok(node) = applier.get_mut(node_id) {
996        node.mark_needs_measure();
997    }
998
999    // Then bubble up to ancestors
1000    loop {
1001        // Get parent of current node
1002        let parent_id = match applier.get_mut(node_id) {
1003            Ok(node) => node.parent(),
1004            Err(_) => None,
1005        };
1006
1007        match parent_id {
1008            Some(pid) => {
1009                // Mark parent as needing measure
1010                if let Ok(parent) = applier.get_mut(pid) {
1011                    if !parent.needs_measure() {
1012                        parent.mark_needs_measure();
1013                    }
1014                    node_id = pid;
1015                } else {
1016                    break;
1017                }
1018            }
1019            None => {
1020                break; // No parent, stop
1021            }
1022        }
1023    }
1024}
1025
1026/// Internal implementation for applier-based bubbling of semantics dirtiness.
1027fn bubble_semantics_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1028    if let Ok(node) = applier.get_mut(node_id) {
1029        node.mark_needs_semantics();
1030    }
1031
1032    loop {
1033        let parent_id = match applier.get_mut(node_id) {
1034            Ok(node) => node.parent(),
1035            Err(_) => None,
1036        };
1037
1038        match parent_id {
1039            Some(pid) => {
1040                if let Ok(parent) = applier.get_mut(pid) {
1041                    if !parent.needs_semantics() {
1042                        parent.mark_needs_semantics();
1043                    }
1044                    node_id = pid;
1045                } else {
1046                    break;
1047                }
1048            }
1049            None => break,
1050        }
1051    }
1052}
1053
1054/// Internal implementation for composer-based bubbling.
1055/// This uses with_node_mut and works during composition with a concrete node type.
1056/// The node type N must implement Node (which includes mark_needs_layout, parent, etc.).
1057fn bubble_layout_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1058    // Mark the starting node dirty
1059    let _ = with_node_mut(node_id, |node: &mut N| {
1060        node.mark_needs_layout();
1061    });
1062
1063    // Then bubble up to ancestors
1064    while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1065        let parent_id = pid;
1066
1067        // Mark parent as needing layout
1068        let advanced = with_node_mut(parent_id, |node: &mut N| {
1069            if !node.needs_layout() {
1070                node.mark_needs_layout();
1071            }
1072            true
1073        })
1074        .unwrap_or(false);
1075
1076        if advanced {
1077            node_id = parent_id;
1078        } else {
1079            break;
1080        }
1081    }
1082}
1083
1084/// Internal implementation for composer-based bubbling of semantics dirtiness.
1085fn bubble_semantics_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1086    // Mark the starting node semantics-dirty.
1087    let _ = with_node_mut(node_id, |node: &mut N| {
1088        node.mark_needs_semantics();
1089    });
1090
1091    while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1092        let parent_id = pid;
1093
1094        let advanced = with_node_mut(parent_id, |node: &mut N| {
1095            if !node.needs_semantics() {
1096                node.mark_needs_semantics();
1097            }
1098            true
1099        })
1100        .unwrap_or(false);
1101
1102        if advanced {
1103            node_id = parent_id;
1104        } else {
1105            break;
1106        }
1107    }
1108}
1109
impl dyn Node {
    /// Upcasts a type-erased `dyn Node` to `dyn Any` so callers can downcast
    /// it to a concrete node type.
    pub fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
1115
/// A node shell retained after removal so it can be reused instead of
/// reallocated (see `Applier::take_recycled_node`).
pub struct RecycledNode {
    // Node ID the shell was associated with when it entered the recycle pool.
    stable_id: NodeId,
    // The boxed node shell kept alive for reuse.
    node: Box<dyn Node>,
    // True when the shell came from the warm recycle path.
    warm_origin: bool,
}
1121
1122impl RecycledNode {
1123    fn new(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
1124        let node = node.rehouse_for_recycle().unwrap_or(node);
1125        Self {
1126            stable_id,
1127            node,
1128            warm_origin,
1129        }
1130    }
1131
1132    fn from_shell(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
1133        Self {
1134            stable_id,
1135            node,
1136            warm_origin,
1137        }
1138    }
1139
1140    pub fn stable_id(&self) -> NodeId {
1141        self.stable_id
1142    }
1143
1144    fn warm_origin(&self) -> bool {
1145        self.warm_origin
1146    }
1147
1148    fn set_warm_origin(&mut self, warm_origin: bool) {
1149        self.warm_origin = warm_origin;
1150    }
1151
1152    pub fn node_mut(&mut self) -> &mut dyn Node {
1153        self.node.as_mut()
1154    }
1155
1156    pub fn into_parts(self) -> (NodeId, Box<dyn Node>, bool) {
1157        (self.stable_id, self.node, self.warm_origin)
1158    }
1159}
1160
/// Backend abstraction that owns the node tree mutated during the apply phase.
pub trait Applier: Any {
    /// Stores a new node and returns the ID assigned to it.
    fn create(&mut self, node: Box<dyn Node>) -> NodeId;
    /// Mutable access to the node stored under `id`.
    fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError>;
    /// Removes the node stored under `id`.
    fn remove(&mut self, id: NodeId) -> Result<(), NodeError>;

    /// Returns the current generation for a node index.
    /// Generation is incremented when an index is reused from the freelist,
    /// preventing stale slot entries from matching recycled nodes.
    fn node_generation(&self, id: NodeId) -> u32;

    /// Inserts a node with a pre-assigned ID.
    ///
    /// This is used for virtual nodes whose IDs are allocated separately
    /// (e.g., via allocate_virtual_node_id()). Unlike `create()` which assigns
    /// a new ID, this method uses the provided ID.
    ///
    /// Returns Ok(()) if successful, or an error if the ID is already in use.
    fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError>;

    /// Borrows this applier as `Any` so callers can downcast to the concrete type.
    fn as_any(&self) -> &dyn Any
    where
        Self: Sized,
    {
        self
    }

    /// Mutably borrows this applier as `Any` for downcasting.
    fn as_any_mut(&mut self) -> &mut dyn Any
    where
        Self: Sized,
    {
        self
    }

    /// Trim trailing tombstones/unused capacity after structural changes.
    fn compact(&mut self) {}

    /// Returns a previously recycled node shell and its stable ID for the requested concrete type.
    fn take_recycled_node(&mut self, _key: TypeId) -> Option<RecycledNode> {
        None
    }

    /// Marks whether a reinserted recycled node originated from the warm recycle path.
    fn set_recycled_node_origin(&mut self, _id: NodeId, _warm_origin: bool) {}

    /// Seeds a warm recyclable shell for future reuse without requiring a prior removal.
    fn seed_recycled_node_shell(
        &mut self,
        _key: TypeId,
        _recycle_pool_limit: Option<usize>,
        _shell: Box<dyn Node>,
    ) {
    }

    /// Records that the current apply pass had to allocate a fresh recyclable shell for this type.
    fn record_fresh_recyclable_creation(&mut self, _key: TypeId) {}

    /// Drops any recyclable shells that should not survive beyond the current apply pass.
    fn clear_recycled_nodes(&mut self) {}
}
1220
// Monomorphized updater run against a type-erased node during command playback.
type TypedNodeUpdate = fn(&mut dyn Node, NodeId) -> Result<(), NodeError>;
// One-shot closure executed against the applier during command playback.
type CommandCallback = Box<dyn FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static>;
1223
/// Selects which dirty-flag walks a queued bubble command should run.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) struct DirtyBubble {
    // Run the needs-layout walk.
    layout: bool,
    // Run the needs-measure walk.
    measure: bool,
    // Run the needs-semantics walk.
    semantics: bool,
}
1230
1231impl DirtyBubble {
1232    pub(crate) const LAYOUT_AND_MEASURE: Self = Self {
1233        layout: true,
1234        measure: true,
1235        semantics: false,
1236    };
1237
1238    pub(crate) const SEMANTICS: Self = Self {
1239        layout: false,
1240        measure: false,
1241        semantics: true,
1242    };
1243
1244    fn apply(self, applier: &mut dyn Applier, node_id: NodeId) {
1245        if self.layout {
1246            bubble_layout_dirty(applier, node_id);
1247        }
1248        if self.measure {
1249            bubble_measure_dirty(applier, node_id);
1250        }
1251        if self.semantics {
1252            bubble_semantics_dirty(applier, node_id);
1253        }
1254    }
1255}
1256
/// Tree mutations recorded during composition and replayed against the
/// applier in the apply phase (see `Command::apply_with_cleanup`).
pub(crate) enum Command {
    /// Run the dirty walks selected by `bubble`, starting at `node_id`.
    BubbleDirty {
        node_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Run a monomorphized updater against the node stored under `id`.
    UpdateTypedNode {
        id: NodeId,
        updater: TypedNodeUpdate,
    },
    /// Unmount and delete the node stored under `id`.
    RemoveNode {
        id: NodeId,
    },
    /// Assign `id` to the node and invoke its mount hook.
    MountNode {
        id: NodeId,
    },
    /// Attach `child_id` under `parent_id` (reparenting if needed), then bubble.
    AttachChild {
        parent_id: NodeId,
        child_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Attach `child_id` under `parent_id`, then move it from `appended_index`
    /// to `insert_index` when the two differ.
    InsertChild {
        parent_id: NodeId,
        child_id: NodeId,
        appended_index: usize,
        insert_index: usize,
        bubble: DirtyBubble,
    },
    /// Reorder an existing child of `parent_id` from `from_index` to `to_index`.
    MoveChild {
        parent_id: NodeId,
        from_index: usize,
        to_index: usize,
        bubble: DirtyBubble,
    },
    /// Detach `child_id` from `parent_id`; final cleanup is deferred.
    RemoveChild {
        parent_id: NodeId,
        child_id: NodeId,
    },
    /// Reconcile `parent_id`'s child list against `expected_children`.
    SyncChildren {
        parent_id: NodeId,
        expected_children: ChildList,
    },
    /// Arbitrary one-shot mutation executed against the applier.
    Callback(CommandCallback),
}
1300
/// A detached child recorded for cleanup after the triggering command finishes.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct DeferredChildCleanup {
    // ID of the detached child node.
    child_id: NodeId,
    // Node generation captured at detach time, so a reused index is not
    // mistaken for the original node (see `Applier::node_generation`).
    generation: u32,
    // Whether the child was already unlinked from its parent's child list.
    removed_from_parent: bool,
}

/// FIFO queue of detached children awaiting cleanup.
#[derive(Default)]
struct DeferredChildCleanupQueue {
    pending: Vec<DeferredChildCleanup>,
}
1312
1313impl DeferredChildCleanupQueue {
1314    fn push(&mut self, child_id: NodeId, generation: u32, removed_from_parent: bool) {
1315        self.pending.push(DeferredChildCleanup {
1316            child_id,
1317            generation,
1318            removed_from_parent,
1319        });
1320    }
1321
1322    fn flush(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
1323        for cleanup in self.pending {
1324            cleanup_detached_child(applier, cleanup)?;
1325        }
1326        Ok(())
1327    }
1328}
1329
impl Command {
    /// Builds an [`Command::UpdateTypedNode`] whose updater is monomorphized for `N`.
    pub(crate) fn update_node<N: Node + 'static>(id: NodeId) -> Self {
        Self::UpdateTypedNode {
            id,
            updater: update_typed_node::<N>,
        }
    }

    /// Wraps a one-shot closure as a [`Command::Callback`].
    pub(crate) fn callback(
        callback: impl FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static,
    ) -> Self {
        Self::Callback(Box::new(callback))
    }

    /// Applies this single command and immediately flushes any child cleanup
    /// it deferred.
    pub(crate) fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
        let mut deferred_cleanup = DeferredChildCleanupQueue::default();
        self.apply_with_cleanup(applier, &mut deferred_cleanup)?;
        deferred_cleanup.flush(applier)
    }

    /// Applies this command, recording detached children into
    /// `deferred_cleanup` rather than destroying them inline.
    ///
    /// Missing target nodes are tolerated for several variants (the node may
    /// have been removed earlier in the same batch); other errors propagate.
    fn apply_with_cleanup(
        self,
        applier: &mut dyn Applier,
        deferred_cleanup: &mut DeferredChildCleanupQueue,
    ) -> Result<(), NodeError> {
        match self {
            Self::BubbleDirty { node_id, bubble } => {
                bubble.apply(applier, node_id);
                Ok(())
            }
            Self::UpdateTypedNode { id, updater } => {
                // A missing node is a no-op; any other lookup error propagates.
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                updater(node, id)
            }
            Self::RemoveNode { id } => {
                // Unmount (if still present), then remove; an already-missing
                // node counts as success.
                if let Ok(node) = applier.get_mut(id) {
                    node.unmount();
                }
                match applier.remove(id) {
                    Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
                    Err(err) => Err(err),
                }
            }
            Self::MountNode { id } => {
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                // Assign the ID before mounting so hooks see the final identity.
                node.set_node_id(id);
                node.mount();
                Ok(())
            }
            Self::AttachChild {
                parent_id,
                child_id,
                bubble,
            } => {
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::InsertChild {
                parent_id,
                child_id,
                appended_index,
                insert_index,
                bubble,
            } => {
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                // The child was appended at `appended_index`; shift it to the
                // requested slot only when the two differ.
                if insert_index != appended_index {
                    if let Ok(parent_node) = applier.get_mut(parent_id) {
                        parent_node.move_child(appended_index, insert_index);
                    }
                }
                Ok(())
            }
            Self::MoveChild {
                parent_id,
                from_index,
                to_index,
                bubble,
            } => {
                if let Ok(parent_node) = applier.get_mut(parent_id) {
                    parent_node.move_child(from_index, to_index);
                }
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::RemoveChild {
                parent_id,
                child_id,
            } => apply_remove_child(applier, parent_id, child_id, deferred_cleanup),
            Self::SyncChildren {
                parent_id,
                expected_children,
            } => sync_children(applier, parent_id, &expected_children, deferred_cleanup),
            Self::Callback(callback) => callback(applier),
        }
    }
}
1436
// Number of command tags held by each fixed-capacity chunk in `CommandQueue`.
const COMMAND_CHUNK_CAPACITY: usize = 1024;
// NOTE(review): presumably the queued-command count that triggers a flush —
// confirm at the call sites that compare against it.
const COMMAND_FLUSH_THRESHOLD: usize = COMMAND_CHUNK_CAPACITY * 4;
// Child ID lists stay inline up to 4 entries before spilling to the heap.
type ChildList = SmallVec<[NodeId; 4]>;
// NOTE(review): looks like the child-sync diff switches from a linear scan to
// a keyed strategy above this size — confirm in `sync_children`.
const SMALL_CHILD_SYNC_LINEAR_THRESHOLD: usize = 8;
1441
/// Discriminant-only record of a queued command. Payloads live in the
/// per-kind vectors on `CommandQueue` and are matched back up in queue order
/// during `CommandQueue::apply`.
#[derive(Copy, Clone)]
enum CommandTag {
    BubbleDirty,
    UpdateTypedNode,
    RemoveNode,
    MountNode,
    AttachChild,
    InsertChild,
    MoveChild,
    RemoveChild,
    SyncChildren,
    Callback,
}

/// Payload for [`CommandTag::BubbleDirty`].
#[derive(Copy, Clone)]
struct BubbleDirtyCommand {
    node_id: NodeId,
    bubble: DirtyBubble,
}

/// Payload for [`CommandTag::UpdateTypedNode`].
#[derive(Copy, Clone)]
struct UpdateTypedNodeCommand {
    id: NodeId,
    updater: TypedNodeUpdate,
}

/// Payload for [`CommandTag::AttachChild`].
#[derive(Copy, Clone)]
struct AttachChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    bubble: DirtyBubble,
}

/// Payload for [`CommandTag::InsertChild`].
#[derive(Copy, Clone)]
struct InsertChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    appended_index: usize,
    insert_index: usize,
    bubble: DirtyBubble,
}

/// Payload for [`CommandTag::MoveChild`].
#[derive(Copy, Clone)]
struct MoveChildCommand {
    parent_id: NodeId,
    from_index: usize,
    to_index: usize,
    bubble: DirtyBubble,
}

/// Payload for [`CommandTag::RemoveChild`].
#[derive(Copy, Clone)]
struct RemoveChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
}

/// Payload for [`CommandTag::SyncChildren`]. The expected child IDs are the
/// slice `CommandQueue::sync_child_ids[child_start..child_start + child_len]`.
struct SyncChildrenCommand {
    parent_id: NodeId,
    child_start: usize,
    child_len: usize,
}
1503
/// Struct-of-arrays command buffer: execution order is recorded as
/// [`CommandTag`]s in fixed-capacity chunks, while each command kind's
/// payload is appended to its own vector and consumed in the same order
/// when the queue is applied.
#[derive(Default)]
struct CommandQueue {
    // Execution order, as tags, split into fixed-capacity chunks.
    chunks: Vec<Vec<CommandTag>>,
    // Total number of queued tags across all chunks.
    len: usize,
    // Per-kind payload vectors, drained in queue order during apply.
    bubble_dirty: Vec<BubbleDirtyCommand>,
    update_typed_nodes: Vec<UpdateTypedNodeCommand>,
    remove_nodes: Vec<NodeId>,
    mount_nodes: Vec<NodeId>,
    attach_children: Vec<AttachChildCommand>,
    insert_children: Vec<InsertChildCommand>,
    move_children: Vec<MoveChildCommand>,
    remove_children: Vec<RemoveChildCommand>,
    sync_children: Vec<SyncChildrenCommand>,
    // Shared arena of expected child IDs referenced by `SyncChildrenCommand` ranges.
    sync_child_ids: Vec<NodeId>,
    callbacks: Vec<CommandCallback>,
}
1520
1521impl CommandQueue {
1522    fn push_tag(&mut self, tag: CommandTag) {
1523        let needs_chunk = self
1524            .chunks
1525            .last()
1526            .map(|chunk| chunk.len() == chunk.capacity())
1527            .unwrap_or(true);
1528        if needs_chunk {
1529            self.chunks.push(Vec::with_capacity(COMMAND_CHUNK_CAPACITY));
1530        }
1531        self.chunks
1532            .last_mut()
1533            .expect("command chunk should exist")
1534            .push(tag);
1535        self.len += 1;
1536    }
1537
    /// Enqueues a [`Command`], splitting it into a tag (which fixes execution
    /// order) and a payload appended to the matching per-kind vector.
    fn push(&mut self, command: Command) {
        match command {
            Command::BubbleDirty { node_id, bubble } => {
                self.bubble_dirty
                    .push(BubbleDirtyCommand { node_id, bubble });
                self.push_tag(CommandTag::BubbleDirty);
            }
            Command::UpdateTypedNode { id, updater } => {
                self.update_typed_nodes
                    .push(UpdateTypedNodeCommand { id, updater });
                self.push_tag(CommandTag::UpdateTypedNode);
            }
            Command::RemoveNode { id } => {
                self.remove_nodes.push(id);
                self.push_tag(CommandTag::RemoveNode);
            }
            Command::MountNode { id } => {
                self.mount_nodes.push(id);
                self.push_tag(CommandTag::MountNode);
            }
            Command::AttachChild {
                parent_id,
                child_id,
                bubble,
            } => {
                self.attach_children.push(AttachChildCommand {
                    parent_id,
                    child_id,
                    bubble,
                });
                self.push_tag(CommandTag::AttachChild);
            }
            Command::InsertChild {
                parent_id,
                child_id,
                appended_index,
                insert_index,
                bubble,
            } => {
                self.insert_children.push(InsertChildCommand {
                    parent_id,
                    child_id,
                    appended_index,
                    insert_index,
                    bubble,
                });
                self.push_tag(CommandTag::InsertChild);
            }
            Command::MoveChild {
                parent_id,
                from_index,
                to_index,
                bubble,
            } => {
                self.move_children.push(MoveChildCommand {
                    parent_id,
                    from_index,
                    to_index,
                    bubble,
                });
                self.push_tag(CommandTag::MoveChild);
            }
            Command::RemoveChild {
                parent_id,
                child_id,
            } => {
                self.remove_children.push(RemoveChildCommand {
                    parent_id,
                    child_id,
                });
                self.push_tag(CommandTag::RemoveChild);
            }
            Command::SyncChildren {
                parent_id,
                expected_children,
            } => {
                // The expected child IDs are flattened into the shared
                // `sync_child_ids` arena; the command records only the range.
                let child_start = self.sync_child_ids.len();
                let child_len = expected_children.len();
                self.sync_child_ids.extend(expected_children);
                self.sync_children.push(SyncChildrenCommand {
                    parent_id,
                    child_start,
                    child_len,
                });
                self.push_tag(CommandTag::SyncChildren);
            }
            Command::Callback(callback) => {
                self.callbacks.push(callback);
                self.push_tag(CommandTag::Callback);
            }
        }
    }
1630
    /// Number of commands currently queued (total tags across all chunks).
    fn len(&self) -> usize {
        self.len
    }
1634
1635    fn capacity(&self) -> usize {
1636        self.chunks.iter().map(Vec::capacity).sum()
1637    }
1638
1639    fn payload_len_bytes(&self) -> usize {
1640        self.bubble_dirty
1641            .len()
1642            .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1643            .saturating_add(
1644                self.update_typed_nodes
1645                    .len()
1646                    .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1647            )
1648            .saturating_add(
1649                self.remove_nodes
1650                    .len()
1651                    .saturating_mul(std::mem::size_of::<NodeId>()),
1652            )
1653            .saturating_add(
1654                self.mount_nodes
1655                    .len()
1656                    .saturating_mul(std::mem::size_of::<NodeId>()),
1657            )
1658            .saturating_add(
1659                self.attach_children
1660                    .len()
1661                    .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1662            )
1663            .saturating_add(
1664                self.insert_children
1665                    .len()
1666                    .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1667            )
1668            .saturating_add(
1669                self.move_children
1670                    .len()
1671                    .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1672            )
1673            .saturating_add(
1674                self.remove_children
1675                    .len()
1676                    .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1677            )
1678            .saturating_add(
1679                self.sync_children
1680                    .len()
1681                    .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1682            )
1683            .saturating_add(
1684                self.sync_child_ids
1685                    .len()
1686                    .saturating_mul(std::mem::size_of::<NodeId>()),
1687            )
1688            .saturating_add(
1689                self.callbacks
1690                    .len()
1691                    .saturating_mul(std::mem::size_of::<CommandCallback>()),
1692            )
1693    }
1694
1695    fn payload_capacity_bytes(&self) -> usize {
1696        self.bubble_dirty
1697            .capacity()
1698            .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1699            .saturating_add(
1700                self.update_typed_nodes
1701                    .capacity()
1702                    .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1703            )
1704            .saturating_add(
1705                self.remove_nodes
1706                    .capacity()
1707                    .saturating_mul(std::mem::size_of::<NodeId>()),
1708            )
1709            .saturating_add(
1710                self.mount_nodes
1711                    .capacity()
1712                    .saturating_mul(std::mem::size_of::<NodeId>()),
1713            )
1714            .saturating_add(
1715                self.attach_children
1716                    .capacity()
1717                    .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1718            )
1719            .saturating_add(
1720                self.insert_children
1721                    .capacity()
1722                    .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1723            )
1724            .saturating_add(
1725                self.move_children
1726                    .capacity()
1727                    .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1728            )
1729            .saturating_add(
1730                self.remove_children
1731                    .capacity()
1732                    .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1733            )
1734            .saturating_add(
1735                self.sync_children
1736                    .capacity()
1737                    .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1738            )
1739            .saturating_add(
1740                self.sync_child_ids
1741                    .capacity()
1742                    .saturating_mul(std::mem::size_of::<NodeId>()),
1743            )
1744            .saturating_add(
1745                self.callbacks
1746                    .capacity()
1747                    .saturating_mul(std::mem::size_of::<CommandCallback>()),
1748            )
1749    }
1750
1751    fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
1752        let mut bubble_dirty = self.bubble_dirty.into_iter();
1753        let mut update_typed_nodes = self.update_typed_nodes.into_iter();
1754        let mut remove_nodes = self.remove_nodes.into_iter();
1755        let mut mount_nodes = self.mount_nodes.into_iter();
1756        let mut attach_children = self.attach_children.into_iter();
1757        let mut insert_children = self.insert_children.into_iter();
1758        let mut move_children = self.move_children.into_iter();
1759        let mut remove_children = self.remove_children.into_iter();
1760        let mut sync_children_commands = self.sync_children.into_iter();
1761        let sync_child_ids = self.sync_child_ids;
1762        let mut callbacks = self.callbacks.into_iter();
1763        let mut deferred_cleanup = DeferredChildCleanupQueue::default();
1764
1765        for chunk in self.chunks {
1766            for tag in chunk {
1767                match tag {
1768                    CommandTag::BubbleDirty => {
1769                        let BubbleDirtyCommand { node_id, bubble } =
1770                            bubble_dirty.next().expect("missing BubbleDirty payload");
1771                        Command::BubbleDirty { node_id, bubble }
1772                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1773                    }
1774                    CommandTag::UpdateTypedNode => {
1775                        let UpdateTypedNodeCommand { id, updater } = update_typed_nodes
1776                            .next()
1777                            .expect("missing UpdateTypedNode payload");
1778                        Command::UpdateTypedNode { id, updater }
1779                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1780                    }
1781                    CommandTag::RemoveNode => {
1782                        let id = remove_nodes.next().expect("missing RemoveNode payload");
1783                        Command::RemoveNode { id }
1784                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1785                    }
1786                    CommandTag::MountNode => {
1787                        let id = mount_nodes.next().expect("missing MountNode payload");
1788                        Command::MountNode { id }
1789                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1790                    }
1791                    CommandTag::AttachChild => {
1792                        let AttachChildCommand {
1793                            parent_id,
1794                            child_id,
1795                            bubble,
1796                        } = attach_children.next().expect("missing AttachChild payload");
1797                        Command::AttachChild {
1798                            parent_id,
1799                            child_id,
1800                            bubble,
1801                        }
1802                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1803                    }
1804                    CommandTag::InsertChild => {
1805                        let InsertChildCommand {
1806                            parent_id,
1807                            child_id,
1808                            appended_index,
1809                            insert_index,
1810                            bubble,
1811                        } = insert_children.next().expect("missing InsertChild payload");
1812                        Command::InsertChild {
1813                            parent_id,
1814                            child_id,
1815                            appended_index,
1816                            insert_index,
1817                            bubble,
1818                        }
1819                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1820                    }
1821                    CommandTag::MoveChild => {
1822                        let MoveChildCommand {
1823                            parent_id,
1824                            from_index,
1825                            to_index,
1826                            bubble,
1827                        } = move_children.next().expect("missing MoveChild payload");
1828                        Command::MoveChild {
1829                            parent_id,
1830                            from_index,
1831                            to_index,
1832                            bubble,
1833                        }
1834                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1835                    }
1836                    CommandTag::RemoveChild => {
1837                        let RemoveChildCommand {
1838                            parent_id,
1839                            child_id,
1840                        } = remove_children.next().expect("missing RemoveChild payload");
1841                        Command::RemoveChild {
1842                            parent_id,
1843                            child_id,
1844                        }
1845                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1846                    }
1847                    CommandTag::SyncChildren => {
1848                        let SyncChildrenCommand {
1849                            parent_id,
1850                            child_start,
1851                            child_len,
1852                        } = sync_children_commands
1853                            .next()
1854                            .expect("missing SyncChildren payload");
1855                        let expected_children =
1856                            &sync_child_ids[child_start..child_start + child_len];
1857                        sync_children(
1858                            applier,
1859                            parent_id,
1860                            expected_children,
1861                            &mut deferred_cleanup,
1862                        )?;
1863                    }
1864                    CommandTag::Callback => {
1865                        let callback = callbacks.next().expect("missing Callback payload");
1866                        Command::Callback(callback)
1867                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1868                    }
1869                }
1870            }
1871        }
1872
1873        debug_assert!(bubble_dirty.next().is_none());
1874        debug_assert!(update_typed_nodes.next().is_none());
1875        debug_assert!(remove_nodes.next().is_none());
1876        debug_assert!(mount_nodes.next().is_none());
1877        debug_assert!(attach_children.next().is_none());
1878        debug_assert!(insert_children.next().is_none());
1879        debug_assert!(move_children.next().is_none());
1880        debug_assert!(remove_children.next().is_none());
1881        debug_assert!(sync_children_commands.next().is_none());
1882        debug_assert!(callbacks.next().is_none());
1883        deferred_cleanup.flush(applier)
1884    }
1885}
1886
1887fn update_typed_node<N: Node + 'static>(node: &mut dyn Node, id: NodeId) -> Result<(), NodeError> {
1888    let typed = node
1889        .as_any_mut()
1890        .downcast_mut::<N>()
1891        .ok_or(NodeError::TypeMismatch {
1892            id,
1893            expected: std::any::type_name::<N>(),
1894        })?;
1895    typed.update();
1896    Ok(())
1897}
1898
1899fn insert_child_with_reparenting(applier: &mut dyn Applier, parent_id: NodeId, child_id: NodeId) {
1900    let old_parent = applier
1901        .get_mut(child_id)
1902        .ok()
1903        .and_then(|node| node.parent());
1904    if let Some(old_parent_id) = old_parent {
1905        if old_parent_id != parent_id {
1906            if let Ok(old_parent_node) = applier.get_mut(old_parent_id) {
1907                old_parent_node.remove_child(child_id);
1908            }
1909            if let Ok(child_node) = applier.get_mut(child_id) {
1910                child_node.on_removed_from_parent();
1911            }
1912            bubble_layout_dirty(applier, old_parent_id);
1913            bubble_measure_dirty(applier, old_parent_id);
1914        }
1915    }
1916
1917    if let Ok(parent_node) = applier.get_mut(parent_id) {
1918        parent_node.insert_child(child_id);
1919    }
1920    if let Ok(child_node) = applier.get_mut(child_id) {
1921        child_node.on_attached_to_parent(parent_id);
1922    }
1923}
1924
/// Detaches `child_id` from `parent_id` and queues the child for deferred
/// cleanup instead of destroying it immediately.
///
/// The child is only queued when it was still parented to `parent_id` or had
/// no parent at all; a child already re-adopted by a different parent is left
/// untouched so reparenting moves stay safe.
fn apply_remove_child(
    applier: &mut dyn Applier,
    parent_id: NodeId,
    child_id: NodeId,
    deferred_cleanup: &mut DeferredChildCleanupQueue,
) -> Result<(), NodeError> {
    if let Ok(parent_node) = applier.get_mut(parent_id) {
        parent_node.remove_child(child_id);
    }
    bubble_layout_dirty(applier, parent_id);
    bubble_measure_dirty(applier, parent_id);

    // Capture the generation before touching the child so the deferred
    // cleanup can later detect stable-ID reuse.
    let generation = applier.node_generation(child_id);
    let removed_from_parent = if let Ok(node) = applier.get_mut(child_id) {
        match node.parent() {
            Some(existing_parent_id) if existing_parent_id == parent_id => {
                node.on_removed_from_parent();
                true
            }
            // No parent recorded: still queue cleanup, but the removal
            // hook has not run yet (cleanup will run it).
            None => false,
            // Adopted by another parent in the meantime: do not destroy.
            Some(_) => return Ok(()),
        }
    } else {
        // Child no longer exists; nothing to clean up.
        return Ok(());
    };

    deferred_cleanup.push(child_id, generation, removed_from_parent);
    Ok(())
}
1954
1955fn cleanup_detached_child(
1956    applier: &mut dyn Applier,
1957    cleanup: DeferredChildCleanup,
1958) -> Result<(), NodeError> {
1959    if applier.node_generation(cleanup.child_id) != cleanup.generation {
1960        return Ok(());
1961    }
1962
1963    let parent_id = match applier.get_mut(cleanup.child_id) {
1964        Ok(node) => node.parent(),
1965        Err(NodeError::Missing { .. }) => return Ok(()),
1966        Err(err) => return Err(err),
1967    };
1968    if parent_id.is_some() {
1969        return Ok(());
1970    }
1971
1972    if let Ok(node) = applier.get_mut(cleanup.child_id) {
1973        if !cleanup.removed_from_parent {
1974            node.on_removed_from_parent();
1975        }
1976        node.unmount();
1977    }
1978    match applier.remove(cleanup.child_id) {
1979        Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
1980        Err(err) => Err(err),
1981    }
1982}
1983
1984fn remove_child_and_cleanup_now(
1985    applier: &mut dyn Applier,
1986    parent_id: NodeId,
1987    child_id: NodeId,
1988) -> Result<(), NodeError> {
1989    let mut deferred_cleanup = DeferredChildCleanupQueue::default();
1990    apply_remove_child(applier, parent_id, child_id, &mut deferred_cleanup)?;
1991    deferred_cleanup.flush(applier)
1992}
1993
1994fn collect_current_children(applier: &mut dyn Applier, parent_id: NodeId) -> ChildList {
1995    let mut scratch = SmallVec::<[NodeId; 8]>::new();
1996    if let Ok(node) = applier.get_mut(parent_id) {
1997        node.collect_children_into(&mut scratch);
1998    }
1999    let mut current = ChildList::new();
2000    current.extend(scratch);
2001    current
2002}
2003
/// Diffs `parent_id`'s actual children against `expected_children` and
/// issues the minimal remove/move/insert commands, then repairs any stale
/// parent pointers via `reconcile_children`.
fn sync_children(
    applier: &mut dyn Applier,
    parent_id: NodeId,
    expected_children: &[NodeId],
    deferred_cleanup: &mut DeferredChildCleanupQueue,
) -> Result<(), NodeError> {
    let mut current = collect_current_children(applier, parent_id);
    let children_changed = current.as_slice() != expected_children;

    if children_changed {
        // Small lists are diffed with linear scans; larger ones build a
        // position map to avoid quadratic lookups.
        if current.len().max(expected_children.len()) <= SMALL_CHILD_SYNC_LINEAR_THRESHOLD {
            sync_children_small(
                applier,
                parent_id,
                &mut current,
                expected_children,
                deferred_cleanup,
            )?;
        } else {
            // Map each desired child to its target position for O(1) lookups.
            let mut target_positions: HashMap<NodeId, usize> = HashMap::default();
            target_positions.reserve(expected_children.len());
            for (index, &child) in expected_children.iter().enumerate() {
                target_positions.insert(child, index);
            }

            // Pass 1: drop children no longer expected. Reverse iteration
            // keeps the remaining indices valid while removing.
            for index in (0..current.len()).rev() {
                let child = current[index];
                if !target_positions.contains_key(&child) {
                    current.remove(index);
                    apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
                }
            }

            // Pass 2: move survivors into place and insert missing children,
            // mirroring every command in the local diff state so indices
            // stay consistent with what the applier sees.
            let mut current_positions = build_child_positions(&current);
            for (target_index, &child) in expected_children.iter().enumerate() {
                if let Some(current_index) = current_positions.get(&child).copied() {
                    if current_index != target_index {
                        let from_index = current_index;
                        let to_index = move_child_in_diff_state(
                            &mut current,
                            &mut current_positions,
                            from_index,
                            target_index,
                        );
                        Command::MoveChild {
                            parent_id,
                            from_index,
                            to_index,
                            bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                        }
                        .apply(applier)?;
                    }
                } else {
                    let insert_index = target_index.min(current.len());
                    let appended_index = current.len();
                    insert_child_into_diff_state(
                        &mut current,
                        &mut current_positions,
                        insert_index,
                        child,
                    );
                    Command::InsertChild {
                        parent_id,
                        child_id: child,
                        appended_index,
                        insert_index,
                        bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                    }
                    .apply(applier)?;
                }
            }
        }
    }

    // Always repair parent pointers; only probe the dirty flag when the
    // child list was already in sync (no commands were issued above).
    reconcile_children(applier, parent_id, expected_children, !children_changed)
}
2080
/// Linear-scan variant of `sync_children` used when both lists are at or
/// below `SMALL_CHILD_SYNC_LINEAR_THRESHOLD`. Mutates `current` in lockstep
/// with every command it issues so indices stay valid.
fn sync_children_small(
    applier: &mut dyn Applier,
    parent_id: NodeId,
    current: &mut ChildList,
    expected_children: &[NodeId],
    deferred_cleanup: &mut DeferredChildCleanupQueue,
) -> Result<(), NodeError> {
    // Remove unexpected children first; reverse order keeps earlier
    // indices stable while removing.
    for index in (0..current.len()).rev() {
        let child = current[index];
        if !expected_children.contains(&child) {
            current.remove(index);
            apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
        }
    }

    // Then move or insert until `current` matches `expected_children`.
    for (target_index, &child) in expected_children.iter().enumerate() {
        if let Some(current_index) = current
            .iter()
            .position(|&current_child| current_child == child)
        {
            if current_index != target_index {
                // Simulate the move locally (remove + clamped insert), then
                // issue the matching command.
                let child = current.remove(current_index);
                let to_index = target_index.min(current.len());
                current.insert(to_index, child);
                Command::MoveChild {
                    parent_id,
                    from_index: current_index,
                    to_index,
                    bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                }
                .apply(applier)?;
            }
        } else {
            let insert_index = target_index.min(current.len());
            let appended_index = current.len();
            current.insert(insert_index, child);
            Command::InsertChild {
                parent_id,
                child_id: child,
                appended_index,
                insert_index,
                bubble: DirtyBubble::LAYOUT_AND_MEASURE,
            }
            .apply(applier)?;
        }
    }

    Ok(())
}
2130
2131fn reconcile_children(
2132    applier: &mut dyn Applier,
2133    parent_id: NodeId,
2134    expected_children: &[NodeId],
2135    needs_dirty_check: bool,
2136) -> Result<(), NodeError> {
2137    let mut repaired = false;
2138    for &child_id in expected_children {
2139        let needs_attach = if let Ok(node) = applier.get_mut(child_id) {
2140            node.parent() != Some(parent_id)
2141        } else {
2142            false
2143        };
2144
2145        if needs_attach {
2146            insert_child_with_reparenting(applier, parent_id, child_id);
2147            repaired = true;
2148        }
2149    }
2150
2151    let is_dirty = if needs_dirty_check {
2152        if let Ok(node) = applier.get_mut(parent_id) {
2153            node.needs_layout()
2154        } else {
2155            false
2156        }
2157    } else {
2158        false
2159    };
2160
2161    if repaired {
2162        bubble_layout_dirty(applier, parent_id);
2163        bubble_measure_dirty(applier, parent_id);
2164    } else if is_dirty {
2165        bubble_layout_dirty(applier, parent_id);
2166    }
2167
2168    Ok(())
2169}
2170
/// In-memory node store used as an [`Applier`] backend: dense slot storage
/// for ordinary node IDs, a side map for high-bit IDs, and warm /
/// returning / cold pools of recycled nodes.
#[derive(Default)]
pub struct MemoryApplier {
    // Dense physical slots; `None` marks a tombstone awaiting reuse.
    nodes: Vec<Option<Box<dyn Node>>>,
    /// Stable node ID occupying each physical slot.
    physical_stable_ids: Vec<u32>,
    // Per-slot flag: the occupying node came out of the warm recycle pool.
    physical_warm_recycled_origins: Vec<bool>,
    /// Physical storage location for each live stable node ID.
    stable_to_physical: HashMap<NodeId, usize>,
    /// Generation counter per stable node ID.
    stable_generations: HashMap<NodeId, u32>,
    /// Lowest free physical slots are reused first so the dense storage stays compact.
    free_ids: BinaryHeap<Reverse<usize>>,
    /// Storage for high-ID nodes (like virtual nodes with IDs starting at 0xFFFFFFFF00000000)
    /// that can't be stored in the Vec without causing capacity overflow.
    high_id_nodes: HashMap<NodeId, Box<dyn Node>>,
    // Warm-recycle-origin flags for the high-ID nodes.
    high_id_warm_recycled_origins: HashMap<NodeId, bool>,
    // Generation counters for the high-ID nodes.
    high_id_generations: HashMap<NodeId, u32>,
    // Next stable ID to hand out (monotonically advanced, saturating).
    next_stable_id: NodeId,
    // Optional runtime handle, managed via set/clear/runtime_handle().
    layout_runtime: Option<RuntimeHandle>,
    // Slot table owned by this applier; exposed through `slots()`.
    slots: SlotTable,
    // Warm recycle pool, keyed by concrete node type.
    recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    // Recycled nodes with warm origin, held before rejoining the warm pool
    // (NOTE(review): the promotion path lives outside this view — confirm).
    returning_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    // Recycle pool for nodes without warm origin.
    cold_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    // Optional per-type cap on recycle pool size.
    recycled_node_limits: HashMap<TypeId, usize>,
    // Adaptive per-type target size for the warm pool.
    warm_recycled_node_targets: HashMap<TypeId, usize>,
    // Per-type counters of fresh recyclable creations
    // (NOTE(review): consumers are outside this view — confirm semantics).
    fresh_recyclable_creations: HashMap<TypeId, usize>,
    // Prototype shells (from `rehouse_for_recycle`) kept per node type.
    recycled_node_prototypes: HashMap<TypeId, Box<dyn Node>>,
}
2199
// Stack frame for an iterative child walk during node removal: the node,
// its snapshotted children, and a cursor into them.
// NOTE(review): the traversal using this type is outside this view.
struct RemovalFrame {
    node_id: NodeId,
    children: SmallVec<[NodeId; 8]>,
    next_child: usize,
}
2205
/// Point-in-time length/capacity counters for [`MemoryApplier`] internals,
/// as reported by `MemoryApplier::debug_stats`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct MemoryApplierDebugStats {
    pub next_stable_id: NodeId,
    // `nodes_len` counts live nodes; `nodes_cap` counts physical slots
    // (including tombstones).
    pub nodes_len: usize,
    pub nodes_cap: usize,
    pub physical_stable_ids_len: usize,
    pub physical_stable_ids_cap: usize,
    pub stable_to_physical_len: usize,
    pub stable_to_physical_cap: usize,
    pub stable_generations_len: usize,
    pub stable_generations_cap: usize,
    pub free_ids_len: usize,
    pub free_ids_cap: usize,
    pub high_id_nodes_len: usize,
    pub high_id_nodes_cap: usize,
    pub high_id_generations_len: usize,
    pub high_id_generations_cap: usize,
    // Distinct node types across all three recycle pools / summed map caps.
    pub recycled_type_count: usize,
    pub recycled_type_cap: usize,
    pub recycled_node_count: usize,
    pub recycled_node_capacity: usize,
    pub warm_recycled_node_id_count: usize,
    pub warm_recycled_node_id_capacity: usize,
}
2230
2231impl MemoryApplier {
    // Dense-storage tuning knobs. The growth constants feed
    // `next_dense_node_target_len`; EAGER_COMPACT_NODE_LEN and
    // INVALID_STABLE_ID are used elsewhere in this impl
    // (NOTE(review): their use sites are outside this view).
    const EAGER_COMPACT_NODE_LEN: usize = 1_024;
    const INVALID_STABLE_ID: u32 = u32::MAX;
    const INITIAL_DENSE_NODE_CAP: usize = 32;
    const LARGE_DENSE_NODE_GROWTH_THRESHOLD: usize = 32 * 1024;
    const LARGE_DENSE_NODE_GROWTH_DIVISOR: usize = 4;
2237
2238    fn pack_stable_id(stable_id: NodeId) -> u32 {
2239        u32::try_from(stable_id).expect("stable id overflow")
2240    }
2241
    /// Widens a packed 32-bit slot ID back into a `NodeId`
    /// (lossless: `pack_stable_id` verified the value fits in 32 bits).
    fn unpack_stable_id(stable_id: u32) -> NodeId {
        stable_id as NodeId
    }
2245
2246    fn next_dense_node_target_len(old_len: usize) -> usize {
2247        if old_len < Self::INITIAL_DENSE_NODE_CAP {
2248            return Self::INITIAL_DENSE_NODE_CAP;
2249        }
2250        if old_len < Self::LARGE_DENSE_NODE_GROWTH_THRESHOLD {
2251            return old_len.saturating_mul(2);
2252        }
2253
2254        let incremental_growth =
2255            (old_len / Self::LARGE_DENSE_NODE_GROWTH_DIVISOR).max(Self::INITIAL_DENSE_NODE_CAP);
2256        old_len.saturating_add(incremental_growth)
2257    }
2258
2259    fn ensure_dense_node_storage_capacity(&mut self) {
2260        let len = self
2261            .nodes
2262            .len()
2263            .max(self.physical_stable_ids.len())
2264            .max(self.physical_warm_recycled_origins.len());
2265        if len < self.nodes.capacity()
2266            && len < self.physical_stable_ids.capacity()
2267            && len < self.physical_warm_recycled_origins.capacity()
2268        {
2269            return;
2270        }
2271
2272        let target = Self::next_dense_node_target_len(len);
2273        if self.nodes.capacity() < target {
2274            self.nodes
2275                .reserve_exact(target.saturating_sub(self.nodes.len()));
2276        }
2277        if self.physical_stable_ids.capacity() < target {
2278            self.physical_stable_ids
2279                .reserve_exact(target.saturating_sub(self.physical_stable_ids.len()));
2280        }
2281        if self.physical_warm_recycled_origins.capacity() < target {
2282            self.physical_warm_recycled_origins
2283                .reserve_exact(target.saturating_sub(self.physical_warm_recycled_origins.len()));
2284        }
2285    }
2286
2287    fn ensure_stable_index_capacity(&mut self) {
2288        let len = self
2289            .stable_to_physical
2290            .len()
2291            .max(self.stable_generations.len());
2292        if len < self.stable_to_physical.capacity() && len < self.stable_generations.capacity() {
2293            return;
2294        }
2295
2296        let target = Self::next_dense_node_target_len(len);
2297        let additional = target.saturating_sub(len);
2298        if self.stable_to_physical.capacity() < target {
2299            self.stable_to_physical.reserve(additional);
2300        }
2301        if self.stable_generations.capacity() < target {
2302            self.stable_generations.reserve(additional);
2303        }
2304    }
2305
2306    pub fn new() -> Self {
2307        Self {
2308            nodes: Vec::new(),
2309            physical_stable_ids: Vec::new(),
2310            physical_warm_recycled_origins: Vec::new(),
2311            stable_to_physical: HashMap::default(),
2312            stable_generations: HashMap::default(),
2313            free_ids: BinaryHeap::new(),
2314            high_id_nodes: HashMap::default(),
2315            high_id_warm_recycled_origins: HashMap::default(),
2316            high_id_generations: HashMap::default(),
2317            next_stable_id: 0,
2318            layout_runtime: None,
2319            slots: SlotTable::default(),
2320            recycled_nodes: HashMap::default(),
2321            returning_recycled_nodes: HashMap::default(),
2322            cold_recycled_nodes: HashMap::default(),
2323            recycled_node_limits: HashMap::default(),
2324            warm_recycled_node_targets: HashMap::default(),
2325            fresh_recyclable_creations: HashMap::default(),
2326            recycled_node_prototypes: HashMap::default(),
2327        }
2328    }
2329
    /// Mutable access to the applier-owned slot table.
    pub fn slots(&mut self) -> &mut SlotTable {
        &mut self.slots
    }
2333
2334    pub fn with_node<N: Node + 'static, R>(
2335        &mut self,
2336        id: NodeId,
2337        f: impl FnOnce(&mut N) -> R,
2338    ) -> Result<R, NodeError> {
2339        let physical_id = self
2340            .resolve_node_index(id)
2341            .ok_or(NodeError::Missing { id })?;
2342        let slot = self
2343            .nodes
2344            .get_mut(physical_id)
2345            .ok_or(NodeError::Missing { id })?
2346            .as_deref_mut()
2347            .ok_or(NodeError::Missing { id })?;
2348        let typed = slot
2349            .as_any_mut()
2350            .downcast_mut::<N>()
2351            .ok_or(NodeError::TypeMismatch {
2352                id,
2353                expected: std::any::type_name::<N>(),
2354            })?;
2355        Ok(f(typed))
2356    }
2357
2358    pub fn len(&self) -> usize {
2359        self.nodes.iter().filter(|n| n.is_some()).count()
2360    }
2361
    /// Number of physical slots in the dense storage, including tombstones
    /// (contrast with `len`, which counts only live nodes).
    pub fn capacity(&self) -> usize {
        self.nodes.len()
    }
2365
2366    pub fn tombstone_count(&self) -> usize {
2367        self.nodes.iter().filter(|n| n.is_none()).count()
2368    }
2369
    /// Number of free physical slots queued for reuse.
    pub fn freelist_len(&self) -> usize {
        self.free_ids.len()
    }
2373
    /// Total recycled nodes across the warm, returning, and cold pools.
    pub fn debug_recycled_node_count(&self) -> usize {
        self.total_recycled_node_count()
    }
2377
2378    pub fn debug_recycled_node_count_for<N: Node + 'static>(&self) -> usize {
2379        let key = TypeId::of::<N>();
2380        self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
2381            + self
2382                .returning_recycled_nodes
2383                .get(&key)
2384                .map(Vec::len)
2385                .unwrap_or(0)
2386            + self
2387                .cold_recycled_nodes
2388                .get(&key)
2389                .map(Vec::len)
2390                .unwrap_or(0)
2391    }
2392
    /// Snapshots the applier's internal lengths and capacities for
    /// diagnostics; see [`MemoryApplierDebugStats`] for field meanings.
    pub fn debug_stats(&self) -> MemoryApplierDebugStats {
        // Union of type keys across all three recycle pools.
        let mut recycled_keys: HashSet<TypeId> = HashSet::default();
        recycled_keys.extend(self.recycled_nodes.keys().copied());
        recycled_keys.extend(self.returning_recycled_nodes.keys().copied());
        recycled_keys.extend(self.cold_recycled_nodes.keys().copied());

        MemoryApplierDebugStats {
            next_stable_id: self.next_stable_id,
            nodes_len: self.len(),
            // Physical slot count (incl. tombstones), matching `capacity()`.
            nodes_cap: self.nodes.len(),
            physical_stable_ids_len: self.physical_stable_ids.len(),
            physical_stable_ids_cap: self.physical_stable_ids.capacity(),
            stable_to_physical_len: self.stable_to_physical.len(),
            stable_to_physical_cap: self.stable_to_physical.capacity(),
            stable_generations_len: self.stable_generations.len(),
            stable_generations_cap: self.stable_generations.capacity(),
            free_ids_len: self.free_ids.len(),
            free_ids_cap: self.free_ids.capacity(),
            high_id_nodes_len: self.high_id_nodes.len(),
            high_id_nodes_cap: self.high_id_nodes.capacity(),
            high_id_generations_len: self.high_id_generations.len(),
            high_id_generations_cap: self.high_id_generations.capacity(),
            recycled_type_count: recycled_keys.len(),
            // Map capacities summed across the three pools.
            recycled_type_cap: self.recycled_nodes.capacity()
                + self.returning_recycled_nodes.capacity()
                + self.cold_recycled_nodes.capacity(),
            recycled_node_count: self.total_recycled_node_count(),
            recycled_node_capacity: self.total_recycled_node_capacity(),
            warm_recycled_node_id_count: self.total_warm_recycled_node_id_count(),
            warm_recycled_node_id_capacity: self.total_warm_recycled_node_id_capacity(),
        }
    }
2425
2426    pub fn is_empty(&self) -> bool {
2427        self.len() == 0
2428    }
2429
2430    pub fn debug_live_node_heap_bytes(&self) -> usize {
2431        let dense_nodes = self
2432            .nodes
2433            .iter()
2434            .flatten()
2435            .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2436            .sum::<usize>();
2437        let high_id_nodes = self
2438            .high_id_nodes
2439            .values()
2440            .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2441            .sum::<usize>();
2442        dense_nodes + high_id_nodes
2443    }
2444
2445    pub fn debug_recycled_node_heap_bytes(&self) -> usize {
2446        let pool_bytes = |pools: &HashMap<TypeId, Vec<RecycledNode>>| {
2447            pools
2448                .values()
2449                .flat_map(|nodes| nodes.iter())
2450                .map(|node| std::mem::size_of_val(&*node.node) + node.node.debug_heap_bytes())
2451                .sum::<usize>()
2452        };
2453
2454        pool_bytes(&self.recycled_nodes)
2455            + pool_bytes(&self.returning_recycled_nodes)
2456            + pool_bytes(&self.cold_recycled_nodes)
2457    }
2458
2459    pub fn set_runtime_handle(&mut self, handle: RuntimeHandle) {
2460        self.layout_runtime = Some(handle);
2461    }
2462
2463    pub fn clear_runtime_handle(&mut self) {
2464        self.layout_runtime = None;
2465    }
2466
    /// Clone of the currently installed layout runtime handle, if any.
    pub fn runtime_handle(&self) -> Option<RuntimeHandle> {
        self.layout_runtime.clone()
    }
2470
2471    fn pool_node_count(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
2472        pools.values().map(Vec::len).sum()
2473    }
2474
2475    fn pool_node_capacity(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
2476        pools.values().map(Vec::capacity).sum()
2477    }
2478
2479    fn total_recycled_node_count(&self) -> usize {
2480        Self::pool_node_count(&self.recycled_nodes)
2481            + Self::pool_node_count(&self.returning_recycled_nodes)
2482            + Self::pool_node_count(&self.cold_recycled_nodes)
2483    }
2484
2485    fn total_recycled_node_capacity(&self) -> usize {
2486        Self::pool_node_capacity(&self.recycled_nodes)
2487            + Self::pool_node_capacity(&self.returning_recycled_nodes)
2488            + Self::pool_node_capacity(&self.cold_recycled_nodes)
2489    }
2490
2491    fn total_warm_recycled_node_id_count(&self) -> usize {
2492        self.live_warm_recycled_origin_count()
2493            + Self::pool_node_count(&self.recycled_nodes)
2494            + Self::pool_node_count(&self.returning_recycled_nodes)
2495    }
2496
2497    fn total_warm_recycled_node_id_capacity(&self) -> usize {
2498        self.live_warm_recycled_origin_capacity()
2499            + Self::pool_node_capacity(&self.recycled_nodes)
2500            + Self::pool_node_capacity(&self.returning_recycled_nodes)
2501    }
2502
2503    fn remember_recycle_pool_limit(&mut self, key: TypeId, recycle_pool_limit: Option<usize>) {
2504        if let Some(limit) = recycle_pool_limit {
2505            self.recycled_node_limits.insert(key, limit);
2506        } else {
2507            self.recycled_node_limits.remove(&key);
2508        }
2509    }
2510
    /// Remembered recycle-pool cap for `key`, if one was configured.
    fn recycle_pool_limit_for(&self, key: TypeId) -> Option<usize> {
        self.recycled_node_limits.get(&key).copied()
    }
2514
2515    fn warm_recycled_pool_len(&self, key: TypeId) -> usize {
2516        self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
2517    }
2518
2519    fn warm_recycled_node_target(&self, key: TypeId) -> usize {
2520        self.warm_recycled_node_targets
2521            .get(&key)
2522            .copied()
2523            .unwrap_or(0)
2524    }
2525
2526    fn warm_recycled_node_target_limit(&self, key: TypeId) -> usize {
2527        let Some(limit) = self.recycle_pool_limit_for(key) else {
2528            return usize::MAX;
2529        };
2530        if limit <= 8 {
2531            limit
2532        } else {
2533            limit / 4
2534        }
2535    }
2536
    /// Folds a newly observed demand into the warm-pool target for `key`
    /// and returns the (possibly unchanged) effective target.
    fn update_warm_recycled_node_target(&mut self, key: TypeId, observed_demand: usize) -> usize {
        let target_limit = self.warm_recycled_node_target_limit(key);
        // Clamp any previously stored target to the current limit.
        let existing = self.warm_recycled_node_target(key).min(target_limit);
        if observed_demand == 0 {
            // No demand observed: keep the stored target untouched.
            return existing;
        }

        let target = match self.recycle_pool_limit_for(key) {
            // Generous explicit caps saturate straight to the derived limit.
            Some(limit) if limit > 8 => target_limit,
            // Tight caps track demand, clamped to the limit.
            Some(_) => observed_demand.min(target_limit),
            // No cap: track demand directly.
            None => observed_demand,
        };
        self.warm_recycled_node_targets.insert(key, target);
        target
    }
2552
2553    fn remember_recycled_node_prototype(&mut self, key: TypeId, shell: &dyn Node) {
2554        if self.recycled_node_prototypes.contains_key(&key) {
2555            return;
2556        }
2557        if let Some(prototype) = shell.rehouse_for_recycle() {
2558            self.recycled_node_prototypes.insert(key, prototype);
2559        }
2560    }
2561
2562    fn live_warm_recycled_origin_count(&self) -> usize {
2563        self.physical_warm_recycled_origins
2564            .iter()
2565            .zip(self.nodes.iter())
2566            .filter(|(warm_origin, node)| **warm_origin && node.is_some())
2567            .count()
2568            + self
2569                .high_id_warm_recycled_origins
2570                .values()
2571                .filter(|warm_origin| **warm_origin)
2572                .count()
2573    }
2574
2575    fn live_warm_recycled_origin_capacity(&self) -> usize {
2576        self.physical_warm_recycled_origins.capacity()
2577            + self.high_id_warm_recycled_origins.capacity()
2578    }
2579
2580    fn push_recycled_node(
2581        &mut self,
2582        key: TypeId,
2583        recycle_pool_limit: Option<usize>,
2584        recycled: RecycledNode,
2585    ) {
2586        self.remember_recycle_pool_limit(key, recycle_pool_limit);
2587        self.remember_recycled_node_prototype(key, recycled.node.as_ref());
2588
2589        let warm_origin = recycled.warm_origin();
2590        let pool = if warm_origin {
2591            self.returning_recycled_nodes.entry(key).or_default()
2592        } else {
2593            self.cold_recycled_nodes.entry(key).or_default()
2594        };
2595        pool.push(recycled);
2596        if let Some(limit) = recycle_pool_limit {
2597            if pool.len() > limit {
2598                let excess = pool.len() - limit;
2599                let dropped: Vec<_> = pool.drain(0..excess).collect();
2600                drop(dropped);
2601            }
2602        }
2603    }
2604
    /// Inserts `recycled` into the long-lived warm pool for `key`, forcing its
    /// warm-origin flag on and evicting the oldest entries past the limit.
    fn push_warm_recycled_node(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        mut recycled: RecycledNode,
    ) {
        self.remember_recycle_pool_limit(key, recycle_pool_limit);

        recycled.set_warm_origin(true);
        let mut dropped = Vec::new();
        let mut remove_pool_entry = false;
        {
            // Scope the pool borrow so the map itself can be mutated afterwards.
            let pool = self.recycled_nodes.entry(key).or_default();
            pool.push(recycled);
            if let Some(limit) = recycle_pool_limit {
                if pool.len() > limit {
                    let excess = pool.len() - limit;
                    // Evict the oldest entries but defer their destructors.
                    dropped = pool.drain(0..excess).collect();
                    // Only empty when `limit == 0` drained the node just pushed.
                    remove_pool_entry = pool.is_empty();
                }
            }
        }
        if remove_pool_entry {
            self.recycled_nodes.remove(&key);
        }
        // Run node destructors only after the map is consistent again.
        drop(dropped);
    }
2632
2633    fn seed_recycled_node_shell_impl(
2634        &mut self,
2635        key: TypeId,
2636        recycle_pool_limit: Option<usize>,
2637        shell: Box<dyn Node>,
2638    ) {
2639        let limit = recycle_pool_limit.unwrap_or(usize::MAX);
2640        if self.warm_recycled_pool_len(key) >= limit {
2641            return;
2642        }
2643
2644        self.remember_recycled_node_prototype(key, shell.as_ref());
2645        let stable_id = self.next_stable_id;
2646        self.next_stable_id = self.next_stable_id.saturating_add(1);
2647        self.push_warm_recycled_node(
2648            key,
2649            recycle_pool_limit,
2650            RecycledNode::from_shell(stable_id, shell, true),
2651        );
2652    }
2653
2654    fn take_recycled_node_from_pool(
2655        pools: &mut HashMap<TypeId, Vec<RecycledNode>>,
2656        key: TypeId,
2657    ) -> Option<RecycledNode> {
2658        let pool = pools.get_mut(&key)?;
2659        let node = pool.pop();
2660        if pool.is_empty() {
2661            pools.remove(&key);
2662        }
2663        node
2664    }
2665
2666    fn compact_idle_warm_pool(&mut self, key: TypeId) {
2667        let Some(pool) = self.recycled_nodes.get_mut(&key) else {
2668            return;
2669        };
2670        if pool.capacity() <= pool.len().saturating_mul(4).max(64) {
2671            return;
2672        }
2673
2674        let retained = pool.len();
2675        let mut compacted = Vec::with_capacity(retained);
2676        compacted.append(pool);
2677        let remove_pool_entry = compacted.is_empty();
2678        *pool = compacted;
2679        let _ = pool;
2680
2681        if remove_pool_entry {
2682            self.recycled_nodes.remove(&key);
2683        }
2684    }
2685
2686    fn trim_idle_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
2687        let pool_len = self.warm_recycled_pool_len(key);
2688        if pool_len <= target {
2689            return;
2690        }
2691
2692        let Some(pool) = self.recycled_nodes.get_mut(&key) else {
2693            return;
2694        };
2695        let removable = (pool_len - target).min(pool.len());
2696        let dropped: Vec<_> = pool.drain(0..removable).collect();
2697        let remove_pool_entry = pool.is_empty();
2698        let _ = pool;
2699
2700        if remove_pool_entry {
2701            self.recycled_nodes.remove(&key);
2702        }
2703        drop(dropped);
2704    }
2705
2706    fn replenish_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
2707        let missing = target.saturating_sub(self.warm_recycled_pool_len(key));
2708        if missing == 0 {
2709            return;
2710        }
2711
2712        let recycle_pool_limit = self.recycle_pool_limit_for(key);
2713        let mut shells = Vec::with_capacity(missing);
2714        if let Some(prototype) = self.recycled_node_prototypes.get(&key) {
2715            for _ in 0..missing {
2716                let Some(shell) = prototype.rehouse_for_recycle() else {
2717                    break;
2718                };
2719                shells.push(shell);
2720            }
2721        }
2722
2723        for shell in shells {
2724            self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
2725        }
2726    }
2727
2728    fn prune_stable_generations(&mut self) {
2729        let retained_len = self.stable_to_physical.len() + self.total_recycled_node_count();
2730        if retained_len == self.stable_generations.len() {
2731            return;
2732        }
2733
2734        let mut retained = HashMap::default();
2735        retained.reserve(retained_len);
2736        for stable_id in self.stable_to_physical.keys().copied() {
2737            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2738                retained.insert(stable_id, generation);
2739            }
2740        }
2741        for stable_id in self
2742            .recycled_nodes
2743            .values()
2744            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2745        {
2746            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2747                retained.insert(stable_id, generation);
2748            }
2749        }
2750        for stable_id in self
2751            .returning_recycled_nodes
2752            .values()
2753            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2754        {
2755            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2756                retained.insert(stable_id, generation);
2757            }
2758        }
2759        for stable_id in self
2760            .cold_recycled_nodes
2761            .values()
2762            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2763        {
2764            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2765                retained.insert(stable_id, generation);
2766            }
2767        }
2768        self.stable_generations = retained;
2769    }
2770
2771    pub fn dump_tree(&self, root: Option<NodeId>) -> String {
2772        let mut output = String::new();
2773        if let Some(root_id) = root {
2774            self.dump_node(&mut output, root_id, 0);
2775        } else {
2776            output.push_str("(no root)\n");
2777        }
2778        output
2779    }
2780
2781    fn dump_node(&self, output: &mut String, id: NodeId, depth: usize) {
2782        let indent = "  ".repeat(depth);
2783        if let Some(physical_id) = self.resolve_node_index(id) {
2784            let node = self.nodes[physical_id]
2785                .as_ref()
2786                .expect("resolved physical node must exist");
2787            let type_name = std::any::type_name_of_val(&**node);
2788            output.push_str(&format!("{}[{}] {}\n", indent, id, type_name));
2789
2790            let children = node.children();
2791            for child_id in children {
2792                self.dump_node(output, child_id, depth + 1);
2793            }
2794        } else {
2795            output.push_str(&format!("{}[{}] (missing)\n", indent, id));
2796        }
2797    }
2798
    /// Maps a stable node id to its dense physical slot index, or `None` when
    /// the node is not in the dense `nodes` Vec (removed, or stored in the
    /// high-id map instead).
    fn resolve_node_index(&self, id: NodeId) -> Option<usize> {
        self.stable_to_physical.get(&id).copied()
    }
2802
2803    fn get_ref(&self, id: NodeId) -> Result<&dyn Node, NodeError> {
2804        if let Some(physical_id) = self.resolve_node_index(id) {
2805            let slot = self
2806                .nodes
2807                .get(physical_id)
2808                .ok_or(NodeError::Missing { id })?
2809                .as_deref()
2810                .ok_or(NodeError::Missing { id })?;
2811            return Ok(slot);
2812        }
2813
2814        self.high_id_nodes
2815            .get(&id)
2816            .map(|node| node.as_ref())
2817            .ok_or(NodeError::Missing { id })
2818    }
2819
2820    fn collect_node_children_into(
2821        &self,
2822        id: NodeId,
2823        out: &mut SmallVec<[NodeId; 8]>,
2824    ) -> Result<(), NodeError> {
2825        self.get_ref(id)?.collect_children_into(out);
2826        Ok(())
2827    }
2828
2829    fn node_parent(&self, id: NodeId) -> Result<Option<NodeId>, NodeError> {
2830        Ok(self.get_ref(id)?.parent())
2831    }
2832
2833    fn collect_owned_children(
2834        &self,
2835        node_id: NodeId,
2836        out: &mut SmallVec<[NodeId; 8]>,
2837    ) -> Result<(), NodeError> {
2838        self.collect_node_children_into(node_id, out)?;
2839        out.retain(|child_id| {
2840            self.node_parent(*child_id)
2841                .map(|parent| parent == Some(node_id))
2842                .unwrap_or(false)
2843        });
2844        Ok(())
2845    }
2846
    /// Releases the storage backing `node_id`, recycling the node into a pool
    /// when it exposes a recycle key, and bumping its generation counter so
    /// stale references can be detected.
    fn remove_node_storage(&mut self, node_id: NodeId) -> Result<(), NodeError> {
        // High-id (map-backed) nodes are handled separately from dense slots.
        if self.high_id_nodes.contains_key(&node_id) {
            if let Some(mut node) = self.high_id_nodes.remove(&node_id) {
                if let Some(key) = node.recycle_key() {
                    let recycle_pool_limit = node.recycle_pool_limit();
                    // Consume the warm-origin flag along with the node.
                    let warm_origin = self
                        .high_id_warm_recycled_origins
                        .remove(&node_id)
                        .unwrap_or(false);
                    node.prepare_for_recycle();
                    self.push_recycled_node(
                        key,
                        recycle_pool_limit,
                        RecycledNode::new(node_id, node, warm_origin),
                    );
                }
            }
            // Wrapping add: only generation (in)equality matters to callers.
            let generation = self.high_id_generations.entry(node_id).or_insert(0);
            *generation = generation.wrapping_add(1);
            return Ok(());
        }

        let physical_id = self
            .resolve_node_index(node_id)
            .ok_or(NodeError::Missing { id: node_id })?;
        if let Some(mut node) = self.nodes[physical_id].take() {
            if let Some(key) = node.recycle_key() {
                let recycle_pool_limit = node.recycle_pool_limit();
                // `mem::take` reads the flag and resets the slot to false.
                let warm_origin = self
                    .physical_warm_recycled_origins
                    .get_mut(physical_id)
                    .map(std::mem::take)
                    .unwrap_or(false);
                node.prepare_for_recycle();
                self.push_recycled_node(
                    key,
                    recycle_pool_limit,
                    RecycledNode::new(node_id, node, warm_origin),
                );
            }
        }
        // Invalidate the slot's stable-id back-pointer and unlink the
        // stable -> physical mapping before bumping the generation.
        self.physical_stable_ids[physical_id] = Self::INVALID_STABLE_ID;
        self.stable_to_physical.remove(&node_id);
        if let Some(generation) = self.stable_generations.get_mut(&node_id) {
            *generation = generation.wrapping_add(1);
        } else {
            self.stable_generations.insert(node_id, 1);
        }
        // Return the slot to the freelist (`Reverse` => min-heap, lowest first).
        self.free_ids.push(Reverse(physical_id));
        Ok(())
    }
2898
    /// Removes `id` and its owned descendants in post-order (children before
    /// parents) without recursion, returning the maximum explicit-stack depth
    /// reached (exposed to tests via `debug_remove_max_traversal_depth`).
    fn remove_subtree_postorder(&mut self, id: NodeId) -> Result<usize, NodeError> {
        // Fail fast if the root does not exist.
        self.get_ref(id)?;

        let mut root_children = SmallVec::<[NodeId; 8]>::new();
        self.collect_owned_children(id, &mut root_children)?;

        // Explicit stack of frames; each frame tracks how many of its children
        // have already been descended into.
        let mut stack = Vec::new();
        stack.push(RemovalFrame {
            node_id: id,
            children: root_children,
            next_child: 0,
        });
        let mut max_depth = stack.len();

        while let Some(frame) = stack.last_mut() {
            if frame.next_child < frame.children.len() {
                let child_id = frame.children[frame.next_child];
                frame.next_child += 1;

                // Notify the child before tearing it down; a missing child is
                // tolerated (it may already be gone).
                if let Ok(child) = self.get_mut(child_id) {
                    child.on_removed_from_parent();
                    child.unmount();
                }

                let mut child_children = SmallVec::<[NodeId; 8]>::new();
                self.collect_owned_children(child_id, &mut child_children)?;
                stack.push(RemovalFrame {
                    node_id: child_id,
                    children: child_children,
                    next_child: 0,
                });
                max_depth = max_depth.max(stack.len());
                continue;
            }

            // All children handled: release this node's storage (post-order).
            let node_id = frame.node_id;
            stack.pop();
            self.remove_node_storage(node_id)?;
        }

        Ok(max_depth)
    }
2941
    /// Test-only alias exposing the traversal-depth metric of subtree removal.
    #[cfg(test)]
    fn debug_remove_max_traversal_depth(&mut self, id: NodeId) -> Result<usize, NodeError> {
        self.remove_subtree_postorder(id)
    }
2946}
2947
impl Applier for MemoryApplier {
    /// Stores `node` under the next stable id, reusing the lowest freed
    /// physical slot when available and growing the dense storage otherwise.
    /// Returns the new stable id.
    fn create(&mut self, node: Box<dyn Node>) -> NodeId {
        self.ensure_stable_index_capacity();
        let stable_id = self.next_stable_id;
        // Saturating: stable ids never wrap back over live ids.
        self.next_stable_id = self.next_stable_id.saturating_add(1);
        self.stable_generations.insert(stable_id, 0);

        // `Reverse` makes the BinaryHeap a min-heap: lowest free slot first.
        let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
            debug_assert!(self.nodes[id].is_none(), "freelist entry {id} is not None");
            self.nodes[id] = Some(node);
            self.physical_stable_ids[id] = Self::pack_stable_id(stable_id);
            self.physical_warm_recycled_origins[id] = false;
            id
        } else {
            self.ensure_dense_node_storage_capacity();
            let id = self.nodes.len();
            self.nodes.push(Some(node));
            self.physical_stable_ids
                .push(Self::pack_stable_id(stable_id));
            self.physical_warm_recycled_origins.push(false);
            id
        };
        self.stable_to_physical.insert(stable_id, physical_id);
        stable_id
    }

    /// Generation counter for `id`: the high-id table takes precedence over
    /// the stable table; unknown ids report 0.
    fn node_generation(&self, id: NodeId) -> u32 {
        self.high_id_generations
            .get(&id)
            .copied()
            .or_else(|| self.stable_generations.get(&id).copied())
            .unwrap_or(0)
    }

    /// Exclusive lookup of a node by stable id.
    fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError> {
        // Try Vec first (common case for normal nodes), then HashMap for virtual nodes.
        if let Some(physical_id) = self.resolve_node_index(id) {
            let slot = self.nodes[physical_id]
                .as_deref_mut()
                .ok_or(NodeError::Missing { id })?;
            return Ok(slot);
        }
        self.high_id_nodes
            .get_mut(&id)
            .map(|n| n.as_mut())
            .ok_or(NodeError::Missing { id })
    }

    /// Removes `id` and all owned descendants, discarding the depth metric.
    fn remove(&mut self, id: NodeId) -> Result<(), NodeError> {
        self.remove_subtree_postorder(id).map(|_| ())
    }

    /// Inserts `node` under an explicit, caller-chosen id.
    fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError> {
        // Use HashMap for high IDs (virtual nodes) to avoid Vec capacity overflow
        // Virtual node IDs start at a very high value that can't fit in a Vec
        const HIGH_ID_THRESHOLD: NodeId = 1_000_000_000; // 1 billion

        if id >= HIGH_ID_THRESHOLD {
            if self.high_id_nodes.contains_key(&id) {
                return Err(NodeError::AlreadyExists { id });
            }
            self.high_id_nodes.insert(id, node);
            self.high_id_warm_recycled_origins.insert(id, false);
            self.high_id_generations.entry(id).or_insert(0);
            Ok(())
        } else {
            if self.stable_to_physical.contains_key(&id) {
                return Err(NodeError::AlreadyExists { id });
            }

            // NOTE(review): in both branches below the binding `id` shadows
            // the stable-id parameter with the *physical* slot index, so the
            // `pack_stable_id(id)` writes here store the physical index, not
            // the stable id. They are corrected by the unconditional overwrite
            // further down, so behavior is right, but renaming the inner
            // binding would make that obvious.
            let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
                self.nodes[id] = Some(node);
                self.physical_stable_ids[id] = Self::pack_stable_id(id);
                self.physical_warm_recycled_origins[id] = false;
                id
            } else {
                self.ensure_dense_node_storage_capacity();
                let id = self.nodes.len();
                self.nodes.push(Some(node));
                self.physical_stable_ids.push(Self::pack_stable_id(id));
                self.physical_warm_recycled_origins.push(false);
                id
            };

            // Ensure future `create` calls never hand out this explicit id.
            self.next_stable_id = self.next_stable_id.max(id.saturating_add(1));
            self.ensure_stable_index_capacity();
            self.stable_generations.entry(id).or_insert(0);
            // Overwrites the provisional value written above with the real
            // stable id.
            self.physical_stable_ids[physical_id] = Self::pack_stable_id(id);
            self.stable_to_physical.insert(id, physical_id);
            Ok(())
        }
    }

    /// Repacks the dense storage, dropping tombstoned slots. Skipped for
    /// large vectors while live nodes still outnumber tombstones.
    fn compact(&mut self) {
        let live_count = self.nodes.iter().filter(|slot| slot.is_some()).count();
        let tombstone_count = self.nodes.len().saturating_sub(live_count);
        if tombstone_count == 0 {
            return;
        }
        if self.nodes.len() > Self::EAGER_COMPACT_NODE_LEN && tombstone_count < live_count {
            return;
        }
        // When at least half the slots are dead, also give each live node a
        // chance to move itself into fresh storage.
        let rehouse_live_nodes = tombstone_count >= live_count;
        let mut packed_nodes = Vec::with_capacity(live_count);
        let mut packed_physical_stable_ids = Vec::with_capacity(live_count);
        let mut packed_warm_recycled_origins = Vec::with_capacity(live_count);
        let mut stable_to_physical = HashMap::default();
        stable_to_physical.reserve(live_count);

        for physical_id in 0..self.nodes.len() {
            let Some(mut node) = self.nodes[physical_id].take() else {
                continue;
            };
            if rehouse_live_nodes {
                if let Some(rehoused) = node.rehouse_for_live_compaction() {
                    node = rehoused;
                }
            }
            let stable_id = std::mem::replace(
                &mut self.physical_stable_ids[physical_id],
                Self::INVALID_STABLE_ID,
            );
            debug_assert_ne!(
                stable_id,
                Self::INVALID_STABLE_ID,
                "live physical slot must have a stable id",
            );
            let stable_id = Self::unpack_stable_id(stable_id);
            packed_nodes.push(Some(node));
            packed_physical_stable_ids.push(Self::pack_stable_id(stable_id));
            packed_warm_recycled_origins.push(self.physical_warm_recycled_origins[physical_id]);
            stable_to_physical.insert(stable_id, packed_nodes.len() - 1);
        }

        self.nodes = packed_nodes;
        self.physical_stable_ids = packed_physical_stable_ids;
        self.physical_warm_recycled_origins = packed_warm_recycled_origins;
        // Every remaining slot is live, so the freelist resets to empty.
        self.free_ids = BinaryHeap::new();
        self.stable_to_physical = stable_to_physical;
        self.prune_stable_generations();
    }

    /// Pops a recycled node for `key`, preferring nodes returned this frame
    /// over the long-lived warm pool.
    fn take_recycled_node(&mut self, key: TypeId) -> Option<RecycledNode> {
        Self::take_recycled_node_from_pool(&mut self.returning_recycled_nodes, key)
            .or_else(|| Self::take_recycled_node_from_pool(&mut self.recycled_nodes, key))
    }

    /// Flags whether `id` originated from the warm recycle pool; unknown ids
    /// are silently ignored.
    fn set_recycled_node_origin(&mut self, id: NodeId, warm_origin: bool) {
        if let Some(physical_id) = self.resolve_node_index(id) {
            self.physical_warm_recycled_origins[physical_id] = warm_origin;
        } else if self.high_id_nodes.contains_key(&id) {
            self.high_id_warm_recycled_origins.insert(id, warm_origin);
        }
    }

    /// Trait entry point; delegates to the inherent implementation.
    fn seed_recycled_node_shell(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        shell: Box<dyn Node>,
    ) {
        self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
    }

    /// Counts creations that could not be served from a recycle pool; used by
    /// `clear_recycled_nodes` to size the warm pools for the next frame.
    fn record_fresh_recyclable_creation(&mut self, key: TypeId) {
        *self.fresh_recyclable_creations.entry(key).or_insert(0) += 1;
    }

    /// End-of-frame pool maintenance: folds returning nodes back into the warm
    /// pool, promotes cold nodes up to observed demand, re-targets/trims/
    /// compacts each pool, and finally compacts the dense storage.
    fn clear_recycled_nodes(&mut self) {
        // Warm nodes returned during the frame rejoin the long-lived pool.
        let returning = std::mem::take(&mut self.returning_recycled_nodes);
        for (key, mut nodes) in returning {
            let pool = self.recycled_nodes.entry(key).or_default();
            pool.append(&mut nodes);
        }

        // Promote cold nodes only up to this frame's demand and the remaining
        // room under the pool limit.
        let fresh_recyclable_creations = std::mem::take(&mut self.fresh_recyclable_creations);
        let cold = std::mem::take(&mut self.cold_recycled_nodes);
        for (key, mut nodes) in cold {
            let needed = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
            if needed > 0 {
                let remaining_limit = self
                    .recycle_pool_limit_for(key)
                    .unwrap_or(usize::MAX)
                    .saturating_sub(self.warm_recycled_pool_len(key));
                let promote = nodes.len().min(needed).min(remaining_limit);
                // Promote from the tail: the most recently recycled nodes.
                let split_at = nodes.len().saturating_sub(promote);
                let promoted = nodes.split_off(split_at);
                for mut recycled in promoted {
                    recycled.set_warm_origin(true);
                    self.recycled_nodes.entry(key).or_default().push(recycled);
                }
            }
        }

        // Re-balance every key for which any pool state has ever been seen.
        let mut keys: HashSet<TypeId> = HashSet::default();
        keys.extend(self.recycled_nodes.keys().copied());
        keys.extend(self.recycled_node_limits.keys().copied());
        keys.extend(self.warm_recycled_node_targets.keys().copied());
        keys.extend(self.recycled_node_prototypes.keys().copied());
        for key in keys {
            let observed_demand = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
            let target = self.update_warm_recycled_node_target(key, observed_demand);
            self.replenish_warm_pool_to_target(key, target);
            self.trim_idle_warm_pool_to_target(key, target);
            self.compact_idle_warm_pool(key);
        }
        self.prune_stable_generations();
        // Compact the dense physical storage if it has grown very large relative
        // to the live node count (e.g. after a spike of recyclable nodes that
        // were all removed). This keeps warm_recycled_node_id_capacity bounded.
        self.compact();
    }
}
3161
/// Type-erased, interior-mutability access to an [`Applier`].
pub trait ApplierHost {
    /// Mutably borrows the applier as a trait object.
    fn borrow_dyn(&self) -> RefMut<'_, dyn Applier>;
    /// Compact internal storage after commands have been applied.
    fn compact(&self) {}
}
3167
/// [`ApplierHost`] backed by a concrete applier type `A` stored in a
/// `RefCell`, allowing both typed and type-erased borrows.
pub struct ConcreteApplierHost<A: Applier + 'static> {
    inner: RefCell<A>,
}
3171
3172impl<A: Applier + 'static> ConcreteApplierHost<A> {
3173    pub fn new(applier: A) -> Self {
3174        Self {
3175            inner: RefCell::new(applier),
3176        }
3177    }
3178
3179    pub fn borrow_typed(&self) -> RefMut<'_, A> {
3180        self.inner.borrow_mut()
3181    }
3182
3183    pub fn try_borrow_typed(&self) -> Result<RefMut<'_, A>, std::cell::BorrowMutError> {
3184        self.inner.try_borrow_mut()
3185    }
3186
3187    pub fn into_inner(self) -> A {
3188        self.inner.into_inner()
3189    }
3190}
3191
3192impl<A: Applier + 'static> ApplierHost for ConcreteApplierHost<A> {
3193    fn borrow_dyn(&self) -> RefMut<'_, dyn Applier> {
3194        RefMut::map(self.inner.borrow_mut(), |applier| {
3195            applier as &mut dyn Applier
3196        })
3197    }
3198
3199    fn compact(&self) {
3200        self.inner.borrow_mut().compact();
3201    }
3202}
3203
/// Borrowed handle to a concrete applier, built from an active `RefMut` and
/// released when dropped; derefs (mutably) to `A`.
pub struct ApplierGuard<'a, A: Applier + 'static> {
    inner: RefMut<'a, A>,
}
3207
3208impl<'a, A: Applier + 'static> ApplierGuard<'a, A> {
3209    fn new(inner: RefMut<'a, A>) -> Self {
3210        Self { inner }
3211    }
3212}
3213
3214impl<'a, A: Applier + 'static> Deref for ApplierGuard<'a, A> {
3215    type Target = A;
3216
3217    fn deref(&self) -> &Self::Target {
3218        &self.inner
3219    }
3220}
3221
3222impl<'a, A: Applier + 'static> DerefMut for ApplierGuard<'a, A> {
3223    fn deref_mut(&mut self) -> &mut Self::Target {
3224        &mut self.inner
3225    }
3226}
3227
/// Interior-mutability wrapper around a [`SlotTable`], handing out shared or
/// exclusive borrows on demand.
pub struct SlotsHost {
    inner: RefCell<SlotTable>,
}
3231
3232impl SlotsHost {
3233    pub fn new(storage: SlotTable) -> Self {
3234        Self {
3235            inner: RefCell::new(storage),
3236        }
3237    }
3238
3239    pub fn borrow(&self) -> Ref<'_, SlotTable> {
3240        self.inner.borrow()
3241    }
3242
3243    pub fn borrow_mut(&self) -> RefMut<'_, SlotTable> {
3244        self.inner.borrow_mut()
3245    }
3246
3247    pub fn take(&self) -> SlotTable {
3248        std::mem::take(&mut *self.inner.borrow_mut())
3249    }
3250}
3251
3252fn build_child_positions(children: &[NodeId]) -> HashMap<NodeId, usize> {
3253    let mut positions = HashMap::default();
3254    positions.reserve(children.len());
3255    for (index, &child) in children.iter().enumerate() {
3256        positions.insert(child, index);
3257    }
3258    positions
3259}
3260
3261fn refresh_child_positions(
3262    current: &[NodeId],
3263    positions: &mut HashMap<NodeId, usize>,
3264    start: usize,
3265    end: usize,
3266) {
3267    if current.is_empty() || start >= current.len() {
3268        return;
3269    }
3270    let end = end.min(current.len() - 1);
3271    for (offset, &child) in current[start..=end].iter().enumerate() {
3272        positions.insert(child, start + offset);
3273    }
3274}
3275
3276fn insert_child_into_diff_state(
3277    current: &mut ChildList,
3278    positions: &mut HashMap<NodeId, usize>,
3279    index: usize,
3280    child: NodeId,
3281) {
3282    let index = index.min(current.len());
3283    current.insert(index, child);
3284    refresh_child_positions(current, positions, index, current.len() - 1);
3285}
3286
3287fn move_child_in_diff_state(
3288    current: &mut ChildList,
3289    positions: &mut HashMap<NodeId, usize>,
3290    from_index: usize,
3291    target_index: usize,
3292) -> usize {
3293    let child = current.remove(from_index);
3294    let to_index = target_index.min(current.len());
3295    current.insert(to_index, child);
3296    refresh_child_positions(
3297        current,
3298        positions,
3299        from_index.min(to_index),
3300        from_index.max(to_index),
3301    );
3302    to_index
3303}
3304
3305pub(crate) use state::MutableStateInner;
3306pub use state::{MutableState, OwnedMutableState, SnapshotStateList, SnapshotStateMap, State};
3307
/// Hashes any `Hash` value into the composition `Key` type using the crate's
/// default hasher (see the `hash` module declared at the bottom of this file).
fn hash_key<K: Hash>(key: &K) -> Key {
    let mut hasher = hash::default::new();
    key.hash(&mut hasher);
    hasher.finish()
}
3313
3314#[cfg(test)]
3315#[path = "tests/mod.rs"]
3316mod tests;
3317
3318#[cfg(test)]
3319#[path = "tests/recursive_decrease_increase_test.rs"]
3320mod recursive_decrease_increase_test;
3321
3322pub mod collections;
3323pub mod hash;