// cranpose_core — core runtime pieces (lib.rs)
1#![doc = r"Core runtime pieces for the Cranpose experiment."]
2
3pub extern crate self as cranpose_core;
4
5mod callbacks;
6mod composer;
7pub mod composer_context;
8mod composition;
9mod composition_locals;
10mod debug_trace;
11mod emit;
12#[cfg(any(feature = "internal", test))]
13mod frame_clock;
14mod hooks;
15mod launched_effect;
16pub mod owned;
17pub mod platform;
18mod recompose;
19mod retention;
20pub mod runtime;
21mod slot;
22pub mod snapshot_double_index_heap;
23pub mod snapshot_id_set;
24pub mod snapshot_pinning;
25pub mod snapshot_state_observer;
26pub mod snapshot_v2;
27mod snapshot_weak_set;
28mod state;
29pub mod subcompose;
30
31#[cfg(feature = "internal")]
32#[doc(hidden)]
33pub mod internal {
34    pub use crate::frame_clock::{FrameCallbackRegistration, FrameClock};
35}
36pub use callbacks::{CallbackHolder, CallbackHolder1, ParamSlot, ParamState, ReturnSlot};
37pub use composer::{Composer, ValueSlotHandle};
38pub(crate) use composer::{ComposerCore, EmittedNode, ParentAttachMode, ParentFrame};
39pub use composition::{Composition, ROOT_RENDER_REPLAY_LIMIT};
40pub use composition_locals::{
41    compositionLocalOf, compositionLocalOfWithPolicy, staticCompositionLocalOf, CompositionLocal,
42    CompositionLocalProvider, ProvidedValue, StaticCompositionLocal,
43};
44pub(crate) use composition_locals::{LocalStateEntry, StaticLocalEntry};
45#[doc(hidden)]
46pub use debug_trace::{
47    debug_label_current_scope, debug_live_recompose_scope_count,
48    debug_recompose_scope_registry_stats, debug_scope_invalidation_sources, debug_scope_label,
49};
50pub use hooks::{
51    derivedStateOf, mutableStateList, mutableStateListOf, mutableStateMap, mutableStateMapOf,
52    mutableStateOf, ownedMutableStateOf, remember, rememberUpdatedState, try_mutableStateOf,
53    useState,
54};
55#[cfg(feature = "internal")]
56#[doc(hidden)]
57pub use hooks::{withFrameMillis, withFrameNanos};
58pub use launched_effect::{
59    __launched_effect_async_impl, __launched_effect_impl, CancelToken, LaunchedEffectScope,
60};
61pub use owned::Owned;
62pub use platform::{Clock, RuntimeScheduler};
63pub use retention::{RetentionBudget, RetentionEvictionPolicy, RetentionMode, RetentionPolicy};
64#[doc(hidden)]
65pub use runtime::{
66    current_runtime_handle, schedule_frame, schedule_node_update, DefaultScheduler, Runtime,
67    RuntimeHandle, StateId, TaskHandle,
68};
69pub use slot::{
70    SlotDebugAnchor, SlotDebugEntry, SlotDebugEntryKind, SlotDebugGroup, SlotDebugScope,
71    SlotDebugSnapshot, SlotRetentionDebugStats, SlotTable, SlotTableDebugStats,
72    SlotTableLocalDebugStats, SlotTableMutationDebugStats,
73};
74#[doc(hidden)]
75pub use snapshot_state_observer::SnapshotStateObserver;
76
77/// Runs the provided closure inside a mutable snapshot and applies the result.
78///
79/// Use this function when you need to update `MutableState` from outside the
80/// composition or layout phase, typically in event handlers or async tasks.
81///
82/// # Why is this needed?
83/// Cranpose uses a snapshot system (MVCC) to isolate state changes. Modifications
84/// made to `MutableState` are only visible to the current thread's active snapshot.
85/// To make changes visible to the rest of the system (and trigger recomposition),
86/// they must be "applied" by committing the snapshot. This helper handles that
87/// lifecycle for you.
88///
89/// # Example
90///
91/// ```ignore
92/// // Inside a button click handler
93/// run_in_mutable_snapshot(|| {
94///     count.set(count.value() + 1);
95/// });
96/// ```
97///
98/// # Important
99/// ALL UI event handlers (keyboard, mouse, touch, animations) that modify state
100/// MUST use this function or [`dispatch_ui_event`].
101pub fn run_in_mutable_snapshot<T>(block: impl FnOnce() -> T) -> Result<T, &'static str> {
102    let snapshot = snapshot_v2::take_mutable_snapshot(None, None);
103
104    // Mark that we're in an applied snapshot context
105    IN_APPLIED_SNAPSHOT.with(|c| c.set(true));
106    let value = snapshot.enter(block);
107    IN_APPLIED_SNAPSHOT.with(|c| c.set(false));
108
109    match snapshot.apply() {
110        snapshot_v2::SnapshotApplyResult::Success => Ok(value),
111        snapshot_v2::SnapshotApplyResult::Failure => Err("Snapshot apply failed"),
112    }
113}
114
115/// Dispatches a UI event in a proper snapshot context.
116///
117/// This is a convenience wrapper around [`run_in_mutable_snapshot`] that returns
118/// `Option<T>` instead of `Result<T, &str>`.
119///
120/// # Example
121/// ```ignore
122/// // In a keyboard event handler:
123/// dispatch_ui_event(|| {
124///     text_field_state.edit(|buffer| {
125///         buffer.insert("a");
126///     });
127/// });
128/// ```
129pub fn dispatch_ui_event<T>(block: impl FnOnce() -> T) -> Option<T> {
130    run_in_mutable_snapshot(block).ok()
131}
132
133// ─── Event Handler Context Tracking ─────────────────────────────────────────
134//
135// These thread-locals track whether code is running in an event handler context
136// and whether it's properly wrapped in run_in_mutable_snapshot. This allows
137// debug-mode warnings when state is modified without proper snapshot handling.
138
thread_local! {
    /// Tracks if we're in a UI event handler context (keyboard, mouse, etc.)
    /// Set/cleared by `enter_event_handler` / `exit_event_handler`.
    pub(crate) static IN_EVENT_HANDLER: Cell<bool> = const { Cell::new(false) };
    /// Tracks if we're in a properly-applied mutable snapshot
    /// (maintained by `run_in_mutable_snapshot`).
    pub(crate) static IN_APPLIED_SNAPSHOT: Cell<bool> = const { Cell::new(false) };
}
145
/// Length/capacity snapshot of a composition pass's internal buffers, useful
/// for spotting allocation growth between passes. Field meanings follow the
/// composer's internal buffers of the same names (confirm in `composer`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct CompositionPassDebugStats {
    pub commands_len: usize,
    pub commands_cap: usize,
    pub command_payload_len_bytes: usize,
    pub command_payload_cap_bytes: usize,
    pub sync_children_len: usize,
    pub sync_children_cap: usize,
    pub sync_child_ids_len: usize,
    pub sync_child_ids_cap: usize,
    pub side_effects_len: usize,
    pub side_effects_cap: usize,
}
159
160/// Marks the start of an event handler context.
161/// Call this at the start of keyboard/mouse/touch event handling.
162/// Use `run_in_mutable_snapshot` or `dispatch_ui_event` inside to ensure proper snapshot handling.
163pub fn enter_event_handler() {
164    IN_EVENT_HANDLER.with(|c| c.set(true));
165}
166
167/// Marks the end of an event handler context.
168pub fn exit_event_handler() {
169    IN_EVENT_HANDLER.with(|c| c.set(false));
170}
171
172/// Returns true if currently in an event handler context.
173pub fn in_event_handler() -> bool {
174    IN_EVENT_HANDLER.with(|c| c.get())
175}
176
177/// Returns true if currently in an applied snapshot context.
178pub fn in_applied_snapshot() -> bool {
179    IN_APPLIED_SNAPSHOT.with(|c| c.get())
180}
181
182#[cfg(test)]
183pub use runtime::{TestRuntime, TestScheduler};
184
185use crate::collections::map::{HashMap, HashSet};
186use smallvec::SmallVec;
187use std::any::{Any, TypeId};
188use std::cell::{Cell, Ref, RefCell, RefMut};
189use std::cmp::Reverse;
190use std::collections::BinaryHeap;
191use std::hash::{Hash, Hasher};
192use std::ops::{Deref, DerefMut};
193use std::rc::{Rc, Weak};
194use std::sync::atomic::{AtomicUsize, Ordering};
195#[cfg(any(test, debug_assertions))]
196use std::sync::{Mutex, OnceLock};
197
/// Stable 64-bit key derived from a call site (see `location_key`) or an
/// explicit group key.
pub type Key = u64;
/// Identifier for a node emitted into the applier tree.
pub type NodeId = usize;
200
/// Debug-only record of the source location that produced a [`Key`], kept so
/// key collisions can be reported with both call sites.
#[cfg(any(test, debug_assertions))]
#[derive(Clone, Debug, PartialEq, Eq)]
struct LocationKeyDebugInfo {
    file: String,
    line: u32,
    column: u32,
}
208
209#[cfg(any(test, debug_assertions))]
210fn location_key_registry() -> &'static Mutex<HashMap<Key, LocationKeyDebugInfo>> {
211    static REGISTRY: OnceLock<Mutex<HashMap<Key, LocationKeyDebugInfo>>> = OnceLock::new();
212    REGISTRY.get_or_init(|| Mutex::new(HashMap::default()))
213}
214
215#[cfg(any(test, debug_assertions))]
216fn lock_location_key_registry() -> std::sync::MutexGuard<'static, HashMap<Key, LocationKeyDebugInfo>>
217{
218    location_key_registry()
219        .lock()
220        .unwrap_or_else(|poisoned| poisoned.into_inner())
221}
222
/// Records the source location for `key`, panicking when two *different*
/// locations hash to the same key (a genuine collision).
#[cfg(any(test, debug_assertions))]
fn register_location_key_debug_info(key: Key, file: &str, line: u32, column: u32) {
    let info = LocationKeyDebugInfo {
        file: file.to_owned(),
        line,
        column,
    };
    // Detect the collision while holding the lock, but panic only after the
    // guard is dropped so the registry mutex is not poisoned by the panic.
    let collision = {
        let mut registry = lock_location_key_registry();
        match registry.entry(key) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(info);
                None
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                let existing = entry.get();
                // Re-registering the identical location is fine, not a collision.
                (existing != &info).then(|| (existing.clone(), info))
            }
        }
    };
    if let Some((existing, incoming)) = collision {
        panic!("location key collision: key={key} first={existing:?} second={incoming:?}");
    }
}
247
/// Whether location-key collision diagnostics are enabled for this process.
/// The env flag is read once and cached for the process lifetime.
#[cfg(all(debug_assertions, not(test)))]
fn location_key_diagnostics_enabled() -> bool {
    static DIAGNOSTICS: OnceLock<bool> = OnceLock::new();
    *DIAGNOSTICS.get_or_init(|| {
        std::env::var_os("CRANPOSE_LOCATION_KEY_DIAGNOSTICS").is_some()
    })
}
253
/// Test-only shim so tests can seed the collision registry without going
/// through `location_key`.
#[cfg(test)]
pub(crate) fn register_location_key_debug_info_for_test(
    key: Key,
    file: &str,
    line: u32,
    column: u32,
) {
    register_location_key_debug_info(key, file, line, column);
}
263
/// In test builds, slot validation diagnostics are unconditionally on.
#[cfg(test)]
pub(crate) fn slot_validation_diagnostics_enabled() -> bool {
    true
}
268
/// Whether slot-table validation diagnostics are enabled; cached once per
/// process from the CRANPOSE_VALIDATE_SLOTS env flag.
#[cfg(all(debug_assertions, not(test)))]
pub(crate) fn slot_validation_diagnostics_enabled() -> bool {
    static VALIDATE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    *VALIDATE.get_or_init(|| std::env::var_os("CRANPOSE_VALIDATE_SLOTS").is_some())
}
274
275fn source_location_key(file: &str, line: u32, column: u32) -> Key {
276    let mut hash = 0xcbf2_9ce4_8422_2325u64;
277    hash = fnv1a_location_key_bytes(hash, file.as_bytes());
278    hash = fnv1a_location_key_bytes(hash, &[0xff]);
279    hash = fnv1a_location_key_bytes(hash, &line.to_le_bytes());
280    hash = fnv1a_location_key_bytes(hash, &[0xfe]);
281    hash = fnv1a_location_key_bytes(hash, &column.to_le_bytes());
282    avalanche_location_key(hash)
283}
284
/// One FNV-1a round per byte: XOR the byte in, then multiply by the 64-bit
/// FNV prime (wrapping).
fn fnv1a_location_key_bytes(hash: u64, bytes: &[u8]) -> u64 {
    const FNV_PRIME: u64 = 0x0000_0100_0000_01b3;
    bytes
        .iter()
        .fold(hash, |acc, &byte| (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME))
}
292
/// Final bit mixer — the constants and shift pattern match MurmurHash3's
/// 64-bit finalizer ("fmix64"): three xor-shift/multiply rounds to spread
/// entropy across all output bits.
fn avalanche_location_key(value: u64) -> u64 {
    let v = value ^ (value >> 33);
    let v = v.wrapping_mul(0xff51_afd7_ed55_8ccd);
    let v = v ^ (v >> 33);
    let v = v.wrapping_mul(0xc4ce_b9fe_1a85_ec53);
    v ^ (v >> 33)
}
300
/// Computes the stable key for a source location, optionally recording debug
/// info so key collisions can be reported with both call sites.
pub fn location_key(file: &str, line: u32, column: u32) -> Key {
    let key = source_location_key(file, line, column);
    // Tests always record; non-test debug builds record only when the
    // CRANPOSE_LOCATION_KEY_DIAGNOSTICS env flag enables diagnostics.
    #[cfg(test)]
    register_location_key_debug_info(key, file, line, column);
    #[cfg(all(debug_assertions, not(test)))]
    if location_key_diagnostics_enabled() {
        register_location_key_debug_info(key, file, line, column);
    }
    key
}
311
/// Stable identifier for a slot in the slot table.
///
/// Anchors provide positional stability: they maintain their identity even when
/// the slot table is reorganized (e.g., during conditional rendering or group moves).
/// This prevents effect states from being prematurely removed during recomposition.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
pub struct AnchorId {
    // 0 doubles as "no anchor" (see `INVALID` and `is_valid`).
    id: u32,
    // NOTE(review): only 0 (invalid) and 1 (new) are assigned in this file;
    // confirm where generations are bumped elsewhere.
    generation: u32,
}
322
impl AnchorId {
    /// Invalid anchor that represents no anchor.
    pub(crate) const INVALID: AnchorId = AnchorId {
        id: 0,
        generation: 0,
    };

    /// Builds an anchor for slot index `id`; the index is narrowed with
    /// `checked_usize_to_u32` (presumably failing loudly on overflow —
    /// confirm in `slot`). Generations start at 1.
    pub(crate) fn new(id: usize) -> Self {
        Self {
            id: crate::slot::checked_usize_to_u32(id, "anchor id"),
            generation: 1,
        }
    }

    /// Check if this anchor is valid (non-zero).
    pub fn is_valid(&self) -> bool {
        self.id != 0
    }
}
342
/// Process-unique identifier of a recompose scope (see `next_scope_id`).
pub(crate) type ScopeId = usize;
/// Process-unique key allocated by `next_local_key`.
type LocalKey = usize;
pub(crate) type FrameCallbackId = u64;
/// Immutable, cheaply-clonable snapshot of the composition-local stack.
type LocalStackSnapshot = Rc<Vec<composer::LocalContext>>;
347
thread_local! {
    /// Shared empty stack so scopes without locals avoid a fresh allocation.
    static EMPTY_LOCAL_STACK: LocalStackSnapshot = Rc::new(Vec::new());
    /// Scope id → human-readable label (used by the debug_trace helpers).
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_LABELS: RefCell<HashMap<usize, &'static str>> = RefCell::new(HashMap::default());
    /// Scope id → set of invalidation-source descriptions.
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_INVALIDATION_SOURCES: RefCell<HashMap<usize, HashSet<String>>> =
        RefCell::new(HashMap::default());
    #[cfg(all(test, debug_assertions))]
    static DEBUG_SCOPE_TRACKING_OVERRIDE: Cell<Option<bool>> = const { Cell::new(None) };
}
358
// Process-wide monotonic id sources, plus a live counter incremented in
// `RecomposeScopeInner::new` and decremented in its `Drop`.
static NEXT_SCOPE_ID: AtomicUsize = AtomicUsize::new(1);
static NEXT_LOCAL_KEY: AtomicUsize = AtomicUsize::new(1);
static LIVE_RECOMPOSE_SCOPE_COUNT: AtomicUsize = AtomicUsize::new(0);
362
363fn next_scope_id() -> ScopeId {
364    NEXT_SCOPE_ID.fetch_add(1, Ordering::Relaxed)
365}
366
367fn next_local_key() -> LocalKey {
368    NEXT_LOCAL_KEY.fetch_add(1, Ordering::Relaxed)
369}
370
371fn empty_local_stack() -> LocalStackSnapshot {
372    EMPTY_LOCAL_STACK.with(Rc::clone)
373}
374
/// Recompose entry point stored on a scope: a plain fn pointer when there are
/// no captures, otherwise a boxed closure.
enum RecomposeCallback {
    Static(fn(&Composer)),
    Dynamic(Box<dyn FnMut(&Composer) + 'static>),
}
379
/// Shared state behind a [`RecomposeScope`] handle.
pub(crate) struct RecomposeScopeInner {
    /// Process-unique id (see `next_scope_id`).
    id: ScopeId,
    runtime: RuntimeHandle,
    /// Dirty bit: the scope needs recomposition.
    invalid: Cell<bool>,
    /// Whether the scope is currently registered in the runtime's
    /// invalid-scope queue.
    enqueued: Cell<bool>,
    /// Inactive scopes keep their dirty bit but are not enqueued.
    active: Cell<bool>,
    composed_once: Cell<bool>,
    /// Set by `force_reuse` to request a recompose on the following pass.
    pending_recompose: Cell<bool>,
    force_reuse: Cell<bool>,
    force_recompose: Cell<bool>,
    retention_mode: Cell<RetentionMode>,
    parent_hint: Cell<Option<NodeId>>,
    /// Callback invoked to recompose this scope (taken before running).
    recompose: RefCell<Option<RecomposeCallback>>,
    /// Weak to avoid reference cycles through the scope tree.
    parent_scope: RefCell<Option<Weak<RecomposeScopeInner>>>,
    /// Composition locals captured when the scope last composed.
    local_stack: RefCell<LocalStackSnapshot>,
    /// 0 means "no slot storage recorded" (see `slots_storage_key`).
    slots_storage_key: Cell<usize>,
    slots_runtime_state: RefCell<Option<std::rc::Weak<crate::composer::ComposerRuntimeState>>>,
    /// State ids this scope reads; unregistered in `Drop`.
    state_subscriptions: RefCell<HashSet<StateId>>,
}
399
impl RecomposeScopeInner {
    /// Allocates a fresh scope state bound to `runtime`.
    ///
    /// Increments the global live-scope counter; the matching decrement is
    /// in this type's `Drop` impl.
    fn new(runtime: RuntimeHandle) -> Self {
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_add(1, Ordering::Relaxed);
        Self {
            id: next_scope_id(),
            runtime,
            invalid: Cell::new(false),
            enqueued: Cell::new(false),
            // Scopes start active; deactivation is explicit.
            active: Cell::new(true),
            composed_once: Cell::new(false),
            pending_recompose: Cell::new(false),
            force_reuse: Cell::new(false),
            force_recompose: Cell::new(false),
            retention_mode: Cell::new(RetentionMode::DisposeWhenInactive),
            parent_hint: Cell::new(None),
            recompose: RefCell::new(None),
            parent_scope: RefCell::new(None),
            local_stack: RefCell::new(empty_local_stack()),
            slots_storage_key: Cell::new(0),
            slots_runtime_state: RefCell::new(None),
            state_subscriptions: RefCell::new(HashSet::default()),
        }
    }
}
424
impl Drop for RecomposeScopeInner {
    fn drop(&mut self) {
        // Mirror the increment in `RecomposeScopeInner::new`.
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_sub(1, Ordering::Relaxed);
        // Release every state subscription this scope registered.
        let subscriptions = std::mem::take(self.state_subscriptions.get_mut());
        for state_id in subscriptions {
            self.runtime.unregister_state_scope(state_id, self.id);
        }
        #[cfg(debug_assertions)]
        {
            // try_with: the thread-local maps may already be destroyed during
            // thread teardown; losing the debug entries then is fine.
            let _ = DEBUG_SCOPE_LABELS.try_with(|labels| {
                labels.borrow_mut().remove(&self.id);
            });
            let _ = DEBUG_SCOPE_INVALIDATION_SOURCES.try_with(|sources| {
                sources.borrow_mut().remove(&self.id);
            });
        }
        // If the scope dies while queued for recomposition, tell the runtime
        // so it does not keep a stale queue entry.
        if self.enqueued.replace(false) {
            self.runtime.mark_scope_recomposed(self.id);
        }
    }
}
446
/// Size snapshot of the runtime's recompose-scope registry, for debugging.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct RecomposeScopeRegistryDebugStats {
    pub len: usize,
    pub capacity: usize,
}
452
/// Cheap, clonable handle to a recompose scope; clones share one inner state.
#[derive(Clone)]
pub struct RecomposeScope {
    inner: Rc<RecomposeScopeInner>,
}
457
impl PartialEq for RecomposeScope {
    /// Identity equality: two handles are equal iff they share the same
    /// inner allocation.
    fn eq(&self, other: &Self) -> bool {
        Rc::ptr_eq(&self.inner, &other.inner)
    }
}

impl Eq for RecomposeScope {}

impl Hash for RecomposeScope {
    /// Hashes the unique scope id — consistent with `PartialEq` because each
    /// inner allocation gets a distinct id from `next_scope_id`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id().hash(state);
    }
}
471
impl RecomposeScope {
    /// Wraps a freshly allocated inner scope bound to `runtime`.
    fn new(runtime: RuntimeHandle) -> Self {
        Self {
            inner: Rc::new(RecomposeScopeInner::new(runtime)),
        }
    }

    /// Weak handle handed to the runtime's invalid-scope registry so the
    /// registry does not keep dead scopes alive.
    pub(crate) fn downgrade(&self) -> Weak<RecomposeScopeInner> {
        Rc::downgrade(&self.inner)
    }

    pub fn id(&self) -> ScopeId {
        self.inner.id
    }

    pub fn is_invalid(&self) -> bool {
        self.inner.invalid.get()
    }

    pub fn is_active(&self) -> bool {
        self.inner.active.get()
    }

    /// Remembers that this scope reads `state_id`; released in
    /// `RecomposeScopeInner::drop`.
    fn record_state_subscription(&self, state_id: StateId) {
        self.inner.state_subscriptions.borrow_mut().insert(state_id);
    }

    /// Marks the scope dirty and, if active and not already queued,
    /// registers it with the runtime for recomposition.
    fn invalidate(&self) {
        self.inner.invalid.set(true);
        if !self.inner.active.get() {
            // Inactive scopes stay dirty; `reactivate` re-enqueues them.
            return;
        }
        if !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// Clears the dirty/force flags after a recomposition; when a recompose
    /// was requested mid-pass (`pending_recompose`), re-invalidates.
    fn mark_recomposed(&self) {
        self.inner.invalid.set(false);
        self.inner.force_reuse.set(false);
        self.inner.force_recompose.set(false);
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
        let pending = self.inner.pending_recompose.replace(false);
        if pending {
            if self.inner.active.get() {
                self.invalidate();
            } else {
                // Keep the dirty bit so reactivation triggers recomposition.
                self.inner.invalid.set(true);
            }
        }
    }

    fn set_recompose(&self, callback: Box<dyn FnMut(&Composer) + 'static>) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Dynamic(callback));
    }

    fn set_recompose_fn(&self, callback: fn(&Composer)) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Static(callback));
    }

    /// Returns true if a callback was found and executed, false if no callback was registered.
    fn run_recompose(&self, composer: &Composer) -> bool {
        // Take the callback out first so the RefCell borrow is released
        // before the callback (which may re-enter the scope) runs.
        let callback = self.inner.recompose.borrow_mut().take();
        if let Some(callback) = callback {
            match callback {
                RecomposeCallback::Static(callback) => callback(composer),
                RecomposeCallback::Dynamic(mut callback) => callback(composer),
            }
            true
        } else {
            false
        }
    }

    fn has_recompose_callback(&self) -> bool {
        self.inner.recompose.borrow().is_some()
    }

    /// Captures the composition-local stack in effect for this scope.
    fn snapshot_locals(&self, stack: LocalStackSnapshot) {
        *self.inner.local_stack.borrow_mut() = stack;
    }

    fn local_stack(&self) -> LocalStackSnapshot {
        self.inner.local_stack.borrow().clone()
    }

    fn set_parent_hint(&self, parent: Option<NodeId>) {
        self.inner.parent_hint.set(parent);
    }

    /// Stores a weak link to the parent scope (weak to avoid Rc cycles).
    fn set_parent_scope(&self, parent: Option<RecomposeScope>) {
        *self.inner.parent_scope.borrow_mut() = parent.map(|scope| scope.downgrade());
    }

    fn parent_scope(&self) -> Option<RecomposeScope> {
        self.inner
            .parent_scope
            .borrow()
            .as_ref()
            .and_then(Weak::upgrade)
            .map(|inner| RecomposeScope { inner })
    }

    /// Walks up the parent chain to the nearest ancestor that still has a
    /// recompose callback registered.
    fn callback_promotion_target(&self) -> Option<RecomposeScope> {
        let mut current = self.parent_scope();
        while let Some(scope) = current {
            if scope.has_recompose_callback() {
                return Some(scope);
            }
            current = scope.parent_scope();
        }
        None
    }

    fn parent_hint(&self) -> Option<NodeId> {
        self.inner.parent_hint.get()
    }

    /// Records which slot storage (key + runtime state) owns this scope's slots.
    fn set_slots_host(&self, host: &Rc<SlotsHost>) {
        self.inner.slots_storage_key.set(host.storage_key());
        *self.inner.slots_runtime_state.borrow_mut() =
            host.runtime_state().map(|state| Rc::downgrade(&state));
    }

    /// Key 0 doubles as "no storage recorded".
    pub(crate) fn slots_storage_key(&self) -> Option<usize> {
        let key = self.inner.slots_storage_key.get();
        (key != 0).then_some(key)
    }

    pub(crate) fn slots_runtime_state(&self) -> Option<Rc<crate::composer::ComposerRuntimeState>> {
        self.inner
            .slots_runtime_state
            .borrow()
            .as_ref()
            .and_then(std::rc::Weak::upgrade)
    }

    /// Deactivates the scope; a queued invalidation is withdrawn from the
    /// runtime, but the dirty bit is kept (see `reactivate`).
    pub fn deactivate(&self) {
        if !self.inner.active.replace(false) {
            return;
        }
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
    }

    /// Reactivates the scope and re-enqueues it if it went dirty while inactive.
    pub fn reactivate(&self) {
        if self.inner.active.replace(true) {
            return;
        }
        if self.inner.invalid.get() && !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// Requests that the next pass reuse this scope's output without
    /// recomposing; `pending_recompose` schedules a recompose afterwards
    /// (consumed in `mark_recomposed`).
    pub fn force_reuse(&self) {
        self.inner.force_reuse.set(true);
        self.inner.force_recompose.set(false);
        self.inner.pending_recompose.set(true);
    }

    /// Requests an unconditional recompose on the next pass.
    pub fn force_recompose(&self) {
        self.inner.force_recompose.set(true);
        self.inner.force_reuse.set(false);
        self.inner.pending_recompose.set(false);
    }

    pub(crate) fn set_retention_mode(&self, mode: RetentionMode) {
        self.inner.retention_mode.set(mode);
    }

    pub(crate) fn retention_mode(&self) -> RetentionMode {
        self.inner.retention_mode.get()
    }

    /// Consumes the one-shot force flags: `force_recompose` wins over
    /// `force_reuse`, which wins over the plain dirty bit.
    pub fn should_recompose(&self) -> bool {
        if self.inner.force_recompose.replace(false) {
            self.inner.force_reuse.set(false);
            return true;
        }
        if self.inner.force_reuse.replace(false) {
            return false;
        }
        self.is_invalid()
    }

    pub fn has_composed_once(&self) -> bool {
        self.inner.composed_once.get()
    }

    fn mark_composed_once(&self) {
        self.inner.composed_once.set(true);
    }
}
672
#[cfg(test)]
impl RecomposeScope {
    /// Test-only constructor exposing the private `new`.
    pub(crate) fn new_for_test(runtime: RuntimeHandle) -> Self {
        Self::new(runtime)
    }
}
679
/// Options controlling how a scope is treated on its next recomposition.
#[derive(Debug, Clone, Copy, Default)]
pub struct RecomposeOptions {
    /// Reuse previous output without recomposing (see `RecomposeScope::force_reuse`).
    pub force_reuse: bool,
    /// Recompose unconditionally (see `RecomposeScope::force_recompose`).
    pub force_recompose: bool,
    pub retention: RetentionMode,
}
686
687#[derive(Debug, Clone, PartialEq, Eq)]
688pub enum NodeError {
689    Missing { id: NodeId },
690    TypeMismatch { id: NodeId, expected: &'static str },
691    MissingContext { id: NodeId, reason: &'static str },
692    AlreadyExists { id: NodeId },
693}
694
695impl std::fmt::Display for NodeError {
696    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
697        match self {
698            NodeError::Missing { id } => write!(f, "node {id} missing"),
699            NodeError::TypeMismatch { id, expected } => {
700                write!(f, "node {id} type mismatch; expected {expected}")
701            }
702            NodeError::MissingContext { id, reason } => {
703                write!(f, "missing context for node {id}: {reason}")
704            }
705            NodeError::AlreadyExists { id } => {
706                write!(f, "node {id} already exists")
707            }
708        }
709    }
710}
711
712impl std::error::Error for NodeError {}
713
714pub use subcompose::{
715    ContentTypeReusePolicy, DefaultSlotReusePolicy, SlotId, SlotReusePolicy, SubcomposeState,
716};
717
/// Pipeline phase of a frame: composition, then measure, then layout.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Phase {
    Compose,
    Measure,
    Layout,
}
724
725pub use composer_context::with_composer as with_current_composer;
726
727#[allow(non_snake_case)]
728pub fn withCurrentComposer<R>(f: impl FnOnce(&Composer) -> R) -> R {
729    composer_context::with_composer(f)
730}
731
732fn with_current_composer_opt<R>(f: impl FnOnce(&Composer) -> R) -> Option<R> {
733    composer_context::try_with_composer(f)
734}
735
736#[track_caller]
737pub fn with_key<K: Hash>(key: &K, content: impl FnOnce()) {
738    let seed = explicit_group_key_seed(key, std::panic::Location::caller());
739    with_current_composer(|composer| composer.with_group_seed(seed, |_| content()));
740}
741
742#[allow(non_snake_case)]
743pub fn withKey<K: Hash>(key: &K, content: impl FnOnce()) {
744    with_key(key, content)
745}
746
747#[derive(Default)]
748struct DisposableEffectState {
749    key: Option<Key>,
750    cleanup: Option<Box<dyn FnOnce()>>,
751}
752
753impl DisposableEffectState {
754    fn should_run(&self, key: Key) -> bool {
755        match self.key {
756            Some(current) => current != key,
757            None => true,
758        }
759    }
760
761    fn set_key(&mut self, key: Key) {
762        self.key = Some(key);
763    }
764
765    fn set_cleanup(&mut self, cleanup: Option<Box<dyn FnOnce()>>) {
766        self.cleanup = cleanup;
767    }
768
769    fn run_cleanup(&mut self) {
770        if let Some(cleanup) = self.cleanup.take() {
771            cleanup();
772        }
773    }
774}
775
776impl Drop for DisposableEffectState {
777    fn drop(&mut self) {
778        self.run_cleanup();
779    }
780}
781
/// Receiver handed to a `DisposableEffect` body for registering cleanup.
#[derive(Clone, Copy, Debug, Default)]
pub struct DisposableEffectScope;

/// Result of a `DisposableEffect` body, carrying an optional cleanup.
#[derive(Default)]
pub struct DisposableEffectResult {
    cleanup: Option<Box<dyn FnOnce()>>,
}

impl DisposableEffectScope {
    /// Registers `cleanup` to run when the effect is disposed.
    pub fn on_dispose(&self, cleanup: impl FnOnce() + 'static) -> DisposableEffectResult {
        DisposableEffectResult::new(cleanup)
    }
}

impl DisposableEffectResult {
    /// Wraps a cleanup callback.
    pub fn new(cleanup: impl FnOnce() + 'static) -> Self {
        let boxed: Box<dyn FnOnce()> = Box::new(cleanup);
        Self { cleanup: Some(boxed) }
    }

    /// Consumes the result, yielding the cleanup callback (if any).
    fn into_cleanup(self) -> Option<Box<dyn FnOnce()>> {
        self.cleanup
    }
}
807
808#[allow(non_snake_case)]
809pub fn SideEffect(effect: impl FnOnce() + 'static) {
810    with_current_composer(|composer| composer.register_side_effect(effect));
811}
812
/// Implementation behind the [`DisposableEffect!`] macro.
///
/// `group_key` is the call site's location key; `keys` are hashed and the
/// effect re-runs (after the previous cleanup) whenever that hash changes.
pub fn __disposable_effect_impl<K, F>(group_key: Key, keys: K, effect: F)
where
    K: Hash,
    F: FnOnce(DisposableEffectScope) -> DisposableEffectResult + 'static,
{
    // Create a group using the caller's location to ensure each DisposableEffect
    // gets its own slot table entry, even in conditional branches
    with_current_composer(|composer| {
        composer.with_group(group_key, |composer| {
            let key_hash = hash_key(&keys);
            let state = composer.remember_effect(DisposableEffectState::default);
            if state.with(|state| state.should_run(key_hash)) {
                // Dispose the previous run and record the new key now; the
                // effect itself executes later via the side-effect queue.
                state.update(|state| {
                    state.run_cleanup();
                    state.set_key(key_hash);
                });
                let state_for_effect = state.clone();
                // FnOnce smuggled through an FnMut side-effect slot via Option::take.
                let mut effect_opt = Some(effect);
                composer.register_side_effect(move || {
                    if let Some(effect) = effect_opt.take() {
                        let result = effect(DisposableEffectScope);
                        state_for_effect.update(|state| state.set_cleanup(result.into_cleanup()));
                    }
                });
            }
        });
    });
}
841
/// Runs a disposable effect keyed on `$keys`.
///
/// The call site's file/line/column form the group key; the effect re-runs
/// (after invoking the previous cleanup) whenever the hash of `$keys`
/// changes. The effect body receives a [`DisposableEffectScope`] and returns
/// a [`DisposableEffectResult`] carrying the cleanup.
#[macro_export]
macro_rules! DisposableEffect {
    ($keys:expr, $effect:expr) => {
        $crate::__disposable_effect_impl(
            $crate::location_key(file!(), line!(), column!()),
            $keys,
            $effect,
        )
    };
}
852
/// Clones a list of bindings before evaluating a body, typically so the body
/// (a closure) can move the clones instead of the originals.
///
/// `clone_captures!(a, b = expr; body)` expands to fresh `let` bindings
/// (`a.clone()`, `(expr).clone()`) followed by `body`.
#[macro_export]
macro_rules! clone_captures {
    ($($alias:ident $(= $value:expr)?),+ $(,)?; $body:expr) => {{
        $(let $alias = $crate::clone_captures!(@clone $alias $(= $value)?);)+
        $body
    }};
    (@clone $alias:ident = $value:expr) => {
        ($value).clone()
    };
    (@clone $alias:ident) => {
        $alias.clone()
    };
}
866
867pub fn with_node_mut<N: Node + 'static, R>(
868    id: NodeId,
869    f: impl FnOnce(&mut N) -> R,
870) -> Result<R, NodeError> {
871    with_current_composer(|composer| composer.with_node_mut(id, f))
872}
873
874pub fn push_parent(id: NodeId) {
875    with_current_composer(|composer| composer.push_parent(id));
876}
877
878pub fn pop_parent() {
879    with_current_composer(|composer| composer.pop_parent());
880}
881
882// ═══════════════════════════════════════════════════════════════════════════
883// Public slot storage helper types
884// ═══════════════════════════════════════════════════════════════════════════
885
886pub trait Node: Any {
887    fn mount(&mut self) {}
888    fn update(&mut self) {}
889    fn unmount(&mut self) {}
890    fn insert_child(&mut self, _child: NodeId) {}
891    fn remove_child(&mut self, _child: NodeId) {}
892    fn move_child(&mut self, _from: usize, _to: usize) {}
893    fn update_children(&mut self, _children: &[NodeId]) {}
894    fn children(&self) -> Vec<NodeId> {
895        Vec::new()
896    }
897    /// Copies child IDs into the provided scratch buffer without allocating a
898    /// fresh container for every traversal.
899    fn collect_children_into(&self, out: &mut SmallVec<[NodeId; 8]>) {
900        out.clear();
901        out.extend(self.children());
902    }
903    /// Called after the node is created to record its own ID.
904    /// Useful for nodes that need to store their ID for later operations.
905    fn set_node_id(&mut self, _id: NodeId) {}
906    /// Called when this node is attached to a parent.
907    /// Nodes with parent tracking should set their parent reference here.
908    fn on_attached_to_parent(&mut self, _parent: NodeId) {}
909    /// Called when this node is removed from its parent.
910    /// Nodes with parent tracking should clear their parent reference here.
911    fn on_removed_from_parent(&mut self) {}
912    /// Get this node's parent ID (for nodes that track parents).
913    /// Returns None if node has no parent or doesn't track parents.
914    fn parent(&self) -> Option<NodeId> {
915        None
916    }
917    /// Mark this node as needing layout (for nodes with dirty flags).
918    /// Called during bubbling to propagate dirtiness up the tree.
919    fn mark_needs_layout(&self) {}
920    /// Check if this node needs layout (for nodes with dirty flags).
921    fn needs_layout(&self) -> bool {
922        false
923    }
924    /// Mark this node as needing measure (size may have changed).
925    /// Called during bubbling when children are added/removed.
926    fn mark_needs_measure(&self) {}
    /// Check if this node needs measure (for nodes with dirty flags).
    /// Default: `false` (never reports dirty).
    fn needs_measure(&self) -> bool {
        false
    }
    /// Mark this node as needing semantics recomputation.
    /// Takes `&self`; see `mark_needs_layout` for the interior-mutability
    /// assumption. Default: no-op.
    fn mark_needs_semantics(&self) {}
    /// Check if this node needs semantics recomputation.
    /// Default: `false` (never reports dirty).
    fn needs_semantics(&self) -> bool {
        false
    }
    /// Set parent reference for dirty flag bubbling ONLY.
    /// This is a minimal version of on_attached_to_parent that doesn't trigger
    /// registry updates or other side effects. Used during measurement when we
    /// need to establish parent connections for bubble_measure_dirty without
    /// causing the full attachment lifecycle.
    ///
    /// Default: delegates to on_attached_to_parent for backward compatibility.
    /// Override this when `on_attached_to_parent` has side effects beyond
    /// recording the parent reference.
    fn set_parent_for_bubbling(&mut self, parent: NodeId) {
        self.on_attached_to_parent(parent);
    }
947
    /// Returns a recycle pool key when this node supports shell reuse.
    /// Default: `None` (no shell reuse for this node type).
    fn recycle_key(&self) -> Option<TypeId> {
        None
    }
952
    /// Bounds how many recyclable shells of this node type should be retained.
    /// Default: `None` (no per-type bound requested; the applier-side fallback
    /// is outside this excerpt — TODO confirm).
    fn recycle_pool_limit(&self) -> Option<usize> {
        None
    }
957
    /// Clears live attachments before the node shell enters a recycle pool.
    /// Default: no-op.
    fn prepare_for_recycle(&mut self) {}
960
    /// Optionally provides a compact replacement box for this recycled shell.
    ///
    /// Returning `Some` lets nodes move pooled survivors onto fresh compact
    /// storage so the recycle pool does not pin large spike-era allocations.
    /// Default: `None` (keep the existing box as-is).
    fn rehouse_for_recycle(&self) -> Option<Box<dyn Node>> {
        None
    }
968
    /// Optionally moves a live node onto a fresh box during applier compaction.
    ///
    /// This is used after large-majority teardowns where a small surviving live
    /// tree can otherwise pin allocator pages from a much larger spike-era node
    /// population. Implementations must preserve the node's live state.
    /// Default: `None` (no compaction performed).
    fn rehouse_for_live_compaction(&mut self) -> Option<Box<dyn Node>> {
        None
    }
977
    /// Returns the node-owned heap retained beyond the node's own box allocation.
    /// Default: `0` (no additional heap reported).
    fn debug_heap_bytes(&self) -> usize {
        0
    }
982}
983
/// Unified API for bubbling layout dirty flags from a node to the root (Applier context).
///
/// This is the canonical function for dirty bubbling during the apply phase (structural changes).
/// Call this after mutations like insert/remove/move that happen during apply.
///
/// # Behavior
/// 1. Marks the starting node as needing layout
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Skips re-marking ancestors that are already dirty (the walk itself still
///    continues — see `bubble_layout_dirty_applier`)
/// 4. Stops at the root (node with no parent) or at a missing node
///
/// # Performance
/// O(height) of the tree: every ancestor is visited, although already-dirty
/// ancestors are not re-marked.
///
/// # Usage
/// - Call from composer mutations (insert/remove/move) during apply phase
/// - Call from applier-level operations that modify the tree structure
pub fn bubble_layout_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_layout_dirty_applier(applier, node_id);
}
1005
/// Unified API for bubbling measure dirty flags from a node to the root (Applier context).
///
/// Call this when a node's size may have changed (children added/removed, modifier changed).
/// This ensures that measure_layout will increment the cache epoch and re-measure the subtree.
///
/// # Behavior
/// 1. Marks the starting node as needing measure
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Skips re-marking ancestors that are already dirty (the walk itself still
///    continues to the root)
/// 4. Stops at the root (node with no parent) or at a missing node
pub fn bubble_measure_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_measure_dirty_applier(applier, node_id);
}
1019
/// Unified API for bubbling semantics dirty flags from a node to the root (Applier context).
///
/// This mirrors [`bubble_layout_dirty`] but toggles semantics-specific dirty
/// flags instead of layout ones, allowing semantics updates to propagate during
/// the apply phase without forcing layout work. Like the layout variant, it
/// walks every ancestor and only skips re-marking already-dirty ones.
pub fn bubble_semantics_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_semantics_dirty_applier(applier, node_id);
}
1028
/// Schedules semantics bubbling for a node using the active composer if present.
///
/// This defers the work to the apply phase where we can safely mutate the
/// applier tree without re-entrantly borrowing the composer during composition.
/// The `try_` result is intentionally discarded: when no composer is active,
/// the invalidation is presumably dropped — confirm against
/// `composer_context::try_with_composer`.
pub fn queue_semantics_invalidation(node_id: NodeId) {
    let _ = composer_context::try_with_composer(|composer| {
        composer.enqueue_semantics_invalidation(node_id);
    });
}
1038
/// Unified API for bubbling layout dirty flags from a node to the root (Composer context).
///
/// This is the canonical function for dirty bubbling during composition (property changes).
/// Call this after property changes that happen during composition via with_node_mut.
///
/// # Behavior
/// 1. Marks the starting node as needing layout
/// 2. Walks up the parent chain, marking each ancestor
/// 3. Skips re-marking ancestors that are already dirty (the walk itself still
///    continues — see `bubble_layout_dirty_composer`)
/// 4. Stops at the root (node with no parent)
///
/// # Performance
/// O(height) of the tree: every ancestor is visited, although already-dirty
/// ancestors are not re-marked.
///
/// # Type Requirements
/// The node type N must implement Node (which includes mark_needs_layout, parent, etc.).
/// Typically this will be LayoutNode or similar layout-aware node types.
///
/// # Usage
/// - Call from property setters during composition (e.g., set_modifier, set_measure_policy)
/// - Call from widget composition when layout-affecting state changes
pub fn bubble_layout_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    bubble_layout_dirty_composer::<N>(node_id);
}
1064
1065/// Unified API for bubbling measure dirty flags from a node to the root during composition.
1066///
1067/// This queues a dirty-bubble command on the active composer so measure invalidation
1068/// runs during the apply phase, avoiding re-entrant applier borrows while widgets are
1069/// mutating nodes via `with_node_mut`.
1070pub fn bubble_measure_dirty_in_composer(node_id: NodeId) {
1071    with_current_composer(|composer| {
1072        composer.commands_mut().push(Command::BubbleDirty {
1073            node_id,
1074            bubble: DirtyBubble {
1075                layout: false,
1076                measure: true,
1077                semantics: false,
1078            },
1079        });
1080    });
1081}
1082
/// Unified API for bubbling semantics dirty flags from a node to the root (Composer context).
///
/// This mirrors [`bubble_layout_dirty_in_composer`] but routes through the semantics
/// dirty flag instead of the layout one. Modifier nodes can request semantics
/// invalidations without triggering measure/layout work, and the runtime can
/// query the root to determine whether the semantics tree needs rebuilding.
/// Delegates to `bubble_semantics_dirty_composer`, which runs synchronously
/// via `with_node_mut`.
pub fn bubble_semantics_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    bubble_semantics_dirty_composer::<N>(node_id);
}
1092
1093/// Internal implementation for applier-based bubbling.
1094fn bubble_layout_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1095    // First, mark the starting node dirty (critical!)
1096    // This ensures root gets marked even if it has no parent
1097    if let Ok(node) = applier.get_mut(node_id) {
1098        node.mark_needs_layout();
1099    }
1100
1101    // Then bubble up to ancestors
1102    loop {
1103        // Get parent of current node
1104        let parent_id = match applier.get_mut(node_id) {
1105            Ok(node) => node.parent(),
1106            Err(_) => None,
1107        };
1108
1109        match parent_id {
1110            Some(pid) => {
1111                // Mark parent as needing layout
1112                if let Ok(parent) = applier.get_mut(pid) {
1113                    let parent_already_dirty = parent.needs_layout();
1114                    if !parent_already_dirty {
1115                        parent.mark_needs_layout();
1116                    }
1117                    node_id = pid;
1118                } else {
1119                    break;
1120                }
1121            }
1122            None => break, // No parent, stop
1123        }
1124    }
1125}
1126
1127/// Internal implementation for applier-based bubbling of measure dirtiness.
1128fn bubble_measure_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1129    // First, mark the starting node as needing measure
1130    if let Ok(node) = applier.get_mut(node_id) {
1131        node.mark_needs_measure();
1132    }
1133
1134    // Then bubble up to ancestors
1135    loop {
1136        // Get parent of current node
1137        let parent_id = match applier.get_mut(node_id) {
1138            Ok(node) => node.parent(),
1139            Err(_) => None,
1140        };
1141
1142        match parent_id {
1143            Some(pid) => {
1144                // Mark parent as needing measure
1145                if let Ok(parent) = applier.get_mut(pid) {
1146                    if !parent.needs_measure() {
1147                        parent.mark_needs_measure();
1148                    }
1149                    node_id = pid;
1150                } else {
1151                    break;
1152                }
1153            }
1154            None => {
1155                break; // No parent, stop
1156            }
1157        }
1158    }
1159}
1160
1161/// Internal implementation for applier-based bubbling of semantics dirtiness.
1162fn bubble_semantics_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1163    if let Ok(node) = applier.get_mut(node_id) {
1164        node.mark_needs_semantics();
1165    }
1166
1167    loop {
1168        let parent_id = match applier.get_mut(node_id) {
1169            Ok(node) => node.parent(),
1170            Err(_) => None,
1171        };
1172
1173        match parent_id {
1174            Some(pid) => {
1175                if let Ok(parent) = applier.get_mut(pid) {
1176                    if !parent.needs_semantics() {
1177                        parent.mark_needs_semantics();
1178                    }
1179                    node_id = pid;
1180                } else {
1181                    break;
1182                }
1183            }
1184            None => break,
1185        }
1186    }
1187}
1188
1189/// Internal implementation for composer-based bubbling.
1190/// This uses with_node_mut and works during composition with a concrete node type.
1191/// The node type N must implement Node (which includes mark_needs_layout, parent, etc.).
1192fn bubble_layout_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1193    // Mark the starting node dirty
1194    let _ = with_node_mut(node_id, |node: &mut N| {
1195        node.mark_needs_layout();
1196    });
1197
1198    // Then bubble up to ancestors
1199    while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1200        let parent_id = pid;
1201
1202        // Mark parent as needing layout
1203        let advanced = with_node_mut(parent_id, |node: &mut N| {
1204            if !node.needs_layout() {
1205                node.mark_needs_layout();
1206            }
1207            true
1208        })
1209        .unwrap_or(false);
1210
1211        if advanced {
1212            node_id = parent_id;
1213        } else {
1214            break;
1215        }
1216    }
1217}
1218
1219/// Internal implementation for composer-based bubbling of semantics dirtiness.
1220fn bubble_semantics_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1221    // Mark the starting node semantics-dirty.
1222    let _ = with_node_mut(node_id, |node: &mut N| {
1223        node.mark_needs_semantics();
1224    });
1225
1226    while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1227        let parent_id = pid;
1228
1229        let advanced = with_node_mut(parent_id, |node: &mut N| {
1230            if !node.needs_semantics() {
1231                node.mark_needs_semantics();
1232            }
1233            true
1234        })
1235        .unwrap_or(false);
1236
1237        if advanced {
1238            node_id = parent_id;
1239        } else {
1240            break;
1241        }
1242    }
1243}
1244
impl dyn Node {
    /// Upcasts the trait object to `&mut dyn Any` so callers can downcast to
    /// the concrete node type. The coercion relies on `Any` being reachable as
    /// a supertrait of `Node` (trait header is outside this excerpt —
    /// NOTE(review): confirm).
    pub fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
1250
/// A node shell parked in an applier recycle pool together with the stable ID
/// it carried when it was recycled.
pub struct RecycledNode {
    /// ID the node had at recycle time; exposed via `stable_id()`.
    stable_id: NodeId,
    /// The boxed shell itself, possibly rehoused onto compact storage in `new`.
    node: Box<dyn Node>,
    /// Whether the shell came through the warm recycle path.
    warm_origin: bool,
}
1256
1257impl RecycledNode {
1258    fn new(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
1259        let node = node.rehouse_for_recycle().unwrap_or(node);
1260        Self {
1261            stable_id,
1262            node,
1263            warm_origin,
1264        }
1265    }
1266
1267    fn from_shell(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
1268        Self {
1269            stable_id,
1270            node,
1271            warm_origin,
1272        }
1273    }
1274
1275    pub fn stable_id(&self) -> NodeId {
1276        self.stable_id
1277    }
1278
1279    fn warm_origin(&self) -> bool {
1280        self.warm_origin
1281    }
1282
1283    fn set_warm_origin(&mut self, warm_origin: bool) {
1284        self.warm_origin = warm_origin;
1285    }
1286
1287    pub fn node_mut(&mut self) -> &mut dyn Node {
1288        self.node.as_mut()
1289    }
1290
1291    pub fn into_parts(self) -> (NodeId, Box<dyn Node>, bool) {
1292        (self.stable_id, self.node, self.warm_origin)
1293    }
1294}
1295
pub trait Applier: Any {
    /// Stores `node` and returns the ID it was assigned.
    fn create(&mut self, node: Box<dyn Node>) -> NodeId;
    /// Resolves `id` to a mutable node, or a `NodeError` when it is absent.
    fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError>;
    /// Removes the node stored under `id`.
    fn remove(&mut self, id: NodeId) -> Result<(), NodeError>;

    /// Returns the current generation for a node index.
    /// Generation is incremented when an index is reused from the freelist,
    /// preventing stale slot entries from matching recycled nodes.
    fn node_generation(&self, id: NodeId) -> u32;

    /// Inserts a node with a pre-assigned ID.
    ///
    /// This is used for virtual nodes whose IDs are allocated separately
    /// (e.g., via allocate_virtual_node_id()). Unlike `create()` which assigns
    /// a new ID, this method uses the provided ID.
    ///
    /// Returns Ok(()) if successful, or an error if the ID is already in use.
    fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError>;

    /// Borrows the concrete applier as `&dyn Any` for downcasting.
    fn as_any(&self) -> &dyn Any
    where
        Self: Sized,
    {
        self
    }

    /// Borrows the concrete applier as `&mut dyn Any` for downcasting.
    fn as_any_mut(&mut self) -> &mut dyn Any
    where
        Self: Sized,
    {
        self
    }

    /// Trim trailing tombstones/unused capacity after structural changes.
    /// Default: no-op.
    fn compact(&mut self) {}

    /// Returns a previously recycled node shell and its stable ID for the requested concrete type.
    /// Default: `None` (no recycle pool).
    fn take_recycled_node(&mut self, _key: TypeId) -> Option<RecycledNode> {
        None
    }

    /// Marks whether a reinserted recycled node originated from the warm recycle path.
    /// Default: no-op.
    fn set_recycled_node_origin(&mut self, _id: NodeId, _warm_origin: bool) {}

    /// Seeds a warm recyclable shell for future reuse without requiring a prior removal.
    /// Default: no-op (the shell is dropped).
    fn seed_recycled_node_shell(
        &mut self,
        _key: TypeId,
        _recycle_pool_limit: Option<usize>,
        _shell: Box<dyn Node>,
    ) {
    }

    /// Records that the current apply pass had to allocate a fresh recyclable shell for this type.
    /// Default: no-op.
    fn record_fresh_recyclable_creation(&mut self, _key: TypeId) {}

    /// Drops any recyclable shells that should not survive beyond the current apply pass.
    /// Default: no-op.
    fn clear_recycled_nodes(&mut self) {}
}
1355
/// Function-pointer updater invoked by `Command::UpdateTypedNode` on a resolved node.
type TypedNodeUpdate = fn(&mut dyn Node, NodeId) -> Result<(), NodeError>;
/// Boxed one-shot applier mutation executed by `Command::Callback`.
type CommandCallback = Box<dyn FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static>;
1358
/// Selects which dirty-flag families a command should bubble up the tree
/// when it is applied (see `DirtyBubble::apply`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) struct DirtyBubble {
    /// Run the layout bubbling pass.
    layout: bool,
    /// Run the measure bubbling pass.
    measure: bool,
    /// Run the semantics bubbling pass.
    semantics: bool,
}
1365
1366impl DirtyBubble {
1367    pub(crate) const LAYOUT_AND_MEASURE: Self = Self {
1368        layout: true,
1369        measure: true,
1370        semantics: false,
1371    };
1372
1373    pub(crate) const SEMANTICS: Self = Self {
1374        layout: false,
1375        measure: false,
1376        semantics: true,
1377    };
1378
1379    fn apply(self, applier: &mut dyn Applier, node_id: NodeId) {
1380        if self.layout {
1381            bubble_layout_dirty(applier, node_id);
1382        }
1383        if self.measure {
1384            bubble_measure_dirty(applier, node_id);
1385        }
1386        if self.semantics {
1387            bubble_semantics_dirty(applier, node_id);
1388        }
1389    }
1390}
1391
/// Deferred tree mutations recorded during composition and executed against
/// the applier in the apply phase (see `Command::apply`).
pub(crate) enum Command {
    /// Run the dirty-bubbling passes selected by `bubble`, starting at `node_id`.
    BubbleDirty {
        node_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Invoke a monomorphized updater on node `id` (missing nodes are tolerated).
    UpdateTypedNode {
        id: NodeId,
        updater: TypedNodeUpdate,
    },
    /// Unmount node `id` and remove it from the applier.
    RemoveNode {
        id: NodeId,
    },
    /// Record the node's own ID on it, then call its `mount` hook.
    MountNode {
        id: NodeId,
    },
    /// Attach `child_id` under `parent_id` (with reparenting) and bubble dirtiness.
    AttachChild {
        parent_id: NodeId,
        child_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Attach `child_id` under `parent_id`; it lands at `appended_index` and is
    /// then moved to `insert_index` when the two differ.
    InsertChild {
        parent_id: NodeId,
        child_id: NodeId,
        appended_index: usize,
        insert_index: usize,
        bubble: DirtyBubble,
    },
    /// Reorder an existing child of `parent_id` from `from_index` to `to_index`.
    MoveChild {
        parent_id: NodeId,
        from_index: usize,
        to_index: usize,
        bubble: DirtyBubble,
    },
    /// Remove `child_id` from `parent_id` (cleanup handled via `apply_remove_child`).
    RemoveChild {
        parent_id: NodeId,
        child_id: NodeId,
    },
    /// Detach `child_id` from `parent_id`, shielding it from deferred cleanup.
    DetachChild {
        parent_id: NodeId,
        child_id: NodeId,
    },
    /// Reconcile `parent_id`'s children to match `expected_children` exactly.
    SyncChildren {
        parent_id: NodeId,
        expected_children: ChildList,
    },
    /// Arbitrary deferred applier mutation.
    Callback(CommandCallback),
}
1439
/// A child detached during command application whose final cleanup is
/// postponed until the command finishes.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct DeferredChildCleanup {
    /// Node awaiting cleanup.
    child_id: NodeId,
    /// Generation the ID had when queued; guards against freelist ID reuse
    /// (see `Applier::node_generation`).
    generation: u32,
    /// Presumably whether the child was already unlinked from its parent —
    /// the consumer (`cleanup_detached_child`) is outside this excerpt.
    removed_from_parent: bool,
}
1446
/// Collects deferred child cleanups for one command application, with a
/// preserve list that vetoes cleanup for children that must stay alive.
#[derive(Default)]
struct DeferredChildCleanupQueue {
    /// Cleanups to run when the queue is flushed.
    pending: Vec<DeferredChildCleanup>,
    /// `(id, generation)` pairs exempt from cleanup.
    preserved: Vec<(NodeId, u32)>,
}
1452
1453impl DeferredChildCleanupQueue {
1454    fn push(&mut self, child_id: NodeId, generation: u32, removed_from_parent: bool) {
1455        if self
1456            .preserved
1457            .iter()
1458            .any(|&(preserved_id, preserved_generation)| {
1459                preserved_id == child_id && preserved_generation == generation
1460            })
1461        {
1462            return;
1463        }
1464        self.pending.push(DeferredChildCleanup {
1465            child_id,
1466            generation,
1467            removed_from_parent,
1468        });
1469    }
1470
1471    fn preserve(&mut self, child_id: NodeId, generation: u32) {
1472        if !self
1473            .preserved
1474            .iter()
1475            .any(|&(preserved_id, preserved_generation)| {
1476                preserved_id == child_id && preserved_generation == generation
1477            })
1478        {
1479            self.preserved.push((child_id, generation));
1480        }
1481        self.pending
1482            .retain(|cleanup| cleanup.child_id != child_id || cleanup.generation != generation);
1483    }
1484
1485    fn flush(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
1486        for cleanup in self.pending {
1487            cleanup_detached_child(applier, cleanup)?;
1488        }
1489        Ok(())
1490    }
1491}
1492
impl Command {
    /// Builds an `UpdateTypedNode` command with the monomorphized updater for `N`.
    pub(crate) fn update_node<N: Node + 'static>(id: NodeId) -> Self {
        Self::UpdateTypedNode {
            id,
            updater: update_typed_node::<N>,
        }
    }

    /// Wraps an arbitrary applier mutation as a `Callback` command.
    pub(crate) fn callback(
        callback: impl FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static,
    ) -> Self {
        Self::Callback(Box::new(callback))
    }

    /// Executes the command, then immediately flushes any child cleanups the
    /// command deferred.
    pub(crate) fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
        let mut deferred_cleanup = DeferredChildCleanupQueue::default();
        self.apply_with_cleanup(applier, &mut deferred_cleanup)?;
        deferred_cleanup.flush(applier)
    }

    /// Executes the command, recording detached-child cleanups into
    /// `deferred_cleanup` instead of running them inline.
    fn apply_with_cleanup(
        self,
        applier: &mut dyn Applier,
        deferred_cleanup: &mut DeferredChildCleanupQueue,
    ) -> Result<(), NodeError> {
        match self {
            Self::BubbleDirty { node_id, bubble } => {
                bubble.apply(applier, node_id);
                Ok(())
            }
            Self::UpdateTypedNode { id, updater } => {
                // A node that already disappeared is tolerated; other lookup
                // failures propagate.
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                updater(node, id)
            }
            Self::RemoveNode { id } => {
                // Let the node run its unmount hook before it is destroyed.
                if let Ok(node) = applier.get_mut(id) {
                    node.unmount();
                }
                match applier.remove(id) {
                    Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
                    Err(err) => Err(err),
                }
            }
            Self::MountNode { id } => {
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                // Record the node's own ID before the mount hook runs.
                node.set_node_id(id);
                node.mount();
                Ok(())
            }
            Self::AttachChild {
                parent_id,
                child_id,
                bubble,
            } => {
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::InsertChild {
                parent_id,
                child_id,
                appended_index,
                insert_index,
                bubble,
            } => {
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                // The child lands appended; only move it when the final slot
                // differs from where it landed.
                if insert_index != appended_index {
                    if let Ok(parent_node) = applier.get_mut(parent_id) {
                        parent_node.move_child(appended_index, insert_index);
                    }
                }
                Ok(())
            }
            Self::MoveChild {
                parent_id,
                from_index,
                to_index,
                bubble,
            } => {
                if let Ok(parent_node) = applier.get_mut(parent_id) {
                    parent_node.move_child(from_index, to_index);
                }
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::RemoveChild {
                parent_id,
                child_id,
            } => apply_remove_child(applier, parent_id, child_id, deferred_cleanup),
            Self::DetachChild {
                parent_id,
                child_id,
            } => {
                // Capture the generation before detaching so the preserve
                // entry cannot accidentally match a recycled ID.
                let generation = applier.node_generation(child_id);
                detach_child_from_parent(applier, parent_id, child_id)?;
                deferred_cleanup.preserve(child_id, generation);
                Ok(())
            }
            Self::SyncChildren {
                parent_id,
                expected_children,
            } => sync_children(applier, parent_id, &expected_children, deferred_cleanup),
            Self::Callback(callback) => callback(applier),
        }
    }
}
1608
/// Capacity of each tag chunk in `CommandQueue::chunks`.
const COMMAND_CHUNK_CAPACITY: usize = 1024;
/// Queue size presumed to trigger a flush of pending commands — the
/// enforcement site is outside this excerpt (NOTE(review): confirm).
const COMMAND_FLUSH_THRESHOLD: usize = COMMAND_CHUNK_CAPACITY * 4;
/// Child-ID list with inline capacity for the common small-fanout case.
type ChildList = SmallVec<[NodeId; 4]>;
/// Child counts at or below this presumably use a linear scan during child
/// sync — `sync_children` is outside this excerpt (NOTE(review): confirm).
const SMALL_CHILD_SYNC_LINEAR_THRESHOLD: usize = 8;
1613
/// Order-preserving discriminant stored in `CommandQueue`'s tag stream; the
/// matching payload lives in the per-kind vector of the same queue.
#[derive(Copy, Clone)]
enum CommandTag {
    BubbleDirty,
    UpdateTypedNode,
    RemoveNode,
    MountNode,
    AttachChild,
    InsertChild,
    MoveChild,
    RemoveChild,
    DetachChild,
    SyncChildren,
    Callback,
}
1628
/// Payload for `CommandTag::BubbleDirty`.
#[derive(Copy, Clone)]
struct BubbleDirtyCommand {
    node_id: NodeId,
    bubble: DirtyBubble,
}
1634
/// Payload for `CommandTag::UpdateTypedNode`.
#[derive(Copy, Clone)]
struct UpdateTypedNodeCommand {
    id: NodeId,
    updater: TypedNodeUpdate,
}
1640
/// Payload for `CommandTag::AttachChild`.
#[derive(Copy, Clone)]
struct AttachChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    bubble: DirtyBubble,
}
1647
/// Payload for `CommandTag::InsertChild`; mirrors `Command::InsertChild`
/// (append at `appended_index`, then move to `insert_index` if they differ).
#[derive(Copy, Clone)]
struct InsertChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    appended_index: usize,
    insert_index: usize,
    bubble: DirtyBubble,
}
1656
/// Payload for `CommandTag::MoveChild`.
#[derive(Copy, Clone)]
struct MoveChildCommand {
    parent_id: NodeId,
    from_index: usize,
    to_index: usize,
    bubble: DirtyBubble,
}
1664
/// Payload for `CommandTag::RemoveChild`.
#[derive(Copy, Clone)]
struct RemoveChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
}
1670
/// Payload for `CommandTag::DetachChild`.
#[derive(Copy, Clone)]
struct DetachChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
}
1676
/// Payload for `CommandTag::SyncChildren`; the expected child IDs live
/// flattened in `CommandQueue::sync_child_ids` at
/// `child_start..child_start + child_len`.
struct SyncChildrenCommand {
    parent_id: NodeId,
    child_start: usize,
    child_len: usize,
}
1682
/// Struct-of-arrays command buffer: an ordered stream of `CommandTag`s plus
/// one payload vector per command kind, each filled in tag order by `push`.
#[derive(Default)]
struct CommandQueue {
    /// Ordered tag stream, stored in fixed-capacity chunks
    /// (see `push_tag` and `COMMAND_CHUNK_CAPACITY`).
    chunks: Vec<Vec<CommandTag>>,
    /// Total number of tags recorded across all chunks.
    len: usize,
    bubble_dirty: Vec<BubbleDirtyCommand>,
    update_typed_nodes: Vec<UpdateTypedNodeCommand>,
    remove_nodes: Vec<NodeId>,
    mount_nodes: Vec<NodeId>,
    attach_children: Vec<AttachChildCommand>,
    insert_children: Vec<InsertChildCommand>,
    move_children: Vec<MoveChildCommand>,
    remove_children: Vec<RemoveChildCommand>,
    detach_children: Vec<DetachChildCommand>,
    sync_children: Vec<SyncChildrenCommand>,
    /// Flattened child-ID storage referenced by `SyncChildrenCommand` windows.
    sync_child_ids: Vec<NodeId>,
    callbacks: Vec<CommandCallback>,
}
1700
1701impl CommandQueue {
1702    fn push_tag(&mut self, tag: CommandTag) {
1703        let needs_chunk = self
1704            .chunks
1705            .last()
1706            .map(|chunk| chunk.len() == chunk.capacity())
1707            .unwrap_or(true);
1708        if needs_chunk {
1709            self.chunks.push(Vec::with_capacity(COMMAND_CHUNK_CAPACITY));
1710        }
1711        self.chunks
1712            .last_mut()
1713            .expect("command chunk should exist")
1714            .push(tag);
1715        self.len += 1;
1716    }
1717
    /// Records `command`: its payload goes onto the matching per-kind vector
    /// and a tag onto the ordered tag stream, preserving overall command
    /// order. Payloads are presumably consumed in the same order during
    /// replay — the drain logic is outside this excerpt.
    fn push(&mut self, command: Command) {
        match command {
            Command::BubbleDirty { node_id, bubble } => {
                self.bubble_dirty
                    .push(BubbleDirtyCommand { node_id, bubble });
                self.push_tag(CommandTag::BubbleDirty);
            }
            Command::UpdateTypedNode { id, updater } => {
                self.update_typed_nodes
                    .push(UpdateTypedNodeCommand { id, updater });
                self.push_tag(CommandTag::UpdateTypedNode);
            }
            Command::RemoveNode { id } => {
                self.remove_nodes.push(id);
                self.push_tag(CommandTag::RemoveNode);
            }
            Command::MountNode { id } => {
                self.mount_nodes.push(id);
                self.push_tag(CommandTag::MountNode);
            }
            Command::AttachChild {
                parent_id,
                child_id,
                bubble,
            } => {
                self.attach_children.push(AttachChildCommand {
                    parent_id,
                    child_id,
                    bubble,
                });
                self.push_tag(CommandTag::AttachChild);
            }
            Command::InsertChild {
                parent_id,
                child_id,
                appended_index,
                insert_index,
                bubble,
            } => {
                self.insert_children.push(InsertChildCommand {
                    parent_id,
                    child_id,
                    appended_index,
                    insert_index,
                    bubble,
                });
                self.push_tag(CommandTag::InsertChild);
            }
            Command::MoveChild {
                parent_id,
                from_index,
                to_index,
                bubble,
            } => {
                self.move_children.push(MoveChildCommand {
                    parent_id,
                    from_index,
                    to_index,
                    bubble,
                });
                self.push_tag(CommandTag::MoveChild);
            }
            Command::RemoveChild {
                parent_id,
                child_id,
            } => {
                self.remove_children.push(RemoveChildCommand {
                    parent_id,
                    child_id,
                });
                self.push_tag(CommandTag::RemoveChild);
            }
            Command::DetachChild {
                parent_id,
                child_id,
            } => {
                self.detach_children.push(DetachChildCommand {
                    parent_id,
                    child_id,
                });
                self.push_tag(CommandTag::DetachChild);
            }
            Command::SyncChildren {
                parent_id,
                expected_children,
            } => {
                // Flatten the child IDs into shared storage; the command keeps
                // only the (start, len) window into `sync_child_ids`.
                let child_start = self.sync_child_ids.len();
                let child_len = expected_children.len();
                self.sync_child_ids.extend(expected_children);
                self.sync_children.push(SyncChildrenCommand {
                    parent_id,
                    child_start,
                    child_len,
                });
                self.push_tag(CommandTag::SyncChildren);
            }
            Command::Callback(callback) => {
                self.callbacks.push(callback);
                self.push_tag(CommandTag::Callback);
            }
        }
    }
1820
    /// Total number of commands recorded so far.
    fn len(&self) -> usize {
        self.len
    }
1824
    /// Total tag capacity summed across every chunk.
    fn capacity(&self) -> usize {
        self.chunks.iter().map(Vec::capacity).sum()
    }
1828
1829    fn payload_len_bytes(&self) -> usize {
1830        self.bubble_dirty
1831            .len()
1832            .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1833            .saturating_add(
1834                self.update_typed_nodes
1835                    .len()
1836                    .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1837            )
1838            .saturating_add(
1839                self.remove_nodes
1840                    .len()
1841                    .saturating_mul(std::mem::size_of::<NodeId>()),
1842            )
1843            .saturating_add(
1844                self.mount_nodes
1845                    .len()
1846                    .saturating_mul(std::mem::size_of::<NodeId>()),
1847            )
1848            .saturating_add(
1849                self.attach_children
1850                    .len()
1851                    .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1852            )
1853            .saturating_add(
1854                self.insert_children
1855                    .len()
1856                    .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1857            )
1858            .saturating_add(
1859                self.move_children
1860                    .len()
1861                    .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1862            )
1863            .saturating_add(
1864                self.remove_children
1865                    .len()
1866                    .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1867            )
1868            .saturating_add(
1869                self.detach_children
1870                    .len()
1871                    .saturating_mul(std::mem::size_of::<DetachChildCommand>()),
1872            )
1873            .saturating_add(
1874                self.sync_children
1875                    .len()
1876                    .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1877            )
1878            .saturating_add(
1879                self.sync_child_ids
1880                    .len()
1881                    .saturating_mul(std::mem::size_of::<NodeId>()),
1882            )
1883            .saturating_add(
1884                self.callbacks
1885                    .len()
1886                    .saturating_mul(std::mem::size_of::<CommandCallback>()),
1887            )
1888    }
1889
1890    fn payload_capacity_bytes(&self) -> usize {
1891        self.bubble_dirty
1892            .capacity()
1893            .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1894            .saturating_add(
1895                self.update_typed_nodes
1896                    .capacity()
1897                    .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1898            )
1899            .saturating_add(
1900                self.remove_nodes
1901                    .capacity()
1902                    .saturating_mul(std::mem::size_of::<NodeId>()),
1903            )
1904            .saturating_add(
1905                self.mount_nodes
1906                    .capacity()
1907                    .saturating_mul(std::mem::size_of::<NodeId>()),
1908            )
1909            .saturating_add(
1910                self.attach_children
1911                    .capacity()
1912                    .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1913            )
1914            .saturating_add(
1915                self.insert_children
1916                    .capacity()
1917                    .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1918            )
1919            .saturating_add(
1920                self.move_children
1921                    .capacity()
1922                    .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1923            )
1924            .saturating_add(
1925                self.remove_children
1926                    .capacity()
1927                    .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1928            )
1929            .saturating_add(
1930                self.detach_children
1931                    .capacity()
1932                    .saturating_mul(std::mem::size_of::<DetachChildCommand>()),
1933            )
1934            .saturating_add(
1935                self.sync_children
1936                    .capacity()
1937                    .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1938            )
1939            .saturating_add(
1940                self.sync_child_ids
1941                    .capacity()
1942                    .saturating_mul(std::mem::size_of::<NodeId>()),
1943            )
1944            .saturating_add(
1945                self.callbacks
1946                    .capacity()
1947                    .saturating_mul(std::mem::size_of::<CommandCallback>()),
1948            )
1949    }
1950
    /// Drains the buffer and replays every recorded command against `applier`
    /// in the exact order the commands were pushed.
    ///
    /// `chunks` holds the ordered stream of `CommandTag`s; each tag pops the
    /// next payload from its matching per-variant queue, so tag order and
    /// payload order must stay in lock-step — the `expect`s and the trailing
    /// `debug_assert!`s enforce that invariant. Child removals are deferred
    /// through `DeferredChildCleanupQueue` and flushed once at the end.
    fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
        // One draining iterator per payload queue, consumed in push order.
        let mut bubble_dirty = self.bubble_dirty.into_iter();
        let mut update_typed_nodes = self.update_typed_nodes.into_iter();
        let mut remove_nodes = self.remove_nodes.into_iter();
        let mut mount_nodes = self.mount_nodes.into_iter();
        let mut attach_children = self.attach_children.into_iter();
        let mut insert_children = self.insert_children.into_iter();
        let mut move_children = self.move_children.into_iter();
        let mut remove_children = self.remove_children.into_iter();
        let mut detach_children = self.detach_children.into_iter();
        let mut sync_children_commands = self.sync_children.into_iter();
        // Flat arena of child IDs; SyncChildren payloads slice into it.
        let sync_child_ids = self.sync_child_ids;
        let mut callbacks = self.callbacks.into_iter();
        let mut deferred_cleanup = DeferredChildCleanupQueue::default();

        for chunk in self.chunks {
            for tag in chunk {
                match tag {
                    CommandTag::BubbleDirty => {
                        let BubbleDirtyCommand { node_id, bubble } =
                            bubble_dirty.next().expect("missing BubbleDirty payload");
                        Command::BubbleDirty { node_id, bubble }
                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::UpdateTypedNode => {
                        let UpdateTypedNodeCommand { id, updater } = update_typed_nodes
                            .next()
                            .expect("missing UpdateTypedNode payload");
                        Command::UpdateTypedNode { id, updater }
                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::RemoveNode => {
                        let id = remove_nodes.next().expect("missing RemoveNode payload");
                        Command::RemoveNode { id }
                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::MountNode => {
                        let id = mount_nodes.next().expect("missing MountNode payload");
                        Command::MountNode { id }
                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::AttachChild => {
                        let AttachChildCommand {
                            parent_id,
                            child_id,
                            bubble,
                        } = attach_children.next().expect("missing AttachChild payload");
                        Command::AttachChild {
                            parent_id,
                            child_id,
                            bubble,
                        }
                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::InsertChild => {
                        let InsertChildCommand {
                            parent_id,
                            child_id,
                            appended_index,
                            insert_index,
                            bubble,
                        } = insert_children.next().expect("missing InsertChild payload");
                        Command::InsertChild {
                            parent_id,
                            child_id,
                            appended_index,
                            insert_index,
                            bubble,
                        }
                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::MoveChild => {
                        let MoveChildCommand {
                            parent_id,
                            from_index,
                            to_index,
                            bubble,
                        } = move_children.next().expect("missing MoveChild payload");
                        Command::MoveChild {
                            parent_id,
                            from_index,
                            to_index,
                            bubble,
                        }
                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::RemoveChild => {
                        let RemoveChildCommand {
                            parent_id,
                            child_id,
                        } = remove_children.next().expect("missing RemoveChild payload");
                        Command::RemoveChild {
                            parent_id,
                            child_id,
                        }
                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::DetachChild => {
                        let DetachChildCommand {
                            parent_id,
                            child_id,
                        } = detach_children.next().expect("missing DetachChild payload");
                        Command::DetachChild {
                            parent_id,
                            child_id,
                        }
                        .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                    CommandTag::SyncChildren => {
                        let SyncChildrenCommand {
                            parent_id,
                            child_start,
                            child_len,
                        } = sync_children_commands
                            .next()
                            .expect("missing SyncChildren payload");
                        // Re-borrow the expected-children slice from the arena.
                        let expected_children =
                            &sync_child_ids[child_start..child_start + child_len];
                        sync_children(
                            applier,
                            parent_id,
                            expected_children,
                            &mut deferred_cleanup,
                        )?;
                    }
                    CommandTag::Callback => {
                        let callback = callbacks.next().expect("missing Callback payload");
                        Command::Callback(callback)
                            .apply_with_cleanup(applier, &mut deferred_cleanup)?;
                    }
                }
            }
        }

        // Every payload must have been claimed by exactly one tag.
        debug_assert!(bubble_dirty.next().is_none());
        debug_assert!(update_typed_nodes.next().is_none());
        debug_assert!(remove_nodes.next().is_none());
        debug_assert!(mount_nodes.next().is_none());
        debug_assert!(attach_children.next().is_none());
        debug_assert!(insert_children.next().is_none());
        debug_assert!(move_children.next().is_none());
        debug_assert!(remove_children.next().is_none());
        debug_assert!(detach_children.next().is_none());
        debug_assert!(sync_children_commands.next().is_none());
        debug_assert!(callbacks.next().is_none());
        // Tear down any children left orphaned by the commands above.
        deferred_cleanup.flush(applier)
    }
2098}
2099
2100fn update_typed_node<N: Node + 'static>(node: &mut dyn Node, id: NodeId) -> Result<(), NodeError> {
2101    let typed = node
2102        .as_any_mut()
2103        .downcast_mut::<N>()
2104        .ok_or(NodeError::TypeMismatch {
2105            id,
2106            expected: std::any::type_name::<N>(),
2107        })?;
2108    typed.update();
2109    Ok(())
2110}
2111
2112fn insert_child_with_reparenting(applier: &mut dyn Applier, parent_id: NodeId, child_id: NodeId) {
2113    let old_parent = applier
2114        .get_mut(child_id)
2115        .ok()
2116        .and_then(|node| node.parent());
2117    if let Some(old_parent_id) = old_parent {
2118        if old_parent_id != parent_id {
2119            if let Ok(old_parent_node) = applier.get_mut(old_parent_id) {
2120                old_parent_node.remove_child(child_id);
2121            }
2122            if let Ok(child_node) = applier.get_mut(child_id) {
2123                child_node.on_removed_from_parent();
2124            }
2125            bubble_layout_dirty(applier, old_parent_id);
2126            bubble_measure_dirty(applier, old_parent_id);
2127        }
2128    }
2129
2130    if let Ok(parent_node) = applier.get_mut(parent_id) {
2131        parent_node.insert_child(child_id);
2132    }
2133    if let Ok(child_node) = applier.get_mut(child_id) {
2134        child_node.on_attached_to_parent(parent_id);
2135    }
2136}
2137
2138fn apply_remove_child(
2139    applier: &mut dyn Applier,
2140    parent_id: NodeId,
2141    child_id: NodeId,
2142    deferred_cleanup: &mut DeferredChildCleanupQueue,
2143) -> Result<(), NodeError> {
2144    detach_child_from_parent(applier, parent_id, child_id)?;
2145
2146    let generation = applier.node_generation(child_id);
2147    let removed_from_parent = if let Ok(node) = applier.get_mut(child_id) {
2148        node.parent().is_none()
2149    } else {
2150        return Ok(());
2151    };
2152    deferred_cleanup.push(child_id, generation, removed_from_parent);
2153    Ok(())
2154}
2155
2156fn detach_child_from_parent(
2157    applier: &mut dyn Applier,
2158    parent_id: NodeId,
2159    child_id: NodeId,
2160) -> Result<(), NodeError> {
2161    if let Ok(parent_node) = applier.get_mut(parent_id) {
2162        parent_node.remove_child(child_id);
2163    }
2164    bubble_layout_dirty(applier, parent_id);
2165    bubble_measure_dirty(applier, parent_id);
2166
2167    if let Ok(node) = applier.get_mut(child_id) {
2168        match node.parent() {
2169            Some(existing_parent_id) if existing_parent_id == parent_id => {
2170                node.on_removed_from_parent();
2171            }
2172            None => {}
2173            Some(_) => return Ok(()),
2174        }
2175    } else {
2176        return Ok(());
2177    }
2178
2179    Ok(())
2180}
2181
2182fn cleanup_detached_child(
2183    applier: &mut dyn Applier,
2184    cleanup: DeferredChildCleanup,
2185) -> Result<(), NodeError> {
2186    if applier.node_generation(cleanup.child_id) != cleanup.generation {
2187        return Ok(());
2188    }
2189
2190    let parent_id = match applier.get_mut(cleanup.child_id) {
2191        Ok(node) => node.parent(),
2192        Err(NodeError::Missing { .. }) => return Ok(()),
2193        Err(err) => return Err(err),
2194    };
2195    if parent_id.is_some() {
2196        return Ok(());
2197    }
2198
2199    if let Ok(node) = applier.get_mut(cleanup.child_id) {
2200        if !cleanup.removed_from_parent {
2201            node.on_removed_from_parent();
2202        }
2203        node.unmount();
2204    }
2205    match applier.remove(cleanup.child_id) {
2206        Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
2207        Err(err) => Err(err),
2208    }
2209}
2210
2211fn remove_child_and_cleanup_now(
2212    applier: &mut dyn Applier,
2213    parent_id: NodeId,
2214    child_id: NodeId,
2215) -> Result<(), NodeError> {
2216    let mut deferred_cleanup = DeferredChildCleanupQueue::default();
2217    apply_remove_child(applier, parent_id, child_id, &mut deferred_cleanup)?;
2218    deferred_cleanup.flush(applier)
2219}
2220
2221fn collect_current_children(applier: &mut dyn Applier, parent_id: NodeId) -> ChildList {
2222    let mut scratch = SmallVec::<[NodeId; 8]>::new();
2223    if let Ok(node) = applier.get_mut(parent_id) {
2224        node.collect_children_into(&mut scratch);
2225    }
2226    let mut current = ChildList::new();
2227    current.extend(scratch);
2228    current
2229}
2230
/// Diffs `parent_id`'s current children against `expected_children`, emits the
/// remove/move/insert commands needed to make them match, then repairs any
/// stale parent links via `reconcile_children`.
///
/// Two strategies: a linear scan for small lists and a position-map diff for
/// larger ones. Removed children are queued on `deferred_cleanup` rather than
/// destroyed immediately.
fn sync_children(
    applier: &mut dyn Applier,
    parent_id: NodeId,
    expected_children: &[NodeId],
    deferred_cleanup: &mut DeferredChildCleanupQueue,
) -> Result<(), NodeError> {
    let mut current = collect_current_children(applier, parent_id);
    let children_changed = current.as_slice() != expected_children;

    if children_changed {
        // Small lists: linear scans beat the hashing overhead.
        if current.len().max(expected_children.len()) <= SMALL_CHILD_SYNC_LINEAR_THRESHOLD {
            sync_children_small(
                applier,
                parent_id,
                &mut current,
                expected_children,
                deferred_cleanup,
            )?;
        } else {
            // Large lists: index target positions once up front.
            let mut target_positions: HashMap<NodeId, usize> = HashMap::default();
            target_positions.reserve(expected_children.len());
            for (index, &child) in expected_children.iter().enumerate() {
                target_positions.insert(child, index);
            }

            // Pass 1: drop children that are no longer expected. Reverse
            // iteration keeps the remaining indices valid during removal.
            for index in (0..current.len()).rev() {
                let child = current[index];
                if !target_positions.contains_key(&child) {
                    current.remove(index);
                    apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
                }
            }

            // Pass 2: move survivors into place and insert newcomers. The
            // local diff state (`current` + `current_positions`) is mutated
            // first so each command's indices describe the applier's view.
            let mut current_positions = build_child_positions(&current);
            for (target_index, &child) in expected_children.iter().enumerate() {
                if let Some(current_index) = current_positions.get(&child).copied() {
                    if current_index != target_index {
                        let from_index = current_index;
                        let to_index = move_child_in_diff_state(
                            &mut current,
                            &mut current_positions,
                            from_index,
                            target_index,
                        );
                        Command::MoveChild {
                            parent_id,
                            from_index,
                            to_index,
                            bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                        }
                        .apply(applier)?;
                    }
                } else {
                    let insert_index = target_index.min(current.len());
                    let appended_index = current.len();
                    insert_child_into_diff_state(
                        &mut current,
                        &mut current_positions,
                        insert_index,
                        child,
                    );
                    Command::InsertChild {
                        parent_id,
                        child_id: child,
                        appended_index,
                        insert_index,
                        bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                    }
                    .apply(applier)?;
                }
            }
        }
    }

    // Parent links are verified either way; the extra needs_layout check is
    // only worthwhile when the diff above emitted nothing.
    reconcile_children(applier, parent_id, expected_children, !children_changed)
}
2307
/// Linear-scan variant of the child diff for small lists (no hash maps):
/// removes unexpected children, then moves/inserts to realize the expected
/// order, mirroring each applier command in the local `current` list.
fn sync_children_small(
    applier: &mut dyn Applier,
    parent_id: NodeId,
    current: &mut ChildList,
    expected_children: &[NodeId],
    deferred_cleanup: &mut DeferredChildCleanupQueue,
) -> Result<(), NodeError> {
    // Reverse iteration keeps indices valid while removing.
    for index in (0..current.len()).rev() {
        let child = current[index];
        if !expected_children.contains(&child) {
            current.remove(index);
            apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
        }
    }

    for (target_index, &child) in expected_children.iter().enumerate() {
        if let Some(current_index) = current
            .iter()
            .position(|&current_child| current_child == child)
        {
            if current_index != target_index {
                // Mutate the local mirror first so `to_index` reflects the
                // list state after the removal, matching what the applier
                // will compute.
                let child = current.remove(current_index);
                let to_index = target_index.min(current.len());
                current.insert(to_index, child);
                Command::MoveChild {
                    parent_id,
                    from_index: current_index,
                    to_index,
                    bubble: DirtyBubble::LAYOUT_AND_MEASURE,
                }
                .apply(applier)?;
            }
        } else {
            // Newcomer: record both the append position and the final index.
            let insert_index = target_index.min(current.len());
            let appended_index = current.len();
            current.insert(insert_index, child);
            Command::InsertChild {
                parent_id,
                child_id: child,
                appended_index,
                insert_index,
                bubble: DirtyBubble::LAYOUT_AND_MEASURE,
            }
            .apply(applier)?;
        }
    }

    Ok(())
}
2357
2358fn reconcile_children(
2359    applier: &mut dyn Applier,
2360    parent_id: NodeId,
2361    expected_children: &[NodeId],
2362    needs_dirty_check: bool,
2363) -> Result<(), NodeError> {
2364    let mut repaired = false;
2365    for &child_id in expected_children {
2366        let needs_attach = if let Ok(node) = applier.get_mut(child_id) {
2367            node.parent() != Some(parent_id)
2368        } else {
2369            false
2370        };
2371
2372        if needs_attach {
2373            insert_child_with_reparenting(applier, parent_id, child_id);
2374            repaired = true;
2375        }
2376    }
2377
2378    let is_dirty = if needs_dirty_check {
2379        if let Ok(node) = applier.get_mut(parent_id) {
2380            node.needs_layout()
2381        } else {
2382            false
2383        }
2384    } else {
2385        false
2386    };
2387
2388    if repaired {
2389        bubble_layout_dirty(applier, parent_id);
2390        bubble_measure_dirty(applier, parent_id);
2391    } else if is_dirty {
2392        bubble_layout_dirty(applier, parent_id);
2393    }
2394
2395    Ok(())
2396}
2397
#[derive(Default)]
pub struct MemoryApplier {
    /// Dense node storage; `None` entries are tombstones awaiting slot reuse.
    nodes: Vec<Option<Box<dyn Node>>>,
    /// Stable node ID occupying each physical slot.
    physical_stable_ids: Vec<u32>,
    /// Whether each physical slot was populated from the warm recycle pool.
    /// NOTE(review): inferred from the name — confirm against recycle code.
    physical_warm_recycled_origins: Vec<bool>,
    /// Physical storage location for each live stable node ID.
    stable_to_physical: HashMap<NodeId, usize>,
    /// Generation counter per stable node ID.
    stable_generations: HashMap<NodeId, u32>,
    /// Lowest free physical slots are reused first so the dense storage stays compact.
    free_ids: BinaryHeap<Reverse<usize>>,
    /// Storage for high-ID nodes (like virtual nodes with IDs starting at 0xFFFFFFFF00000000)
    /// that can't be stored in the Vec without causing capacity overflow.
    high_id_nodes: HashMap<NodeId, Box<dyn Node>>,
    /// Warm-recycle origin flags for high-ID nodes (parallels `high_id_nodes`).
    high_id_warm_recycled_origins: HashMap<NodeId, bool>,
    /// Generation counters for high-ID nodes.
    high_id_generations: HashMap<NodeId, u32>,
    /// Next stable node ID to hand out.
    next_stable_id: NodeId,
    /// Optional runtime handle — presumably used for layout scheduling;
    /// NOTE(review): its use is outside this view, confirm.
    layout_runtime: Option<RuntimeHandle>,
    /// Slot table owned by this applier (see `slots()`).
    slots: SlotTable,
    /// Warm recycle pool per concrete node type.
    recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    /// Nodes in transit back into the warm pool.
    /// NOTE(review): lifecycle managed outside this view — confirm.
    returning_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    /// Cold recycle pool per node type.
    cold_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    /// Per-type cap on retained recycled nodes.
    recycled_node_limits: HashMap<TypeId, usize>,
    /// Per-type target size for the warm pool.
    warm_recycled_node_targets: HashMap<TypeId, usize>,
    /// Per-type count of recyclable nodes created fresh (not from a pool).
    fresh_recyclable_creations: HashMap<TypeId, usize>,
    /// Per-type prototype nodes — presumably used to stamp out recycled
    /// instances; NOTE(review): confirm against recycle code.
    recycled_node_prototypes: HashMap<TypeId, Box<dyn Node>>,
}
2426
/// Explicit traversal frame for iterative subtree removal: the node being
/// removed, a snapshot of its children, and a cursor into that snapshot.
/// NOTE(review): the traversal that drives this is outside this view.
struct RemovalFrame {
    node_id: NodeId,
    children: SmallVec<[NodeId; 8]>,
    next_child: usize,
}
2432
/// Point-in-time size/capacity counters for `MemoryApplier` internals,
/// exposed for diagnostics (see `MemoryApplier::debug_stats`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct MemoryApplierDebugStats {
    pub next_stable_id: NodeId,
    pub nodes_len: usize,
    pub nodes_cap: usize,
    pub physical_stable_ids_len: usize,
    pub physical_stable_ids_cap: usize,
    pub stable_to_physical_len: usize,
    pub stable_to_physical_cap: usize,
    pub stable_generations_len: usize,
    pub stable_generations_cap: usize,
    pub free_ids_len: usize,
    pub free_ids_cap: usize,
    pub high_id_nodes_len: usize,
    pub high_id_nodes_cap: usize,
    pub high_id_generations_len: usize,
    pub high_id_generations_cap: usize,
    pub recycled_type_count: usize,
    pub recycled_type_cap: usize,
    pub recycled_node_count: usize,
    pub recycled_node_capacity: usize,
    pub warm_recycled_node_id_count: usize,
    pub warm_recycled_node_id_capacity: usize,
}
2457
2458impl MemoryApplier {
    /// Threshold for eager compaction of dense storage.
    /// NOTE(review): usage is outside this view — confirm against compaction code.
    const EAGER_COMPACT_NODE_LEN: usize = 1_024;
    /// Sentinel for a physical slot with no live stable ID.
    /// NOTE(review): usage is outside this view — confirm.
    const INVALID_STABLE_ID: u32 = u32::MAX;
    /// First allocation size for the dense node vectors.
    const INITIAL_DENSE_NODE_CAP: usize = 32;
    /// Above this length, dense growth switches from doubling to additive steps
    /// (see `next_dense_node_target_len`).
    const LARGE_DENSE_NODE_GROWTH_THRESHOLD: usize = 32 * 1024;
    /// Additive growth step is `len / divisor` once past the threshold.
    const LARGE_DENSE_NODE_GROWTH_DIVISOR: usize = 4;
2464
    /// Narrows a stable `NodeId` into the `u32` stored per physical slot.
    ///
    /// # Panics
    /// Panics if the id does not fit in `u32`.
    fn pack_stable_id(stable_id: NodeId) -> u32 {
        u32::try_from(stable_id).expect("stable id overflow")
    }
2468
    /// Widens a packed per-slot `u32` back into a stable `NodeId` (lossless).
    fn unpack_stable_id(stable_id: u32) -> NodeId {
        stable_id as NodeId
    }
2472
2473    fn next_dense_node_target_len(old_len: usize) -> usize {
2474        if old_len < Self::INITIAL_DENSE_NODE_CAP {
2475            return Self::INITIAL_DENSE_NODE_CAP;
2476        }
2477        if old_len < Self::LARGE_DENSE_NODE_GROWTH_THRESHOLD {
2478            return old_len.saturating_mul(2);
2479        }
2480
2481        let incremental_growth =
2482            (old_len / Self::LARGE_DENSE_NODE_GROWTH_DIVISOR).max(Self::INITIAL_DENSE_NODE_CAP);
2483        old_len.saturating_add(incremental_growth)
2484    }
2485
2486    fn ensure_dense_node_storage_capacity(&mut self) {
2487        let len = self
2488            .nodes
2489            .len()
2490            .max(self.physical_stable_ids.len())
2491            .max(self.physical_warm_recycled_origins.len());
2492        if len < self.nodes.capacity()
2493            && len < self.physical_stable_ids.capacity()
2494            && len < self.physical_warm_recycled_origins.capacity()
2495        {
2496            return;
2497        }
2498
2499        let target = Self::next_dense_node_target_len(len);
2500        if self.nodes.capacity() < target {
2501            self.nodes
2502                .reserve_exact(target.saturating_sub(self.nodes.len()));
2503        }
2504        if self.physical_stable_ids.capacity() < target {
2505            self.physical_stable_ids
2506                .reserve_exact(target.saturating_sub(self.physical_stable_ids.len()));
2507        }
2508        if self.physical_warm_recycled_origins.capacity() < target {
2509            self.physical_warm_recycled_origins
2510                .reserve_exact(target.saturating_sub(self.physical_warm_recycled_origins.len()));
2511        }
2512    }
2513
    /// Grows the stable-id index maps ahead of an insertion, following the
    /// same growth schedule as the dense node vectors.
    fn ensure_stable_index_capacity(&mut self) {
        // Size both maps off the larger occupancy so they stay in lock-step.
        let len = self
            .stable_to_physical
            .len()
            .max(self.stable_generations.len());
        // Fast path: both maps still have headroom.
        if len < self.stable_to_physical.capacity() && len < self.stable_generations.capacity() {
            return;
        }

        let target = Self::next_dense_node_target_len(len);
        let additional = target.saturating_sub(len);
        if self.stable_to_physical.capacity() < target {
            self.stable_to_physical.reserve(additional);
        }
        if self.stable_generations.capacity() < target {
            self.stable_generations.reserve(additional);
        }
    }
2532
2533    pub fn new() -> Self {
2534        Self {
2535            nodes: Vec::new(),
2536            physical_stable_ids: Vec::new(),
2537            physical_warm_recycled_origins: Vec::new(),
2538            stable_to_physical: HashMap::default(),
2539            stable_generations: HashMap::default(),
2540            free_ids: BinaryHeap::new(),
2541            high_id_nodes: HashMap::default(),
2542            high_id_warm_recycled_origins: HashMap::default(),
2543            high_id_generations: HashMap::default(),
2544            next_stable_id: 0,
2545            layout_runtime: None,
2546            slots: SlotTable::default(),
2547            recycled_nodes: HashMap::default(),
2548            returning_recycled_nodes: HashMap::default(),
2549            cold_recycled_nodes: HashMap::default(),
2550            recycled_node_limits: HashMap::default(),
2551            warm_recycled_node_targets: HashMap::default(),
2552            fresh_recyclable_creations: HashMap::default(),
2553            recycled_node_prototypes: HashMap::default(),
2554        }
2555    }
2556
    /// Mutable access to the slot table owned by this applier.
    pub fn slots(&mut self) -> &mut SlotTable {
        &mut self.slots
    }
2560
2561    pub fn with_node<N: Node + 'static, R>(
2562        &mut self,
2563        id: NodeId,
2564        f: impl FnOnce(&mut N) -> R,
2565    ) -> Result<R, NodeError> {
2566        let physical_id = self
2567            .resolve_node_index(id)
2568            .ok_or(NodeError::Missing { id })?;
2569        let slot = self
2570            .nodes
2571            .get_mut(physical_id)
2572            .ok_or(NodeError::Missing { id })?
2573            .as_deref_mut()
2574            .ok_or(NodeError::Missing { id })?;
2575        let typed = slot
2576            .as_any_mut()
2577            .downcast_mut::<N>()
2578            .ok_or(NodeError::TypeMismatch {
2579                id,
2580                expected: std::any::type_name::<N>(),
2581            })?;
2582        Ok(f(typed))
2583    }
2584
2585    pub fn len(&self) -> usize {
2586        self.nodes.iter().filter(|n| n.is_some()).count()
2587    }
2588
    /// Number of physical slots in dense storage, including tombstones.
    /// Note this is `nodes.len()`, not the vector's allocated capacity.
    pub fn capacity(&self) -> usize {
        self.nodes.len()
    }
2592
2593    pub fn tombstone_count(&self) -> usize {
2594        self.nodes.iter().filter(|n| n.is_none()).count()
2595    }
2596
    /// Number of free physical slots queued for reuse.
    pub fn freelist_len(&self) -> usize {
        self.free_ids.len()
    }
2600
    /// Total recycled nodes across all pools (delegates to a helper defined
    /// outside this view).
    pub fn debug_recycled_node_count(&self) -> usize {
        self.total_recycled_node_count()
    }
2604
2605    pub fn debug_recycled_node_count_for<N: Node + 'static>(&self) -> usize {
2606        let key = TypeId::of::<N>();
2607        self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
2608            + self
2609                .returning_recycled_nodes
2610                .get(&key)
2611                .map(Vec::len)
2612                .unwrap_or(0)
2613            + self
2614                .cold_recycled_nodes
2615                .get(&key)
2616                .map(Vec::len)
2617                .unwrap_or(0)
2618    }
2619
2620    pub fn debug_stats(&self) -> MemoryApplierDebugStats {
2621        let mut recycled_keys: HashSet<TypeId> = HashSet::default();
2622        recycled_keys.extend(self.recycled_nodes.keys().copied());
2623        recycled_keys.extend(self.returning_recycled_nodes.keys().copied());
2624        recycled_keys.extend(self.cold_recycled_nodes.keys().copied());
2625
2626        MemoryApplierDebugStats {
2627            next_stable_id: self.next_stable_id,
2628            nodes_len: self.len(),
2629            nodes_cap: self.nodes.len(),
2630            physical_stable_ids_len: self.physical_stable_ids.len(),
2631            physical_stable_ids_cap: self.physical_stable_ids.capacity(),
2632            stable_to_physical_len: self.stable_to_physical.len(),
2633            stable_to_physical_cap: self.stable_to_physical.capacity(),
2634            stable_generations_len: self.stable_generations.len(),
2635            stable_generations_cap: self.stable_generations.capacity(),
2636            free_ids_len: self.free_ids.len(),
2637            free_ids_cap: self.free_ids.capacity(),
2638            high_id_nodes_len: self.high_id_nodes.len(),
2639            high_id_nodes_cap: self.high_id_nodes.capacity(),
2640            high_id_generations_len: self.high_id_generations.len(),
2641            high_id_generations_cap: self.high_id_generations.capacity(),
2642            recycled_type_count: recycled_keys.len(),
2643            recycled_type_cap: self.recycled_nodes.capacity()
2644                + self.returning_recycled_nodes.capacity()
2645                + self.cold_recycled_nodes.capacity(),
2646            recycled_node_count: self.total_recycled_node_count(),
2647            recycled_node_capacity: self.total_recycled_node_capacity(),
2648            warm_recycled_node_id_count: self.total_warm_recycled_node_id_count(),
2649            warm_recycled_node_id_capacity: self.total_warm_recycled_node_id_capacity(),
2650        }
2651    }
2652
    /// True when no live nodes remain in the dense table.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
2656
2657    pub fn debug_live_node_heap_bytes(&self) -> usize {
2658        let dense_nodes = self
2659            .nodes
2660            .iter()
2661            .flatten()
2662            .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2663            .sum::<usize>();
2664        let high_id_nodes = self
2665            .high_id_nodes
2666            .values()
2667            .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2668            .sum::<usize>();
2669        dense_nodes + high_id_nodes
2670    }
2671
2672    pub fn debug_recycled_node_heap_bytes(&self) -> usize {
2673        let pool_bytes = |pools: &HashMap<TypeId, Vec<RecycledNode>>| {
2674            pools
2675                .values()
2676                .flat_map(|nodes| nodes.iter())
2677                .map(|node| std::mem::size_of_val(&*node.node) + node.node.debug_heap_bytes())
2678                .sum::<usize>()
2679        };
2680
2681        pool_bytes(&self.recycled_nodes)
2682            + pool_bytes(&self.returning_recycled_nodes)
2683            + pool_bytes(&self.cold_recycled_nodes)
2684    }
2685
    /// Installs the layout runtime handle used by this applier.
    pub fn set_runtime_handle(&mut self, handle: RuntimeHandle) {
        self.layout_runtime = Some(handle);
    }
2689
    /// Drops the installed layout runtime handle, if any.
    pub fn clear_runtime_handle(&mut self) {
        self.layout_runtime = None;
    }
2693
    /// A clone of the installed layout runtime handle, if one is set.
    pub fn runtime_handle(&self) -> Option<RuntimeHandle> {
        self.layout_runtime.clone()
    }
2697
    /// Total nodes held in one recycle-pool map.
    fn pool_node_count(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
        pools.values().map(Vec::len).sum()
    }
2701
    /// Total `Vec` capacity reserved across one recycle-pool map.
    fn pool_node_capacity(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
        pools.values().map(Vec::capacity).sum()
    }
2705
2706    fn total_recycled_node_count(&self) -> usize {
2707        Self::pool_node_count(&self.recycled_nodes)
2708            + Self::pool_node_count(&self.returning_recycled_nodes)
2709            + Self::pool_node_count(&self.cold_recycled_nodes)
2710    }
2711
2712    fn total_recycled_node_capacity(&self) -> usize {
2713        Self::pool_node_capacity(&self.recycled_nodes)
2714            + Self::pool_node_capacity(&self.returning_recycled_nodes)
2715            + Self::pool_node_capacity(&self.cold_recycled_nodes)
2716    }
2717
    /// Stable ids associated with warm recycling: live warm-origin nodes plus
    /// nodes parked in the warm and returning pools (cold pool excluded).
    fn total_warm_recycled_node_id_count(&self) -> usize {
        self.live_warm_recycled_origin_count()
            + Self::pool_node_count(&self.recycled_nodes)
            + Self::pool_node_count(&self.returning_recycled_nodes)
    }
2723
    /// Capacity counterpart of [`Self::total_warm_recycled_node_id_count`].
    fn total_warm_recycled_node_id_capacity(&self) -> usize {
        self.live_warm_recycled_origin_capacity()
            + Self::pool_node_capacity(&self.recycled_nodes)
            + Self::pool_node_capacity(&self.returning_recycled_nodes)
    }
2729
2730    fn remember_recycle_pool_limit(&mut self, key: TypeId, recycle_pool_limit: Option<usize>) {
2731        if let Some(limit) = recycle_pool_limit {
2732            self.recycled_node_limits.insert(key, limit);
2733        } else {
2734            self.recycled_node_limits.remove(&key);
2735        }
2736    }
2737
    /// The remembered recycle-pool limit for `key`, if one was recorded.
    fn recycle_pool_limit_for(&self, key: TypeId) -> Option<usize> {
        self.recycled_node_limits.get(&key).copied()
    }
2741
    /// Current number of entries in the warm (`recycled_nodes`) pool for `key`.
    fn warm_recycled_pool_len(&self, key: TypeId) -> usize {
        self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
    }
2745
    /// The recorded warm-pool target for `key`, defaulting to zero.
    fn warm_recycled_node_target(&self, key: TypeId) -> usize {
        self.warm_recycled_node_targets
            .get(&key)
            .copied()
            .unwrap_or(0)
    }
2752
2753    fn warm_recycled_node_target_limit(&self, key: TypeId) -> usize {
2754        let Some(limit) = self.recycle_pool_limit_for(key) else {
2755            return usize::MAX;
2756        };
2757        if limit <= 8 {
2758            limit
2759        } else {
2760            limit / 4
2761        }
2762    }
2763
    /// Folds `observed_demand` into the warm-pool target for `key` and
    /// returns the target to use for this pass.
    ///
    /// With zero demand the stored target is left untouched and the
    /// limit-clamped existing value is returned (the clamp is not persisted).
    fn update_warm_recycled_node_target(&mut self, key: TypeId, observed_demand: usize) -> usize {
        let target_limit = self.warm_recycled_node_target_limit(key);
        let existing = self.warm_recycled_node_target(key).min(target_limit);
        if observed_demand == 0 {
            return existing;
        }

        let target = match self.recycle_pool_limit_for(key) {
            // Larger explicit limits: keep the pool primed at its cap fraction.
            Some(limit) if limit > 8 => target_limit,
            Some(_) => observed_demand.min(target_limit),
            None => observed_demand,
        };
        self.warm_recycled_node_targets.insert(key, target);
        target
    }
2779
2780    fn remember_recycled_node_prototype(&mut self, key: TypeId, shell: &dyn Node) {
2781        if self.recycled_node_prototypes.contains_key(&key) {
2782            return;
2783        }
2784        if let Some(prototype) = shell.rehouse_for_recycle() {
2785            self.recycled_node_prototypes.insert(key, prototype);
2786        }
2787    }
2788
2789    fn live_warm_recycled_origin_count(&self) -> usize {
2790        self.physical_warm_recycled_origins
2791            .iter()
2792            .zip(self.nodes.iter())
2793            .filter(|(warm_origin, node)| **warm_origin && node.is_some())
2794            .count()
2795            + self
2796                .high_id_warm_recycled_origins
2797                .values()
2798                .filter(|warm_origin| **warm_origin)
2799                .count()
2800    }
2801
    /// Combined allocation capacity of the warm-origin bookkeeping tables.
    fn live_warm_recycled_origin_capacity(&self) -> usize {
        self.physical_warm_recycled_origins.capacity()
            + self.high_id_warm_recycled_origins.capacity()
    }
2806
2807    fn push_recycled_node(
2808        &mut self,
2809        key: TypeId,
2810        recycle_pool_limit: Option<usize>,
2811        recycled: RecycledNode,
2812    ) {
2813        self.remember_recycle_pool_limit(key, recycle_pool_limit);
2814        self.remember_recycled_node_prototype(key, recycled.node.as_ref());
2815
2816        let warm_origin = recycled.warm_origin();
2817        let pool = if warm_origin {
2818            self.returning_recycled_nodes.entry(key).or_default()
2819        } else {
2820            self.cold_recycled_nodes.entry(key).or_default()
2821        };
2822        pool.push(recycled);
2823        if let Some(limit) = recycle_pool_limit {
2824            if pool.len() > limit {
2825                let excess = pool.len() - limit;
2826                let dropped: Vec<_> = pool.drain(0..excess).collect();
2827                drop(dropped);
2828            }
2829        }
2830    }
2831
    /// Parks `recycled` in the warm pool for `key`, marking it warm-origin
    /// and evicting the oldest entries beyond `recycle_pool_limit`.
    fn push_warm_recycled_node(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        mut recycled: RecycledNode,
    ) {
        self.remember_recycle_pool_limit(key, recycle_pool_limit);

        recycled.set_warm_origin(true);
        // Evicted nodes are gathered here and dropped only after the pool
        // borrow has ended and the (possibly empty) map entry was removed, so
        // node `Drop` impls never run while the map is mid-update.
        let mut dropped = Vec::new();
        let mut remove_pool_entry = false;
        {
            let pool = self.recycled_nodes.entry(key).or_default();
            pool.push(recycled);
            if let Some(limit) = recycle_pool_limit {
                if pool.len() > limit {
                    let excess = pool.len() - limit;
                    dropped = pool.drain(0..excess).collect();
                    // Only reachable with `limit == 0`; avoid keeping an
                    // empty Vec in the map.
                    remove_pool_entry = pool.is_empty();
                }
            }
        }
        if remove_pool_entry {
            self.recycled_nodes.remove(&key);
        }
        drop(dropped);
    }
2859
    /// Pre-seeds the warm pool for `key` with a freshly allocated shell node,
    /// assigning it a brand-new stable id.
    ///
    /// No-op once the warm pool has already reached its configured limit.
    fn seed_recycled_node_shell_impl(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        shell: Box<dyn Node>,
    ) {
        let limit = recycle_pool_limit.unwrap_or(usize::MAX);
        if self.warm_recycled_pool_len(key) >= limit {
            return;
        }

        // Remember a prototype so the pool can be replenished later without a
        // live node of this type.
        self.remember_recycled_node_prototype(key, shell.as_ref());
        let stable_id = self.next_stable_id;
        self.next_stable_id = self.next_stable_id.saturating_add(1);
        self.push_warm_recycled_node(
            key,
            recycle_pool_limit,
            RecycledNode::from_shell(stable_id, shell, true),
        );
    }
2880
2881    fn take_recycled_node_from_pool(
2882        pools: &mut HashMap<TypeId, Vec<RecycledNode>>,
2883        key: TypeId,
2884    ) -> Option<RecycledNode> {
2885        let pool = pools.get_mut(&key)?;
2886        let node = pool.pop();
2887        if pool.is_empty() {
2888            pools.remove(&key);
2889        }
2890        node
2891    }
2892
    /// Shrinks an over-allocated warm pool for `key` by moving its contents
    /// into a right-sized Vec.
    ///
    /// Only fires when capacity exceeds 4x the live length (with a 64-slot
    /// floor) to avoid churn on small pools.
    fn compact_idle_warm_pool(&mut self, key: TypeId) {
        let Some(pool) = self.recycled_nodes.get_mut(&key) else {
            return;
        };
        if pool.capacity() <= pool.len().saturating_mul(4).max(64) {
            return;
        }

        let retained = pool.len();
        let mut compacted = Vec::with_capacity(retained);
        // `append` moves the elements out; overwriting `*pool` then frees the
        // old over-sized allocation.
        compacted.append(pool);
        let remove_pool_entry = compacted.is_empty();
        *pool = compacted;
        // Explicitly end the mutable borrow before touching the map again.
        let _ = pool;

        if remove_pool_entry {
            self.recycled_nodes.remove(&key);
        }
    }
2912
    /// Drops the oldest warm-pool entries for `key` until at most `target`
    /// remain.
    fn trim_idle_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
        let pool_len = self.warm_recycled_pool_len(key);
        if pool_len <= target {
            return;
        }

        let Some(pool) = self.recycled_nodes.get_mut(&key) else {
            return;
        };
        let removable = (pool_len - target).min(pool.len());
        // Collect first: removed nodes are dropped only after the borrow ends
        // and the empty map entry (if any) has been removed.
        let dropped: Vec<_> = pool.drain(0..removable).collect();
        let remove_pool_entry = pool.is_empty();
        // Explicitly end the mutable borrow before mutating the map.
        let _ = pool;

        if remove_pool_entry {
            self.recycled_nodes.remove(&key);
        }
        drop(dropped);
    }
2932
    /// Tops the warm pool for `key` back up to `target` entries using shells
    /// rehoused from the remembered prototype.
    fn replenish_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
        let missing = target.saturating_sub(self.warm_recycled_pool_len(key));
        if missing == 0 {
            return;
        }

        let recycle_pool_limit = self.recycle_pool_limit_for(key);
        // Two phases: build all shells while the prototype holds an immutable
        // borrow of `self`, then seed them with `&mut self` calls.
        let mut shells = Vec::with_capacity(missing);
        if let Some(prototype) = self.recycled_node_prototypes.get(&key) {
            for _ in 0..missing {
                let Some(shell) = prototype.rehouse_for_recycle() else {
                    break;
                };
                shells.push(shell);
            }
        }

        for shell in shells {
            self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
        }
    }
2954
2955    fn prune_stable_generations(&mut self) {
2956        let retained_len = self.stable_to_physical.len() + self.total_recycled_node_count();
2957        if retained_len == self.stable_generations.len() {
2958            return;
2959        }
2960
2961        let mut retained = HashMap::default();
2962        retained.reserve(retained_len);
2963        for stable_id in self.stable_to_physical.keys().copied() {
2964            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2965                retained.insert(stable_id, generation);
2966            }
2967        }
2968        for stable_id in self
2969            .recycled_nodes
2970            .values()
2971            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2972        {
2973            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2974                retained.insert(stable_id, generation);
2975            }
2976        }
2977        for stable_id in self
2978            .returning_recycled_nodes
2979            .values()
2980            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2981        {
2982            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2983                retained.insert(stable_id, generation);
2984            }
2985        }
2986        for stable_id in self
2987            .cold_recycled_nodes
2988            .values()
2989            .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2990        {
2991            if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2992                retained.insert(stable_id, generation);
2993            }
2994        }
2995        self.stable_generations = retained;
2996    }
2997
2998    pub fn dump_tree(&self, root: Option<NodeId>) -> String {
2999        let mut output = String::new();
3000        if let Some(root_id) = root {
3001            self.dump_node(&mut output, root_id, 0);
3002        } else {
3003            output.push_str("(no root)\n");
3004        }
3005        output
3006    }
3007
3008    fn dump_node(&self, output: &mut String, id: NodeId, depth: usize) {
3009        let indent = "  ".repeat(depth);
3010        if let Some(physical_id) = self.resolve_node_index(id) {
3011            let node = self.nodes[physical_id]
3012                .as_ref()
3013                .expect("resolved physical node must exist");
3014            let type_name = std::any::type_name_of_val(&**node);
3015            output.push_str(&format!("{}[{}] {}\n", indent, id, type_name));
3016
3017            let children = node.children();
3018            for child_id in children {
3019                self.dump_node(output, child_id, depth + 1);
3020            }
3021        } else {
3022            output.push_str(&format!("{}[{}] (missing)\n", indent, id));
3023        }
3024    }
3025
    /// Maps a stable id to its current dense physical slot index, if any.
    fn resolve_node_index(&self, id: NodeId) -> Option<usize> {
        self.stable_to_physical.get(&id).copied()
    }
3029
3030    fn get_ref(&self, id: NodeId) -> Result<&dyn Node, NodeError> {
3031        if let Some(physical_id) = self.resolve_node_index(id) {
3032            let slot = self
3033                .nodes
3034                .get(physical_id)
3035                .ok_or(NodeError::Missing { id })?
3036                .as_deref()
3037                .ok_or(NodeError::Missing { id })?;
3038            return Ok(slot);
3039        }
3040
3041        self.high_id_nodes
3042            .get(&id)
3043            .map(|node| node.as_ref())
3044            .ok_or(NodeError::Missing { id })
3045    }
3046
    /// Appends the children reported by node `id` into `out`.
    fn collect_node_children_into(
        &self,
        id: NodeId,
        out: &mut SmallVec<[NodeId; 8]>,
    ) -> Result<(), NodeError> {
        self.get_ref(id)?.collect_children_into(out);
        Ok(())
    }
3055
    /// The parent id reported by node `id`, or an error if `id` is unknown.
    fn node_parent(&self, id: NodeId) -> Result<Option<NodeId>, NodeError> {
        Ok(self.get_ref(id)?.parent())
    }
3059
3060    fn collect_owned_children(
3061        &self,
3062        node_id: NodeId,
3063        out: &mut SmallVec<[NodeId; 8]>,
3064    ) -> Result<(), NodeError> {
3065        self.collect_node_children_into(node_id, out)?;
3066        out.retain(|child_id| {
3067            self.node_parent(*child_id)
3068                .map(|parent| parent == Some(node_id))
3069                .unwrap_or(false)
3070        });
3071        Ok(())
3072    }
3073
    /// Releases the storage for a single node, recycling it when it opts in
    /// via `recycle_key`, and bumps the id's generation counter so stale
    /// references can be detected.
    fn remove_node_storage(&mut self, node_id: NodeId) -> Result<(), NodeError> {
        // Sparse (high-id) path first.
        if self.high_id_nodes.contains_key(&node_id) {
            if let Some(mut node) = self.high_id_nodes.remove(&node_id) {
                if let Some(key) = node.recycle_key() {
                    let recycle_pool_limit = node.recycle_pool_limit();
                    let warm_origin = self
                        .high_id_warm_recycled_origins
                        .remove(&node_id)
                        .unwrap_or(false);
                    node.prepare_for_recycle();
                    self.push_recycled_node(
                        key,
                        recycle_pool_limit,
                        RecycledNode::new(node_id, node, warm_origin),
                    );
                }
            }
            let generation = self.high_id_generations.entry(node_id).or_insert(0);
            *generation = generation.wrapping_add(1);
            return Ok(());
        }

        // Dense path: take the node out, leaving a tombstone.
        let physical_id = self
            .resolve_node_index(node_id)
            .ok_or(NodeError::Missing { id: node_id })?;
        if let Some(mut node) = self.nodes[physical_id].take() {
            if let Some(key) = node.recycle_key() {
                let recycle_pool_limit = node.recycle_pool_limit();
                // `mem::take` also resets the warm flag for the vacated slot.
                let warm_origin = self
                    .physical_warm_recycled_origins
                    .get_mut(physical_id)
                    .map(std::mem::take)
                    .unwrap_or(false);
                node.prepare_for_recycle();
                self.push_recycled_node(
                    key,
                    recycle_pool_limit,
                    RecycledNode::new(node_id, node, warm_origin),
                );
            }
        }
        self.physical_stable_ids[physical_id] = Self::INVALID_STABLE_ID;
        self.stable_to_physical.remove(&node_id);
        if let Some(generation) = self.stable_generations.get_mut(&node_id) {
            *generation = generation.wrapping_add(1);
        } else {
            self.stable_generations.insert(node_id, 1);
        }
        // Queue the slot for reuse; `Reverse` makes the BinaryHeap a min-heap
        // so the lowest index is handed out first.
        self.free_ids.push(Reverse(physical_id));
        Ok(())
    }
3125
    /// Removes `id` and every owned descendant using an explicit stack;
    /// storage is released post-order (children before parents).
    ///
    /// Returns the maximum stack depth reached (used by the test-only depth
    /// probe). Note: `on_removed_from_parent`/`unmount` fire only for nodes
    /// visited as children, not for the root `id` itself.
    fn remove_subtree_postorder(&mut self, id: NodeId) -> Result<usize, NodeError> {
        // Fail fast if the root id is unknown.
        self.get_ref(id)?;

        let mut root_children = SmallVec::<[NodeId; 8]>::new();
        self.collect_owned_children(id, &mut root_children)?;

        let mut stack = Vec::new();
        stack.push(RemovalFrame {
            node_id: id,
            children: root_children,
            next_child: 0,
        });
        let mut max_depth = stack.len();

        while let Some(frame) = stack.last_mut() {
            if frame.next_child < frame.children.len() {
                let child_id = frame.children[frame.next_child];
                frame.next_child += 1;

                // Lifecycle callbacks fire on the way down; a vanished child
                // is tolerated (best effort).
                if let Ok(child) = self.get_mut(child_id) {
                    child.on_removed_from_parent();
                    child.unmount();
                }

                let mut child_children = SmallVec::<[NodeId; 8]>::new();
                self.collect_owned_children(child_id, &mut child_children)?;
                stack.push(RemovalFrame {
                    node_id: child_id,
                    children: child_children,
                    next_child: 0,
                });
                max_depth = max_depth.max(stack.len());
                continue;
            }

            // All children handled: release this node's storage.
            let node_id = frame.node_id;
            stack.pop();
            self.remove_node_storage(node_id)?;
        }

        Ok(max_depth)
    }
3168
    /// Test-only probe exposing the traversal depth of a subtree removal.
    #[cfg(test)]
    fn debug_remove_max_traversal_depth(&mut self, id: NodeId) -> Result<usize, NodeError> {
        self.remove_subtree_postorder(id)
    }
3173}
3174
// Dense-storage implementation of the `Applier` command interface.
impl Applier for MemoryApplier {
    /// Stores `node` under a freshly allocated stable id, reusing the lowest
    /// free physical slot when the freelist is non-empty.
    fn create(&mut self, node: Box<dyn Node>) -> NodeId {
        self.ensure_stable_index_capacity();
        let stable_id = self.next_stable_id;
        // saturating_add: the id counter never wraps back onto live ids.
        self.next_stable_id = self.next_stable_id.saturating_add(1);
        self.stable_generations.insert(stable_id, 0);

        let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
            debug_assert!(self.nodes[id].is_none(), "freelist entry {id} is not None");
            self.nodes[id] = Some(node);
            self.physical_stable_ids[id] = Self::pack_stable_id(stable_id);
            self.physical_warm_recycled_origins[id] = false;
            id
        } else {
            self.ensure_dense_node_storage_capacity();
            let id = self.nodes.len();
            self.nodes.push(Some(node));
            self.physical_stable_ids
                .push(Self::pack_stable_id(stable_id));
            self.physical_warm_recycled_origins.push(false);
            id
        };
        self.stable_to_physical.insert(stable_id, physical_id);
        stable_id
    }

    /// Current generation for `id`; high-id entries take precedence and
    /// unknown ids report 0.
    fn node_generation(&self, id: NodeId) -> u32 {
        self.high_id_generations
            .get(&id)
            .copied()
            .or_else(|| self.stable_generations.get(&id).copied())
            .unwrap_or(0)
    }

    /// Mutable lookup across both storages.
    fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError> {
        // Try Vec first (common case for normal nodes), then HashMap for virtual nodes.
        if let Some(physical_id) = self.resolve_node_index(id) {
            let slot = self.nodes[physical_id]
                .as_deref_mut()
                .ok_or(NodeError::Missing { id })?;
            return Ok(slot);
        }
        self.high_id_nodes
            .get_mut(&id)
            .map(|n| n.as_mut())
            .ok_or(NodeError::Missing { id })
    }

    /// Removes `id` together with its owned subtree.
    fn remove(&mut self, id: NodeId) -> Result<(), NodeError> {
        self.remove_subtree_postorder(id).map(|_| ())
    }

    /// Inserts a node under a caller-chosen id. Ids at or above the threshold
    /// (virtual nodes) live in a sparse map; the rest use the dense table.
    fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError> {
        // Use HashMap for high IDs (virtual nodes) to avoid Vec capacity overflow
        // Virtual node IDs start at a very high value that can't fit in a Vec
        const HIGH_ID_THRESHOLD: NodeId = 1_000_000_000; // 1 billion

        if id >= HIGH_ID_THRESHOLD {
            if self.high_id_nodes.contains_key(&id) {
                return Err(NodeError::AlreadyExists { id });
            }
            self.high_id_nodes.insert(id, node);
            self.high_id_warm_recycled_origins.insert(id, false);
            self.high_id_generations.entry(id).or_insert(0);
            Ok(())
        } else {
            if self.stable_to_physical.contains_key(&id) {
                return Err(NodeError::AlreadyExists { id });
            }

            // NOTE(review): in both branches below the binding `id` shadows
            // the parameter with the *physical* slot index, so the
            // `pack_stable_id(id)` calls here pack the physical index. The
            // correct stable id is written immediately after this block, so
            // the end state is consistent — but the shadowing is confusing
            // and worth renaming.
            let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
                self.nodes[id] = Some(node);
                self.physical_stable_ids[id] = Self::pack_stable_id(id);
                self.physical_warm_recycled_origins[id] = false;
                id
            } else {
                self.ensure_dense_node_storage_capacity();
                let id = self.nodes.len();
                self.nodes.push(Some(node));
                self.physical_stable_ids.push(Self::pack_stable_id(id));
                self.physical_warm_recycled_origins.push(false);
                id
            };

            // Keep the id allocator ahead of explicitly inserted ids.
            self.next_stable_id = self.next_stable_id.max(id.saturating_add(1));
            self.ensure_stable_index_capacity();
            self.stable_generations.entry(id).or_insert(0);
            self.physical_stable_ids[physical_id] = Self::pack_stable_id(id);
            self.stable_to_physical.insert(id, physical_id);
            Ok(())
        }
    }

    /// Rebuilds the dense tables without tombstones.
    ///
    /// Skipped when nothing can be reclaimed, or when the table is large and
    /// still mostly live. When tombstones dominate, live nodes are also
    /// rehoused so they can shed internal capacity.
    fn compact(&mut self) {
        let live_count = self.nodes.iter().filter(|slot| slot.is_some()).count();
        let tombstone_count = self.nodes.len().saturating_sub(live_count);
        if tombstone_count == 0 {
            return;
        }
        if self.nodes.len() > Self::EAGER_COMPACT_NODE_LEN && tombstone_count < live_count {
            return;
        }
        let rehouse_live_nodes = tombstone_count >= live_count;
        let mut packed_nodes = Vec::with_capacity(live_count);
        let mut packed_physical_stable_ids = Vec::with_capacity(live_count);
        let mut packed_warm_recycled_origins = Vec::with_capacity(live_count);
        let mut stable_to_physical = HashMap::default();
        stable_to_physical.reserve(live_count);

        for physical_id in 0..self.nodes.len() {
            let Some(mut node) = self.nodes[physical_id].take() else {
                continue;
            };
            if rehouse_live_nodes {
                if let Some(rehoused) = node.rehouse_for_live_compaction() {
                    node = rehoused;
                }
            }
            let stable_id = std::mem::replace(
                &mut self.physical_stable_ids[physical_id],
                Self::INVALID_STABLE_ID,
            );
            debug_assert_ne!(
                stable_id,
                Self::INVALID_STABLE_ID,
                "live physical slot must have a stable id",
            );
            let stable_id = Self::unpack_stable_id(stable_id);
            packed_nodes.push(Some(node));
            packed_physical_stable_ids.push(Self::pack_stable_id(stable_id));
            packed_warm_recycled_origins.push(self.physical_warm_recycled_origins[physical_id]);
            stable_to_physical.insert(stable_id, packed_nodes.len() - 1);
        }

        self.nodes = packed_nodes;
        self.physical_stable_ids = packed_physical_stable_ids;
        self.physical_warm_recycled_origins = packed_warm_recycled_origins;
        // Every remaining slot is live, so the freelist is empty by definition.
        self.free_ids = BinaryHeap::new();
        self.stable_to_physical = stable_to_physical;
        self.prune_stable_generations();
    }

    /// Pops a recycled node for `key`, preferring same-pass returns over the
    /// long-lived warm pool.
    fn take_recycled_node(&mut self, key: TypeId) -> Option<RecycledNode> {
        Self::take_recycled_node_from_pool(&mut self.returning_recycled_nodes, key)
            .or_else(|| Self::take_recycled_node_from_pool(&mut self.recycled_nodes, key))
    }

    /// Flags a live node as originating (or not) from the warm recycle pool.
    fn set_recycled_node_origin(&mut self, id: NodeId, warm_origin: bool) {
        if let Some(physical_id) = self.resolve_node_index(id) {
            self.physical_warm_recycled_origins[physical_id] = warm_origin;
        } else if self.high_id_nodes.contains_key(&id) {
            self.high_id_warm_recycled_origins.insert(id, warm_origin);
        }
    }

    /// Public entry point forwarding to the inherent seeding helper.
    fn seed_recycled_node_shell(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        shell: Box<dyn Node>,
    ) {
        self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
    }

    /// Counts a recyclable node that had to be created from scratch; this
    /// demand signal drives warm-pool targets in `clear_recycled_nodes`.
    fn record_fresh_recyclable_creation(&mut self, key: TypeId) {
        *self.fresh_recyclable_creations.entry(key).or_insert(0) += 1;
    }

    /// End-of-pass recycle-pool maintenance: merges same-pass returns into
    /// the warm pool, promotes cold nodes up to the observed fresh-creation
    /// demand (unpromoted cold nodes are dropped), then re-targets,
    /// replenishes, trims and compacts each per-type warm pool before pruning
    /// generations and compacting dense storage.
    fn clear_recycled_nodes(&mut self) {
        // Same-pass returns flow back into the long-lived warm pool.
        let returning = std::mem::take(&mut self.returning_recycled_nodes);
        for (key, mut nodes) in returning {
            let pool = self.recycled_nodes.entry(key).or_default();
            pool.append(&mut nodes);
        }

        let fresh_recyclable_creations = std::mem::take(&mut self.fresh_recyclable_creations);
        let cold = std::mem::take(&mut self.cold_recycled_nodes);
        for (key, mut nodes) in cold {
            let needed = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
            if needed > 0 {
                // Promote at most `needed` cold nodes, without pushing the
                // warm pool past its configured limit.
                let remaining_limit = self
                    .recycle_pool_limit_for(key)
                    .unwrap_or(usize::MAX)
                    .saturating_sub(self.warm_recycled_pool_len(key));
                let promote = nodes.len().min(needed).min(remaining_limit);
                let split_at = nodes.len().saturating_sub(promote);
                let promoted = nodes.split_off(split_at);
                for mut recycled in promoted {
                    recycled.set_warm_origin(true);
                    self.recycled_nodes.entry(key).or_default().push(recycled);
                }
            }
        }

        // Revisit every type key with any recycling state at all.
        let mut keys: HashSet<TypeId> = HashSet::default();
        keys.extend(self.recycled_nodes.keys().copied());
        keys.extend(self.recycled_node_limits.keys().copied());
        keys.extend(self.warm_recycled_node_targets.keys().copied());
        keys.extend(self.recycled_node_prototypes.keys().copied());
        for key in keys {
            let observed_demand = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
            let target = self.update_warm_recycled_node_target(key, observed_demand);
            self.replenish_warm_pool_to_target(key, target);
            self.trim_idle_warm_pool_to_target(key, target);
            self.compact_idle_warm_pool(key);
        }
        self.prune_stable_generations();
        // Compact the dense physical storage if it has grown very large relative
        // to the live node count (e.g. after a spike of recyclable nodes that
        // were all removed). This keeps warm_recycled_node_id_capacity bounded.
        self.compact();
    }
}
3388
/// Abstraction over a cell that owns some [`Applier`] and hands out
/// type-erased mutable borrows of it.
pub trait ApplierHost {
    /// Mutably borrows the applier as a trait object.
    fn borrow_dyn(&self) -> RefMut<'_, dyn Applier>;
    /// Compact internal storage after commands have been applied.
    fn compact(&self) {}
}
3394
/// [`ApplierHost`] that owns a concrete applier `A` behind a `RefCell`,
/// allowing both typed and type-erased borrows.
pub struct ConcreteApplierHost<A: Applier + 'static> {
    inner: RefCell<A>,
}
3398
3399impl<A: Applier + 'static> ConcreteApplierHost<A> {
3400    pub fn new(applier: A) -> Self {
3401        Self {
3402            inner: RefCell::new(applier),
3403        }
3404    }
3405
3406    pub fn borrow_typed(&self) -> RefMut<'_, A> {
3407        self.inner.borrow_mut()
3408    }
3409
3410    pub fn try_borrow_typed(&self) -> Result<RefMut<'_, A>, std::cell::BorrowMutError> {
3411        self.inner.try_borrow_mut()
3412    }
3413
3414    pub fn into_inner(self) -> A {
3415        self.inner.into_inner()
3416    }
3417}
3418
3419impl<A: Applier + 'static> ApplierHost for ConcreteApplierHost<A> {
3420    fn borrow_dyn(&self) -> RefMut<'_, dyn Applier> {
3421        RefMut::map(self.inner.borrow_mut(), |applier| {
3422            applier as &mut dyn Applier
3423        })
3424    }
3425
3426    fn compact(&self) {
3427        self.inner.borrow_mut().compact();
3428    }
3429}
3430
/// Typed borrow of an applier, exposed to callers through `Deref`/`DerefMut`.
pub struct ApplierGuard<'a, A: Applier + 'static> {
    // The underlying RefCell borrow; released when the guard is dropped.
    inner: RefMut<'a, A>,
}
3434
3435impl<'a, A: Applier + 'static> ApplierGuard<'a, A> {
3436    fn new(inner: RefMut<'a, A>) -> Self {
3437        Self { inner }
3438    }
3439}
3440
3441impl<'a, A: Applier + 'static> Deref for ApplierGuard<'a, A> {
3442    type Target = A;
3443
3444    fn deref(&self) -> &Self::Target {
3445        &self.inner
3446    }
3447}
3448
3449impl<'a, A: Applier + 'static> DerefMut for ApplierGuard<'a, A> {
3450    fn deref_mut(&mut self) -> &mut Self::Target {
3451        &mut self.inner
3452    }
3453}
3454
/// Owner of a `SlotTable` plus its lifecycle coordinator, guarded by a
/// `RefCell` so composition passes can borrow the pieces on demand.
pub struct SlotsHost {
    // Cached copy of the table's storage_id(); a Cell so it can be
    // refreshed without a mutable borrow when the table is swapped out.
    storage_key: Cell<usize>,
    inner: RefCell<SlotsHostInner>,
}
3459
/// Compaction requests produced by a finished slot pass; consumed by
/// `SlotsHost::complete_pass_cleanup`.
#[derive(Debug, Default)]
pub(crate) struct SlotPassOutcome {
    // Pass requested dense table/lifecycle storage compaction.
    pub(crate) compacted: bool,
    // Pass requested anchor-registry storage compaction.
    pub(crate) compact_anchor_registry_storage: bool,
    // Pass requested payload-anchor storage compaction.
    pub(crate) compact_payload_storage: bool,
}
3466
/// Result of `SlotsHost::finish_pass`: the pass's compaction requests plus
/// any root children detached during finalization.
#[derive(Default)]
pub(crate) struct FinishedSlotPass {
    pub(crate) outcome: SlotPassOutcome,
    pub(crate) detached_root_children: Vec<slot::DetachedSubtree>,
}
3472
/// Writer state for the slot pass currently in progress on a host.
struct ActivePassState {
    state: slot::SlotWriteSessionState,
}
3476
/// Interior of `SlotsHost`, kept behind one `RefCell` so the table,
/// lifecycle coordinator and pass state can be borrowed together.
struct SlotsHostInner {
    table: SlotTable,
    lifecycle: slot::SlotLifecycleCoordinator,
    // Bound lazily via bind_runtime_state; None for standalone hosts.
    runtime_state: Option<Rc<crate::composer::ComposerRuntimeState>>,
    // Some(..) only between begin_pass and finish_pass/abandon_active_pass.
    active_pass: Option<ActivePassState>,
}
3483
impl Drop for SlotsHost {
    /// Tears down the backing slot table when the host goes away.
    ///
    /// Retained subtrees owned by the bound runtime state are disposed
    /// first; if that fails, they are abandoned instead so the table
    /// itself can still be disposed below.
    fn drop(&mut self) {
        let storage_key = self.storage_key.get();
        // get_mut: no runtime borrow check needed since we hold &mut self.
        let inner = self.inner.get_mut();
        if let Some(state) = inner.runtime_state.clone() {
            if let Err(err) = state.dispose_retained_subtrees_for_host(
                storage_key,
                &mut inner.table,
                &mut inner.lifecycle,
            ) {
                log::error!(
                    "retained subtree disposal failed while dropping SlotsHost {storage_key}: {err}"
                );
                // Fallback: drop the subtrees without normal disposal.
                state.abandon_retained_subtrees_for_host(
                    storage_key,
                    &mut inner.table,
                    &mut inner.lifecycle,
                );
            } else {
                state.clear_host_storage_key(storage_key);
            }
        }
        // Always dispose the table itself, on both success and fallback paths.
        inner.lifecycle.dispose_slot_table(&mut inner.table);
    }
}
3509
impl SlotsHost {
    /// Key identifying the current backing table's storage; refreshed
    /// whenever the table is replaced (reset, transfer, abandon).
    pub(crate) fn storage_key(&self) -> usize {
        self.storage_key.get()
    }

    /// Wraps an existing `SlotTable` in a host with a fresh lifecycle
    /// coordinator and no runtime-state binding.
    pub fn new(storage: SlotTable) -> Self {
        let storage_key = storage.storage_id();
        Self {
            storage_key: Cell::new(storage_key),
            inner: RefCell::new(SlotsHostInner {
                table: storage,
                lifecycle: slot::SlotLifecycleCoordinator::default(),
                runtime_state: None,
                active_pass: None,
            }),
        }
    }

    /// Unconditionally binds this host to `state`, replacing any previous
    /// binding.
    pub(crate) fn bind_runtime_state(&self, state: &Rc<crate::composer::ComposerRuntimeState>) {
        let mut inner = self.inner.borrow_mut();
        inner.runtime_state = Some(Rc::clone(state));
    }

    /// Rebinds the host to `state` when the current binding is absent or
    /// orphaned (no live applier host). Returns `true` when the host ends
    /// up bound to `state`; `false` when the old binding is still live or
    /// disposal of its retained subtrees failed.
    pub(crate) fn rebind_orphaned_runtime_state(
        &self,
        state: &Rc<crate::composer::ComposerRuntimeState>,
    ) -> bool {
        let inner = self.inner.borrow();
        assert!(
            inner.active_pass.is_none(),
            "cannot rebind SlotsHost during an active pass"
        );
        let Some(bound_state) = inner.runtime_state.as_ref() else {
            // Not bound yet: release the shared borrow before
            // bind_runtime_state takes a mutable one.
            drop(inner);
            self.bind_runtime_state(state);
            return true;
        };
        if Rc::ptr_eq(bound_state, state) {
            return true;
        }
        if bound_state.has_live_applier_host() {
            return false;
        }
        drop(inner);

        // Re-check everything under the mutable borrow: the binding may
        // have changed between releasing the shared borrow and re-borrowing.
        let mut inner = self.inner.borrow_mut();
        let Some(bound_state) = inner.runtime_state.as_ref() else {
            inner.runtime_state = Some(Rc::clone(state));
            return true;
        };
        if Rc::ptr_eq(bound_state, state) {
            return true;
        }
        if bound_state.has_live_applier_host() {
            return false;
        }

        // Dispose the orphaned state's retained subtrees before switching
        // the binding. The coordinator is taken out of `inner` so it can
        // be borrowed mutably alongside the table.
        let previous_state = Rc::clone(bound_state);
        let mut lifecycle = std::mem::take(&mut inner.lifecycle);
        lifecycle.flush_pending_drops();
        let host_key = self.storage_key();
        if previous_state
            .dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)
            .is_err()
        {
            // Disposal failed: restore the coordinator, keep the old binding.
            inner.lifecycle = lifecycle;
            return false;
        }
        previous_state.clear_host(self);
        lifecycle.flush_pending_drops();
        inner.runtime_state = Some(Rc::clone(state));
        inner.lifecycle = lifecycle;
        true
    }

    /// Returns the currently bound runtime state, if any.
    pub(crate) fn runtime_state(&self) -> Option<Rc<crate::composer::ComposerRuntimeState>> {
        self.inner.borrow().runtime_state.clone()
    }

    /// Shared borrow of the backing slot table.
    pub(crate) fn borrow(&self) -> Ref<'_, SlotTable> {
        Ref::map(self.inner.borrow(), |inner| &inner.table)
    }

    /// Mutable borrow of the backing slot table.
    pub(crate) fn borrow_mut(&self) -> RefMut<'_, SlotTable> {
        RefMut::map(self.inner.borrow_mut(), |inner| &mut inner.table)
    }

    /// Extracts the slot table from a uniquely held host.
    ///
    /// # Panics
    /// Panics when other `Rc` handles to this host are still alive, or when
    /// a slot pass is active.
    pub fn into_table(self: Rc<Self>) -> Result<SlotTable, NodeError> {
        assert_eq!(
            Rc::strong_count(&self),
            1,
            "cannot transfer SlotsHost table while other host references are alive"
        );
        self.take_table_for_transfer()
    }

    /// Disposes retained subtrees, detaches the runtime state, and swaps
    /// the table out for a fresh default, returning the old table.
    fn take_table_for_transfer(&self) -> Result<SlotTable, NodeError> {
        let inner = self.inner.borrow();
        assert!(
            inner.active_pass.is_none(),
            "cannot take SlotsHost during an active pass"
        );
        drop(inner);
        let mut inner = self.inner.borrow_mut();
        let mut lifecycle = std::mem::take(&mut inner.lifecycle);
        lifecycle.flush_pending_drops();
        if let Some(state) = inner.runtime_state.clone() {
            let host_key = self.storage_key();
            // NOTE(review): if disposal fails here, `?` returns while
            // inner.lifecycle still holds the default taken above — confirm
            // pending drops in the local `lifecycle` cannot be lost.
            state.dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)?;
            state.clear_host(self);
            lifecycle.flush_pending_drops();
        }
        let taken = std::mem::take(&mut inner.table);
        // Track the replacement (default) table's storage id, not the old one's.
        self.storage_key.set(inner.table.storage_id());
        inner.runtime_state = None;
        inner.lifecycle = lifecycle;
        Ok(taken)
    }

    /// Drops all slot contents and restores the host to a pristine state:
    /// fresh table, fresh lifecycle coordinator, no runtime binding.
    pub fn reset(&self) -> Result<(), NodeError> {
        let inner = self.inner.borrow();
        assert!(
            inner.active_pass.is_none(),
            "cannot reset SlotsHost during an active pass"
        );
        let runtime_state = inner.runtime_state.clone();
        drop(inner);
        let mut inner = self.inner.borrow_mut();
        let mut lifecycle = std::mem::take(&mut inner.lifecycle);
        if let Some(state) = runtime_state {
            let host_key = self.storage_key();
            // NOTE(review): as in take_table_for_transfer, a disposal error
            // propagates with inner.lifecycle left defaulted — verify.
            state.dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)?;
            state.clear_host(self);
        }
        lifecycle.dispose_slot_table(&mut inner.table);
        inner.table = SlotTable::default();
        self.storage_key.set(inner.table.storage_id());
        inner.runtime_state = None;
        inner.lifecycle = slot::SlotLifecycleCoordinator::default();
        Ok(())
    }

    /// Like `reset`, but abandons retained subtrees instead of disposing
    /// them; used after a failed apply where normal disposal can no longer
    /// be trusted. Infallible by construction.
    pub(crate) fn abandon_after_apply_failure(&self) {
        let inner = self.inner.borrow();
        assert!(
            inner.active_pass.is_none(),
            "cannot abandon SlotsHost during an active pass"
        );
        let runtime_state = inner.runtime_state.clone();
        drop(inner);
        let mut inner = self.inner.borrow_mut();
        let mut lifecycle = std::mem::take(&mut inner.lifecycle);
        if let Some(state) = runtime_state {
            let host_key = self.storage_key();
            state.abandon_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle);
        }
        lifecycle.dispose_slot_table(&mut inner.table);
        inner.table = SlotTable::default();
        self.storage_key.set(inner.table.storage_id());
        inner.runtime_state = None;
        inner.lifecycle = slot::SlotLifecycleCoordinator::default();
    }

    /// Aggregates table, lifecycle, and retention statistics for debugging.
    pub(crate) fn debug_stats(&self) -> SlotTableDebugStats {
        let inner = self.inner.borrow();
        let local = inner.table.debug_stats();
        let lifecycle = inner.lifecycle.debug_stats();
        let retention = inner
            .runtime_state
            .clone()
            .map(|state| state.slot_retention_debug_stats(self))
            .unwrap_or_default();
        SlotTableDebugStats::from_parts(local, lifecycle, retention)
    }

    /// Captures a structural snapshot of the table, augmented with
    /// runtime-state details when a binding exists.
    pub(crate) fn debug_snapshot(&self) -> slot::SlotDebugSnapshot {
        let inner = self.inner.borrow();
        let mut snapshot = inner.table.debug_snapshot();
        if let Some(state) = inner.runtime_state.clone() {
            state.fill_slot_debug_snapshot(self, &mut snapshot);
        }
        snapshot
    }

    /// Starts a slot pass in `mode`. Panics if a pass is already active.
    pub(crate) fn begin_pass(&self, mode: slot::SlotPassMode) {
        let mut inner = self.inner.borrow_mut();
        assert!(
            inner.active_pass.is_none(),
            "slot pass already active for host"
        );
        let mut state = slot::SlotWriteSessionState::default();
        state.reset_for_pass(mode);
        inner.active_pass = Some(ActivePassState { state });
    }

    /// Whether a slot pass is currently in progress.
    pub(crate) fn has_active_pass(&self) -> bool {
        self.inner.borrow().active_pass.is_some()
    }

    /// Discards the in-progress pass state without finalizing it.
    pub(crate) fn abandon_active_pass(&self) {
        self.inner.borrow_mut().active_pass = None;
    }

    /// Runs `f` with a write session over the table. Requires an active
    /// pass started via `begin_pass` (panics otherwise).
    pub(crate) fn with_write_session<R>(
        &self,
        f: impl FnOnce(&mut slot::SlotWriteSession<'_>) -> R,
    ) -> R {
        let mut inner = self.inner.borrow_mut();
        // Destructure so table, lifecycle, and pass state can be borrowed
        // mutably at the same time.
        let SlotsHostInner {
            table,
            lifecycle,
            active_pass,
            ..
        } = &mut *inner;
        let active_pass = active_pass
            .as_mut()
            .expect("slot write session requires an active pass");
        let mut session = table.write_session(lifecycle, &mut active_pass.state);
        f(&mut session)
    }

    /// Runs `f` with simultaneous mutable access to the table and the
    /// lifecycle coordinator.
    pub(crate) fn with_table_and_lifecycle_mut<R>(
        &self,
        f: impl FnOnce(&mut SlotTable, &mut slot::SlotLifecycleCoordinator) -> R,
    ) -> R {
        let mut inner = self.inner.borrow_mut();
        let SlotsHostInner {
            table, lifecycle, ..
        } = &mut *inner;
        f(table, lifecycle)
    }

    /// Finalizes the active pass against `applier`, returning compaction
    /// requests and any detached root children. A no-op (default result)
    /// when no pass is active.
    pub(crate) fn finish_pass(
        &self,
        applier: &mut dyn Applier,
    ) -> Result<FinishedSlotPass, NodeError> {
        let mut inner = self.inner.borrow_mut();
        let SlotsHostInner {
            table,
            lifecycle,
            active_pass: active_pass_slot,
            ..
        } = &mut *inner;
        // Taking the pass out also marks the pass as no longer active.
        let Some(mut active_pass) = active_pass_slot.take() else {
            return Ok(FinishedSlotPass::default());
        };

        // Payload locations must be current before validation and finalize.
        active_pass.state.flush_payload_location_refreshes(table);

        #[cfg(debug_assertions)]
        if let Err(err) = active_pass.state.validate(table) {
            panic!("slot writer invariant violation before finalize_pass: {err:?}");
        }

        let detached_root_children = {
            let mut session = table.write_session(lifecycle, &mut active_pass.state);
            session.finalize_pass(applier)?
        };

        // The compaction flags are read after finalize so they reflect the
        // whole pass, including finalize_pass itself.
        Ok(FinishedSlotPass {
            outcome: SlotPassOutcome {
                compacted: active_pass.state.request_compaction,
                compact_anchor_registry_storage: active_pass
                    .state
                    .request_anchor_storage_compaction,
                compact_payload_storage: active_pass.state.request_payload_storage_compaction,
            },
            detached_root_children,
        })
    }

    /// Post-pass housekeeping: flushes pending drops, honors the pass's
    /// compaction requests, and verifies table integrity.
    pub(crate) fn complete_pass_cleanup(&self, outcome: &SlotPassOutcome) {
        let mut inner = self.inner.borrow_mut();
        let SlotsHostInner {
            table,
            lifecycle,
            runtime_state,
            ..
        } = &mut *inner;
        lifecycle.flush_pending_drops();
        if outcome.compacted {
            table.compact_storage();
            lifecycle.compact_storage();
        }
        if let Some(state) = runtime_state.clone() {
            // A bound runtime state coordinates identity-storage compaction
            // itself, so delegate both flags to it.
            state.compact_table_identity_storage_for_host(
                self,
                table,
                outcome.compact_anchor_registry_storage,
                outcome.compact_payload_storage,
            );
        } else {
            if outcome.compact_anchor_registry_storage {
                table.compact_anchor_registry_storage(None);
            }
            if outcome.compact_payload_storage {
                table.compact_payload_anchor_registry_storage(None);
            }
        }
        table.assert_fast_integrity("slot pass cleanup");
        #[cfg(any(test, debug_assertions))]
        {
            table.debug_verify();
            if let Some(state) = runtime_state.clone() {
                state.debug_verify_host(self, table);
            }
        }
    }
}
3819
3820fn build_child_positions(children: &[NodeId]) -> HashMap<NodeId, usize> {
3821    let mut positions = HashMap::default();
3822    positions.reserve(children.len());
3823    for (index, &child) in children.iter().enumerate() {
3824        positions.insert(child, index);
3825    }
3826    positions
3827}
3828
3829fn refresh_child_positions(
3830    current: &[NodeId],
3831    positions: &mut HashMap<NodeId, usize>,
3832    start: usize,
3833    end: usize,
3834) {
3835    if current.is_empty() || start >= current.len() {
3836        return;
3837    }
3838    let end = end.min(current.len() - 1);
3839    for (offset, &child) in current[start..=end].iter().enumerate() {
3840        positions.insert(child, start + offset);
3841    }
3842}
3843
3844fn insert_child_into_diff_state(
3845    current: &mut ChildList,
3846    positions: &mut HashMap<NodeId, usize>,
3847    index: usize,
3848    child: NodeId,
3849) {
3850    let index = index.min(current.len());
3851    current.insert(index, child);
3852    refresh_child_positions(current, positions, index, current.len() - 1);
3853}
3854
3855fn move_child_in_diff_state(
3856    current: &mut ChildList,
3857    positions: &mut HashMap<NodeId, usize>,
3858    from_index: usize,
3859    target_index: usize,
3860) -> usize {
3861    let child = current.remove(from_index);
3862    let to_index = target_index.min(current.len());
3863    current.insert(to_index, child);
3864    refresh_child_positions(
3865        current,
3866        positions,
3867        from_index.min(to_index),
3868        from_index.max(to_index),
3869    );
3870    to_index
3871}
3872
3873pub(crate) use state::MutableStateInner;
3874pub use state::{MutableState, OwnedMutableState, SnapshotStateList, SnapshotStateMap, State};
3875
3876fn hash_key<K: Hash>(key: &K) -> Key {
3877    let mut hasher = hash::default::new();
3878    key.hash(&mut hasher);
3879    hasher.finish()
3880}
3881
3882pub(crate) fn explicit_group_key_seed<K: Hash>(
3883    key: &K,
3884    caller: &'static std::panic::Location<'static>,
3885) -> slot::GroupKeySeed {
3886    let source_key = location_key(caller.file(), caller.line(), caller.column());
3887    let explicit_key = hash_key(key);
3888    slot::GroupKeySeed::keyed(source_key, explicit_key)
3889}
3890
3891#[cfg(test)]
3892#[path = "tests/mod.rs"]
3893mod tests;
3894
3895#[cfg(test)]
3896#[path = "tests/recursive_decrease_increase_test.rs"]
3897mod recursive_decrease_increase_test;
3898
3899pub mod collections;
3900pub mod hash;