1#![doc = r"Core runtime pieces for the Cranpose experiment."]
2
3pub extern crate self as cranpose_core;
4
5mod callbacks;
6mod composer;
7pub mod composer_context;
8mod composition;
9mod composition_locals;
10mod debug_trace;
11mod emit;
12#[cfg(any(feature = "internal", test))]
13mod frame_clock;
14mod hooks;
15mod launched_effect;
16pub mod owned;
17pub mod platform;
18mod recompose;
19mod retention;
20pub mod runtime;
21mod slot;
22pub mod snapshot_double_index_heap;
23pub mod snapshot_id_set;
24pub mod snapshot_pinning;
25pub mod snapshot_state_observer;
26pub mod snapshot_v2;
27mod snapshot_weak_set;
28mod state;
29pub mod subcompose;
30
31#[cfg(feature = "internal")]
32#[doc(hidden)]
33pub mod internal {
34 pub use crate::frame_clock::{FrameCallbackRegistration, FrameClock};
35}
36pub use callbacks::{CallbackHolder, CallbackHolder1, ParamSlot, ParamState, ReturnSlot};
37pub use composer::{Composer, ValueSlotHandle};
38pub(crate) use composer::{ComposerCore, EmittedNode, ParentAttachMode, ParentFrame};
39pub use composition::{Composition, ROOT_RENDER_REPLAY_LIMIT};
40pub use composition_locals::{
41 compositionLocalOf, compositionLocalOfWithPolicy, staticCompositionLocalOf, CompositionLocal,
42 CompositionLocalProvider, ProvidedValue, StaticCompositionLocal,
43};
44pub(crate) use composition_locals::{LocalStateEntry, StaticLocalEntry};
45#[doc(hidden)]
46pub use debug_trace::{
47 debug_label_current_scope, debug_live_recompose_scope_count,
48 debug_recompose_scope_registry_stats, debug_scope_invalidation_sources, debug_scope_label,
49};
50pub use hooks::{
51 derivedStateOf, mutableStateList, mutableStateListOf, mutableStateMap, mutableStateMapOf,
52 mutableStateOf, ownedMutableStateOf, remember, rememberUpdatedState, try_mutableStateOf,
53 useState,
54};
55#[cfg(feature = "internal")]
56#[doc(hidden)]
57pub use hooks::{withFrameMillis, withFrameNanos};
58pub use launched_effect::{
59 __launched_effect_async_impl, __launched_effect_impl, CancelToken, LaunchedEffectScope,
60};
61pub use owned::Owned;
62pub use platform::{Clock, RuntimeScheduler};
63pub use retention::{RetentionBudget, RetentionEvictionPolicy, RetentionMode, RetentionPolicy};
64#[doc(hidden)]
65pub use runtime::{
66 current_runtime_handle, schedule_frame, schedule_node_update, DefaultScheduler, Runtime,
67 RuntimeHandle, StateId, TaskHandle,
68};
69pub use slot::{
70 SlotDebugAnchor, SlotDebugEntry, SlotDebugEntryKind, SlotDebugGroup, SlotDebugScope,
71 SlotDebugSnapshot, SlotRetentionDebugStats, SlotTable, SlotTableDebugStats,
72 SlotTableLocalDebugStats, SlotTableMutationDebugStats,
73};
74#[doc(hidden)]
75pub use snapshot_state_observer::SnapshotStateObserver;
76
77pub fn run_in_mutable_snapshot<T>(block: impl FnOnce() -> T) -> Result<T, &'static str> {
102 let snapshot = snapshot_v2::take_mutable_snapshot(None, None);
103
104 IN_APPLIED_SNAPSHOT.with(|c| c.set(true));
106 let value = snapshot.enter(block);
107 IN_APPLIED_SNAPSHOT.with(|c| c.set(false));
108
109 match snapshot.apply() {
110 snapshot_v2::SnapshotApplyResult::Success => Ok(value),
111 snapshot_v2::SnapshotApplyResult::Failure => Err("Snapshot apply failed"),
112 }
113}
114
115pub fn dispatch_ui_event<T>(block: impl FnOnce() -> T) -> Option<T> {
130 run_in_mutable_snapshot(block).ok()
131}
132
thread_local! {
    // True while code runs between `enter_event_handler` / `exit_event_handler`.
    pub(crate) static IN_EVENT_HANDLER: Cell<bool> = const { Cell::new(false) };
    // True while `run_in_mutable_snapshot` is executing its block.
    pub(crate) static IN_APPLIED_SNAPSHOT: Cell<bool> = const { Cell::new(false) };
}
145
/// Len/capacity counters for the buffers a composition pass touches.
/// Each `*_len`/`*_cap` pair mirrors a `Vec`'s `len()`/`capacity()`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct CompositionPassDebugStats {
    // Queued `Command`s.
    pub commands_len: usize,
    pub commands_cap: usize,
    // Command payload storage, in bytes.
    pub command_payload_len_bytes: usize,
    pub command_payload_cap_bytes: usize,
    // Child-sync scratch buffers.
    pub sync_children_len: usize,
    pub sync_children_cap: usize,
    pub sync_child_ids_len: usize,
    pub sync_child_ids_cap: usize,
    // Registered side effects.
    pub side_effects_len: usize,
    pub side_effects_cap: usize,
}
159
160pub fn enter_event_handler() {
164 IN_EVENT_HANDLER.with(|c| c.set(true));
165}
166
167pub fn exit_event_handler() {
169 IN_EVENT_HANDLER.with(|c| c.set(false));
170}
171
172pub fn in_event_handler() -> bool {
174 IN_EVENT_HANDLER.with(|c| c.get())
175}
176
177pub fn in_applied_snapshot() -> bool {
179 IN_APPLIED_SNAPSHOT.with(|c| c.get())
180}
181
182#[cfg(test)]
183pub use runtime::{TestRuntime, TestScheduler};
184
185use crate::collections::map::{HashMap, HashSet};
186use smallvec::SmallVec;
187use std::any::{Any, TypeId};
188use std::cell::{Cell, Ref, RefCell, RefMut};
189use std::cmp::Reverse;
190use std::collections::BinaryHeap;
191use std::hash::{Hash, Hasher};
192use std::ops::{Deref, DerefMut};
193use std::rc::{Rc, Weak};
194use std::sync::atomic::{AtomicUsize, Ordering};
195#[cfg(any(test, debug_assertions))]
196use std::sync::{Mutex, OnceLock};
197
/// Stable 64-bit hash identifying a group / source location (see `location_key`).
pub type Key = u64;
/// Index of a node managed by an `Applier`.
pub type NodeId = usize;
200
/// Debug-only record of the source location that produced a `Key`, used to
/// detect hash collisions between distinct call sites.
#[cfg(any(test, debug_assertions))]
#[derive(Clone, Debug, PartialEq, Eq)]
struct LocationKeyDebugInfo {
    file: String,
    line: u32,
    column: u32,
}
208
/// Lazily-initialized global map from `Key` to the location that produced it.
#[cfg(any(test, debug_assertions))]
fn location_key_registry() -> &'static Mutex<HashMap<Key, LocationKeyDebugInfo>> {
    static REGISTRY: OnceLock<Mutex<HashMap<Key, LocationKeyDebugInfo>>> = OnceLock::new();
    REGISTRY.get_or_init(|| Mutex::new(HashMap::default()))
}

/// Locks the registry, recovering the data from a poisoned mutex — a panic
/// in another test must not cascade into spurious lock failures here.
#[cfg(any(test, debug_assertions))]
fn lock_location_key_registry() -> std::sync::MutexGuard<'static, HashMap<Key, LocationKeyDebugInfo>>
{
    location_key_registry()
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
222
/// Records `key` -> (file, line, column) and panics if the same key was
/// previously registered for a *different* location (a hash collision).
#[cfg(any(test, debug_assertions))]
fn register_location_key_debug_info(key: Key, file: &str, line: u32, column: u32) {
    let info = LocationKeyDebugInfo {
        file: file.to_owned(),
        line,
        column,
    };
    // The inner block scopes the mutex guard so it is dropped *before* the
    // panic below — panicking while holding the lock would poison it.
    let collision = {
        let mut registry = lock_location_key_registry();
        match registry.entry(key) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(info);
                None
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                let existing = entry.get();
                // Re-registering the identical location is fine; only a
                // different location for the same key is a collision.
                (existing != &info).then(|| (existing.clone(), info))
            }
        }
    };
    if let Some((existing, incoming)) = collision {
        panic!("location key collision: key={key} first={existing:?} second={incoming:?}");
    }
}
247
/// Whether location-key collision diagnostics were requested through the
/// `CRANPOSE_LOCATION_KEY_DIAGNOSTICS` environment variable. The variable is
/// read once per process and the answer cached.
#[cfg(all(debug_assertions, not(test)))]
fn location_key_diagnostics_enabled() -> bool {
    static ENABLED: OnceLock<bool> = OnceLock::new();
    let enabled = ENABLED
        .get_or_init(|| std::env::var_os("CRANPOSE_LOCATION_KEY_DIAGNOSTICS").is_some());
    *enabled
}
253
/// Test-only entry point exposing the private collision registry.
#[cfg(test)]
pub(crate) fn register_location_key_debug_info_for_test(
    key: Key,
    file: &str,
    line: u32,
    column: u32,
) {
    register_location_key_debug_info(key, file, line, column);
}

/// Slot validation is always on under `cfg(test)`; see the debug-build
/// variant below for the env-gated version.
#[cfg(test)]
pub(crate) fn slot_validation_diagnostics_enabled() -> bool {
    true
}
268
/// Whether slot-table validation diagnostics were requested through the
/// `CRANPOSE_VALIDATE_SLOTS` environment variable (read once per process).
#[cfg(all(debug_assertions, not(test)))]
pub(crate) fn slot_validation_diagnostics_enabled() -> bool {
    // `OnceLock` is already imported at the top of the file under
    // `cfg(any(test, debug_assertions))`, matching this function's cfg.
    static ENABLED: OnceLock<bool> = OnceLock::new();
    *ENABLED.get_or_init(|| std::env::var_os("CRANPOSE_VALIDATE_SLOTS").is_some())
}
274
275fn source_location_key(file: &str, line: u32, column: u32) -> Key {
276 let mut hash = 0xcbf2_9ce4_8422_2325u64;
277 hash = fnv1a_location_key_bytes(hash, file.as_bytes());
278 hash = fnv1a_location_key_bytes(hash, &[0xff]);
279 hash = fnv1a_location_key_bytes(hash, &line.to_le_bytes());
280 hash = fnv1a_location_key_bytes(hash, &[0xfe]);
281 hash = fnv1a_location_key_bytes(hash, &column.to_le_bytes());
282 avalanche_location_key(hash)
283}
284
/// Folds `bytes` into an FNV-1a accumulator: XOR each byte into the hash,
/// then multiply by the 64-bit FNV prime (wrapping on overflow).
fn fnv1a_location_key_bytes(hash: u64, bytes: &[u8]) -> u64 {
    const FNV_PRIME: u64 = 0x0000_0100_0000_01b3;
    bytes
        .iter()
        .fold(hash, |acc, &byte| (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME))
}
292
/// Finalizes a hash with the xorshift-multiply avalanche mix (the 64-bit
/// finalizer popularized by MurmurHash3), spreading input bits across the
/// whole word. The transform is a bijection on `u64`.
fn avalanche_location_key(seed: u64) -> u64 {
    let mut mixed = seed;
    for multiplier in [0xff51_afd7_ed55_8ccd_u64, 0xc4ce_b9fe_1a85_ec53] {
        mixed = (mixed ^ (mixed >> 33)).wrapping_mul(multiplier);
    }
    mixed ^ (mixed >> 33)
}
300
/// Computes the stable `Key` for a `file!()`/`line!()`/`column!()` triple.
///
/// Test builds record every key so collisions panic immediately; debug
/// (non-test) builds record only when opted in via
/// `CRANPOSE_LOCATION_KEY_DIAGNOSTICS`; release builds skip the bookkeeping.
pub fn location_key(file: &str, line: u32, column: u32) -> Key {
    let key = source_location_key(file, line, column);
    #[cfg(test)]
    register_location_key_debug_info(key, file, line, column);
    #[cfg(all(debug_assertions, not(test)))]
    if location_key_diagnostics_enabled() {
        register_location_key_debug_info(key, file, line, column);
    }
    key
}
311
/// Handle into the slot table's anchor storage. `id == 0` is the invalid
/// sentinel; `generation` disambiguates reused ids.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
pub struct AnchorId {
    id: u32,
    generation: u32,
}

impl AnchorId {
    /// The sentinel anchor: never refers to a live slot.
    pub(crate) const INVALID: AnchorId = AnchorId {
        id: 0,
        generation: 0,
    };

    /// Wraps a storage index as a first-generation anchor.
    /// Panics (via `checked_usize_to_u32`) if the index exceeds `u32`.
    pub(crate) fn new(id: usize) -> Self {
        Self {
            id: crate::slot::checked_usize_to_u32(id, "anchor id"),
            generation: 1,
        }
    }

    /// True for any anchor other than `INVALID` (only the id is checked).
    pub fn is_valid(&self) -> bool {
        self.id != 0
    }
}
342
/// Process-unique id of a `RecomposeScope` (see `next_scope_id`).
pub(crate) type ScopeId = usize;
/// Process-unique id of a composition local (see `next_local_key`).
type LocalKey = usize;
/// Id of a frame-clock callback registration.
pub(crate) type FrameCallbackId = u64;
/// Immutable, cheaply clonable snapshot of the composition-local stack.
type LocalStackSnapshot = Rc<Vec<composer::LocalContext>>;

thread_local! {
    // Shared empty stack so scopes without locals avoid an allocation.
    static EMPTY_LOCAL_STACK: LocalStackSnapshot = Rc::new(Vec::new());
    // Debug-only: scope id -> static label (see debug_trace re-exports).
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_LABELS: RefCell<HashMap<usize, &'static str>> = RefCell::new(HashMap::default());
    // Debug-only: scope id -> descriptions of what invalidated it.
    #[cfg(debug_assertions)]
    static DEBUG_SCOPE_INVALIDATION_SOURCES: RefCell<HashMap<usize, HashSet<String>>> =
        RefCell::new(HashMap::default());
    // Test-only switch to force scope tracking on/off.
    #[cfg(all(test, debug_assertions))]
    static DEBUG_SCOPE_TRACKING_OVERRIDE: Cell<Option<bool>> = const { Cell::new(None) };
}
358
// Monotonic id sources (start at 1 so 0 can serve as "unset"), plus a live
// counter maintained by `RecomposeScopeInner::new` / `Drop`.
static NEXT_SCOPE_ID: AtomicUsize = AtomicUsize::new(1);
static NEXT_LOCAL_KEY: AtomicUsize = AtomicUsize::new(1);
static LIVE_RECOMPOSE_SCOPE_COUNT: AtomicUsize = AtomicUsize::new(0);

/// Allocates the next scope id. Relaxed ordering: only uniqueness matters.
fn next_scope_id() -> ScopeId {
    NEXT_SCOPE_ID.fetch_add(1, Ordering::Relaxed)
}

/// Allocates the next composition-local key.
fn next_local_key() -> LocalKey {
    NEXT_LOCAL_KEY.fetch_add(1, Ordering::Relaxed)
}

/// Returns the shared empty local stack (refcount bump, no allocation).
fn empty_local_stack() -> LocalStackSnapshot {
    EMPTY_LOCAL_STACK.with(Rc::clone)
}
374
/// Callback that re-runs a scope's content: either a plain fn pointer
/// (no allocation) or a boxed closure with captured state.
enum RecomposeCallback {
    Static(fn(&Composer)),
    Dynamic(Box<dyn FnMut(&Composer) + 'static>),
}
379
/// Shared state behind a `RecomposeScope` handle. All flags are `Cell`s —
/// scopes live on a single thread.
pub(crate) struct RecomposeScopeInner {
    // Process-unique id (see `next_scope_id`).
    id: ScopeId,
    runtime: RuntimeHandle,
    // Needs recomposition.
    invalid: Cell<bool>,
    // Registered with the runtime's invalid-scope queue (dedupe flag).
    enqueued: Cell<bool>,
    // Inactive scopes stay invalid but are not enqueued.
    active: Cell<bool>,
    // Whether the scope has composed at least once.
    composed_once: Cell<bool>,
    // A recompose was requested while one was in flight.
    pending_recompose: Cell<bool>,
    // One-shot overrides consumed by `should_recompose`.
    force_reuse: Cell<bool>,
    force_recompose: Cell<bool>,
    retention_mode: Cell<RetentionMode>,
    // Last known parent node, used as a re-attachment hint.
    parent_hint: Cell<Option<NodeId>>,
    recompose: RefCell<Option<RecomposeCallback>>,
    parent_scope: RefCell<Option<Weak<RecomposeScopeInner>>>,
    // Composition-local stack captured when the scope last composed.
    local_stack: RefCell<LocalStackSnapshot>,
    // 0 means "no slots host recorded" (see `slots_storage_key`).
    slots_storage_key: Cell<usize>,
    slots_runtime_state: RefCell<Option<std::rc::Weak<crate::composer::ComposerRuntimeState>>>,
    // State ids this scope reads; unregistered in `Drop`.
    state_subscriptions: RefCell<HashSet<StateId>>,
}
399
impl RecomposeScopeInner {
    /// Builds a fresh, active, not-yet-composed scope and bumps the global
    /// live-scope counter (decremented again in `Drop`).
    fn new(runtime: RuntimeHandle) -> Self {
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_add(1, Ordering::Relaxed);
        Self {
            id: next_scope_id(),
            runtime,
            invalid: Cell::new(false),
            enqueued: Cell::new(false),
            active: Cell::new(true),
            composed_once: Cell::new(false),
            pending_recompose: Cell::new(false),
            force_reuse: Cell::new(false),
            force_recompose: Cell::new(false),
            retention_mode: Cell::new(RetentionMode::DisposeWhenInactive),
            parent_hint: Cell::new(None),
            recompose: RefCell::new(None),
            parent_scope: RefCell::new(None),
            local_stack: RefCell::new(empty_local_stack()),
            slots_storage_key: Cell::new(0),
            slots_runtime_state: RefCell::new(None),
            state_subscriptions: RefCell::new(HashSet::default()),
        }
    }
}
424
impl Drop for RecomposeScopeInner {
    fn drop(&mut self) {
        LIVE_RECOMPOSE_SCOPE_COUNT.fetch_sub(1, Ordering::Relaxed);
        // Release every state subscription this scope registered.
        let subscriptions = std::mem::take(self.state_subscriptions.get_mut());
        for state_id in subscriptions {
            self.runtime.unregister_state_scope(state_id, self.id);
        }
        #[cfg(debug_assertions)]
        {
            // `try_with`: the thread-locals may already be destroyed during
            // thread teardown; losing the debug entries then is harmless.
            let _ = DEBUG_SCOPE_LABELS.try_with(|labels| {
                labels.borrow_mut().remove(&self.id);
            });
            let _ = DEBUG_SCOPE_INVALIDATION_SOURCES.try_with(|sources| {
                sources.borrow_mut().remove(&self.id);
            });
        }
        // If still queued for recomposition, tell the runtime so it can drop
        // the stale queue entry.
        if self.enqueued.replace(false) {
            self.runtime.mark_scope_recomposed(self.id);
        }
    }
}
446
/// Size/capacity snapshot of the runtime's recompose-scope registry.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct RecomposeScopeRegistryDebugStats {
    pub len: usize,
    pub capacity: usize,
}

/// Cheap, clonable handle to a recompose scope (an `Rc` around the inner
/// state; clones refer to the same scope).
#[derive(Clone)]
pub struct RecomposeScope {
    inner: Rc<RecomposeScopeInner>,
}
457
impl PartialEq for RecomposeScope {
    /// Identity equality: handles are equal only when they share the same
    /// inner allocation.
    fn eq(&self, other: &Self) -> bool {
        Rc::ptr_eq(&self.inner, &other.inner)
    }
}

impl Eq for RecomposeScope {}

impl Hash for RecomposeScope {
    /// Hashes the unique scope id — consistent with the pointer equality
    /// above, since one inner allocation owns exactly one id.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id().hash(state);
    }
}
471
impl RecomposeScope {
    /// Creates a scope bound to `runtime` with a freshly allocated id.
    fn new(runtime: RuntimeHandle) -> Self {
        Self {
            inner: Rc::new(RecomposeScopeInner::new(runtime)),
        }
    }

    /// Weak handle used by the runtime's invalid-scope registry.
    pub(crate) fn downgrade(&self) -> Weak<RecomposeScopeInner> {
        Rc::downgrade(&self.inner)
    }

    pub fn id(&self) -> ScopeId {
        self.inner.id
    }

    pub fn is_invalid(&self) -> bool {
        self.inner.invalid.get()
    }

    pub fn is_active(&self) -> bool {
        self.inner.active.get()
    }

    /// Remembers that this scope reads `state_id`; the subscription is
    /// released in `RecomposeScopeInner::drop`.
    fn record_state_subscription(&self, state_id: StateId) {
        self.inner.state_subscriptions.borrow_mut().insert(state_id);
    }

    /// Flags the scope as needing recomposition. Inactive scopes only set
    /// the flag; active scopes are also enqueued with the runtime, with
    /// `enqueued` deduplicating repeat registrations.
    fn invalidate(&self) {
        self.inner.invalid.set(true);
        if !self.inner.active.get() {
            return;
        }
        if !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// Clears invalidation and one-shot force flags after a recompose, and
    /// — if another recompose was requested while this one ran — either
    /// re-invalidates (active) or just marks invalid (inactive).
    fn mark_recomposed(&self) {
        self.inner.invalid.set(false);
        self.inner.force_reuse.set(false);
        self.inner.force_recompose.set(false);
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
        let pending = self.inner.pending_recompose.replace(false);
        if pending {
            if self.inner.active.get() {
                self.invalidate();
            } else {
                self.inner.invalid.set(true);
            }
        }
    }

    /// Installs a capturing recompose callback.
    fn set_recompose(&self, callback: Box<dyn FnMut(&Composer) + 'static>) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Dynamic(callback));
    }

    /// Installs a non-capturing recompose callback (no allocation).
    fn set_recompose_fn(&self, callback: fn(&Composer)) {
        *self.inner.recompose.borrow_mut() = Some(RecomposeCallback::Static(callback));
    }

    /// Takes and runs the stored recompose callback; returns whether one
    /// was present. Note the callback is *consumed* — callers are expected
    /// to re-install it during composition.
    fn run_recompose(&self, composer: &Composer) -> bool {
        let callback = self.inner.recompose.borrow_mut().take();
        if let Some(callback) = callback {
            match callback {
                RecomposeCallback::Static(callback) => callback(composer),
                RecomposeCallback::Dynamic(mut callback) => callback(composer),
            }
            true
        } else {
            false
        }
    }

    fn has_recompose_callback(&self) -> bool {
        self.inner.recompose.borrow().is_some()
    }

    /// Captures the composition-local stack in effect for this scope.
    fn snapshot_locals(&self, stack: LocalStackSnapshot) {
        *self.inner.local_stack.borrow_mut() = stack;
    }

    fn local_stack(&self) -> LocalStackSnapshot {
        self.inner.local_stack.borrow().clone()
    }

    fn set_parent_hint(&self, parent: Option<NodeId>) {
        self.inner.parent_hint.set(parent);
    }

    /// Records the enclosing scope (held weakly to avoid parent/child Rc
    /// cycles).
    fn set_parent_scope(&self, parent: Option<RecomposeScope>) {
        *self.inner.parent_scope.borrow_mut() = parent.map(|scope| scope.downgrade());
    }

    fn parent_scope(&self) -> Option<RecomposeScope> {
        self.inner
            .parent_scope
            .borrow()
            .as_ref()
            .and_then(Weak::upgrade)
            .map(|inner| RecomposeScope { inner })
    }

    /// Walks up the parent chain to the nearest ancestor that can actually
    /// recompose (i.e. has a callback installed).
    fn callback_promotion_target(&self) -> Option<RecomposeScope> {
        let mut current = self.parent_scope();
        while let Some(scope) = current {
            if scope.has_recompose_callback() {
                return Some(scope);
            }
            current = scope.parent_scope();
        }
        None
    }

    fn parent_hint(&self) -> Option<NodeId> {
        self.inner.parent_hint.get()
    }

    /// Records which slots host backs this scope (key plus a weak link to
    /// the composer runtime state that owns it).
    fn set_slots_host(&self, host: &Rc<SlotsHost>) {
        self.inner.slots_storage_key.set(host.storage_key());
        *self.inner.slots_runtime_state.borrow_mut() =
            host.runtime_state().map(|state| Rc::downgrade(&state));
    }

    /// The recorded storage key, with 0 treated as "none recorded".
    pub(crate) fn slots_storage_key(&self) -> Option<usize> {
        let key = self.inner.slots_storage_key.get();
        (key != 0).then_some(key)
    }

    pub(crate) fn slots_runtime_state(&self) -> Option<Rc<crate::composer::ComposerRuntimeState>> {
        self.inner
            .slots_runtime_state
            .borrow()
            .as_ref()
            .and_then(std::rc::Weak::upgrade)
    }

    /// Deactivates the scope; if it was queued for recomposition the queue
    /// entry is retired. Idempotent.
    pub fn deactivate(&self) {
        if !self.inner.active.replace(false) {
            return;
        }
        if self.inner.enqueued.replace(false) {
            self.inner.runtime.mark_scope_recomposed(self.inner.id);
        }
    }

    /// Reactivates the scope; a pending invalidation accrued while inactive
    /// is re-enqueued with the runtime. Idempotent.
    pub fn reactivate(&self) {
        if self.inner.active.replace(true) {
            return;
        }
        if self.inner.invalid.get() && !self.inner.enqueued.replace(true) {
            self.inner
                .runtime
                .register_invalid_scope(self.inner.id, self.downgrade());
        }
    }

    /// One-shot: make the next `should_recompose` answer "reuse" (false).
    /// Also sets `pending_recompose` so a recompose follows the reuse pass.
    pub fn force_reuse(&self) {
        self.inner.force_reuse.set(true);
        self.inner.force_recompose.set(false);
        self.inner.pending_recompose.set(true);
    }

    /// One-shot: make the next `should_recompose` answer "recompose" (true).
    pub fn force_recompose(&self) {
        self.inner.force_recompose.set(true);
        self.inner.force_reuse.set(false);
        self.inner.pending_recompose.set(false);
    }

    pub(crate) fn set_retention_mode(&self, mode: RetentionMode) {
        self.inner.retention_mode.set(mode);
    }

    pub(crate) fn retention_mode(&self) -> RetentionMode {
        self.inner.retention_mode.get()
    }

    /// Consumes the one-shot force flags (recompose wins over reuse) and
    /// otherwise falls back to the invalidation flag.
    pub fn should_recompose(&self) -> bool {
        if self.inner.force_recompose.replace(false) {
            self.inner.force_reuse.set(false);
            return true;
        }
        if self.inner.force_reuse.replace(false) {
            return false;
        }
        self.is_invalid()
    }

    pub fn has_composed_once(&self) -> bool {
        self.inner.composed_once.get()
    }

    fn mark_composed_once(&self) {
        self.inner.composed_once.set(true);
    }
}
672
#[cfg(test)]
impl RecomposeScope {
    /// Test-only constructor exposing the private `new`.
    pub(crate) fn new_for_test(runtime: RuntimeHandle) -> Self {
        Self::new(runtime)
    }
}
679
/// Options applied when recomposing a scope; mirrors the `force_reuse` /
/// `force_recompose` / `set_retention_mode` methods on `RecomposeScope`.
#[derive(Debug, Clone, Copy, Default)]
pub struct RecomposeOptions {
    pub force_reuse: bool,
    pub force_recompose: bool,
    pub retention: RetentionMode,
}

/// Errors returned by `Applier` and node accessors.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NodeError {
    // No node with this id exists.
    Missing { id: NodeId },
    // Node exists but is not the expected concrete type.
    TypeMismatch { id: NodeId, expected: &'static str },
    // Node exists but required context was unavailable.
    MissingContext { id: NodeId, reason: &'static str },
    // Insertion attempted on an already-occupied id.
    AlreadyExists { id: NodeId },
}
694
695impl std::fmt::Display for NodeError {
696 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
697 match self {
698 NodeError::Missing { id } => write!(f, "node {id} missing"),
699 NodeError::TypeMismatch { id, expected } => {
700 write!(f, "node {id} type mismatch; expected {expected}")
701 }
702 NodeError::MissingContext { id, reason } => {
703 write!(f, "missing context for node {id}: {reason}")
704 }
705 NodeError::AlreadyExists { id } => {
706 write!(f, "node {id} already exists")
707 }
708 }
709 }
710}
711
712impl std::error::Error for NodeError {}
713
714pub use subcompose::{
715 ContentTypeReusePolicy, DefaultSlotReusePolicy, SlotId, SlotReusePolicy, SubcomposeState,
716};
717
/// The pipeline phase currently executing.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Phase {
    Compose,
    Measure,
    Layout,
}
724
725pub use composer_context::with_composer as with_current_composer;
726
/// Compose-style alias of `with_current_composer`; runs `f` with the
/// composer installed for the current thread.
#[allow(non_snake_case)]
pub fn withCurrentComposer<R>(f: impl FnOnce(&Composer) -> R) -> R {
    composer_context::with_composer(f)
}

/// Like `withCurrentComposer`, but returns `None` instead of panicking when
/// no composer is installed (delegates to `try_with_composer`).
fn with_current_composer_opt<R>(f: impl FnOnce(&Composer) -> R) -> Option<R> {
    composer_context::try_with_composer(f)
}
735
736#[track_caller]
737pub fn with_key<K: Hash>(key: &K, content: impl FnOnce()) {
738 let seed = explicit_group_key_seed(key, std::panic::Location::caller());
739 with_current_composer(|composer| composer.with_group_seed(seed, |_| content()));
740}
741
742#[allow(non_snake_case)]
743pub fn withKey<K: Hash>(key: &K, content: impl FnOnce()) {
744 with_key(key, content)
745}
746
/// Remembered state of one `DisposableEffect!` call site.
#[derive(Default)]
struct DisposableEffectState {
    // Hash of the keys from the last run; `None` = never ran.
    key: Option<Key>,
    // Cleanup returned by the last run, invoked before re-running or on drop.
    cleanup: Option<Box<dyn FnOnce()>>,
}
752
753impl DisposableEffectState {
754 fn should_run(&self, key: Key) -> bool {
755 match self.key {
756 Some(current) => current != key,
757 None => true,
758 }
759 }
760
761 fn set_key(&mut self, key: Key) {
762 self.key = Some(key);
763 }
764
765 fn set_cleanup(&mut self, cleanup: Option<Box<dyn FnOnce()>>) {
766 self.cleanup = cleanup;
767 }
768
769 fn run_cleanup(&mut self) {
770 if let Some(cleanup) = self.cleanup.take() {
771 cleanup();
772 }
773 }
774}
775
impl Drop for DisposableEffectState {
    /// Leaving the composition runs the outstanding cleanup.
    fn drop(&mut self) {
        self.run_cleanup();
    }
}

/// Receiver passed to a disposable effect; exposes `on_dispose`.
#[derive(Clone, Copy, Debug, Default)]
pub struct DisposableEffectScope;

/// Result of a disposable effect: an optional cleanup to run on disposal.
#[derive(Default)]
pub struct DisposableEffectResult {
    cleanup: Option<Box<dyn FnOnce()>>,
}
789
790impl DisposableEffectScope {
791 pub fn on_dispose(&self, cleanup: impl FnOnce() + 'static) -> DisposableEffectResult {
792 DisposableEffectResult::new(cleanup)
793 }
794}
795
796impl DisposableEffectResult {
797 pub fn new(cleanup: impl FnOnce() + 'static) -> Self {
798 Self {
799 cleanup: Some(Box::new(cleanup)),
800 }
801 }
802
803 fn into_cleanup(self) -> Option<Box<dyn FnOnce()>> {
804 self.cleanup
805 }
806}
807
/// Registers `effect` to run after the current composition pass applies.
#[allow(non_snake_case)]
pub fn SideEffect(effect: impl FnOnce() + 'static) {
    with_current_composer(|composer| composer.register_side_effect(effect));
}
812
/// Backing implementation for the `DisposableEffect!` macro.
///
/// `group_key` identifies the call site (a `location_key`); `keys` is hashed
/// and the effect only re-runs when that hash changes. The previous cleanup
/// runs *before* the new key is recorded, and the effect itself is deferred
/// to the side-effect phase via `register_side_effect`.
pub fn __disposable_effect_impl<K, F>(group_key: Key, keys: K, effect: F)
where
    K: Hash,
    F: FnOnce(DisposableEffectScope) -> DisposableEffectResult + 'static,
{
    with_current_composer(|composer| {
        composer.with_group(group_key, |composer| {
            // NOTE(review): `hash_key` is defined elsewhere in this file;
            // presumably a stable hash of `keys` — confirm there.
            let key_hash = hash_key(&keys);
            let state = composer.remember_effect(DisposableEffectState::default);
            if state.with(|state| state.should_run(key_hash)) {
                state.update(|state| {
                    // Old cleanup first, then record the new key.
                    state.run_cleanup();
                    state.set_key(key_hash);
                });
                let state_for_effect = state.clone();
                let mut effect_opt = Some(effect);
                composer.register_side_effect(move || {
                    // `FnOnce` effect behind a reusable side-effect slot:
                    // `take()` guarantees at most one invocation.
                    if let Some(effect) = effect_opt.take() {
                        let result = effect(DisposableEffectScope);
                        state_for_effect.update(|state| state.set_cleanup(result.into_cleanup()));
                    }
                });
            }
        });
    });
}
841
/// Runs `$effect` when `$keys`'s hash changes, with cleanup on key change or
/// disposal; the call site's `file!()/line!()/column!()` forms the group key.
/// See `__disposable_effect_impl`.
#[macro_export]
macro_rules! DisposableEffect {
    ($keys:expr, $effect:expr) => {
        $crate::__disposable_effect_impl(
            $crate::location_key(file!(), line!(), column!()),
            $keys,
            $effect,
        )
    };
}
852
/// Shadows each listed name with a clone (optionally of an explicit
/// expression via `alias = expr`) before evaluating `$body` — a shorthand
/// for the clone-then-move closure pattern.
#[macro_export]
macro_rules! clone_captures {
    ($($alias:ident $(= $value:expr)?),+ $(,)?; $body:expr) => {{
        $(let $alias = $crate::clone_captures!(@clone $alias $(= $value)?);)+
        $body
    }};
    // Internal: clone an explicit expression.
    (@clone $alias:ident = $value:expr) => {
        ($value).clone()
    };
    // Internal: clone the binding of the same name.
    (@clone $alias:ident) => {
        $alias.clone()
    };
}
866
/// Runs `f` on node `id` downcast to `N`, via the current composer.
/// Errors with `NodeError` when the node is missing or of another type.
pub fn with_node_mut<N: Node + 'static, R>(
    id: NodeId,
    f: impl FnOnce(&mut N) -> R,
) -> Result<R, NodeError> {
    with_current_composer(|composer| composer.with_node_mut(id, f))
}

/// Pushes `id` onto the current composer's parent stack.
pub fn push_parent(id: NodeId) {
    with_current_composer(|composer| composer.push_parent(id));
}

/// Pops the current composer's parent stack.
pub fn pop_parent() {
    with_current_composer(|composer| composer.pop_parent());
}
881
/// A node in the applied tree. Every method has a no-op / empty default so
/// implementors only override what they need.
pub trait Node: Any {
    /// Lifecycle: called when the node enters the tree.
    fn mount(&mut self) {}
    /// Lifecycle: called when the node's content is updated.
    fn update(&mut self) {}
    /// Lifecycle: called when the node leaves the tree.
    fn unmount(&mut self) {}
    fn insert_child(&mut self, _child: NodeId) {}
    fn remove_child(&mut self, _child: NodeId) {}
    fn move_child(&mut self, _from: usize, _to: usize) {}
    /// Replaces the child list wholesale with `_children`.
    fn update_children(&mut self, _children: &[NodeId]) {}
    /// Child ids in order; defaults to no children.
    fn children(&self) -> Vec<NodeId> {
        Vec::new()
    }
    /// Allocation-friendly variant of `children`: overwrite `out` in place.
    /// Default routes through `children()` (which allocates).
    fn collect_children_into(&self, out: &mut SmallVec<[NodeId; 8]>) {
        out.clear();
        out.extend(self.children());
    }
    fn set_node_id(&mut self, _id: NodeId) {}
    fn on_attached_to_parent(&mut self, _parent: NodeId) {}
    fn on_removed_from_parent(&mut self) {}
    /// Parent id for dirty-flag bubbling; `None` ends the walk.
    fn parent(&self) -> Option<NodeId> {
        None
    }
    // Dirty flags: `mark_*` take `&self`, so implementors need interior
    // mutability (e.g. `Cell`) to actually record them.
    fn mark_needs_layout(&self) {}
    fn needs_layout(&self) -> bool {
        false
    }
    fn mark_needs_measure(&self) {}
    fn needs_measure(&self) -> bool {
        false
    }
    fn mark_needs_semantics(&self) {}
    fn needs_semantics(&self) -> bool {
        false
    }
    /// Hook for wiring the parent used by bubbling; defaults to the plain
    /// attach notification.
    fn set_parent_for_bubbling(&mut self, parent: NodeId) {
        self.on_attached_to_parent(parent);
    }

    /// Pool key for node recycling; `None` opts out of recycling.
    fn recycle_key(&self) -> Option<TypeId> {
        None
    }

    /// Per-key cap on pooled nodes; `None` = applier default.
    fn recycle_pool_limit(&self) -> Option<usize> {
        None
    }

    /// Called before the node is placed into the recycle pool.
    fn prepare_for_recycle(&mut self) {}

    /// Optionally returns a replacement shell to pool instead of `self`
    /// (see `RecycledNode::new`).
    fn rehouse_for_recycle(&self) -> Option<Box<dyn Node>> {
        None
    }

    /// Optionally returns a compacted replacement for a live node.
    fn rehouse_for_live_compaction(&mut self) -> Option<Box<dyn Node>> {
        None
    }

    /// Debug estimate of heap bytes owned by this node.
    fn debug_heap_bytes(&self) -> usize {
        0
    }
}
983
/// Marks `node_id` and its ancestor chain as needing layout.
pub fn bubble_layout_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_layout_dirty_applier(applier, node_id);
}

/// Marks `node_id` and its ancestor chain as needing measure.
pub fn bubble_measure_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_measure_dirty_applier(applier, node_id);
}

/// Marks `node_id` and its ancestor chain as needing semantics.
pub fn bubble_semantics_dirty(applier: &mut dyn Applier, node_id: NodeId) {
    bubble_semantics_dirty_applier(applier, node_id);
}

/// Queues a semantics invalidation on the current composer; silently a
/// no-op when no composer is installed on this thread.
pub fn queue_semantics_invalidation(node_id: NodeId) {
    let _ = composer_context::try_with_composer(|composer| {
        composer.enqueue_semantics_invalidation(node_id);
    });
}
1038
/// Bubbles the layout dirty flag from inside composition, walking nodes of
/// concrete type `N` via `with_node_mut`.
pub fn bubble_layout_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    bubble_layout_dirty_composer::<N>(node_id);
}

/// Bubbles the measure dirty flag from inside composition. Unlike the
/// layout/semantics variants, this one does not walk nodes directly: it
/// queues a `Command::BubbleDirty` that the applier executes later, so no
/// type parameter is needed.
pub fn bubble_measure_dirty_in_composer(node_id: NodeId) {
    with_current_composer(|composer| {
        composer.commands_mut().push(Command::BubbleDirty {
            node_id,
            bubble: DirtyBubble {
                layout: false,
                measure: true,
                semantics: false,
            },
        });
    });
}

/// Bubbles the semantics dirty flag from inside composition, walking nodes
/// of concrete type `N` via `with_node_mut`.
pub fn bubble_semantics_dirty_in_composer<N: Node + 'static>(node_id: NodeId) {
    bubble_semantics_dirty_composer::<N>(node_id);
}
1092
1093fn bubble_layout_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1095 if let Ok(node) = applier.get_mut(node_id) {
1098 node.mark_needs_layout();
1099 }
1100
1101 loop {
1103 let parent_id = match applier.get_mut(node_id) {
1105 Ok(node) => node.parent(),
1106 Err(_) => None,
1107 };
1108
1109 match parent_id {
1110 Some(pid) => {
1111 if let Ok(parent) = applier.get_mut(pid) {
1113 let parent_already_dirty = parent.needs_layout();
1114 if !parent_already_dirty {
1115 parent.mark_needs_layout();
1116 }
1117 node_id = pid;
1118 } else {
1119 break;
1120 }
1121 }
1122 None => break, }
1124 }
1125}
1126
1127fn bubble_measure_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1129 if let Ok(node) = applier.get_mut(node_id) {
1131 node.mark_needs_measure();
1132 }
1133
1134 loop {
1136 let parent_id = match applier.get_mut(node_id) {
1138 Ok(node) => node.parent(),
1139 Err(_) => None,
1140 };
1141
1142 match parent_id {
1143 Some(pid) => {
1144 if let Ok(parent) = applier.get_mut(pid) {
1146 if !parent.needs_measure() {
1147 parent.mark_needs_measure();
1148 }
1149 node_id = pid;
1150 } else {
1151 break;
1152 }
1153 }
1154 None => {
1155 break; }
1157 }
1158 }
1159}
1160
1161fn bubble_semantics_dirty_applier(applier: &mut dyn Applier, mut node_id: NodeId) {
1163 if let Ok(node) = applier.get_mut(node_id) {
1164 node.mark_needs_semantics();
1165 }
1166
1167 loop {
1168 let parent_id = match applier.get_mut(node_id) {
1169 Ok(node) => node.parent(),
1170 Err(_) => None,
1171 };
1172
1173 match parent_id {
1174 Some(pid) => {
1175 if let Ok(parent) = applier.get_mut(pid) {
1176 if !parent.needs_semantics() {
1177 parent.mark_needs_semantics();
1178 }
1179 node_id = pid;
1180 } else {
1181 break;
1182 }
1183 }
1184 None => break,
1185 }
1186 }
1187}
1188
1189fn bubble_layout_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1193 let _ = with_node_mut(node_id, |node: &mut N| {
1195 node.mark_needs_layout();
1196 });
1197
1198 while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1200 let parent_id = pid;
1201
1202 let advanced = with_node_mut(parent_id, |node: &mut N| {
1204 if !node.needs_layout() {
1205 node.mark_needs_layout();
1206 }
1207 true
1208 })
1209 .unwrap_or(false);
1210
1211 if advanced {
1212 node_id = parent_id;
1213 } else {
1214 break;
1215 }
1216 }
1217}
1218
1219fn bubble_semantics_dirty_composer<N: Node + 'static>(mut node_id: NodeId) {
1221 let _ = with_node_mut(node_id, |node: &mut N| {
1223 node.mark_needs_semantics();
1224 });
1225
1226 while let Ok(Some(pid)) = with_node_mut(node_id, |node: &mut N| node.parent()) {
1227 let parent_id = pid;
1228
1229 let advanced = with_node_mut(parent_id, |node: &mut N| {
1230 if !node.needs_semantics() {
1231 node.mark_needs_semantics();
1232 }
1233 true
1234 })
1235 .unwrap_or(false);
1236
1237 if advanced {
1238 node_id = parent_id;
1239 } else {
1240 break;
1241 }
1242 }
1243}
1244
impl dyn Node {
    /// Upcasts a trait object to `Any` for downcasting to a concrete type.
    pub fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
1250
/// A node parked in an applier's recycle pool, keyed by its original
/// stable id plus whether it came from a "warm" origin.
pub struct RecycledNode {
    stable_id: NodeId,
    node: Box<dyn Node>,
    // NOTE(review): flag distinguishing warm vs cold recycle origins;
    // exact semantics live with the applier implementations — confirm there.
    warm_origin: bool,
}

impl RecycledNode {
    /// Pools `node`, letting it swap itself for a lighter shell first via
    /// `rehouse_for_recycle`.
    fn new(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
        let node = node.rehouse_for_recycle().unwrap_or(node);
        Self {
            stable_id,
            node,
            warm_origin,
        }
    }

    /// Pools an already-rehoused shell as-is (no `rehouse_for_recycle`).
    fn from_shell(stable_id: NodeId, node: Box<dyn Node>, warm_origin: bool) -> Self {
        Self {
            stable_id,
            node,
            warm_origin,
        }
    }

    pub fn stable_id(&self) -> NodeId {
        self.stable_id
    }

    fn warm_origin(&self) -> bool {
        self.warm_origin
    }

    fn set_warm_origin(&mut self, warm_origin: bool) {
        self.warm_origin = warm_origin;
    }

    pub fn node_mut(&mut self) -> &mut dyn Node {
        self.node.as_mut()
    }

    /// Dismantles into `(stable_id, node, warm_origin)`.
    pub fn into_parts(self) -> (NodeId, Box<dyn Node>, bool) {
        (self.stable_id, self.node, self.warm_origin)
    }
}
1295
/// Owner of the node tree: creates, looks up, and removes nodes, and
/// optionally maintains a recycle pool (all pool methods default to no-ops).
pub trait Applier: Any {
    /// Stores `node` and returns its assigned id.
    fn create(&mut self, node: Box<dyn Node>) -> NodeId;
    fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError>;
    fn remove(&mut self, id: NodeId) -> Result<(), NodeError>;

    /// Generation counter for `id`, used to detect stale references.
    fn node_generation(&self, id: NodeId) -> u32;

    /// Inserts `node` under a caller-chosen id; errors if occupied.
    fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError>;

    fn as_any(&self) -> &dyn Any
    where
        Self: Sized,
    {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn Any
    where
        Self: Sized,
    {
        self
    }

    /// Optional storage compaction hook.
    fn compact(&mut self) {}

    /// Pops a pooled node for `key`, if the pool has one.
    fn take_recycled_node(&mut self, _key: TypeId) -> Option<RecycledNode> {
        None
    }

    fn set_recycled_node_origin(&mut self, _id: NodeId, _warm_origin: bool) {}

    /// Pre-seeds the pool for `key` with a shell node.
    fn seed_recycled_node_shell(
        &mut self,
        _key: TypeId,
        _recycle_pool_limit: Option<usize>,
        _shell: Box<dyn Node>,
    ) {
    }

    /// Bookkeeping hook: a recyclable node was freshly created (pool miss).
    fn record_fresh_recyclable_creation(&mut self, _key: TypeId) {}

    fn clear_recycled_nodes(&mut self) {}
}
1355
/// Monomorphized updater applied to a node during command playback.
type TypedNodeUpdate = fn(&mut dyn Node, NodeId) -> Result<(), NodeError>;
/// One-shot command body run against the applier.
type CommandCallback = Box<dyn FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static>;

/// Which dirty flags a command should bubble up the parent chain.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) struct DirtyBubble {
    layout: bool,
    measure: bool,
    semantics: bool,
}

impl DirtyBubble {
    /// Layout + measure (the common structural-change case).
    pub(crate) const LAYOUT_AND_MEASURE: Self = Self {
        layout: true,
        measure: true,
        semantics: false,
    };

    /// Semantics only.
    pub(crate) const SEMANTICS: Self = Self {
        layout: false,
        measure: false,
        semantics: true,
    };

    /// Bubbles each selected flag from `node_id` up its ancestor chain.
    fn apply(self, applier: &mut dyn Applier, node_id: NodeId) {
        if self.layout {
            bubble_layout_dirty(applier, node_id);
        }
        if self.measure {
            bubble_measure_dirty(applier, node_id);
        }
        if self.semantics {
            bubble_semantics_dirty(applier, node_id);
        }
    }
}
1391
/// A deferred mutation to the node tree, executed against an `Applier`.
pub(crate) enum Command {
    /// Bubble the given dirty flags upward from `node_id`.
    BubbleDirty {
        node_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Run the typed updater on node `id` (no-op if the node is gone).
    UpdateTypedNode {
        id: NodeId,
        updater: TypedNodeUpdate,
    },
    /// Unmount and delete node `id`.
    RemoveNode {
        id: NodeId,
    },
    /// Assign `id` to the node and invoke its mount hook.
    MountNode {
        id: NodeId,
    },
    /// Append `child_id` under `parent_id`, reparenting if needed.
    AttachChild {
        parent_id: NodeId,
        child_id: NodeId,
        bubble: DirtyBubble,
    },
    /// Attach `child_id` (it lands at `appended_index`) and then move it to
    /// `insert_index` when the two differ.
    InsertChild {
        parent_id: NodeId,
        child_id: NodeId,
        appended_index: usize,
        insert_index: usize,
        bubble: DirtyBubble,
    },
    /// Reorder an existing child of `parent_id`.
    MoveChild {
        parent_id: NodeId,
        from_index: usize,
        to_index: usize,
        bubble: DirtyBubble,
    },
    /// Detach `child_id` and schedule deferred unmount/removal.
    RemoveChild {
        parent_id: NodeId,
        child_id: NodeId,
    },
    /// Detach `child_id` but shield it from deferred cleanup.
    DetachChild {
        parent_id: NodeId,
        child_id: NodeId,
    },
    /// Diff the parent's children against `expected_children` and emit the
    /// minimal removals/moves/insertions.
    SyncChildren {
        parent_id: NodeId,
        expected_children: ChildList,
    },
    /// Arbitrary escape hatch run against the applier.
    Callback(CommandCallback),
}
1439
/// A child scheduled for deferred unmount/removal after command replay.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct DeferredChildCleanup {
    child_id: NodeId,
    // Generation observed when scheduled; cleanup is skipped if the node slot
    // was reused (generation changed) in the meantime.
    generation: u32,
    // True when the child's parent pointer was already cleared at detach time,
    // so cleanup must not fire `on_removed_from_parent` a second time.
    removed_from_parent: bool,
}
1446
/// Accumulates deferred child cleanups during command replay; `preserved`
/// lists (id, generation) pairs that must never be cleaned up (children that
/// were deliberately detached for reuse).
#[derive(Default)]
struct DeferredChildCleanupQueue {
    pending: Vec<DeferredChildCleanup>,
    preserved: Vec<(NodeId, u32)>,
}
1452
1453impl DeferredChildCleanupQueue {
1454 fn push(&mut self, child_id: NodeId, generation: u32, removed_from_parent: bool) {
1455 if self
1456 .preserved
1457 .iter()
1458 .any(|&(preserved_id, preserved_generation)| {
1459 preserved_id == child_id && preserved_generation == generation
1460 })
1461 {
1462 return;
1463 }
1464 self.pending.push(DeferredChildCleanup {
1465 child_id,
1466 generation,
1467 removed_from_parent,
1468 });
1469 }
1470
1471 fn preserve(&mut self, child_id: NodeId, generation: u32) {
1472 if !self
1473 .preserved
1474 .iter()
1475 .any(|&(preserved_id, preserved_generation)| {
1476 preserved_id == child_id && preserved_generation == generation
1477 })
1478 {
1479 self.preserved.push((child_id, generation));
1480 }
1481 self.pending
1482 .retain(|cleanup| cleanup.child_id != child_id || cleanup.generation != generation);
1483 }
1484
1485 fn flush(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
1486 for cleanup in self.pending {
1487 cleanup_detached_child(applier, cleanup)?;
1488 }
1489 Ok(())
1490 }
1491}
1492
impl Command {
    /// Builds an `UpdateTypedNode` command whose updater downcasts to `N`.
    pub(crate) fn update_node<N: Node + 'static>(id: NodeId) -> Self {
        Self::UpdateTypedNode {
            id,
            updater: update_typed_node::<N>,
        }
    }

    /// Wraps an arbitrary closure as a `Callback` command.
    pub(crate) fn callback(
        callback: impl FnOnce(&mut dyn Applier) -> Result<(), NodeError> + 'static,
    ) -> Self {
        Self::Callback(Box::new(callback))
    }

    /// Applies this one command immediately, flushing any deferred child
    /// cleanup it produced before returning.
    pub(crate) fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
        let mut deferred_cleanup = DeferredChildCleanupQueue::default();
        self.apply_with_cleanup(applier, &mut deferred_cleanup)?;
        deferred_cleanup.flush(applier)
    }

    /// Executes the command, routing child removals through
    /// `deferred_cleanup` instead of destroying nodes immediately.
    ///
    /// Commands targeting nodes that no longer exist are treated as stale
    /// no-ops (`NodeError::Missing` is swallowed); other errors propagate.
    fn apply_with_cleanup(
        self,
        applier: &mut dyn Applier,
        deferred_cleanup: &mut DeferredChildCleanupQueue,
    ) -> Result<(), NodeError> {
        match self {
            Self::BubbleDirty { node_id, bubble } => {
                bubble.apply(applier, node_id);
                Ok(())
            }
            Self::UpdateTypedNode { id, updater } => {
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                updater(node, id)
            }
            Self::RemoveNode { id } => {
                // Unmount first (if still present), then drop the storage.
                if let Ok(node) = applier.get_mut(id) {
                    node.unmount();
                }
                match applier.remove(id) {
                    Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
                    Err(err) => Err(err),
                }
            }
            Self::MountNode { id } => {
                let node = match applier.get_mut(id) {
                    Ok(node) => node,
                    Err(NodeError::Missing { .. }) => return Ok(()),
                    Err(err) => return Err(err),
                };
                // Id must be set before the mount hook runs.
                node.set_node_id(id);
                node.mount();
                Ok(())
            }
            Self::AttachChild {
                parent_id,
                child_id,
                bubble,
            } => {
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::InsertChild {
                parent_id,
                child_id,
                appended_index,
                insert_index,
                bubble,
            } => {
                // The child is appended first, then shifted into position
                // when the landing index differs from the requested one.
                insert_child_with_reparenting(applier, parent_id, child_id);
                bubble.apply(applier, parent_id);
                if insert_index != appended_index {
                    if let Ok(parent_node) = applier.get_mut(parent_id) {
                        parent_node.move_child(appended_index, insert_index);
                    }
                }
                Ok(())
            }
            Self::MoveChild {
                parent_id,
                from_index,
                to_index,
                bubble,
            } => {
                if let Ok(parent_node) = applier.get_mut(parent_id) {
                    parent_node.move_child(from_index, to_index);
                }
                bubble.apply(applier, parent_id);
                Ok(())
            }
            Self::RemoveChild {
                parent_id,
                child_id,
            } => apply_remove_child(applier, parent_id, child_id, deferred_cleanup),
            Self::DetachChild {
                parent_id,
                child_id,
            } => {
                // Capture the generation before detaching so the preserve
                // entry matches what a concurrent RemoveChild would record.
                let generation = applier.node_generation(child_id);
                detach_child_from_parent(applier, parent_id, child_id)?;
                deferred_cleanup.preserve(child_id, generation);
                Ok(())
            }
            Self::SyncChildren {
                parent_id,
                expected_children,
            } => sync_children(applier, parent_id, &expected_children, deferred_cleanup),
            Self::Callback(callback) => callback(applier),
        }
    }
}
1608
/// Size of each fixed-capacity tag chunk in `CommandQueue`.
const COMMAND_CHUNK_CAPACITY: usize = 1024;
/// Queue length at which a flush is expected (4 chunks) —
/// NOTE(review): the consumer of this threshold is outside this chunk; confirm.
const COMMAND_FLUSH_THRESHOLD: usize = COMMAND_CHUNK_CAPACITY * 4;
/// Child-id list with inline capacity for the common few-children case.
type ChildList = SmallVec<[NodeId; 4]>;
/// At or below this child count, `sync_children` uses linear scans instead of
/// building position hash maps.
const SMALL_CHILD_SYNC_LINEAR_THRESHOLD: usize = 8;
1613
/// Discriminant stream entry for `CommandQueue`: records command *order*
/// while the actual payloads live in per-kind vectors (struct-of-arrays).
#[derive(Copy, Clone)]
enum CommandTag {
    BubbleDirty,
    UpdateTypedNode,
    RemoveNode,
    MountNode,
    AttachChild,
    InsertChild,
    MoveChild,
    RemoveChild,
    DetachChild,
    SyncChildren,
    Callback,
}
1628
/// Payload for `CommandTag::BubbleDirty`.
#[derive(Copy, Clone)]
struct BubbleDirtyCommand {
    node_id: NodeId,
    bubble: DirtyBubble,
}

/// Payload for `CommandTag::UpdateTypedNode`.
#[derive(Copy, Clone)]
struct UpdateTypedNodeCommand {
    id: NodeId,
    updater: TypedNodeUpdate,
}

/// Payload for `CommandTag::AttachChild`.
#[derive(Copy, Clone)]
struct AttachChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    bubble: DirtyBubble,
}

/// Payload for `CommandTag::InsertChild`.
#[derive(Copy, Clone)]
struct InsertChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
    appended_index: usize,
    insert_index: usize,
    bubble: DirtyBubble,
}

/// Payload for `CommandTag::MoveChild`.
#[derive(Copy, Clone)]
struct MoveChildCommand {
    parent_id: NodeId,
    from_index: usize,
    to_index: usize,
    bubble: DirtyBubble,
}

/// Payload for `CommandTag::RemoveChild`.
#[derive(Copy, Clone)]
struct RemoveChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
}

/// Payload for `CommandTag::DetachChild`.
#[derive(Copy, Clone)]
struct DetachChildCommand {
    parent_id: NodeId,
    child_id: NodeId,
}

/// Payload for `CommandTag::SyncChildren`: references a range inside
/// `CommandQueue::sync_child_ids` instead of owning the child list.
struct SyncChildrenCommand {
    parent_id: NodeId,
    child_start: usize,
    child_len: usize,
}
1682
/// Struct-of-arrays command buffer: `chunks` records the execution order as
/// compact tags while each command kind's payload lives in its own vector.
#[derive(Default)]
struct CommandQueue {
    // Fixed-capacity tag chunks, in execution order.
    chunks: Vec<Vec<CommandTag>>,
    // Total number of queued commands (sum of chunk lengths).
    len: usize,
    bubble_dirty: Vec<BubbleDirtyCommand>,
    update_typed_nodes: Vec<UpdateTypedNodeCommand>,
    remove_nodes: Vec<NodeId>,
    mount_nodes: Vec<NodeId>,
    attach_children: Vec<AttachChildCommand>,
    insert_children: Vec<InsertChildCommand>,
    move_children: Vec<MoveChildCommand>,
    remove_children: Vec<RemoveChildCommand>,
    detach_children: Vec<DetachChildCommand>,
    sync_children: Vec<SyncChildrenCommand>,
    // Flattened child-id lists referenced by `SyncChildrenCommand` ranges.
    sync_child_ids: Vec<NodeId>,
    callbacks: Vec<CommandCallback>,
}
1700
1701impl CommandQueue {
1702 fn push_tag(&mut self, tag: CommandTag) {
1703 let needs_chunk = self
1704 .chunks
1705 .last()
1706 .map(|chunk| chunk.len() == chunk.capacity())
1707 .unwrap_or(true);
1708 if needs_chunk {
1709 self.chunks.push(Vec::with_capacity(COMMAND_CHUNK_CAPACITY));
1710 }
1711 self.chunks
1712 .last_mut()
1713 .expect("command chunk should exist")
1714 .push(tag);
1715 self.len += 1;
1716 }
1717
1718 fn push(&mut self, command: Command) {
1719 match command {
1720 Command::BubbleDirty { node_id, bubble } => {
1721 self.bubble_dirty
1722 .push(BubbleDirtyCommand { node_id, bubble });
1723 self.push_tag(CommandTag::BubbleDirty);
1724 }
1725 Command::UpdateTypedNode { id, updater } => {
1726 self.update_typed_nodes
1727 .push(UpdateTypedNodeCommand { id, updater });
1728 self.push_tag(CommandTag::UpdateTypedNode);
1729 }
1730 Command::RemoveNode { id } => {
1731 self.remove_nodes.push(id);
1732 self.push_tag(CommandTag::RemoveNode);
1733 }
1734 Command::MountNode { id } => {
1735 self.mount_nodes.push(id);
1736 self.push_tag(CommandTag::MountNode);
1737 }
1738 Command::AttachChild {
1739 parent_id,
1740 child_id,
1741 bubble,
1742 } => {
1743 self.attach_children.push(AttachChildCommand {
1744 parent_id,
1745 child_id,
1746 bubble,
1747 });
1748 self.push_tag(CommandTag::AttachChild);
1749 }
1750 Command::InsertChild {
1751 parent_id,
1752 child_id,
1753 appended_index,
1754 insert_index,
1755 bubble,
1756 } => {
1757 self.insert_children.push(InsertChildCommand {
1758 parent_id,
1759 child_id,
1760 appended_index,
1761 insert_index,
1762 bubble,
1763 });
1764 self.push_tag(CommandTag::InsertChild);
1765 }
1766 Command::MoveChild {
1767 parent_id,
1768 from_index,
1769 to_index,
1770 bubble,
1771 } => {
1772 self.move_children.push(MoveChildCommand {
1773 parent_id,
1774 from_index,
1775 to_index,
1776 bubble,
1777 });
1778 self.push_tag(CommandTag::MoveChild);
1779 }
1780 Command::RemoveChild {
1781 parent_id,
1782 child_id,
1783 } => {
1784 self.remove_children.push(RemoveChildCommand {
1785 parent_id,
1786 child_id,
1787 });
1788 self.push_tag(CommandTag::RemoveChild);
1789 }
1790 Command::DetachChild {
1791 parent_id,
1792 child_id,
1793 } => {
1794 self.detach_children.push(DetachChildCommand {
1795 parent_id,
1796 child_id,
1797 });
1798 self.push_tag(CommandTag::DetachChild);
1799 }
1800 Command::SyncChildren {
1801 parent_id,
1802 expected_children,
1803 } => {
1804 let child_start = self.sync_child_ids.len();
1805 let child_len = expected_children.len();
1806 self.sync_child_ids.extend(expected_children);
1807 self.sync_children.push(SyncChildrenCommand {
1808 parent_id,
1809 child_start,
1810 child_len,
1811 });
1812 self.push_tag(CommandTag::SyncChildren);
1813 }
1814 Command::Callback(callback) => {
1815 self.callbacks.push(callback);
1816 self.push_tag(CommandTag::Callback);
1817 }
1818 }
1819 }
1820
1821 fn len(&self) -> usize {
1822 self.len
1823 }
1824
1825 fn capacity(&self) -> usize {
1826 self.chunks.iter().map(Vec::capacity).sum()
1827 }
1828
1829 fn payload_len_bytes(&self) -> usize {
1830 self.bubble_dirty
1831 .len()
1832 .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1833 .saturating_add(
1834 self.update_typed_nodes
1835 .len()
1836 .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1837 )
1838 .saturating_add(
1839 self.remove_nodes
1840 .len()
1841 .saturating_mul(std::mem::size_of::<NodeId>()),
1842 )
1843 .saturating_add(
1844 self.mount_nodes
1845 .len()
1846 .saturating_mul(std::mem::size_of::<NodeId>()),
1847 )
1848 .saturating_add(
1849 self.attach_children
1850 .len()
1851 .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1852 )
1853 .saturating_add(
1854 self.insert_children
1855 .len()
1856 .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1857 )
1858 .saturating_add(
1859 self.move_children
1860 .len()
1861 .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1862 )
1863 .saturating_add(
1864 self.remove_children
1865 .len()
1866 .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1867 )
1868 .saturating_add(
1869 self.detach_children
1870 .len()
1871 .saturating_mul(std::mem::size_of::<DetachChildCommand>()),
1872 )
1873 .saturating_add(
1874 self.sync_children
1875 .len()
1876 .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1877 )
1878 .saturating_add(
1879 self.sync_child_ids
1880 .len()
1881 .saturating_mul(std::mem::size_of::<NodeId>()),
1882 )
1883 .saturating_add(
1884 self.callbacks
1885 .len()
1886 .saturating_mul(std::mem::size_of::<CommandCallback>()),
1887 )
1888 }
1889
1890 fn payload_capacity_bytes(&self) -> usize {
1891 self.bubble_dirty
1892 .capacity()
1893 .saturating_mul(std::mem::size_of::<BubbleDirtyCommand>())
1894 .saturating_add(
1895 self.update_typed_nodes
1896 .capacity()
1897 .saturating_mul(std::mem::size_of::<UpdateTypedNodeCommand>()),
1898 )
1899 .saturating_add(
1900 self.remove_nodes
1901 .capacity()
1902 .saturating_mul(std::mem::size_of::<NodeId>()),
1903 )
1904 .saturating_add(
1905 self.mount_nodes
1906 .capacity()
1907 .saturating_mul(std::mem::size_of::<NodeId>()),
1908 )
1909 .saturating_add(
1910 self.attach_children
1911 .capacity()
1912 .saturating_mul(std::mem::size_of::<AttachChildCommand>()),
1913 )
1914 .saturating_add(
1915 self.insert_children
1916 .capacity()
1917 .saturating_mul(std::mem::size_of::<InsertChildCommand>()),
1918 )
1919 .saturating_add(
1920 self.move_children
1921 .capacity()
1922 .saturating_mul(std::mem::size_of::<MoveChildCommand>()),
1923 )
1924 .saturating_add(
1925 self.remove_children
1926 .capacity()
1927 .saturating_mul(std::mem::size_of::<RemoveChildCommand>()),
1928 )
1929 .saturating_add(
1930 self.detach_children
1931 .capacity()
1932 .saturating_mul(std::mem::size_of::<DetachChildCommand>()),
1933 )
1934 .saturating_add(
1935 self.sync_children
1936 .capacity()
1937 .saturating_mul(std::mem::size_of::<SyncChildrenCommand>()),
1938 )
1939 .saturating_add(
1940 self.sync_child_ids
1941 .capacity()
1942 .saturating_mul(std::mem::size_of::<NodeId>()),
1943 )
1944 .saturating_add(
1945 self.callbacks
1946 .capacity()
1947 .saturating_mul(std::mem::size_of::<CommandCallback>()),
1948 )
1949 }
1950
1951 fn apply(self, applier: &mut dyn Applier) -> Result<(), NodeError> {
1952 let mut bubble_dirty = self.bubble_dirty.into_iter();
1953 let mut update_typed_nodes = self.update_typed_nodes.into_iter();
1954 let mut remove_nodes = self.remove_nodes.into_iter();
1955 let mut mount_nodes = self.mount_nodes.into_iter();
1956 let mut attach_children = self.attach_children.into_iter();
1957 let mut insert_children = self.insert_children.into_iter();
1958 let mut move_children = self.move_children.into_iter();
1959 let mut remove_children = self.remove_children.into_iter();
1960 let mut detach_children = self.detach_children.into_iter();
1961 let mut sync_children_commands = self.sync_children.into_iter();
1962 let sync_child_ids = self.sync_child_ids;
1963 let mut callbacks = self.callbacks.into_iter();
1964 let mut deferred_cleanup = DeferredChildCleanupQueue::default();
1965
1966 for chunk in self.chunks {
1967 for tag in chunk {
1968 match tag {
1969 CommandTag::BubbleDirty => {
1970 let BubbleDirtyCommand { node_id, bubble } =
1971 bubble_dirty.next().expect("missing BubbleDirty payload");
1972 Command::BubbleDirty { node_id, bubble }
1973 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1974 }
1975 CommandTag::UpdateTypedNode => {
1976 let UpdateTypedNodeCommand { id, updater } = update_typed_nodes
1977 .next()
1978 .expect("missing UpdateTypedNode payload");
1979 Command::UpdateTypedNode { id, updater }
1980 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1981 }
1982 CommandTag::RemoveNode => {
1983 let id = remove_nodes.next().expect("missing RemoveNode payload");
1984 Command::RemoveNode { id }
1985 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1986 }
1987 CommandTag::MountNode => {
1988 let id = mount_nodes.next().expect("missing MountNode payload");
1989 Command::MountNode { id }
1990 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
1991 }
1992 CommandTag::AttachChild => {
1993 let AttachChildCommand {
1994 parent_id,
1995 child_id,
1996 bubble,
1997 } = attach_children.next().expect("missing AttachChild payload");
1998 Command::AttachChild {
1999 parent_id,
2000 child_id,
2001 bubble,
2002 }
2003 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2004 }
2005 CommandTag::InsertChild => {
2006 let InsertChildCommand {
2007 parent_id,
2008 child_id,
2009 appended_index,
2010 insert_index,
2011 bubble,
2012 } = insert_children.next().expect("missing InsertChild payload");
2013 Command::InsertChild {
2014 parent_id,
2015 child_id,
2016 appended_index,
2017 insert_index,
2018 bubble,
2019 }
2020 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2021 }
2022 CommandTag::MoveChild => {
2023 let MoveChildCommand {
2024 parent_id,
2025 from_index,
2026 to_index,
2027 bubble,
2028 } = move_children.next().expect("missing MoveChild payload");
2029 Command::MoveChild {
2030 parent_id,
2031 from_index,
2032 to_index,
2033 bubble,
2034 }
2035 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2036 }
2037 CommandTag::RemoveChild => {
2038 let RemoveChildCommand {
2039 parent_id,
2040 child_id,
2041 } = remove_children.next().expect("missing RemoveChild payload");
2042 Command::RemoveChild {
2043 parent_id,
2044 child_id,
2045 }
2046 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2047 }
2048 CommandTag::DetachChild => {
2049 let DetachChildCommand {
2050 parent_id,
2051 child_id,
2052 } = detach_children.next().expect("missing DetachChild payload");
2053 Command::DetachChild {
2054 parent_id,
2055 child_id,
2056 }
2057 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2058 }
2059 CommandTag::SyncChildren => {
2060 let SyncChildrenCommand {
2061 parent_id,
2062 child_start,
2063 child_len,
2064 } = sync_children_commands
2065 .next()
2066 .expect("missing SyncChildren payload");
2067 let expected_children =
2068 &sync_child_ids[child_start..child_start + child_len];
2069 sync_children(
2070 applier,
2071 parent_id,
2072 expected_children,
2073 &mut deferred_cleanup,
2074 )?;
2075 }
2076 CommandTag::Callback => {
2077 let callback = callbacks.next().expect("missing Callback payload");
2078 Command::Callback(callback)
2079 .apply_with_cleanup(applier, &mut deferred_cleanup)?;
2080 }
2081 }
2082 }
2083 }
2084
2085 debug_assert!(bubble_dirty.next().is_none());
2086 debug_assert!(update_typed_nodes.next().is_none());
2087 debug_assert!(remove_nodes.next().is_none());
2088 debug_assert!(mount_nodes.next().is_none());
2089 debug_assert!(attach_children.next().is_none());
2090 debug_assert!(insert_children.next().is_none());
2091 debug_assert!(move_children.next().is_none());
2092 debug_assert!(remove_children.next().is_none());
2093 debug_assert!(detach_children.next().is_none());
2094 debug_assert!(sync_children_commands.next().is_none());
2095 debug_assert!(callbacks.next().is_none());
2096 deferred_cleanup.flush(applier)
2097 }
2098}
2099
2100fn update_typed_node<N: Node + 'static>(node: &mut dyn Node, id: NodeId) -> Result<(), NodeError> {
2101 let typed = node
2102 .as_any_mut()
2103 .downcast_mut::<N>()
2104 .ok_or(NodeError::TypeMismatch {
2105 id,
2106 expected: std::any::type_name::<N>(),
2107 })?;
2108 typed.update();
2109 Ok(())
2110}
2111
/// Appends `child_id` under `parent_id`, first detaching it from any other
/// parent it currently belongs to (and dirtying that old parent).
fn insert_child_with_reparenting(applier: &mut dyn Applier, parent_id: NodeId, child_id: NodeId) {
    if parent_id == child_id {
        // Self-parenting is a caller bug: loud in debug builds, ignored in
        // release so the tree is never corrupted.
        debug_assert_ne!(
            parent_id, child_id,
            "a node cannot be attached as its own child"
        );
        return;
    }

    let old_parent = applier
        .get_mut(child_id)
        .ok()
        .and_then(|node| node.parent());
    if let Some(old_parent_id) = old_parent {
        if old_parent_id != parent_id {
            // Remove from the previous parent, notify the child, and mark
            // the old parent for relayout/remeasure.
            if let Ok(old_parent_node) = applier.get_mut(old_parent_id) {
                old_parent_node.remove_child(child_id);
            }
            if let Ok(child_node) = applier.get_mut(child_id) {
                child_node.on_removed_from_parent();
            }
            bubble_layout_dirty(applier, old_parent_id);
            bubble_measure_dirty(applier, old_parent_id);
        }
    }

    if let Ok(parent_node) = applier.get_mut(parent_id) {
        parent_node.insert_child(child_id);
    }
    if let Ok(child_node) = applier.get_mut(child_id) {
        child_node.on_attached_to_parent(parent_id);
    }
}
2145
2146fn apply_remove_child(
2147 applier: &mut dyn Applier,
2148 parent_id: NodeId,
2149 child_id: NodeId,
2150 deferred_cleanup: &mut DeferredChildCleanupQueue,
2151) -> Result<(), NodeError> {
2152 detach_child_from_parent(applier, parent_id, child_id)?;
2153
2154 let generation = applier.node_generation(child_id);
2155 let removed_from_parent = if let Ok(node) = applier.get_mut(child_id) {
2156 node.parent().is_none()
2157 } else {
2158 return Ok(());
2159 };
2160 deferred_cleanup.push(child_id, generation, removed_from_parent);
2161 Ok(())
2162}
2163
2164fn detach_child_from_parent(
2165 applier: &mut dyn Applier,
2166 parent_id: NodeId,
2167 child_id: NodeId,
2168) -> Result<(), NodeError> {
2169 if let Ok(parent_node) = applier.get_mut(parent_id) {
2170 parent_node.remove_child(child_id);
2171 }
2172 bubble_layout_dirty(applier, parent_id);
2173 bubble_measure_dirty(applier, parent_id);
2174
2175 if let Ok(node) = applier.get_mut(child_id) {
2176 match node.parent() {
2177 Some(existing_parent_id) if existing_parent_id == parent_id => {
2178 node.on_removed_from_parent();
2179 }
2180 None => {}
2181 Some(_) => return Ok(()),
2182 }
2183 } else {
2184 return Ok(());
2185 }
2186
2187 Ok(())
2188}
2189
/// Finalizes a deferred child removal: unmounts and deletes the node, but
/// only if it is still the same generation and still parentless.
fn cleanup_detached_child(
    applier: &mut dyn Applier,
    cleanup: DeferredChildCleanup,
) -> Result<(), NodeError> {
    // The slot was reused for a newer node since the cleanup was scheduled.
    if applier.node_generation(cleanup.child_id) != cleanup.generation {
        return Ok(());
    }

    let parent_id = match applier.get_mut(cleanup.child_id) {
        Ok(node) => node.parent(),
        Err(NodeError::Missing { .. }) => return Ok(()),
        Err(err) => return Err(err),
    };
    // Re-adopted by another parent in the meantime: keep it alive.
    if parent_id.is_some() {
        return Ok(());
    }

    if let Ok(node) = applier.get_mut(cleanup.child_id) {
        // Fire the removal hook only if detachment didn't already do so.
        if !cleanup.removed_from_parent {
            node.on_removed_from_parent();
        }
        node.unmount();
    }
    match applier.remove(cleanup.child_id) {
        Ok(()) | Err(NodeError::Missing { .. }) => Ok(()),
        Err(err) => Err(err),
    }
}
2218
2219fn remove_child_and_cleanup_now(
2220 applier: &mut dyn Applier,
2221 parent_id: NodeId,
2222 child_id: NodeId,
2223) -> Result<(), NodeError> {
2224 let mut deferred_cleanup = DeferredChildCleanupQueue::default();
2225 apply_remove_child(applier, parent_id, child_id, &mut deferred_cleanup)?;
2226 deferred_cleanup.flush(applier)
2227}
2228
2229fn collect_current_children(applier: &mut dyn Applier, parent_id: NodeId) -> ChildList {
2230 let mut scratch = SmallVec::<[NodeId; 8]>::new();
2231 if let Ok(node) = applier.get_mut(parent_id) {
2232 node.collect_children_into(&mut scratch);
2233 }
2234 let mut current = ChildList::new();
2235 current.extend(scratch);
2236 current
2237}
2238
2239fn sync_children(
2240 applier: &mut dyn Applier,
2241 parent_id: NodeId,
2242 expected_children: &[NodeId],
2243 deferred_cleanup: &mut DeferredChildCleanupQueue,
2244) -> Result<(), NodeError> {
2245 let mut current = collect_current_children(applier, parent_id);
2246 let children_changed = current.as_slice() != expected_children;
2247
2248 if children_changed {
2249 if current.len().max(expected_children.len()) <= SMALL_CHILD_SYNC_LINEAR_THRESHOLD {
2250 sync_children_small(
2251 applier,
2252 parent_id,
2253 &mut current,
2254 expected_children,
2255 deferred_cleanup,
2256 )?;
2257 } else {
2258 let mut target_positions: HashMap<NodeId, usize> = HashMap::default();
2259 target_positions.reserve(expected_children.len());
2260 for (index, &child) in expected_children.iter().enumerate() {
2261 target_positions.insert(child, index);
2262 }
2263
2264 for index in (0..current.len()).rev() {
2265 let child = current[index];
2266 if !target_positions.contains_key(&child) {
2267 current.remove(index);
2268 apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
2269 }
2270 }
2271
2272 let mut current_positions = build_child_positions(¤t);
2273 for (target_index, &child) in expected_children.iter().enumerate() {
2274 if let Some(current_index) = current_positions.get(&child).copied() {
2275 if current_index != target_index {
2276 let from_index = current_index;
2277 let to_index = move_child_in_diff_state(
2278 &mut current,
2279 &mut current_positions,
2280 from_index,
2281 target_index,
2282 );
2283 Command::MoveChild {
2284 parent_id,
2285 from_index,
2286 to_index,
2287 bubble: DirtyBubble::LAYOUT_AND_MEASURE,
2288 }
2289 .apply(applier)?;
2290 }
2291 } else {
2292 let insert_index = target_index.min(current.len());
2293 let appended_index = current.len();
2294 insert_child_into_diff_state(
2295 &mut current,
2296 &mut current_positions,
2297 insert_index,
2298 child,
2299 );
2300 Command::InsertChild {
2301 parent_id,
2302 child_id: child,
2303 appended_index,
2304 insert_index,
2305 bubble: DirtyBubble::LAYOUT_AND_MEASURE,
2306 }
2307 .apply(applier)?;
2308 }
2309 }
2310 }
2311 }
2312
2313 reconcile_children(applier, parent_id, expected_children, !children_changed)
2314}
2315
2316fn sync_children_small(
2317 applier: &mut dyn Applier,
2318 parent_id: NodeId,
2319 current: &mut ChildList,
2320 expected_children: &[NodeId],
2321 deferred_cleanup: &mut DeferredChildCleanupQueue,
2322) -> Result<(), NodeError> {
2323 for index in (0..current.len()).rev() {
2324 let child = current[index];
2325 if !expected_children.contains(&child) {
2326 current.remove(index);
2327 apply_remove_child(applier, parent_id, child, deferred_cleanup)?;
2328 }
2329 }
2330
2331 for (target_index, &child) in expected_children.iter().enumerate() {
2332 if let Some(current_index) = current
2333 .iter()
2334 .position(|¤t_child| current_child == child)
2335 {
2336 if current_index != target_index {
2337 let child = current.remove(current_index);
2338 let to_index = target_index.min(current.len());
2339 current.insert(to_index, child);
2340 Command::MoveChild {
2341 parent_id,
2342 from_index: current_index,
2343 to_index,
2344 bubble: DirtyBubble::LAYOUT_AND_MEASURE,
2345 }
2346 .apply(applier)?;
2347 }
2348 } else {
2349 let insert_index = target_index.min(current.len());
2350 let appended_index = current.len();
2351 current.insert(insert_index, child);
2352 Command::InsertChild {
2353 parent_id,
2354 child_id: child,
2355 appended_index,
2356 insert_index,
2357 bubble: DirtyBubble::LAYOUT_AND_MEASURE,
2358 }
2359 .apply(applier)?;
2360 }
2361 }
2362
2363 Ok(())
2364}
2365
2366fn reconcile_children(
2367 applier: &mut dyn Applier,
2368 parent_id: NodeId,
2369 expected_children: &[NodeId],
2370 needs_dirty_check: bool,
2371) -> Result<(), NodeError> {
2372 let mut repaired = false;
2373 for &child_id in expected_children {
2374 let needs_attach = if let Ok(node) = applier.get_mut(child_id) {
2375 node.parent() != Some(parent_id)
2376 } else {
2377 false
2378 };
2379
2380 if needs_attach {
2381 insert_child_with_reparenting(applier, parent_id, child_id);
2382 repaired = true;
2383 }
2384 }
2385
2386 let is_dirty = if needs_dirty_check {
2387 if let Ok(node) = applier.get_mut(parent_id) {
2388 node.needs_layout()
2389 } else {
2390 false
2391 }
2392 } else {
2393 false
2394 };
2395
2396 if repaired {
2397 bubble_layout_dirty(applier, parent_id);
2398 bubble_measure_dirty(applier, parent_id);
2399 } else if is_dirty {
2400 bubble_layout_dirty(applier, parent_id);
2401 }
2402
2403 Ok(())
2404}
2405
/// In-memory `Applier` implementation.
///
/// NOTE(review): the dense/sparse split described below is inferred from the
/// field names and the capacity helpers visible in this file — confirm
/// against the full `impl`.
#[derive(Default)]
pub struct MemoryApplier {
    // Dense physical node storage; `None` marks a vacated slot.
    nodes: Vec<Option<Box<dyn Node>>>,
    // Packed stable id per physical slot (see `pack_stable_id`).
    physical_stable_ids: Vec<u32>,
    // Warm-recycle origin flag per physical slot.
    physical_warm_recycled_origins: Vec<bool>,
    // Stable id -> dense slot index.
    stable_to_physical: HashMap<NodeId, usize>,
    // Stable id -> generation counter (detects slot reuse).
    stable_generations: HashMap<NodeId, u32>,
    // Vacated dense slots; `Reverse` makes this a min-heap.
    free_ids: BinaryHeap<Reverse<usize>>,
    // Sparse storage for ids kept outside the dense vectors.
    high_id_nodes: HashMap<NodeId, Box<dyn Node>>,
    high_id_warm_recycled_origins: HashMap<NodeId, bool>,
    high_id_generations: HashMap<NodeId, u32>,
    // Next stable id to hand out.
    next_stable_id: NodeId,
    layout_runtime: Option<RuntimeHandle>,
    slots: SlotTable,
    // Recycle pools and their bookkeeping, keyed by node type.
    recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    returning_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    cold_recycled_nodes: HashMap<TypeId, Vec<RecycledNode>>,
    recycled_node_limits: HashMap<TypeId, usize>,
    warm_recycled_node_targets: HashMap<TypeId, usize>,
    fresh_recyclable_creations: HashMap<TypeId, usize>,
    recycled_node_prototypes: HashMap<TypeId, Box<dyn Node>>,
}
2434
/// Stack frame for an iterative subtree walk during removal.
/// NOTE(review): the traversal that uses this is outside this chunk — confirm.
struct RemovalFrame {
    node_id: NodeId,
    children: SmallVec<[NodeId; 8]>,
    // Index of the next child of `node_id` to visit.
    next_child: usize,
}
2440
/// Snapshot of `MemoryApplier` storage sizes/capacities for debugging and
/// memory-usage inspection; mirrors the applier's internal containers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct MemoryApplierDebugStats {
    pub next_stable_id: NodeId,
    pub nodes_len: usize,
    pub nodes_cap: usize,
    pub physical_stable_ids_len: usize,
    pub physical_stable_ids_cap: usize,
    pub stable_to_physical_len: usize,
    pub stable_to_physical_cap: usize,
    pub stable_generations_len: usize,
    pub stable_generations_cap: usize,
    pub free_ids_len: usize,
    pub free_ids_cap: usize,
    pub high_id_nodes_len: usize,
    pub high_id_nodes_cap: usize,
    pub high_id_generations_len: usize,
    pub high_id_generations_cap: usize,
    pub recycled_type_count: usize,
    pub recycled_type_cap: usize,
    pub recycled_node_count: usize,
    pub recycled_node_capacity: usize,
    pub warm_recycled_node_id_count: usize,
    pub warm_recycled_node_id_capacity: usize,
}
2465
2466impl MemoryApplier {
    // Node-count bound tied to eager compaction — usage not visible in this
    // chunk; presumably compaction runs eagerly below it. TODO confirm.
    const EAGER_COMPACT_NODE_LEN: usize = 1_024;
    // Sentinel `u32` — presumably marks an unused `physical_stable_ids` slot;
    // usage not visible here. TODO confirm.
    const INVALID_STABLE_ID: u32 = u32::MAX;
    // First allocation size for the dense node vectors.
    const INITIAL_DENSE_NODE_CAP: usize = 32;
    // Above this length, dense storage grows additively instead of doubling.
    const LARGE_DENSE_NODE_GROWTH_THRESHOLD: usize = 32 * 1024;
    // Additive growth step is len / divisor (at least the initial cap).
    const LARGE_DENSE_NODE_GROWTH_DIVISOR: usize = 4;
2472
    /// Packs a stable `NodeId` into the compact `u32` stored per physical
    /// slot. Panics if the id does not fit in 32 bits.
    fn pack_stable_id(stable_id: NodeId) -> u32 {
        u32::try_from(stable_id).expect("stable id overflow")
    }
2476
    /// Inverse of `pack_stable_id`: widens a stored `u32` back to `NodeId`
    /// (a widening cast, so it cannot lose information).
    fn unpack_stable_id(stable_id: u32) -> NodeId {
        stable_id as NodeId
    }
2480
2481 fn next_dense_node_target_len(old_len: usize) -> usize {
2482 if old_len < Self::INITIAL_DENSE_NODE_CAP {
2483 return Self::INITIAL_DENSE_NODE_CAP;
2484 }
2485 if old_len < Self::LARGE_DENSE_NODE_GROWTH_THRESHOLD {
2486 return old_len.saturating_mul(2);
2487 }
2488
2489 let incremental_growth =
2490 (old_len / Self::LARGE_DENSE_NODE_GROWTH_DIVISOR).max(Self::INITIAL_DENSE_NODE_CAP);
2491 old_len.saturating_add(incremental_growth)
2492 }
2493
    /// Grows the three parallel dense vectors in lockstep ahead of an append.
    ///
    /// Uses `reserve_exact` against a shared growth target so the vectors
    /// keep matching capacities instead of each following `Vec`'s default
    /// doubling independently.
    fn ensure_dense_node_storage_capacity(&mut self) {
        let len = self
            .nodes
            .len()
            .max(self.physical_stable_ids.len())
            .max(self.physical_warm_recycled_origins.len());
        // Fast path: every vector still has headroom.
        if len < self.nodes.capacity()
            && len < self.physical_stable_ids.capacity()
            && len < self.physical_warm_recycled_origins.capacity()
        {
            return;
        }

        let target = Self::next_dense_node_target_len(len);
        if self.nodes.capacity() < target {
            self.nodes
                .reserve_exact(target.saturating_sub(self.nodes.len()));
        }
        if self.physical_stable_ids.capacity() < target {
            self.physical_stable_ids
                .reserve_exact(target.saturating_sub(self.physical_stable_ids.len()));
        }
        if self.physical_warm_recycled_origins.capacity() < target {
            self.physical_warm_recycled_origins
                .reserve_exact(target.saturating_sub(self.physical_warm_recycled_origins.len()));
        }
    }
2521
2522 fn ensure_stable_index_capacity(&mut self) {
2523 let len = self
2524 .stable_to_physical
2525 .len()
2526 .max(self.stable_generations.len());
2527 if len < self.stable_to_physical.capacity() && len < self.stable_generations.capacity() {
2528 return;
2529 }
2530
2531 let target = Self::next_dense_node_target_len(len);
2532 let additional = target.saturating_sub(len);
2533 if self.stable_to_physical.capacity() < target {
2534 self.stable_to_physical.reserve(additional);
2535 }
2536 if self.stable_generations.capacity() < target {
2537 self.stable_generations.reserve(additional);
2538 }
2539 }
2540
    /// Creates an empty applier with no nodes, empty recycle pools, and no
    /// bound layout runtime.
    pub fn new() -> Self {
        Self {
            // Dense node storage plus its two parallel side tables.
            nodes: Vec::new(),
            physical_stable_ids: Vec::new(),
            physical_warm_recycled_origins: Vec::new(),
            // Stable-id indirection and per-id generation counters.
            stable_to_physical: HashMap::default(),
            stable_generations: HashMap::default(),
            // Min-heap of tombstoned physical slots (via Reverse).
            free_ids: BinaryHeap::new(),
            // Sparse side storage for caller-chosen very large ids.
            high_id_nodes: HashMap::default(),
            high_id_warm_recycled_origins: HashMap::default(),
            high_id_generations: HashMap::default(),
            next_stable_id: 0,
            layout_runtime: None,
            slots: SlotTable::default(),
            // Warm / returning / cold recycle pools and their tuning state.
            recycled_nodes: HashMap::default(),
            returning_recycled_nodes: HashMap::default(),
            cold_recycled_nodes: HashMap::default(),
            recycled_node_limits: HashMap::default(),
            warm_recycled_node_targets: HashMap::default(),
            fresh_recyclable_creations: HashMap::default(),
            recycled_node_prototypes: HashMap::default(),
        }
    }
2564
    /// Mutable access to the applier-owned slot table.
    pub fn slots(&mut self) -> &mut SlotTable {
        &mut self.slots
    }
2568
2569 pub fn with_node<N: Node + 'static, R>(
2570 &mut self,
2571 id: NodeId,
2572 f: impl FnOnce(&mut N) -> R,
2573 ) -> Result<R, NodeError> {
2574 let physical_id = self
2575 .resolve_node_index(id)
2576 .ok_or(NodeError::Missing { id })?;
2577 let slot = self
2578 .nodes
2579 .get_mut(physical_id)
2580 .ok_or(NodeError::Missing { id })?
2581 .as_deref_mut()
2582 .ok_or(NodeError::Missing { id })?;
2583 let typed = slot
2584 .as_any_mut()
2585 .downcast_mut::<N>()
2586 .ok_or(NodeError::TypeMismatch {
2587 id,
2588 expected: std::any::type_name::<N>(),
2589 })?;
2590 Ok(f(typed))
2591 }
2592
2593 pub fn len(&self) -> usize {
2594 self.nodes.iter().filter(|n| n.is_some()).count()
2595 }
2596
    /// Number of dense physical slots (live nodes plus tombstones).
    ///
    /// NOTE(review): despite the name this is `nodes.len()`, not the Vec's
    /// allocated capacity — it counts addressable slots.
    pub fn capacity(&self) -> usize {
        self.nodes.len()
    }
2600
2601 pub fn tombstone_count(&self) -> usize {
2602 self.nodes.iter().filter(|n| n.is_none()).count()
2603 }
2604
    /// Number of tombstoned physical slots waiting to be reused.
    pub fn freelist_len(&self) -> usize {
        self.free_ids.len()
    }
2608
    /// Total recycled nodes across all three pools (debug/diagnostics only).
    pub fn debug_recycled_node_count(&self) -> usize {
        self.total_recycled_node_count()
    }
2612
2613 pub fn debug_recycled_node_count_for<N: Node + 'static>(&self) -> usize {
2614 let key = TypeId::of::<N>();
2615 self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
2616 + self
2617 .returning_recycled_nodes
2618 .get(&key)
2619 .map(Vec::len)
2620 .unwrap_or(0)
2621 + self
2622 .cold_recycled_nodes
2623 .get(&key)
2624 .map(Vec::len)
2625 .unwrap_or(0)
2626 }
2627
    /// Snapshots length/capacity counters for every internal table, for use in
    /// memory diagnostics and tests.
    pub fn debug_stats(&self) -> MemoryApplierDebugStats {
        // A type may appear in more than one pool; count distinct keys.
        let mut recycled_keys: HashSet<TypeId> = HashSet::default();
        recycled_keys.extend(self.recycled_nodes.keys().copied());
        recycled_keys.extend(self.returning_recycled_nodes.keys().copied());
        recycled_keys.extend(self.cold_recycled_nodes.keys().copied());

        MemoryApplierDebugStats {
            next_stable_id: self.next_stable_id,
            nodes_len: self.len(),
            // "cap" here is addressable dense slots, matching Self::capacity.
            nodes_cap: self.nodes.len(),
            physical_stable_ids_len: self.physical_stable_ids.len(),
            physical_stable_ids_cap: self.physical_stable_ids.capacity(),
            stable_to_physical_len: self.stable_to_physical.len(),
            stable_to_physical_cap: self.stable_to_physical.capacity(),
            stable_generations_len: self.stable_generations.len(),
            stable_generations_cap: self.stable_generations.capacity(),
            free_ids_len: self.free_ids.len(),
            free_ids_cap: self.free_ids.capacity(),
            high_id_nodes_len: self.high_id_nodes.len(),
            high_id_nodes_cap: self.high_id_nodes.capacity(),
            high_id_generations_len: self.high_id_generations.len(),
            high_id_generations_cap: self.high_id_generations.capacity(),
            recycled_type_count: recycled_keys.len(),
            // Sum of per-pool map capacities, not distinct-key capacity.
            recycled_type_cap: self.recycled_nodes.capacity()
                + self.returning_recycled_nodes.capacity()
                + self.cold_recycled_nodes.capacity(),
            recycled_node_count: self.total_recycled_node_count(),
            recycled_node_capacity: self.total_recycled_node_capacity(),
            warm_recycled_node_id_count: self.total_warm_recycled_node_id_count(),
            warm_recycled_node_id_capacity: self.total_warm_recycled_node_id_capacity(),
        }
    }
2660
2661 pub fn is_empty(&self) -> bool {
2662 self.len() == 0
2663 }
2664
2665 pub fn debug_live_node_heap_bytes(&self) -> usize {
2666 let dense_nodes = self
2667 .nodes
2668 .iter()
2669 .flatten()
2670 .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2671 .sum::<usize>();
2672 let high_id_nodes = self
2673 .high_id_nodes
2674 .values()
2675 .map(|node| std::mem::size_of_val(&**node) + node.debug_heap_bytes())
2676 .sum::<usize>();
2677 dense_nodes + high_id_nodes
2678 }
2679
2680 pub fn debug_recycled_node_heap_bytes(&self) -> usize {
2681 let pool_bytes = |pools: &HashMap<TypeId, Vec<RecycledNode>>| {
2682 pools
2683 .values()
2684 .flat_map(|nodes| nodes.iter())
2685 .map(|node| std::mem::size_of_val(&*node.node) + node.node.debug_heap_bytes())
2686 .sum::<usize>()
2687 };
2688
2689 pool_bytes(&self.recycled_nodes)
2690 + pool_bytes(&self.returning_recycled_nodes)
2691 + pool_bytes(&self.cold_recycled_nodes)
2692 }
2693
    /// Binds the layout runtime handle used by layout-aware nodes.
    pub fn set_runtime_handle(&mut self, handle: RuntimeHandle) {
        self.layout_runtime = Some(handle);
    }
2697
    /// Drops the currently bound layout runtime handle, if any.
    pub fn clear_runtime_handle(&mut self) {
        self.layout_runtime = None;
    }
2701
    /// Clone of the bound layout runtime handle, or `None` when unbound.
    pub fn runtime_handle(&self) -> Option<RuntimeHandle> {
        self.layout_runtime.clone()
    }
2705
    /// Total nodes held across every type bucket of one recycle pool.
    fn pool_node_count(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
        pools.values().map(Vec::len).sum()
    }
2709
    /// Total allocated Vec capacity across every type bucket of one pool.
    fn pool_node_capacity(pools: &HashMap<TypeId, Vec<RecycledNode>>) -> usize {
        pools.values().map(Vec::capacity).sum()
    }
2713
2714 fn total_recycled_node_count(&self) -> usize {
2715 Self::pool_node_count(&self.recycled_nodes)
2716 + Self::pool_node_count(&self.returning_recycled_nodes)
2717 + Self::pool_node_count(&self.cold_recycled_nodes)
2718 }
2719
2720 fn total_recycled_node_capacity(&self) -> usize {
2721 Self::pool_node_capacity(&self.recycled_nodes)
2722 + Self::pool_node_capacity(&self.returning_recycled_nodes)
2723 + Self::pool_node_capacity(&self.cold_recycled_nodes)
2724 }
2725
    /// Count of node ids participating in the warm recycle path: live nodes
    /// flagged warm-origin plus nodes parked in the warm/returning pools
    /// (cold pool deliberately excluded).
    fn total_warm_recycled_node_id_count(&self) -> usize {
        self.live_warm_recycled_origin_count()
            + Self::pool_node_count(&self.recycled_nodes)
            + Self::pool_node_count(&self.returning_recycled_nodes)
    }
2731
    /// Capacity counterpart of `total_warm_recycled_node_id_count`.
    fn total_warm_recycled_node_id_capacity(&self) -> usize {
        self.live_warm_recycled_origin_capacity()
            + Self::pool_node_capacity(&self.recycled_nodes)
            + Self::pool_node_capacity(&self.returning_recycled_nodes)
    }
2737
2738 fn remember_recycle_pool_limit(&mut self, key: TypeId, recycle_pool_limit: Option<usize>) {
2739 if let Some(limit) = recycle_pool_limit {
2740 self.recycled_node_limits.insert(key, limit);
2741 } else {
2742 self.recycled_node_limits.remove(&key);
2743 }
2744 }
2745
    /// Recorded recycle pool limit for `key`, or `None` when unlimited.
    fn recycle_pool_limit_for(&self, key: TypeId) -> Option<usize> {
        self.recycled_node_limits.get(&key).copied()
    }
2749
2750 fn warm_recycled_pool_len(&self, key: TypeId) -> usize {
2751 self.recycled_nodes.get(&key).map(Vec::len).unwrap_or(0)
2752 }
2753
2754 fn warm_recycled_node_target(&self, key: TypeId) -> usize {
2755 self.warm_recycled_node_targets
2756 .get(&key)
2757 .copied()
2758 .unwrap_or(0)
2759 }
2760
2761 fn warm_recycled_node_target_limit(&self, key: TypeId) -> usize {
2762 let Some(limit) = self.recycle_pool_limit_for(key) else {
2763 return usize::MAX;
2764 };
2765 if limit <= 8 {
2766 limit
2767 } else {
2768 limit / 4
2769 }
2770 }
2771
    /// Folds the demand observed this pass into the warm-pool target for
    /// `key`, clamped by `warm_recycled_node_target_limit`, and returns the
    /// resulting target.
    fn update_warm_recycled_node_target(&mut self, key: TypeId, observed_demand: usize) -> usize {
        let target_limit = self.warm_recycled_node_target_limit(key);
        let existing = self.warm_recycled_node_target(key).min(target_limit);
        // No demand this pass: keep the previous (clamped) target untouched.
        if observed_demand == 0 {
            return existing;
        }

        let target = match self.recycle_pool_limit_for(key) {
            // Large-limit pools jump straight to the clamped ceiling.
            Some(limit) if limit > 8 => target_limit,
            // Small-limit pools track demand, still clamped.
            Some(_) => observed_demand.min(target_limit),
            // Unlimited pools track demand directly.
            None => observed_demand,
        };
        self.warm_recycled_node_targets.insert(key, target);
        target
    }
2787
2788 fn remember_recycled_node_prototype(&mut self, key: TypeId, shell: &dyn Node) {
2789 if self.recycled_node_prototypes.contains_key(&key) {
2790 return;
2791 }
2792 if let Some(prototype) = shell.rehouse_for_recycle() {
2793 self.recycled_node_prototypes.insert(key, prototype);
2794 }
2795 }
2796
    /// Number of live nodes (dense and high-id) flagged as warm-recycle
    /// origins.
    fn live_warm_recycled_origin_count(&self) -> usize {
        // Dense side: a flag only counts while its slot still holds a node.
        self.physical_warm_recycled_origins
            .iter()
            .zip(self.nodes.iter())
            .filter(|(warm_origin, node)| **warm_origin && node.is_some())
            .count()
            + self
                .high_id_warm_recycled_origins
                .values()
                .filter(|warm_origin| **warm_origin)
                .count()
    }
2809
    /// Allocated capacity of the warm-origin flag tables.
    fn live_warm_recycled_origin_capacity(&self) -> usize {
        self.physical_warm_recycled_origins.capacity()
            + self.high_id_warm_recycled_origins.capacity()
    }
2814
2815 fn push_recycled_node(
2816 &mut self,
2817 key: TypeId,
2818 recycle_pool_limit: Option<usize>,
2819 recycled: RecycledNode,
2820 ) {
2821 self.remember_recycle_pool_limit(key, recycle_pool_limit);
2822 self.remember_recycled_node_prototype(key, recycled.node.as_ref());
2823
2824 let warm_origin = recycled.warm_origin();
2825 let pool = if warm_origin {
2826 self.returning_recycled_nodes.entry(key).or_default()
2827 } else {
2828 self.cold_recycled_nodes.entry(key).or_default()
2829 };
2830 pool.push(recycled);
2831 if let Some(limit) = recycle_pool_limit {
2832 if pool.len() > limit {
2833 let excess = pool.len() - limit;
2834 let dropped: Vec<_> = pool.drain(0..excess).collect();
2835 drop(dropped);
2836 }
2837 }
2838 }
2839
    /// Parks a node directly in the warm pool, marking it warm-origin and
    /// enforcing the pool limit.
    fn push_warm_recycled_node(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        mut recycled: RecycledNode,
    ) {
        self.remember_recycle_pool_limit(key, recycle_pool_limit);

        recycled.set_warm_origin(true);
        // Evicted nodes are collected here and dropped only after the pool
        // borrow is released and the (possibly empty) map entry is removed.
        let mut dropped = Vec::new();
        let mut remove_pool_entry = false;
        {
            let pool = self.recycled_nodes.entry(key).or_default();
            pool.push(recycled);
            if let Some(limit) = recycle_pool_limit {
                if pool.len() > limit {
                    // Evict the oldest entries beyond the limit.
                    let excess = pool.len() - limit;
                    dropped = pool.drain(0..excess).collect();
                    // Only possible when limit == 0; avoid keeping an empty
                    // Vec entry alive in the map.
                    remove_pool_entry = pool.is_empty();
                }
            }
        }
        if remove_pool_entry {
            self.recycled_nodes.remove(&key);
        }
        drop(dropped);
    }
2867
    /// Pre-seeds the warm pool for `key` with an empty node shell, assigning
    /// it a fresh stable id. No-op when the pool is already at its limit.
    fn seed_recycled_node_shell_impl(
        &mut self,
        key: TypeId,
        recycle_pool_limit: Option<usize>,
        shell: Box<dyn Node>,
    ) {
        let limit = recycle_pool_limit.unwrap_or(usize::MAX);
        if self.warm_recycled_pool_len(key) >= limit {
            return;
        }

        self.remember_recycled_node_prototype(key, shell.as_ref());
        // Shells consume stable ids just like live nodes so generation
        // tracking stays consistent when they are later revived.
        let stable_id = self.next_stable_id;
        self.next_stable_id = self.next_stable_id.saturating_add(1);
        self.push_warm_recycled_node(
            key,
            recycle_pool_limit,
            RecycledNode::from_shell(stable_id, shell, true),
        );
    }
2888
    /// Pops the most recently parked node for `key` from one pool, removing
    /// the map entry when the bucket empties.
    fn take_recycled_node_from_pool(
        pools: &mut HashMap<TypeId, Vec<RecycledNode>>,
        key: TypeId,
    ) -> Option<RecycledNode> {
        let pool = pools.get_mut(&key)?;
        let node = pool.pop();
        if pool.is_empty() {
            pools.remove(&key);
        }
        node
    }
2900
2901 fn compact_idle_warm_pool(&mut self, key: TypeId) {
2902 let Some(pool) = self.recycled_nodes.get_mut(&key) else {
2903 return;
2904 };
2905 if pool.capacity() <= pool.len().saturating_mul(4).max(64) {
2906 return;
2907 }
2908
2909 let retained = pool.len();
2910 let mut compacted = Vec::with_capacity(retained);
2911 compacted.append(pool);
2912 let remove_pool_entry = compacted.is_empty();
2913 *pool = compacted;
2914 let _ = pool;
2915
2916 if remove_pool_entry {
2917 self.recycled_nodes.remove(&key);
2918 }
2919 }
2920
2921 fn trim_idle_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
2922 let pool_len = self.warm_recycled_pool_len(key);
2923 if pool_len <= target {
2924 return;
2925 }
2926
2927 let Some(pool) = self.recycled_nodes.get_mut(&key) else {
2928 return;
2929 };
2930 let removable = (pool_len - target).min(pool.len());
2931 let dropped: Vec<_> = pool.drain(0..removable).collect();
2932 let remove_pool_entry = pool.is_empty();
2933 let _ = pool;
2934
2935 if remove_pool_entry {
2936 self.recycled_nodes.remove(&key);
2937 }
2938 drop(dropped);
2939 }
2940
    /// Tops the warm pool for `key` back up to `target` by cloning shells from
    /// the remembered prototype.
    fn replenish_warm_pool_to_target(&mut self, key: TypeId, target: usize) {
        let missing = target.saturating_sub(self.warm_recycled_pool_len(key));
        if missing == 0 {
            return;
        }

        let recycle_pool_limit = self.recycle_pool_limit_for(key);
        // Build shells into a temporary Vec first: the prototype borrows
        // `self` immutably while seeding below needs `&mut self`.
        let mut shells = Vec::with_capacity(missing);
        if let Some(prototype) = self.recycled_node_prototypes.get(&key) {
            for _ in 0..missing {
                // A prototype may decline to produce a shell; stop early.
                let Some(shell) = prototype.rehouse_for_recycle() else {
                    break;
                };
                shells.push(shell);
            }
        }

        for shell in shells {
            self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
        }
    }
2962
2963 fn prune_stable_generations(&mut self) {
2964 let retained_len = self.stable_to_physical.len() + self.total_recycled_node_count();
2965 if retained_len == self.stable_generations.len() {
2966 return;
2967 }
2968
2969 let mut retained = HashMap::default();
2970 retained.reserve(retained_len);
2971 for stable_id in self.stable_to_physical.keys().copied() {
2972 if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2973 retained.insert(stable_id, generation);
2974 }
2975 }
2976 for stable_id in self
2977 .recycled_nodes
2978 .values()
2979 .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2980 {
2981 if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2982 retained.insert(stable_id, generation);
2983 }
2984 }
2985 for stable_id in self
2986 .returning_recycled_nodes
2987 .values()
2988 .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2989 {
2990 if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
2991 retained.insert(stable_id, generation);
2992 }
2993 }
2994 for stable_id in self
2995 .cold_recycled_nodes
2996 .values()
2997 .flat_map(|nodes| nodes.iter().map(RecycledNode::stable_id))
2998 {
2999 if let Some(generation) = self.stable_generations.get(&stable_id).copied() {
3000 retained.insert(stable_id, generation);
3001 }
3002 }
3003 self.stable_generations = retained;
3004 }
3005
3006 pub fn dump_tree(&self, root: Option<NodeId>) -> String {
3007 let mut output = String::new();
3008 if let Some(root_id) = root {
3009 self.dump_node(&mut output, root_id, 0);
3010 } else {
3011 output.push_str("(no root)\n");
3012 }
3013 output
3014 }
3015
3016 fn dump_node(&self, output: &mut String, id: NodeId, depth: usize) {
3017 let indent = " ".repeat(depth);
3018 if let Some(physical_id) = self.resolve_node_index(id) {
3019 let node = self.nodes[physical_id]
3020 .as_ref()
3021 .expect("resolved physical node must exist");
3022 let type_name = std::any::type_name_of_val(&**node);
3023 output.push_str(&format!("{}[{}] {}\n", indent, id, type_name));
3024
3025 let children = node.children();
3026 for child_id in children {
3027 self.dump_node(output, child_id, depth + 1);
3028 }
3029 } else {
3030 output.push_str(&format!("{}[{}] (missing)\n", indent, id));
3031 }
3032 }
3033
    /// Maps a stable id to its current dense slot, if the node is live in
    /// dense storage (high-id nodes are not covered here).
    fn resolve_node_index(&self, id: NodeId) -> Option<usize> {
        self.stable_to_physical.get(&id).copied()
    }
3037
    /// Shared-reference lookup covering both dense and high-id storage.
    ///
    /// # Errors
    /// `NodeError::Missing` when the id resolves nowhere.
    fn get_ref(&self, id: NodeId) -> Result<&dyn Node, NodeError> {
        if let Some(physical_id) = self.resolve_node_index(id) {
            let slot = self
                .nodes
                .get(physical_id)
                .ok_or(NodeError::Missing { id })?
                .as_deref()
                .ok_or(NodeError::Missing { id })?;
            return Ok(slot);
        }

        // Fall back to the sparse high-id side table.
        self.high_id_nodes
            .get(&id)
            .map(|node| node.as_ref())
            .ok_or(NodeError::Missing { id })
    }
3054
    /// Appends the ids of `id`'s children into `out` without allocating a
    /// fresh Vec.
    fn collect_node_children_into(
        &self,
        id: NodeId,
        out: &mut SmallVec<[NodeId; 8]>,
    ) -> Result<(), NodeError> {
        self.get_ref(id)?.collect_children_into(out);
        Ok(())
    }
3063
    /// Parent id recorded on the node itself (`None` for roots).
    fn node_parent(&self, id: NodeId) -> Result<Option<NodeId>, NodeError> {
        Ok(self.get_ref(id)?.parent())
    }
3067
3068 fn collect_owned_children(
3069 &self,
3070 node_id: NodeId,
3071 out: &mut SmallVec<[NodeId; 8]>,
3072 ) -> Result<(), NodeError> {
3073 self.collect_node_children_into(node_id, out)?;
3074 out.retain(|child_id| {
3075 self.node_parent(*child_id)
3076 .map(|parent| parent == Some(node_id))
3077 .unwrap_or(false)
3078 });
3079 Ok(())
3080 }
3081
    /// Detaches `node_id`'s storage, recycling the node when it opts in via
    /// `recycle_key`, and bumps the id's generation counter.
    fn remove_node_storage(&mut self, node_id: NodeId) -> Result<(), NodeError> {
        // High-id nodes live in the sparse side tables; handle them first.
        if self.high_id_nodes.contains_key(&node_id) {
            if let Some(mut node) = self.high_id_nodes.remove(&node_id) {
                if let Some(key) = node.recycle_key() {
                    let recycle_pool_limit = node.recycle_pool_limit();
                    // Consume the warm-origin flag along with the node.
                    let warm_origin = self
                        .high_id_warm_recycled_origins
                        .remove(&node_id)
                        .unwrap_or(false);
                    node.prepare_for_recycle();
                    self.push_recycled_node(
                        key,
                        recycle_pool_limit,
                        RecycledNode::new(node_id, node, warm_origin),
                    );
                }
            }
            // Invalidate any outstanding references to this id.
            let generation = self.high_id_generations.entry(node_id).or_insert(0);
            *generation = generation.wrapping_add(1);
            return Ok(());
        }

        let physical_id = self
            .resolve_node_index(node_id)
            .ok_or(NodeError::Missing { id: node_id })?;
        if let Some(mut node) = self.nodes[physical_id].take() {
            if let Some(key) = node.recycle_key() {
                let recycle_pool_limit = node.recycle_pool_limit();
                // mem::take resets the dense flag to false while reading it.
                let warm_origin = self
                    .physical_warm_recycled_origins
                    .get_mut(physical_id)
                    .map(std::mem::take)
                    .unwrap_or(false);
                node.prepare_for_recycle();
                self.push_recycled_node(
                    key,
                    recycle_pool_limit,
                    RecycledNode::new(node_id, node, warm_origin),
                );
            }
        }
        // Tombstone the slot, unmap the stable id, bump its generation, and
        // hand the physical slot back to the freelist (min-heap via Reverse).
        self.physical_stable_ids[physical_id] = Self::INVALID_STABLE_ID;
        self.stable_to_physical.remove(&node_id);
        if let Some(generation) = self.stable_generations.get_mut(&node_id) {
            *generation = generation.wrapping_add(1);
        } else {
            self.stable_generations.insert(node_id, 1);
        }
        self.free_ids.push(Reverse(physical_id));
        Ok(())
    }
3133
    /// Removes `id` and every owned descendant in post-order using an explicit
    /// stack (no recursion), returning the maximum stack depth reached.
    ///
    /// Children are notified (`on_removed_from_parent` / `unmount`) before
    /// descending into them; storage is released only after a frame's children
    /// are all gone, giving post-order teardown.
    fn remove_subtree_postorder(&mut self, id: NodeId) -> Result<usize, NodeError> {
        // Validate the root up front so a bad id fails before any teardown.
        self.get_ref(id)?;

        let mut root_children = SmallVec::<[NodeId; 8]>::new();
        self.collect_owned_children(id, &mut root_children)?;

        let mut stack = Vec::new();
        stack.push(RemovalFrame {
            node_id: id,
            children: root_children,
            next_child: 0,
        });
        let mut max_depth = stack.len();

        while let Some(frame) = stack.last_mut() {
            if frame.next_child < frame.children.len() {
                let child_id = frame.children[frame.next_child];
                frame.next_child += 1;

                // Lifecycle callbacks fire pre-order; a child that vanished
                // concurrently is simply skipped.
                if let Ok(child) = self.get_mut(child_id) {
                    child.on_removed_from_parent();
                    child.unmount();
                }

                let mut child_children = SmallVec::<[NodeId; 8]>::new();
                self.collect_owned_children(child_id, &mut child_children)?;
                stack.push(RemovalFrame {
                    node_id: child_id,
                    children: child_children,
                    next_child: 0,
                });
                max_depth = max_depth.max(stack.len());
                continue;
            }

            // All children gone: release this node's storage (post-order).
            let node_id = frame.node_id;
            stack.pop();
            self.remove_node_storage(node_id)?;
        }

        Ok(max_depth)
    }
3176
    /// Test-only wrapper exposing the traversal depth of a subtree removal.
    #[cfg(test)]
    fn debug_remove_max_traversal_depth(&mut self, id: NodeId) -> Result<usize, NodeError> {
        self.remove_subtree_postorder(id)
    }
3181}
3182
3183impl Applier for MemoryApplier {
3184 fn create(&mut self, node: Box<dyn Node>) -> NodeId {
3185 self.ensure_stable_index_capacity();
3186 let stable_id = self.next_stable_id;
3187 self.next_stable_id = self.next_stable_id.saturating_add(1);
3188 self.stable_generations.insert(stable_id, 0);
3189
3190 let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
3191 debug_assert!(self.nodes[id].is_none(), "freelist entry {id} is not None");
3192 self.nodes[id] = Some(node);
3193 self.physical_stable_ids[id] = Self::pack_stable_id(stable_id);
3194 self.physical_warm_recycled_origins[id] = false;
3195 id
3196 } else {
3197 self.ensure_dense_node_storage_capacity();
3198 let id = self.nodes.len();
3199 self.nodes.push(Some(node));
3200 self.physical_stable_ids
3201 .push(Self::pack_stable_id(stable_id));
3202 self.physical_warm_recycled_origins.push(false);
3203 id
3204 };
3205 self.stable_to_physical.insert(stable_id, physical_id);
3206 stable_id
3207 }
3208
3209 fn node_generation(&self, id: NodeId) -> u32 {
3210 self.high_id_generations
3211 .get(&id)
3212 .copied()
3213 .or_else(|| self.stable_generations.get(&id).copied())
3214 .unwrap_or(0)
3215 }
3216
3217 fn get_mut(&mut self, id: NodeId) -> Result<&mut dyn Node, NodeError> {
3218 if let Some(physical_id) = self.resolve_node_index(id) {
3220 let slot = self.nodes[physical_id]
3221 .as_deref_mut()
3222 .ok_or(NodeError::Missing { id })?;
3223 return Ok(slot);
3224 }
3225 self.high_id_nodes
3226 .get_mut(&id)
3227 .map(|n| n.as_mut())
3228 .ok_or(NodeError::Missing { id })
3229 }
3230
3231 fn remove(&mut self, id: NodeId) -> Result<(), NodeError> {
3232 self.remove_subtree_postorder(id).map(|_| ())
3233 }
3234
3235 fn insert_with_id(&mut self, id: NodeId, node: Box<dyn Node>) -> Result<(), NodeError> {
3236 const HIGH_ID_THRESHOLD: NodeId = 1_000_000_000; if id >= HIGH_ID_THRESHOLD {
3241 if self.high_id_nodes.contains_key(&id) {
3242 return Err(NodeError::AlreadyExists { id });
3243 }
3244 self.high_id_nodes.insert(id, node);
3245 self.high_id_warm_recycled_origins.insert(id, false);
3246 self.high_id_generations.entry(id).or_insert(0);
3247 Ok(())
3248 } else {
3249 if self.stable_to_physical.contains_key(&id) {
3250 return Err(NodeError::AlreadyExists { id });
3251 }
3252
3253 let physical_id = if let Some(Reverse(id)) = self.free_ids.pop() {
3254 self.nodes[id] = Some(node);
3255 self.physical_stable_ids[id] = Self::pack_stable_id(id);
3256 self.physical_warm_recycled_origins[id] = false;
3257 id
3258 } else {
3259 self.ensure_dense_node_storage_capacity();
3260 let id = self.nodes.len();
3261 self.nodes.push(Some(node));
3262 self.physical_stable_ids.push(Self::pack_stable_id(id));
3263 self.physical_warm_recycled_origins.push(false);
3264 id
3265 };
3266
3267 self.next_stable_id = self.next_stable_id.max(id.saturating_add(1));
3268 self.ensure_stable_index_capacity();
3269 self.stable_generations.entry(id).or_insert(0);
3270 self.physical_stable_ids[physical_id] = Self::pack_stable_id(id);
3271 self.stable_to_physical.insert(id, physical_id);
3272 Ok(())
3273 }
3274 }
3275
3276 fn compact(&mut self) {
3277 let live_count = self.nodes.iter().filter(|slot| slot.is_some()).count();
3278 let tombstone_count = self.nodes.len().saturating_sub(live_count);
3279 if tombstone_count == 0 {
3280 return;
3281 }
3282 if self.nodes.len() > Self::EAGER_COMPACT_NODE_LEN && tombstone_count < live_count {
3283 return;
3284 }
3285 let rehouse_live_nodes = tombstone_count >= live_count;
3286 let mut packed_nodes = Vec::with_capacity(live_count);
3287 let mut packed_physical_stable_ids = Vec::with_capacity(live_count);
3288 let mut packed_warm_recycled_origins = Vec::with_capacity(live_count);
3289 let mut stable_to_physical = HashMap::default();
3290 stable_to_physical.reserve(live_count);
3291
3292 for physical_id in 0..self.nodes.len() {
3293 let Some(mut node) = self.nodes[physical_id].take() else {
3294 continue;
3295 };
3296 if rehouse_live_nodes {
3297 if let Some(rehoused) = node.rehouse_for_live_compaction() {
3298 node = rehoused;
3299 }
3300 }
3301 let stable_id = std::mem::replace(
3302 &mut self.physical_stable_ids[physical_id],
3303 Self::INVALID_STABLE_ID,
3304 );
3305 debug_assert_ne!(
3306 stable_id,
3307 Self::INVALID_STABLE_ID,
3308 "live physical slot must have a stable id",
3309 );
3310 let stable_id = Self::unpack_stable_id(stable_id);
3311 packed_nodes.push(Some(node));
3312 packed_physical_stable_ids.push(Self::pack_stable_id(stable_id));
3313 packed_warm_recycled_origins.push(self.physical_warm_recycled_origins[physical_id]);
3314 stable_to_physical.insert(stable_id, packed_nodes.len() - 1);
3315 }
3316
3317 self.nodes = packed_nodes;
3318 self.physical_stable_ids = packed_physical_stable_ids;
3319 self.physical_warm_recycled_origins = packed_warm_recycled_origins;
3320 self.free_ids = BinaryHeap::new();
3321 self.stable_to_physical = stable_to_physical;
3322 self.prune_stable_generations();
3323 }
3324
3325 fn take_recycled_node(&mut self, key: TypeId) -> Option<RecycledNode> {
3326 Self::take_recycled_node_from_pool(&mut self.returning_recycled_nodes, key)
3327 .or_else(|| Self::take_recycled_node_from_pool(&mut self.recycled_nodes, key))
3328 }
3329
3330 fn set_recycled_node_origin(&mut self, id: NodeId, warm_origin: bool) {
3331 if let Some(physical_id) = self.resolve_node_index(id) {
3332 self.physical_warm_recycled_origins[physical_id] = warm_origin;
3333 } else if self.high_id_nodes.contains_key(&id) {
3334 self.high_id_warm_recycled_origins.insert(id, warm_origin);
3335 }
3336 }
3337
3338 fn seed_recycled_node_shell(
3339 &mut self,
3340 key: TypeId,
3341 recycle_pool_limit: Option<usize>,
3342 shell: Box<dyn Node>,
3343 ) {
3344 self.seed_recycled_node_shell_impl(key, recycle_pool_limit, shell);
3345 }
3346
3347 fn record_fresh_recyclable_creation(&mut self, key: TypeId) {
3348 *self.fresh_recyclable_creations.entry(key).or_insert(0) += 1;
3349 }
3350
3351 fn clear_recycled_nodes(&mut self) {
3352 let returning = std::mem::take(&mut self.returning_recycled_nodes);
3353 for (key, mut nodes) in returning {
3354 let pool = self.recycled_nodes.entry(key).or_default();
3355 pool.append(&mut nodes);
3356 }
3357
3358 let fresh_recyclable_creations = std::mem::take(&mut self.fresh_recyclable_creations);
3359 let cold = std::mem::take(&mut self.cold_recycled_nodes);
3360 for (key, mut nodes) in cold {
3361 let needed = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
3362 if needed > 0 {
3363 let remaining_limit = self
3364 .recycle_pool_limit_for(key)
3365 .unwrap_or(usize::MAX)
3366 .saturating_sub(self.warm_recycled_pool_len(key));
3367 let promote = nodes.len().min(needed).min(remaining_limit);
3368 let split_at = nodes.len().saturating_sub(promote);
3369 let promoted = nodes.split_off(split_at);
3370 for mut recycled in promoted {
3371 recycled.set_warm_origin(true);
3372 self.recycled_nodes.entry(key).or_default().push(recycled);
3373 }
3374 }
3375 }
3376
3377 let mut keys: HashSet<TypeId> = HashSet::default();
3378 keys.extend(self.recycled_nodes.keys().copied());
3379 keys.extend(self.recycled_node_limits.keys().copied());
3380 keys.extend(self.warm_recycled_node_targets.keys().copied());
3381 keys.extend(self.recycled_node_prototypes.keys().copied());
3382 for key in keys {
3383 let observed_demand = fresh_recyclable_creations.get(&key).copied().unwrap_or(0);
3384 let target = self.update_warm_recycled_node_target(key, observed_demand);
3385 self.replenish_warm_pool_to_target(key, target);
3386 self.trim_idle_warm_pool_to_target(key, target);
3387 self.compact_idle_warm_pool(key);
3388 }
3389 self.prune_stable_generations();
3390 self.compact();
3394 }
3395}
3396
/// Shared handle that lends out dynamically-typed access to an [`Applier`]
/// (typically backed by interior mutability, e.g. a `RefCell`).
pub trait ApplierHost {
    /// Mutably borrows the applier as a trait object.
    fn borrow_dyn(&self) -> RefMut<'_, dyn Applier>;
    /// Optional storage-compaction hook; default is a no-op.
    fn compact(&self) {}
}
3402
/// [`ApplierHost`] wrapping a concrete applier in a `RefCell`, allowing both
/// typed and trait-object borrows.
pub struct ConcreteApplierHost<A: Applier + 'static> {
    // Single-threaded interior mutability; borrows panic on conflict.
    inner: RefCell<A>,
}
3406
impl<A: Applier + 'static> ConcreteApplierHost<A> {
    /// Wraps `applier` so it can be borrowed through the host.
    pub fn new(applier: A) -> Self {
        Self {
            inner: RefCell::new(applier),
        }
    }

    /// Mutably borrows the concrete applier; panics if already borrowed.
    pub fn borrow_typed(&self) -> RefMut<'_, A> {
        self.inner.borrow_mut()
    }

    /// Non-panicking variant of [`Self::borrow_typed`].
    pub fn try_borrow_typed(&self) -> Result<RefMut<'_, A>, std::cell::BorrowMutError> {
        self.inner.try_borrow_mut()
    }

    /// Consumes the host and returns the wrapped applier.
    pub fn into_inner(self) -> A {
        self.inner.into_inner()
    }
}
3426
3427impl<A: Applier + 'static> ApplierHost for ConcreteApplierHost<A> {
3428 fn borrow_dyn(&self) -> RefMut<'_, dyn Applier> {
3429 RefMut::map(self.inner.borrow_mut(), |applier| {
3430 applier as &mut dyn Applier
3431 })
3432 }
3433
3434 fn compact(&self) {
3435 self.inner.borrow_mut().compact();
3436 }
3437}
3438
/// RefCell-backed guard that derefs to a concretely typed applier for the
/// duration of a borrow.
pub struct ApplierGuard<'a, A: Applier + 'static> {
    inner: RefMut<'a, A>,
}
3442
impl<'a, A: Applier + 'static> ApplierGuard<'a, A> {
    /// Wraps an active borrow; crate-internal construction only.
    fn new(inner: RefMut<'a, A>) -> Self {
        Self { inner }
    }
}
3448
// Transparent read access to the guarded applier.
impl<'a, A: Applier + 'static> Deref for ApplierGuard<'a, A> {
    type Target = A;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
3456
// Transparent write access to the guarded applier.
impl<'a, A: Applier + 'static> DerefMut for ApplierGuard<'a, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
3462
/// Owner of a [`SlotTable`] plus the lifecycle state needed to run slot
/// passes against it and to dispose retained subtrees on drop.
pub struct SlotsHost {
    // Identity of the wrapped table; used to key per-host runtime bookkeeping.
    storage_key: Cell<usize>,
    inner: RefCell<SlotsHostInner>,
}
3467
/// Flags describing what storage maintenance a finished slot pass performed
/// or requested.
#[derive(Debug, Default)]
pub(crate) struct SlotPassOutcome {
    pub(crate) compacted: bool,
    pub(crate) compact_anchor_registry_storage: bool,
    pub(crate) compact_payload_storage: bool,
}
3474
/// Result of closing a slot write session: maintenance flags plus any root
/// subtrees detached during the pass.
#[derive(Default)]
pub(crate) struct FinishedSlotPass {
    pub(crate) outcome: SlotPassOutcome,
    pub(crate) detached_root_children: Vec<slot::DetachedSubtree>,
}
3480
/// State held while a slot write session is open on the host.
struct ActivePassState {
    state: slot::SlotWriteSessionState,
}
3484
/// Interior of [`SlotsHost`], guarded by its `RefCell`.
struct SlotsHostInner {
    table: SlotTable,
    lifecycle: slot::SlotLifecycleCoordinator,
    // Runtime bound via bind_runtime_state; None until a composer attaches.
    runtime_state: Option<Rc<crate::composer::ComposerRuntimeState>>,
    // Some while a write session is open; guards rebind/transfer operations.
    active_pass: Option<ActivePassState>,
}
3491
impl Drop for SlotsHost {
    /// Disposes retained subtrees through the bound runtime (falling back to
    /// abandoning them on failure), then tears down the slot table itself.
    fn drop(&mut self) {
        let storage_key = self.storage_key.get();
        let inner = self.inner.get_mut();
        if let Some(state) = inner.runtime_state.clone() {
            if let Err(err) = state.dispose_retained_subtrees_for_host(
                storage_key,
                &mut inner.table,
                &mut inner.lifecycle,
            ) {
                // Disposal failed mid-way: log and abandon what remains so the
                // runtime does not keep references into a dead host.
                log::error!(
                    "retained subtree disposal failed while dropping SlotsHost {storage_key}: {err}"
                );
                state.abandon_retained_subtrees_for_host(
                    storage_key,
                    &mut inner.table,
                    &mut inner.lifecycle,
                );
            } else {
                state.clear_host_storage_key(storage_key);
            }
        }
        inner.lifecycle.dispose_slot_table(&mut inner.table);
    }
}
3517
3518impl SlotsHost {
    /// Identity of the wrapped slot table (mirrors `SlotTable::storage_id`).
    pub(crate) fn storage_key(&self) -> usize {
        self.storage_key.get()
    }
3522
3523 pub fn new(storage: SlotTable) -> Self {
3524 let storage_key = storage.storage_id();
3525 Self {
3526 storage_key: Cell::new(storage_key),
3527 inner: RefCell::new(SlotsHostInner {
3528 table: storage,
3529 lifecycle: slot::SlotLifecycleCoordinator::default(),
3530 runtime_state: None,
3531 active_pass: None,
3532 }),
3533 }
3534 }
3535
    /// Binds (or replaces) the composer runtime this host reports to.
    pub(crate) fn bind_runtime_state(&self, state: &Rc<crate::composer::ComposerRuntimeState>) {
        let mut inner = self.inner.borrow_mut();
        inner.runtime_state = Some(Rc::clone(state));
    }
3540
    /// Attempts to rebind this host from a dead (applier-less) runtime to
    /// `state`, disposing subtrees retained by the old runtime first.
    ///
    /// Returns `true` on success (including the already-bound and
    /// never-bound cases) and `false` when the current runtime is still live
    /// or its retained subtrees cannot be disposed. Must not be called while
    /// a pass is active.
    pub(crate) fn rebind_orphaned_runtime_state(
        &self,
        state: &Rc<crate::composer::ComposerRuntimeState>,
    ) -> bool {
        // First, a read-only check so the common cases avoid a write borrow.
        let inner = self.inner.borrow();
        assert!(
            inner.active_pass.is_none(),
            "cannot rebind SlotsHost during an active pass"
        );
        let Some(bound_state) = inner.runtime_state.as_ref() else {
            drop(inner);
            self.bind_runtime_state(state);
            return true;
        };
        if Rc::ptr_eq(bound_state, state) {
            return true;
        }
        if bound_state.has_live_applier_host() {
            return false;
        }
        drop(inner);

        // Re-check under the write borrow: the state may have changed between
        // the two borrows (double-checked pattern, single-threaded RefCell).
        let mut inner = self.inner.borrow_mut();
        let Some(bound_state) = inner.runtime_state.as_ref() else {
            inner.runtime_state = Some(Rc::clone(state));
            return true;
        };
        if Rc::ptr_eq(bound_state, state) {
            return true;
        }
        if bound_state.has_live_applier_host() {
            return false;
        }

        // Take the lifecycle out so the old runtime can dispose against it
        // while we still hold the inner borrow.
        let previous_state = Rc::clone(bound_state);
        let mut lifecycle = std::mem::take(&mut inner.lifecycle);
        lifecycle.flush_pending_drops();
        let host_key = self.storage_key();
        if previous_state
            .dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)
            .is_err()
        {
            // Disposal failed: restore the lifecycle and keep the old binding.
            inner.lifecycle = lifecycle;
            return false;
        }
        previous_state.clear_host(self);
        lifecycle.flush_pending_drops();
        inner.runtime_state = Some(Rc::clone(state));
        inner.lifecycle = lifecycle;
        true
    }
3592
3593 pub(crate) fn runtime_state(&self) -> Option<Rc<crate::composer::ComposerRuntimeState>> {
3594 self.inner.borrow().runtime_state.clone()
3595 }
3596
3597 pub(crate) fn borrow(&self) -> Ref<'_, SlotTable> {
3598 Ref::map(self.inner.borrow(), |inner| &inner.table)
3599 }
3600
3601 pub(crate) fn borrow_mut(&self) -> RefMut<'_, SlotTable> {
3602 RefMut::map(self.inner.borrow_mut(), |inner| &mut inner.table)
3603 }
3604
3605 pub fn into_table(self: Rc<Self>) -> Result<SlotTable, NodeError> {
3606 assert_eq!(
3607 Rc::strong_count(&self),
3608 1,
3609 "cannot transfer SlotsHost table while other host references are alive"
3610 );
3611 self.take_table_for_transfer()
3612 }
3613
3614 fn take_table_for_transfer(&self) -> Result<SlotTable, NodeError> {
3615 let inner = self.inner.borrow();
3616 assert!(
3617 inner.active_pass.is_none(),
3618 "cannot take SlotsHost during an active pass"
3619 );
3620 drop(inner);
3621 let mut inner = self.inner.borrow_mut();
3622 let mut lifecycle = std::mem::take(&mut inner.lifecycle);
3623 lifecycle.flush_pending_drops();
3624 if let Some(state) = inner.runtime_state.clone() {
3625 let host_key = self.storage_key();
3626 state.dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)?;
3627 state.clear_host(self);
3628 lifecycle.flush_pending_drops();
3629 }
3630 let taken = std::mem::take(&mut inner.table);
3631 self.storage_key.set(inner.table.storage_id());
3632 inner.runtime_state = None;
3633 inner.lifecycle = lifecycle;
3634 Ok(taken)
3635 }
3636
3637 pub fn reset(&self) -> Result<(), NodeError> {
3638 let inner = self.inner.borrow();
3639 assert!(
3640 inner.active_pass.is_none(),
3641 "cannot reset SlotsHost during an active pass"
3642 );
3643 let runtime_state = inner.runtime_state.clone();
3644 drop(inner);
3645 let mut inner = self.inner.borrow_mut();
3646 let mut lifecycle = std::mem::take(&mut inner.lifecycle);
3647 if let Some(state) = runtime_state {
3648 let host_key = self.storage_key();
3649 state.dispose_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle)?;
3650 state.clear_host(self);
3651 }
3652 lifecycle.dispose_slot_table(&mut inner.table);
3653 inner.table = SlotTable::default();
3654 self.storage_key.set(inner.table.storage_id());
3655 inner.runtime_state = None;
3656 inner.lifecycle = slot::SlotLifecycleCoordinator::default();
3657 Ok(())
3658 }
3659
3660 pub(crate) fn abandon_after_apply_failure(&self) {
3661 let inner = self.inner.borrow();
3662 assert!(
3663 inner.active_pass.is_none(),
3664 "cannot abandon SlotsHost during an active pass"
3665 );
3666 let runtime_state = inner.runtime_state.clone();
3667 drop(inner);
3668 let mut inner = self.inner.borrow_mut();
3669 let mut lifecycle = std::mem::take(&mut inner.lifecycle);
3670 if let Some(state) = runtime_state {
3671 let host_key = self.storage_key();
3672 state.abandon_retained_subtrees_for_host(host_key, &mut inner.table, &mut lifecycle);
3673 }
3674 lifecycle.dispose_slot_table(&mut inner.table);
3675 inner.table = SlotTable::default();
3676 self.storage_key.set(inner.table.storage_id());
3677 inner.runtime_state = None;
3678 inner.lifecycle = slot::SlotLifecycleCoordinator::default();
3679 }
3680
3681 pub(crate) fn debug_stats(&self) -> SlotTableDebugStats {
3682 let inner = self.inner.borrow();
3683 let local = inner.table.debug_stats();
3684 let lifecycle = inner.lifecycle.debug_stats();
3685 let retention = inner
3686 .runtime_state
3687 .clone()
3688 .map(|state| state.slot_retention_debug_stats(self))
3689 .unwrap_or_default();
3690 SlotTableDebugStats::from_parts(local, lifecycle, retention)
3691 }
3692
3693 pub(crate) fn debug_snapshot(&self) -> slot::SlotDebugSnapshot {
3694 let inner = self.inner.borrow();
3695 let mut snapshot = inner.table.debug_snapshot();
3696 if let Some(state) = inner.runtime_state.clone() {
3697 state.fill_slot_debug_snapshot(self, &mut snapshot);
3698 }
3699 snapshot
3700 }
3701
3702 pub(crate) fn begin_pass(&self, mode: slot::SlotPassMode) {
3703 let mut inner = self.inner.borrow_mut();
3704 assert!(
3705 inner.active_pass.is_none(),
3706 "slot pass already active for host"
3707 );
3708 let mut state = slot::SlotWriteSessionState::default();
3709 state.reset_for_pass(mode);
3710 inner.active_pass = Some(ActivePassState { state });
3711 }
3712
3713 pub(crate) fn has_active_pass(&self) -> bool {
3714 self.inner.borrow().active_pass.is_some()
3715 }
3716
3717 pub(crate) fn abandon_active_pass(&self) {
3718 self.inner.borrow_mut().active_pass = None;
3719 }
3720
3721 pub(crate) fn with_write_session<R>(
3722 &self,
3723 f: impl FnOnce(&mut slot::SlotWriteSession<'_>) -> R,
3724 ) -> R {
3725 let mut inner = self.inner.borrow_mut();
3726 let SlotsHostInner {
3727 table,
3728 lifecycle,
3729 active_pass,
3730 ..
3731 } = &mut *inner;
3732 let active_pass = active_pass
3733 .as_mut()
3734 .expect("slot write session requires an active pass");
3735 let mut session = table.write_session(lifecycle, &mut active_pass.state);
3736 f(&mut session)
3737 }
3738
3739 pub(crate) fn with_table_and_lifecycle_mut<R>(
3740 &self,
3741 f: impl FnOnce(&mut SlotTable, &mut slot::SlotLifecycleCoordinator) -> R,
3742 ) -> R {
3743 let mut inner = self.inner.borrow_mut();
3744 let SlotsHostInner {
3745 table, lifecycle, ..
3746 } = &mut *inner;
3747 f(table, lifecycle)
3748 }
3749
3750 pub(crate) fn finish_pass(
3751 &self,
3752 applier: &mut dyn Applier,
3753 ) -> Result<FinishedSlotPass, NodeError> {
3754 let mut inner = self.inner.borrow_mut();
3755 let SlotsHostInner {
3756 table,
3757 lifecycle,
3758 active_pass: active_pass_slot,
3759 ..
3760 } = &mut *inner;
3761 let Some(mut active_pass) = active_pass_slot.take() else {
3762 return Ok(FinishedSlotPass::default());
3763 };
3764
3765 active_pass.state.flush_payload_location_refreshes(table);
3766
3767 #[cfg(debug_assertions)]
3768 if let Err(err) = active_pass.state.validate(table) {
3769 panic!("slot writer invariant violation before finalize_pass: {err:?}");
3770 }
3771
3772 let detached_root_children = {
3773 let mut session = table.write_session(lifecycle, &mut active_pass.state);
3774 session.finalize_pass(applier)?
3775 };
3776
3777 Ok(FinishedSlotPass {
3778 outcome: SlotPassOutcome {
3779 compacted: active_pass.state.request_compaction,
3780 compact_anchor_registry_storage: active_pass
3781 .state
3782 .request_anchor_storage_compaction,
3783 compact_payload_storage: active_pass.state.request_payload_storage_compaction,
3784 },
3785 detached_root_children,
3786 })
3787 }
3788
3789 pub(crate) fn complete_pass_cleanup(&self, outcome: &SlotPassOutcome) {
3790 let mut inner = self.inner.borrow_mut();
3791 let SlotsHostInner {
3792 table,
3793 lifecycle,
3794 runtime_state,
3795 ..
3796 } = &mut *inner;
3797 lifecycle.flush_pending_drops();
3798 if outcome.compacted {
3799 table.compact_storage();
3800 lifecycle.compact_storage();
3801 }
3802 if let Some(state) = runtime_state.clone() {
3803 state.compact_table_identity_storage_for_host(
3804 self,
3805 table,
3806 outcome.compact_anchor_registry_storage,
3807 outcome.compact_payload_storage,
3808 );
3809 } else {
3810 if outcome.compact_anchor_registry_storage {
3811 table.compact_anchor_registry_storage(None);
3812 }
3813 if outcome.compact_payload_storage {
3814 table.compact_payload_anchor_registry_storage(None);
3815 }
3816 }
3817 table.assert_fast_integrity("slot pass cleanup");
3818 #[cfg(any(test, debug_assertions))]
3819 {
3820 table.debug_verify();
3821 if let Some(state) = runtime_state.clone() {
3822 state.debug_verify_host(self, table);
3823 }
3824 }
3825 }
3826}
3827
3828fn build_child_positions(children: &[NodeId]) -> HashMap<NodeId, usize> {
3829 let mut positions = HashMap::default();
3830 positions.reserve(children.len());
3831 for (index, &child) in children.iter().enumerate() {
3832 positions.insert(child, index);
3833 }
3834 positions
3835}
3836
3837fn refresh_child_positions(
3838 current: &[NodeId],
3839 positions: &mut HashMap<NodeId, usize>,
3840 start: usize,
3841 end: usize,
3842) {
3843 if current.is_empty() || start >= current.len() {
3844 return;
3845 }
3846 let end = end.min(current.len() - 1);
3847 for (offset, &child) in current[start..=end].iter().enumerate() {
3848 positions.insert(child, start + offset);
3849 }
3850}
3851
3852fn insert_child_into_diff_state(
3853 current: &mut ChildList,
3854 positions: &mut HashMap<NodeId, usize>,
3855 index: usize,
3856 child: NodeId,
3857) {
3858 let index = index.min(current.len());
3859 current.insert(index, child);
3860 refresh_child_positions(current, positions, index, current.len() - 1);
3861}
3862
3863fn move_child_in_diff_state(
3864 current: &mut ChildList,
3865 positions: &mut HashMap<NodeId, usize>,
3866 from_index: usize,
3867 target_index: usize,
3868) -> usize {
3869 let child = current.remove(from_index);
3870 let to_index = target_index.min(current.len());
3871 current.insert(to_index, child);
3872 refresh_child_positions(
3873 current,
3874 positions,
3875 from_index.min(to_index),
3876 from_index.max(to_index),
3877 );
3878 to_index
3879}
3880
3881pub(crate) use state::MutableStateInner;
3882pub use state::{MutableState, OwnedMutableState, SnapshotStateList, SnapshotStateMap, State};
3883
3884fn hash_key<K: Hash>(key: &K) -> Key {
3885 let mut hasher = hash::default::new();
3886 key.hash(&mut hasher);
3887 hasher.finish()
3888}
3889
3890pub(crate) fn explicit_group_key_seed<K: Hash>(
3891 key: &K,
3892 caller: &'static std::panic::Location<'static>,
3893) -> slot::GroupKeySeed {
3894 let source_key = location_key(caller.file(), caller.line(), caller.column());
3895 let explicit_key = hash_key(key);
3896 slot::GroupKeySeed::keyed(source_key, explicit_key)
3897}
3898
3899#[cfg(test)]
3900#[path = "tests/mod.rs"]
3901mod tests;
3902
3903#[cfg(test)]
3904#[path = "tests/recursive_decrease_increase_test.rs"]
3905mod recursive_decrease_increase_test;
3906
3907pub mod collections;
3908pub mod hash;