1mod anchor_map;
4mod boundary_policy;
5mod lifecycle_queue;
6mod storage;
7mod verifier;
8
9use crate::{
10 collections::map::HashMap,
11 remove_child_and_cleanup_now,
12 slot_storage::{GroupId, StartScopedGroup},
13 AnchorId, Applier, Key, NodeId, Owned, RecomposeScope, ScopeId,
14};
15use boundary_policy::{BoundaryTransition, PassBoundary};
16use lifecycle_queue::DeferredDrop;
17pub use lifecycle_queue::OrphanedNode;
18pub(crate) use lifecycle_queue::SlotLifecycleCoordinator;
19use storage::{EntryClass, EntryKind, EntryVisibility, GroupSpans, SlotStorage, StorageEffects};
20use verifier::debug_verify_slot_table;
21
/// Snapshot of length/capacity counters for the slot table and its auxiliary
/// buffers, populated from `SlotStorage::debug_stats` and supplemented by
/// `SlotLifecycleCoordinator::fill_debug_stats`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct SlotTableDebugStats {
    // Slot entry buffer.
    pub slots_len: usize,
    pub slots_cap: usize,
    // Payload drops queued for deferred execution.
    pub pending_slot_drops_len: usize,
    pub pending_slot_drops_cap: usize,
    // Anchor bookkeeping.
    pub anchors_len: usize,
    pub anchors_cap: usize,
    pub hidden_entries_len: usize,
    pub preserved_groups_len: usize,
    pub free_anchor_ids_len: usize,
    pub free_anchor_ids_cap: usize,
    // Write-session group stack.
    pub group_stack_len: usize,
    pub group_stack_cap: usize,
    // Orphaned-node tracking (see `SlotLifecycleCoordinator`).
    pub orphaned_node_ids_len: usize,
    pub orphaned_node_ids_cap: usize,
}
39
/// Per-type tally of stored slot values: how many values of `type_name`
/// exist and how many inline payload bytes they occupy in the table.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SlotValueTypeDebugStat {
    pub type_name: &'static str,
    pub count: usize,
    pub inline_payload_bytes: usize,
}
46
/// Where a node's slot currently stands: still live (`Active`), retained in
/// a hidden/preserved region (`PreservedGap`), or no longer present in the
/// table (`Missing`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum NodeSlotState {
    Active,
    PreservedGap,
    Missing,
}
53
/// Which kind of write pass a `SlotWriteSession` is running: an initial
/// compose pass or a recompose pass over existing content.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum SlotPassMode {
    Compose,
    Recompose,
}
59
/// Stack frame for a group currently open during a write session.
#[derive(Clone, Debug)]
struct GroupFrame {
    // Slot index of the group's own header entry.
    start: usize,
    // Exclusive scan end (header plus content), widened as writing proceeds.
    end: usize,
    // Live end computed from the stored live extent when the frame was
    // pushed (`start + live_len`); consumed by `skip_current`.
    stored_live_end: usize,
    // Exclusive end of content written live this pass; begins just past the
    // header and grows with the cursor.
    live_end: usize,
    // Reuse-policy boundary the group runs under.
    pass_boundary: PassBoundary,
    // Anchor of the most recently completed direct child group, if any.
    previous_direct_child_group: Option<AnchorId>,
}
69
/// A reusable group candidate located by the reuse planner.
#[derive(Clone)]
struct MatchedGroup {
    // Current slot index of the candidate's header entry.
    index: usize,
    // Extents recorded on the candidate (header included).
    scan_extent: usize,
    live_extent: usize,
    // Boundary key recorded on the candidate's retention.
    boundary_key: Key,
    // True when the candidate was found among hidden (preserved) entries.
    reused_hidden: bool,
}
78
/// How a group should be entered: the pass boundary it runs under and
/// whether its content must be recomposed rather than reused as-is.
#[derive(Clone, Copy, Debug)]
struct GroupEntryPlan {
    pass_boundary: PassBoundary,
    requires_recompose: bool,
}
84
/// Result of starting a group: where its header landed and whether the
/// caller must recompose its content.
#[derive(Clone, Copy, Debug)]
struct StartedGroup {
    index: usize,
    requires_recompose: bool,
}
90
/// Accumulates per-previous-parent adjustments while rebinding direct child
/// groups to a new parent, so counters are fixed up once in `apply`.
#[derive(Default)]
struct DirectChildReparentBatch {
    // Hidden-descendant counts to subtract, keyed by previous parent index.
    removed_hidden_descendants: HashMap<usize, usize>,
    // Preserved-tail scan extents to remove, keyed by previous parent index.
    removed_preserved_scan: HashMap<usize, usize>,
    // Hidden descendants gained by the new parent.
    added_hidden_descendants: usize,
}
97
98impl DirectChildReparentBatch {
99 fn record_previous_parent(
100 &mut self,
101 previous_parent_index: usize,
102 hidden_owned: usize,
103 removed_scan_extent: usize,
104 ) {
105 Self::accumulate(
106 &mut self.removed_hidden_descendants,
107 previous_parent_index,
108 hidden_owned,
109 );
110 Self::accumulate(
111 &mut self.removed_preserved_scan,
112 previous_parent_index,
113 removed_scan_extent,
114 );
115 }
116
117 fn add_hidden_descendants_to_new_parent(&mut self, hidden_owned: usize) {
118 self.added_hidden_descendants += hidden_owned;
119 }
120
121 fn apply(self, table: &mut SlotTable, parent_index: usize) {
122 for (previous_parent_index, hidden_owned) in self.removed_hidden_descendants {
123 table.adjust_hidden_descendants(Some(previous_parent_index), -(hidden_owned as isize));
124 }
125 for (previous_parent_index, removed_scan_extent) in self.removed_preserved_scan {
126 table.shrink_group_preserved_tail(previous_parent_index, removed_scan_extent);
127 }
128 if self.added_hidden_descendants > 0 {
129 table.adjust_hidden_descendants(
130 Some(parent_index),
131 self.added_hidden_descendants as isize,
132 );
133 }
134 }
135
136 fn accumulate(map: &mut HashMap<usize, usize>, index: usize, delta: usize) {
137 if delta == 0 {
138 return;
139 }
140 *map.entry(index).or_default() += delta;
141 }
142}
143
/// Retention record kept on a group entry; currently just the boundary key
/// under which the group's state may be preserved and later reused.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)]
pub(crate) struct GroupRetention {
    boundary_key: Key,
}
148
impl GroupRetention {
    /// Creates a retention record for `boundary_key`.
    pub(crate) fn new(boundary_key: Key) -> Self {
        Self { boundary_key }
    }

    /// The boundary key this retention was recorded under.
    pub(crate) fn boundary_key(self) -> Key {
        self.boundary_key
    }
}
158
/// Per-pass mutable state of a write session: the slot cursor, the stack of
/// open group frames, and the last completed group at root level.
#[derive(Default)]
pub(crate) struct SlotWriteSessionState {
    cursor: usize,
    group_stack: Vec<GroupFrame>,
    // Anchor of the most recently completed top-level group (used when the
    // group stack is empty).
    previous_root_group: Option<AnchorId>,
}
165
/// Borrowed writer over a `SlotTable` for one compose or recompose pass,
/// bundling the table, the lifecycle coordinator, and the session state.
pub(crate) struct SlotWriteSession<'a> {
    table: &'a mut SlotTable,
    lifecycle: &'a mut SlotLifecycleCoordinator,
    state: &'a mut SlotWriteSessionState,
    mode: SlotPassMode,
}
172
/// Read-only helper over the table used to collect debug rows, node ids,
/// and descendant scopes.
struct SlotReadCursor<'a> {
    table: &'a SlotTable,
}
176
177mod reuse_planner;
178#[cfg(test)]
179mod tests;
180
181use reuse_planner::{ReusePlanner, ReusePlannerContext, StartPlan};
182
/// Slot table for the composition runtime; all entry data (groups, values,
/// nodes, anchors) lives in the gap-buffer-backed `SlotStorage`.
pub struct SlotTable {
    storage: SlotStorage,
}
186
187impl SlotTable {
    /// Minimum backing length produced by the first growth step.
    const INITIAL_CAP: usize = 32;
    // Tuning knobs forwarded verbatim to `SlotStorage::compact`; their exact
    // semantics live there.
    const EAGER_COMPACT_SLOT_LEN: usize = 1_024;
    const FRACTIONAL_COMPACT_GAP_THRESHOLD: usize = 256;
    const FRACTIONAL_COMPACT_RATIO_DIVISOR: usize = 4;
    /// At or above this length, growth switches from doubling to fractional
    /// increments (see `next_slot_target_len`).
    const LARGE_GROWTH_THRESHOLD: usize = 32 * 1024;
    /// Divisor for the incremental growth step of large tables.
    const LARGE_GROWTH_DIVISOR: usize = 4;
194
195 pub fn new() -> Self {
196 Self {
197 storage: SlotStorage::new(),
198 }
199 }
200
    /// Heap bytes currently owned by the backing storage.
    pub fn heap_bytes(&self) -> usize {
        self.storage.heap_bytes()
    }
204
    /// Storage-side length/capacity counters; lifecycle counters are filled
    /// in separately by `SlotLifecycleCoordinator::fill_debug_stats`.
    pub fn debug_stats(&self) -> SlotTableDebugStats {
        self.storage.debug_stats()
    }
208
209 fn next_slot_target_len(old_len: usize) -> usize {
210 if old_len < Self::INITIAL_CAP {
211 return Self::INITIAL_CAP;
212 }
213 if old_len < Self::LARGE_GROWTH_THRESHOLD {
214 return old_len.saturating_mul(2);
215 }
216
217 let incremental_growth = (old_len / Self::LARGE_GROWTH_DIVISOR).max(Self::INITIAL_CAP);
218 old_len.saturating_add(incremental_growth)
219 }
220
    /// Guarantees the storage gap sits at `index` with room for at least one
    /// insertion, growing the backing buffer first when the gap is empty.
    fn ensure_insert_capacity(&mut self, index: usize) {
        if self.storage.gap_len() == 0 {
            let current = self.storage.capacity();
            let target = Self::next_slot_target_len(current.max(self.storage.len()));
            // Always leave space for at least one more entry even if the
            // growth curve rounds down.
            self.storage
                .grow(target.max(self.storage.len().saturating_add(1)));
        }
        self.storage.ensure_gap_at(index);
    }
230
    /// Opens a writer over this table for one compose/recompose pass,
    /// borrowing the lifecycle coordinator (deferred drops, scope
    /// deactivation) and the per-pass cursor/group-stack state.
    pub(crate) fn write_session<'a>(
        &'a mut self,
        lifecycle: &'a mut SlotLifecycleCoordinator,
        state: &'a mut SlotWriteSessionState,
        mode: SlotPassMode,
    ) -> SlotWriteSession<'a> {
        SlotWriteSession {
            table: self,
            lifecycle,
            state,
            mode,
        }
    }
244
245 fn update_group_bounds(&self, state: &mut SlotWriteSessionState) {
246 if let Some(frame) = state.group_stack.last_mut() {
247 if frame.end < state.cursor {
248 frame.end = state.cursor;
249 }
250 if frame.live_end < state.cursor {
251 frame.live_end = state.cursor;
252 }
253 }
254 }
255
256 fn update_group_live_bounds_to(&self, state: &mut SlotWriteSessionState, live_cursor: usize) {
257 if let Some(frame) = state.group_stack.last_mut() {
258 if frame.live_end < live_cursor {
259 frame.live_end = live_cursor;
260 }
261 }
262 }
263
    /// Shifts every stacked group frame to account for `delta` slots
    /// inserted (positive) or removed (negative) at `index`. Frames starting
    /// at or after `index` move wholesale; frames spanning `index` only
    /// adjust the end bounds that sit past it. Negative shifts saturate so
    /// bounds can never underflow.
    fn shift_group_frames(&self, state: &mut SlotWriteSessionState, index: usize, delta: isize) {
        if delta == 0 {
            return;
        }

        if delta > 0 {
            let delta = delta as usize;
            for frame in &mut state.group_stack {
                if frame.start >= index {
                    // Frame lies entirely at/after the insertion point.
                    frame.start += delta;
                    frame.end += delta;
                    frame.stored_live_end += delta;
                    frame.live_end += delta;
                } else if frame.end >= index {
                    // Frame spans the insertion point: stretch the ends.
                    frame.end += delta;
                    if frame.stored_live_end >= index {
                        frame.stored_live_end += delta;
                    }
                    if frame.live_end >= index {
                        frame.live_end += delta;
                    }
                }
            }
        } else {
            let delta = (-delta) as usize;
            for frame in &mut state.group_stack {
                if frame.start >= index {
                    frame.start = frame.start.saturating_sub(delta);
                    frame.end = frame.end.saturating_sub(delta);
                    frame.stored_live_end = frame.stored_live_end.saturating_sub(delta);
                    frame.live_end = frame.live_end.saturating_sub(delta);
                } else if frame.end > index {
                    // Strict `>` here: a removal at `index` leaves a frame
                    // ending exactly at `index` untouched.
                    frame.end = frame.end.saturating_sub(delta);
                    if frame.stored_live_end > index {
                        frame.stored_live_end = frame.stored_live_end.saturating_sub(delta);
                    }
                    if frame.live_end > index {
                        frame.live_end = frame.live_end.saturating_sub(delta);
                    }
                }
            }
        }
    }
307
    /// Completes a plain slot write at `index`: clears the "previous direct
    /// child group" marker (a value/node is not a group), advances the
    /// cursor past the slot, and widens the enclosing frame's bounds.
    /// Returns `index` for caller convenience.
    fn finish_slot_write_at(&self, state: &mut SlotWriteSessionState, index: usize) -> usize {
        self.clear_previous_direct_child_group(state);
        state.cursor = index + 1;
        self.update_group_bounds(state);
        index
    }
314
315 fn current_previous_direct_child_group(
316 &self,
317 state: &SlotWriteSessionState,
318 ) -> Option<AnchorId> {
319 state
320 .group_stack
321 .last()
322 .and_then(|frame| frame.previous_direct_child_group)
323 .or(state.previous_root_group)
324 }
325
326 fn clear_previous_direct_child_group(&self, state: &mut SlotWriteSessionState) {
327 if let Some(frame) = state.group_stack.last_mut() {
328 frame.previous_direct_child_group = None;
329 } else {
330 state.previous_root_group = None;
331 }
332 }
333
334 fn set_previous_direct_child_group(&self, state: &mut SlotWriteSessionState, anchor: AnchorId) {
335 if let Some(frame) = state.group_stack.last_mut() {
336 frame.previous_direct_child_group = Some(anchor);
337 } else {
338 state.previous_root_group = Some(anchor);
339 }
340 }
341
342 fn current_parent_boundary(&self, state: &SlotWriteSessionState) -> PassBoundary {
343 state
344 .group_stack
345 .last()
346 .map(|frame| frame.pass_boundary)
347 .unwrap_or(PassBoundary::Open)
348 }
349
350 fn current_parent_end(&self, state: &SlotWriteSessionState) -> usize {
351 state
352 .group_stack
353 .last()
354 .map(|frame| frame.end.min(self.storage.len()))
355 .unwrap_or(self.storage.len())
356 }
357
358 fn current_parent_anchor(&self, state: &SlotWriteSessionState) -> AnchorId {
359 state
360 .group_stack
361 .last()
362 .map(|frame| self.storage.entry_anchor(frame.start))
363 .unwrap_or(AnchorId::INVALID)
364 }
365
366 fn current_parent_boundary_key(&self, state: &SlotWriteSessionState) -> Option<Key> {
367 state
368 .group_stack
369 .last()
370 .and_then(|frame| frame.pass_boundary.policy().restricted_boundary())
371 }
372
373 fn current_disallow_live_slot_reuse(&self, state: &SlotWriteSessionState) -> bool {
374 state
375 .group_stack
376 .last()
377 .is_some_and(|frame| frame.pass_boundary.policy().disallows_live_value_reuse())
378 }
379
380 fn current_parent_allows_exact_node_reuse(&self, state: &SlotWriteSessionState) -> bool {
381 state
382 .group_stack
383 .last()
384 .map(|frame| frame.pass_boundary.policy().allows_exact_live_reuse())
385 .unwrap_or(true)
386 }
387
    /// Recompose scope stored on the live group at `group_index`, if any.
    fn group_scope_value(&self, group_index: usize) -> Option<&RecomposeScope> {
        self.storage.live_group_scope(group_index)
    }
391
    /// Id of the scope owning the live group at `group_index`, if any.
    fn group_scope_owner(&self, group_index: usize) -> Option<ScopeId> {
        self.group_scope_value(group_index).map(RecomposeScope::id)
    }
395
    /// Whether the live group at `group_index` carries a recompose scope.
    fn group_has_scope(&self, group_index: usize) -> bool {
        self.storage.live_group_has_scope(group_index)
    }
399
    /// Test-only hook: detaches the scope stored on the group at
    /// `group_index`.
    #[cfg(test)]
    fn clear_group_scope(&mut self, group_index: usize) {
        self.storage.clear_group_scope(group_index);
    }
404
    /// Moves `len` entries starting at `source_start` so they begin at
    /// `dest`, keeping the stacked frames in sync with both the removal and
    /// the re-insertion. Returns the index the range actually landed at
    /// (differs from `dest` when the removal shifted it backwards).
    fn move_slot_range(
        &mut self,
        state: &mut SlotWriteSessionState,
        source_start: usize,
        len: usize,
        dest: usize,
    ) -> usize {
        if len == 0 || source_start == dest {
            return source_start;
        }

        let removed = self.storage.remove_entry_range(source_start, len);
        self.shift_group_frames(state, source_start, -(len as isize));
        // A forward move's destination was pulled back by `len` by the
        // removal above; clamp either direction to the shrunken table.
        let insert_at = if dest > source_start {
            dest.saturating_sub(len)
        } else {
            dest
        }
        .min(self.storage.len());
        self.ensure_insert_capacity(insert_at);
        self.shift_group_frames(state, insert_at, len as isize);
        self.storage.insert_entry_range(insert_at, &removed);
        insert_at
    }
429
    /// Pushes a frame for the group whose header sits at `start` and places
    /// the cursor just past it. `scan_len`/`live_len` seed the frame's scan
    /// end and stored live end; the pass-local `live_end` starts right after
    /// the header and regrows as content is written.
    fn enter_group(
        &self,
        state: &mut SlotWriteSessionState,
        start: usize,
        scan_len: usize,
        live_len: usize,
        plan: GroupEntryPlan,
    ) -> StartedGroup {
        state.group_stack.push(GroupFrame {
            start,
            end: start + scan_len,
            stored_live_end: start + live_len,
            live_end: start + 1,
            pass_boundary: plan.pass_boundary,
            previous_direct_child_group: None,
        });
        state.cursor = start + 1;
        self.update_group_bounds(state);
        StartedGroup {
            index: start,
            requires_recompose: plan.requires_recompose,
        }
    }
453
    /// Inserts a brand-new group entry at `index` with a fresh anchor,
    /// retention keyed by `boundary_key`, and the current innermost group as
    /// its parent; stacked frames shift right by one to compensate. Returns
    /// the new group's anchor.
    fn insert_group_entry(
        &mut self,
        state: &mut SlotWriteSessionState,
        index: usize,
        key: Key,
        boundary_key: Key,
        scope: Option<RecomposeScope>,
    ) -> AnchorId {
        self.ensure_insert_capacity(index);
        let anchor = self.storage.allocate_anchor();
        let group = self.storage.alloc_group(
            key,
            GroupRetention::new(boundary_key),
            self.current_parent_anchor(state),
            scope,
        );
        self.shift_group_frames(state, index, 1);
        self.storage.insert_group(index, anchor, group, false);
        anchor
    }
474
    /// Inserts a new (live) value slot at `index`, shifting stacked frames
    /// right by one to compensate.
    fn insert_value_entry<T: 'static>(
        &mut self,
        state: &mut SlotWriteSessionState,
        index: usize,
        value: T,
    ) {
        self.ensure_insert_capacity(index);
        let payload = self.storage.alloc_value(value);
        self.shift_group_frames(state, index, 1);
        self.storage.insert_value(index, payload, false);
    }
486
    /// Overwrites the slot at `index` with a new value payload. When a
    /// hidden slot becomes live, the owner chain's hidden-descendant count
    /// drops by one; any displaced payload goes to the lifecycle queue for
    /// deferred drop.
    fn overwrite_value_entry<T: 'static>(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        owner_index: Option<usize>,
        index: usize,
        value: T,
        hidden: bool,
    ) {
        if !hidden
            && self
                .storage
                .entry_kind(index)
                .is_some_and(EntryKind::is_hidden)
        {
            self.adjust_hidden_descendants(owner_index, -1);
        }
        let payload = self.storage.alloc_value(value);
        if let Some(deferred_drop) = self.storage.overwrite_value(index, payload, hidden) {
            lifecycle.push_drop(deferred_drop);
        }
    }
508
    /// Inserts a new (live) node slot at `index` with a fresh anchor,
    /// shifting stacked frames right by one to compensate.
    fn insert_node_entry(
        &mut self,
        state: &mut SlotWriteSessionState,
        index: usize,
        id: NodeId,
        generation: u32,
    ) {
        self.ensure_insert_capacity(index);
        let anchor = self.storage.allocate_anchor();
        let payload = self.storage.alloc_node(id, generation);
        self.shift_group_frames(state, index, 1);
        self.storage.insert_node(index, anchor, payload, false);
    }
522
    /// Overwrites the slot at `index` with a node payload, keeping the
    /// slot's existing anchor. When a hidden slot becomes live, the owner
    /// chain's hidden-descendant count drops by one; any displaced payload
    /// goes to the lifecycle queue for deferred drop.
    fn overwrite_node_entry(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        owner_index: Option<usize>,
        index: usize,
        id: NodeId,
        generation: u32,
        hidden: bool,
    ) {
        if !hidden
            && self
                .storage
                .entry_kind(index)
                .is_some_and(EntryKind::is_hidden)
        {
            self.adjust_hidden_descendants(owner_index, -1);
        }
        let anchor = self.storage.entry_anchor(index);
        let payload = self.storage.alloc_node(id, generation);
        if let Some(deferred_drop) = self.storage.overwrite_node(index, anchor, payload, hidden) {
            lifecycle.push_drop(deferred_drop);
        }
    }
546
    /// Hides every entry in `[start, end)`: storage side effects are routed
    /// to the lifecycle coordinator, newly hidden top-level group roots are
    /// reparented onto `owner_index` (or detached when `None`), the owner
    /// chain's hidden-descendant counters grow by the number of newly hidden
    /// entries, and each hidden root records its own descendant count.
    /// Returns whether anything was newly hidden.
    fn mark_range_as_hidden(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        start: usize,
        end: usize,
        owner_index: Option<usize>,
    ) -> bool {
        let mut result = self.storage.hide_range(start, end);
        Self::apply_storage_effects(lifecycle, &mut result.effects);
        if result.marked_any {
            let owner_anchor = owner_index
                .map(|index| self.storage.entry_anchor(index))
                .unwrap_or(AnchorId::INVALID);
            for group_index in result.top_level_hidden_group_roots.iter().copied() {
                self.storage
                    .set_group_parent_anchor(group_index, owner_anchor);
            }
            self.adjust_hidden_descendants(owner_index, result.newly_hidden_entries as isize);
            for (group_index, scan_extent) in result.hidden_group_roots {
                // A root's descendant count excludes the root entry itself.
                self.storage
                    .set_group_hidden_descendants(group_index, scan_extent.saturating_sub(1));
            }
        }
        result.marked_any
    }
572
    /// Applies `delta` to the hidden-descendant counter of the group at
    /// `owner_index` and of every ancestor group reachable through valid,
    /// resolvable parent anchors.
    fn adjust_hidden_descendants(&mut self, owner_index: Option<usize>, delta: isize) {
        if delta == 0 {
            return;
        }

        let mut current = owner_index;
        while let Some(index) = current {
            self.storage.adjust_group_hidden_descendants(index, delta);
            current = self
                .storage
                .group_parent_anchor_at(index)
                .filter(|anchor| anchor.is_valid())
                .and_then(|anchor| self.storage.resolve_anchor(anchor));
        }
    }
588
    /// Shrinks the preserved (hidden) tail of the group at `index` by
    /// `removed_scan_extent`, never letting the scan extent fall below the
    /// live extent; both extents are clamped to at least 1 (the header).
    fn shrink_group_preserved_tail(&mut self, index: usize, removed_scan_extent: usize) {
        let live_extent = self.storage.group_live_extent_at(index).max(1);
        let scan_extent = self.storage.group_scan_extent_at(index).max(live_extent);
        let next_scan_extent = scan_extent
            .saturating_sub(removed_scan_extent)
            .max(live_extent);
        self.storage
            .set_group_spans(index, GroupSpans::new(live_extent, next_scan_extent));
    }
598
    /// Walks the direct child entries inside the scan extent of
    /// `parent_index` and rebinds any live or hidden child group whose
    /// parent anchor is stale. Hidden-descendant and preserved-scan
    /// adjustments are batched so each previous parent — and the new parent
    /// — is corrected exactly once at the end.
    fn reparent_direct_child_groups(&mut self, parent_index: usize) {
        let parent_anchor = self.storage.entry_anchor(parent_index);
        let scan_end = parent_index + self.storage.group_scan_extent_at(parent_index).max(1);
        let mut index = parent_index + 1;
        let mut batch = DirectChildReparentBatch::default();

        while index < scan_end {
            let Some(kind) = self.storage.entry_kind(index) else {
                break;
            };
            let extent = self.storage.entry_scan_extent(index).max(1);

            if kind.matches(EntryClass::Group, EntryVisibility::Live)
                || kind.matches(EntryClass::Group, EntryVisibility::Hidden)
            {
                let previous_parent_anchor = self
                    .storage
                    .group_parent_anchor_at(index)
                    .unwrap_or(AnchorId::INVALID);
                if previous_parent_anchor != parent_anchor {
                    // A hidden child group also counts itself as a hidden
                    // descendant of its owner.
                    let hidden_owned = self.storage.group_hidden_descendants_at(index)
                        + usize::from(kind.matches(EntryClass::Group, EntryVisibility::Hidden));
                    // Only a hidden child contributes its whole extent to the
                    // previous parent's preserved tail.
                    let removed_scan_extent =
                        usize::from(kind.matches(EntryClass::Group, EntryVisibility::Hidden))
                            * extent;

                    if let Some(previous_parent_index) = previous_parent_anchor
                        .is_valid()
                        .then(|| self.storage.resolve_anchor(previous_parent_anchor))
                        .flatten()
                    {
                        batch.record_previous_parent(
                            previous_parent_index,
                            hidden_owned,
                            removed_scan_extent,
                        );
                    }

                    batch.add_hidden_descendants_to_new_parent(hidden_owned);
                    self.storage.set_group_parent_anchor(index, parent_anchor);
                }
            }

            // Skip over the child's entire subtree.
            index += extent;
        }

        batch.apply(self, parent_index);
    }
647
648 fn restore_hidden_entry(&mut self, owner_index: Option<usize>, index: usize) {
649 let was_hidden = self
650 .storage
651 .entry_kind(index)
652 .is_some_and(EntryKind::is_hidden);
653 self.storage.restore_hidden_entry(index);
654 if was_hidden {
655 self.adjust_hidden_descendants(owner_index, -1);
656 }
657 }
658
    /// Restores the hidden group at `index` under the current innermost
    /// group. If ownership changes, the previous parent loses the group's
    /// hidden descendants plus the group itself and `scan_extent` slots of
    /// preserved tail, while the new owner inherits the group's hidden
    /// descendants and the group's direct children are rebound; otherwise
    /// only the group's own hidden bit leaves the current owner's count.
    fn restore_hidden_group_for_current_parent(
        &mut self,
        state: &SlotWriteSessionState,
        index: usize,
        scan_extent: usize,
    ) {
        let current_parent_anchor = self.current_parent_anchor(state);
        let current_owner_index = state.group_stack.last().map(|frame| frame.start);
        let previous_parent_anchor = self
            .storage
            .group_parent_anchor_at(index)
            .unwrap_or(AnchorId::INVALID);
        let hidden_descendants = self.storage.group_hidden_descendants_at(index);
        let parent_changed = previous_parent_anchor != current_parent_anchor;

        if parent_changed {
            if let Some(previous_parent_index) = previous_parent_anchor
                .is_valid()
                .then(|| self.storage.resolve_anchor(previous_parent_anchor))
                .flatten()
            {
                // +1: the group itself was a hidden descendant of the old
                // parent.
                self.adjust_hidden_descendants(
                    Some(previous_parent_index),
                    -((hidden_descendants + 1) as isize),
                );
                self.shrink_group_preserved_tail(previous_parent_index, scan_extent);
            }
            if hidden_descendants > 0 {
                self.adjust_hidden_descendants(current_owner_index, hidden_descendants as isize);
            }
            self.storage
                .set_group_parent_anchor(index, current_parent_anchor);
        } else {
            // Same parent: the group simply stops being hidden.
            self.adjust_hidden_descendants(current_owner_index, -1);
        }

        self.storage.restore_hidden_entry(index);
        if parent_changed {
            self.reparent_direct_child_groups(index);
        }
    }
700
701 fn set_group_boundary_key(&mut self, index: usize, boundary_key: Key) {
702 if self.storage.group_retention_at(index).is_some() {
703 self.storage
704 .set_group_retention(index, GroupRetention::new(boundary_key));
705 }
706 }
707
    /// Drains side effects produced by a storage mutation into the lifecycle
    /// coordinator: deactivated scopes and orphaned nodes are handed off for
    /// later processing rather than handled inline.
    fn apply_storage_effects(
        lifecycle: &mut SlotLifecycleCoordinator,
        effects: &mut StorageEffects,
    ) {
        for scope in effects.take_deactivated_scopes() {
            lifecycle.deactivate_scope(&scope);
        }
        for orphaned in effects.take_orphaned_nodes() {
            lifecycle.preserve_orphaned_node(orphaned);
        }
    }
719
    /// Debug view of all groups; row layout is defined by
    /// `SlotReadCursor::collect_group_debug_rows`.
    pub fn debug_dump_groups(&self) -> Vec<(usize, Key, Option<ScopeId>, usize)> {
        SlotReadCursor::new(self).collect_group_debug_rows()
    }
723
    /// Debug view of every slot as `(index, description)` rows.
    pub fn debug_dump_all_slots(&self) -> Vec<(usize, String)> {
        SlotReadCursor::new(self).collect_slot_debug_rows()
    }
727
    /// Per-type tallies of stored values, at most `limit` entries.
    pub fn debug_value_type_counts(&self, limit: usize) -> Vec<SlotValueTypeDebugStat> {
        self.storage.debug_value_type_counts(limit)
    }
731
    /// When the cursor sits on a live group whose key differs from `key`
    /// (the key about to start), hides that whole group — attributed to the
    /// current frame's owner — so its state is preserved for possible later
    /// reuse instead of being destroyed.
    fn retire_conflicting_group_at_cursor(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        state: &SlotWriteSessionState,
        key: Key,
        cursor: usize,
    ) {
        if self.storage.entry_kind(cursor) != Some(EntryKind::live(EntryClass::Group))
            || self.storage.group_key_at(cursor) == Some(key)
        {
            return;
        }

        let old_extent = self.storage.group_scan_extent_at(cursor).max(1);
        let owner_index = state.group_stack.last().map(|frame| frame.start);
        let _ = self.mark_range_as_hidden(lifecycle, cursor, cursor + old_extent, owner_index);
    }
749
    /// If the entry at `cursor` is a hidden group, restores it in place
    /// under the current parent, rewrites its boundary key, and enters it
    /// with a forced recompose. Returns `None` when the cursor holds
    /// anything else.
    fn restore_hidden_group_at_cursor(
        &mut self,
        state: &mut SlotWriteSessionState,
        cursor: usize,
        parent_boundary: PassBoundary,
        scan_extent: usize,
        live_extent: usize,
        boundary_key: Key,
    ) -> Option<StartedGroup> {
        if self.storage.entry_kind(cursor) != Some(EntryKind::hidden(EntryClass::Group)) {
            return None;
        }

        let pass_boundary =
            parent_boundary.transition(BoundaryTransition::Restore { boundary_key });

        self.restore_hidden_group_for_current_parent(state, cursor, scan_extent);
        self.set_group_boundary_key(cursor, boundary_key);
        Some(self.enter_group(
            state,
            cursor,
            scan_extent,
            live_extent,
            GroupEntryPlan {
                pass_boundary,
                // Restored content was hidden and must be recomposed.
                requires_recompose: true,
            },
        ))
    }
779
    /// Attempts to start the group matched by the reuse planner, moving it
    /// to `cursor` if needed. Live candidates must already belong to the
    /// current parent; hidden candidates are restored (and, under a `Fresh`
    /// parent boundary, re-keyed from the parent's inherited boundary). A
    /// candidate that was hidden or relocated must be recomposed rather than
    /// reused as-is.
    ///
    /// NOTE(review): a hidden candidate is restored *before* the zero-extent
    /// bail-out below; if that guard ever fires, the group stays restored
    /// but unentered — confirm this is the intended defensive behavior.
    fn try_restore_matching_group(
        &mut self,
        state: &mut SlotWriteSessionState,
        key: Key,
        cursor: usize,
        parent_boundary: PassBoundary,
        matched: MatchedGroup,
    ) -> Option<StartedGroup> {
        let current_parent_anchor = self.current_parent_anchor(state);
        if !matched.reused_hidden
            && self
                .storage
                .group_parent_anchor_at(matched.index)
                .unwrap_or(AnchorId::INVALID)
                != current_parent_anchor
        {
            // A live group owned by a different parent cannot be stolen.
            return None;
        }

        let restored_boundary_key =
            if matched.reused_hidden && matches!(parent_boundary, PassBoundary::Fresh { .. }) {
                parent_boundary.policy().inherited_boundary(key)
            } else {
                matched.boundary_key
            };

        if matched.reused_hidden {
            self.restore_hidden_group_for_current_parent(state, matched.index, matched.scan_extent);
            self.set_group_boundary_key(matched.index, restored_boundary_key);
        }

        // Hidden or relocated content cannot be trusted as-is.
        let requires_recompose = matched.reused_hidden || matched.index != cursor;
        let actual_extent = matched
            .scan_extent
            .max(1)
            .min(self.storage.len().saturating_sub(matched.index));
        if actual_extent == 0 {
            return None;
        }

        let restored_index = self.move_slot_range(state, matched.index, actual_extent, cursor);
        let pass_boundary = if requires_recompose {
            parent_boundary.transition(BoundaryTransition::Restore {
                boundary_key: restored_boundary_key,
            })
        } else {
            parent_boundary.transition(BoundaryTransition::ReuseLive {
                boundary_key: restored_boundary_key,
            })
        };

        Some(self.enter_group(
            state,
            restored_index,
            actual_extent,
            matched.live_extent.min(actual_extent),
            GroupEntryPlan {
                pass_boundary,
                requires_recompose,
            },
        ))
    }
842
    /// Inserts a brand-new group at the cursor and enters it. The group
    /// starts with extent 1 (just its header) and fresh content never needs
    /// recomposition.
    fn insert_new_group_at_cursor(
        &mut self,
        state: &mut SlotWriteSessionState,
        key: Key,
        pass_boundary: PassBoundary,
    ) -> StartedGroup {
        let boundary_key = pass_boundary.policy().inherited_boundary(key);
        self.insert_group_entry(state, state.cursor, key, boundary_key, None);
        self.enter_group(
            state,
            state.cursor,
            1,
            1,
            GroupEntryPlan {
                pass_boundary,
                requires_recompose: false,
            },
        )
    }
862
    /// Starts the group identified by `key` at the current cursor. The reuse
    /// planner decides between four strategies — reuse the live group at the
    /// cursor, restore a hidden group at the cursor, restore a matching
    /// group found elsewhere, or insert a fresh one — and this method
    /// executes the plan, falling through to a fresh insert when a restore
    /// attempt fails.
    fn start_group_entry(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        state: &mut SlotWriteSessionState,
        key: Key,
    ) -> StartedGroup {
        let cursor = state.cursor;
        let parent_boundary = self.current_parent_boundary(state);

        let plan = ReusePlanner::new(
            &self.storage,
            ReusePlannerContext {
                key,
                cursor,
                parent_end: self.current_parent_end(state),
                parent_boundary,
                current_parent_boundary_key: self.current_parent_boundary_key(state),
                current_parent_anchor: self.current_parent_anchor(state),
                previous_live_sibling_group: self.current_previous_direct_child_group(state),
            },
        )
        .plan();

        match plan {
            StartPlan::ReuseLiveAtCursor {
                scan_extent,
                live_extent,
                boundary_key,
            } => {
                // Cheapest path: the group is already live in place.
                let pass_boundary =
                    parent_boundary.transition(BoundaryTransition::ReuseLive { boundary_key });
                return self.enter_group(
                    state,
                    cursor,
                    scan_extent,
                    live_extent,
                    GroupEntryPlan {
                        pass_boundary,
                        requires_recompose: false,
                    },
                );
            }
            StartPlan::RestoreHiddenAtCursor {
                scan_extent,
                live_extent,
                boundary_key,
            } => {
                // Under a `Fresh` parent boundary the stored key is stale;
                // derive it from the parent instead.
                let boundary_key = if matches!(parent_boundary, PassBoundary::Fresh { .. }) {
                    parent_boundary.policy().inherited_boundary(key)
                } else {
                    boundary_key
                };
                if let Some(restored) = self.restore_hidden_group_at_cursor(
                    state,
                    cursor,
                    parent_boundary,
                    scan_extent,
                    live_extent,
                    boundary_key,
                ) {
                    return restored;
                }
            }
            StartPlan::RestoreMatchingGroup {
                matched_group,
                retire_conflicting_group_at_cursor,
            } => {
                if retire_conflicting_group_at_cursor {
                    self.retire_conflicting_group_at_cursor(lifecycle, state, key, cursor);
                }
                if let Some(restored) = self.try_restore_matching_group(
                    state,
                    key,
                    cursor,
                    parent_boundary,
                    matched_group,
                ) {
                    return restored;
                }
            }
            StartPlan::InsertFresh {
                retire_conflicting_group_at_cursor,
            } => {
                if retire_conflicting_group_at_cursor {
                    self.retire_conflicting_group_at_cursor(lifecycle, state, key, cursor);
                }
            }
        }

        // Fall-through for failed restores and the fresh-insert plan.
        self.insert_new_group_at_cursor(
            state,
            key,
            parent_boundary.transition(BoundaryTransition::InsertFresh {
                boundary_key: parent_boundary.policy().inherited_boundary(key),
            }),
        )
    }
960
    /// Pops the innermost frame, persists its final live/scan spans on the
    /// group entry (each at least 1 — the header), propagates the span
    /// deltas up the ancestor chain, widens the parent frame, and leaves the
    /// cursor plus the "previous direct child" marker just past the
    /// completed group.
    fn end_group_entry(&mut self, state: &mut SlotWriteSessionState) {
        let Some(frame) = state.group_stack.pop() else {
            return;
        };

        let completed_anchor = self.storage.entry_anchor(frame.start);
        let scan_end = frame.end;
        let live_end = frame.live_end.max(frame.start + 1);
        let old_live_extent = self.storage.group_live_extent_at(frame.start).max(1);
        let old_scan_extent = self
            .storage
            .group_scan_extent_at(frame.start)
            .max(old_live_extent);
        let new_live_extent = live_end.saturating_sub(frame.start).max(1);
        let new_scan_extent = scan_end
            .saturating_sub(frame.start)
            .max(new_live_extent)
            .max(1);

        self.storage.set_group_spans(
            frame.start,
            GroupSpans::new(new_live_extent, new_scan_extent),
        );
        self.propagate_group_span_delta(
            frame.start,
            new_live_extent as isize - old_live_extent as isize,
            new_scan_extent as isize - old_scan_extent as isize,
        );
        if let Some(parent) = state.group_stack.last_mut() {
            if parent.end < scan_end {
                parent.end = scan_end;
            }
            if parent.live_end < live_end {
                parent.live_end = live_end;
            }
        }
        state.cursor = scan_end.min(self.storage.len());
        self.set_previous_direct_child_group(state, completed_anchor);
    }
1000
    /// Pushes a frame for recomposing the existing group at `index` without
    /// rewriting its header; bails out when either stored extent is zero
    /// (nothing to recompose). The frame runs under an `Open` boundary.
    fn start_recompose_entry(&self, state: &mut SlotWriteSessionState, index: usize) {
        let scan_extent = self.storage.group_scan_extent_at(index);
        let live_extent = self.storage.group_live_extent_at(index);
        if scan_extent == 0 || live_extent == 0 {
            return;
        }
        state.group_stack.push(GroupFrame {
            start: index,
            end: index + scan_extent,
            stored_live_end: index + live_extent,
            live_end: index + 1,
            pass_boundary: PassBoundary::Open,
            previous_direct_child_group: None,
        });
        state.cursor = index + 1;
    }
1020
    /// Finishes recomposing a group: content between the final cursor and
    /// the frame's recorded end was not revisited and gets hidden
    /// (attributed to this group), then final spans are persisted and
    /// propagated like `end_group_entry`.
    fn end_recompose_entry(
        &mut self,
        lifecycle: &mut SlotLifecycleCoordinator,
        state: &mut SlotWriteSessionState,
    ) {
        let Some(frame) = state.group_stack.pop() else {
            return;
        };

        let completed_anchor = self.storage.entry_anchor(frame.start);
        let actual_end = state.cursor;
        if actual_end < frame.end {
            // Anything the pass did not reach is preserved, not dropped.
            let _ = self.mark_range_as_hidden(lifecycle, actual_end, frame.end, Some(frame.start));
        }

        let actual_extent = frame.live_end.saturating_sub(frame.start).max(1);
        let old_live_extent = self.storage.group_live_extent_at(frame.start).max(1);
        let old_scan_extent = self
            .storage
            .group_scan_extent_at(frame.start)
            .max(old_live_extent);
        let new_scan_extent = frame
            .end
            .saturating_sub(frame.start)
            .max(actual_extent)
            .max(1);
        self.storage
            .set_group_spans(frame.start, GroupSpans::new(actual_extent, new_scan_extent));
        self.propagate_group_span_delta(
            frame.start,
            actual_extent as isize - old_live_extent as isize,
            new_scan_extent as isize - old_scan_extent as isize,
        );
        if let Some(parent) = state.group_stack.last_mut() {
            if parent.end < frame.end {
                parent.end = frame.end;
            }
            if parent.live_end < frame.live_end {
                parent.live_end = frame.live_end;
            }
        }
        state.cursor = frame.end.min(self.storage.len());
        self.set_previous_direct_child_group(state, completed_anchor);
    }
1065
    /// Walks up the parent-anchor chain from `child_start`, applying the
    /// live and scan deltas to each ancestor's spans (each clamped to at
    /// least 1, with saturating signed arithmetic).
    fn propagate_group_span_delta(
        &mut self,
        child_start: usize,
        live_delta: isize,
        scan_delta: isize,
    ) {
        if live_delta == 0 && scan_delta == 0 {
            return;
        }

        let mut current = child_start;
        while let Some(parent_anchor) = self.storage.group_parent_anchor_at(current) {
            if !parent_anchor.is_valid() {
                break;
            }
            let Some(parent_index) = self.storage.resolve_anchor(parent_anchor) else {
                break;
            };
            let parent_scan_extent = self.storage.group_scan_extent_at(parent_index).max(1);
            let parent_live_extent = self.storage.group_live_extent_at(parent_index).max(1);
            let next_live_extent = parent_live_extent.saturating_add_signed(live_delta).max(1);
            let next_scan_extent = parent_scan_extent.saturating_add_signed(scan_delta).max(1);
            self.storage.set_group_spans(
                parent_index,
                GroupSpans::new(next_live_extent, next_scan_extent),
            );
            current = parent_index;
        }
    }
1095
1096 fn skip_current(&mut self, state: &mut SlotWriteSessionState) {
1097 if let Some((live_end, scan_end)) = state
1098 .group_stack
1099 .last()
1100 .map(|frame| (frame.stored_live_end, frame.end))
1101 {
1102 if let Some(frame) = state.group_stack.last() {
1103 self.reparent_direct_child_groups(frame.start);
1104 }
1105 self.update_group_live_bounds_to(state, live_end.min(self.storage.len()));
1106 state.cursor = scan_end.min(self.storage.len());
1107 }
1108 }
1109
1110 fn node_ids_in_current_group(&self, state: &SlotWriteSessionState) -> Vec<NodeId> {
1111 let Some(frame) = state.group_stack.last() else {
1112 return Vec::new();
1113 };
1114
1115 SlotReadCursor::new(self).collect_node_ids(frame.start, frame.end.min(self.storage.len()))
1116 }
1117
    /// Test-only: collects recompose scopes of descendants inside the
    /// innermost open group (excluding the group header itself, hence the
    /// `start + 1`), relative to `current_scope`.
    #[cfg(test)]
    fn descendant_scopes_in_current_group(
        &self,
        state: &SlotWriteSessionState,
        current_scope: ScopeId,
    ) -> Vec<RecomposeScope> {
        let Some(frame) = state.group_stack.last() else {
            return Vec::new();
        };

        SlotReadCursor::new(self).collect_descendant_scopes(
            frame.start.saturating_add(1),
            frame.end.min(self.storage.len()),
            current_scope,
        )
    }
1134
1135 fn preserved_hidden_node_at_cursor(
1136 &self,
1137 state: &SlotWriteSessionState,
1138 cursor: usize,
1139 ) -> Option<(NodeId, u32)> {
1140 let (kind, id, generation) = self.storage.node_at(cursor)?;
1141 if !kind.matches(EntryClass::Node, EntryVisibility::Hidden)
1142 || !self.current_parent_allows_exact_node_reuse(state)
1143 {
1144 return None;
1145 }
1146 Some((id, generation))
1147 }
1148
    /// Returns a shared reference to the value stored at slot `idx`.
    ///
    /// `idx` is a slot index such as the one returned by `use_value_slot`;
    /// type/occupancy checking is delegated to `SlotStorage::read_value`.
    pub(crate) fn read_value<T: 'static>(&self, idx: usize) -> &T {
        self.storage.read_value(idx)
    }
1152
    /// Returns a mutable reference to the value stored at slot `idx`.
    /// Delegates to `SlotStorage::read_value_mut`.
    pub(crate) fn read_value_mut<T: 'static>(&mut self, idx: usize) -> &mut T {
        self.storage.read_value_mut(idx)
    }
1156
    /// Stores `value` into the slot at `idx`, delegating to
    /// `SlotStorage::write_value`.
    pub(crate) fn write_value<T: 'static>(&mut self, idx: usize, value: T) {
        self.storage.write_value(idx, value);
    }
1160
    /// Reports whether the node referenced by `orphaned` is still active in
    /// the table, sits in a preserved gap, or is missing entirely.
    pub(crate) fn orphaned_node_state(&self, orphaned: OrphanedNode) -> NodeSlotState {
        self.storage.orphaned_node_state(orphaned)
    }
1164
    /// Compacts backing storage using the table's tuning constants and
    /// returns the deferred drops produced by compaction. Callers in this
    /// file hand the result to `SlotLifecycleCoordinator::dispose_drops_reverse`.
    pub(crate) fn compact(&mut self) -> Vec<DeferredDrop> {
        self.storage.compact(
            Self::EAGER_COMPACT_SLOT_LEN,
            Self::FRACTIONAL_COMPACT_GAP_THRESHOLD,
            Self::FRACTIONAL_COMPACT_RATIO_DIVISOR,
        )
    }
1172
    /// Runs the slot-table consistency checks from the `verifier` module,
    /// optionally cross-checking against the lifecycle coordinator's state.
    pub(crate) fn debug_verify(&self, lifecycle: Option<&SlotLifecycleCoordinator>) {
        debug_verify_slot_table(self, lifecycle);
    }
1176
    /// Drops every slot in reverse order, returning the deferred drops for
    /// the caller to dispose (see `dispose_slot_table`).
    pub(crate) fn drop_all_reverse(&mut self) -> Vec<DeferredDrop> {
        self.storage.drop_all_reverse()
    }
1180}
1181
1182impl SlotLifecycleCoordinator {
1183 pub(crate) fn fill_debug_stats(&self, stats: &mut SlotTableDebugStats) {
1184 stats.pending_slot_drops_len = self.pending_drops_len();
1185 stats.pending_slot_drops_cap = self.pending_drops_capacity();
1186 stats.orphaned_node_ids_len = self.orphaned_node_ids_len();
1187 stats.orphaned_node_ids_cap = self.orphaned_node_ids_capacity();
1188 }
1189
1190 pub(crate) fn drain_orphaned_nodes(
1191 &mut self,
1192 table: &SlotTable,
1193 applier: &mut dyn Applier,
1194 ) -> bool {
1195 let orphaned = self.drain_orphaned_node_ids();
1196 if orphaned.is_empty() {
1197 return false;
1198 }
1199
1200 let mut removed_any = false;
1201 for orphaned in orphaned {
1202 match table.orphaned_node_state(orphaned) {
1203 NodeSlotState::Active => continue,
1204 NodeSlotState::PreservedGap => {
1205 self.preserve_orphaned_node(orphaned);
1206 continue;
1207 }
1208 NodeSlotState::Missing => {}
1209 }
1210 if applier.node_generation(orphaned.id) != orphaned.generation {
1211 continue;
1212 }
1213 removed_any = true;
1214 let parent_id = applier
1215 .get_mut(orphaned.id)
1216 .ok()
1217 .and_then(|node| node.parent());
1218 if let Some(parent_id) = parent_id {
1219 let _ = remove_child_and_cleanup_now(applier, parent_id, orphaned.id);
1220 continue;
1221 }
1222 if let Ok(node) = applier.get_mut(orphaned.id) {
1223 node.on_removed_from_parent();
1224 node.unmount();
1225 }
1226 let _ = applier.remove(orphaned.id);
1227 }
1228
1229 removed_any
1230 }
1231
1232 pub(crate) fn dispose_slot_table(&mut self, table: &mut SlotTable) {
1233 self.flush_pending_drops();
1234 let removed = table.drop_all_reverse();
1235 self.dispose_drops_reverse(removed);
1236 self.clear_orphaned_nodes();
1237 }
1238}
1239
1240impl<'a> SlotReadCursor<'a> {
1241 fn new(table: &'a SlotTable) -> Self {
1242 Self { table }
1243 }
1244
1245 fn collect_node_ids(&self, start: usize, end: usize) -> Vec<NodeId> {
1246 let mut ids = Vec::new();
1247 for index in start..end {
1248 if let Some((kind, id, _)) = self.table.storage.node_at(index) {
1249 if kind.matches(EntryClass::Node, EntryVisibility::Live) {
1250 ids.push(id);
1251 }
1252 }
1253 }
1254 ids
1255 }
1256
1257 fn collect_group_debug_rows(&self) -> Vec<(usize, Key, Option<ScopeId>, usize)> {
1258 let mut groups = Vec::new();
1259 for index in 0..self.table.storage.len() {
1260 if self.table.storage.entry_kind(index) != Some(EntryKind::live(EntryClass::Group)) {
1261 continue;
1262 }
1263 let Some(group) = self.table.storage.group_snapshot_at(index) else {
1264 continue;
1265 };
1266 groups.push((
1267 index,
1268 group.key,
1269 group.scope.as_ref().map(RecomposeScope::id),
1270 group.spans.live_extent(),
1271 ));
1272 }
1273 groups
1274 }
1275
1276 fn collect_slot_debug_rows(&self) -> Vec<(usize, String)> {
1277 let mut slots = Vec::with_capacity(self.table.storage.len());
1278 for index in 0..self.table.storage.len() {
1279 let Some(entry) = self.table.storage.entry(index) else {
1280 continue;
1281 };
1282 let desc = match entry.kind {
1283 kind if kind.matches(EntryClass::Group, EntryVisibility::Live) => {
1284 let group = self
1285 .table
1286 .storage
1287 .group_snapshot_at(index)
1288 .expect("live group snapshot");
1289 format!(
1290 "Group(key={:?}, scope={:?}, has_scope={}, live_len={}, scan_len={})",
1291 group.key,
1292 group.scope.as_ref().map(RecomposeScope::id),
1293 self.table.group_has_scope(index),
1294 group.spans.live_extent(),
1295 group.spans.scan_extent(),
1296 )
1297 }
1298 kind if kind.matches(EntryClass::Value, EntryVisibility::Live) => {
1299 "Value".to_string()
1300 }
1301 kind if kind.matches(EntryClass::Node, EntryVisibility::Live) => {
1302 let (_, id, _) = self.table.storage.node_at(index).expect("live node");
1303 format!("Node(id={id:?})")
1304 }
1305 kind if kind.matches(EntryClass::Group, EntryVisibility::Hidden) => {
1306 let group = self
1307 .table
1308 .storage
1309 .group_snapshot_at(index)
1310 .expect("hidden group snapshot");
1311 format!(
1312 "HiddenGroup(key={:?}, scope={:?}, live_len={}, scan_len={})",
1313 group.key,
1314 group.scope.as_ref().map(RecomposeScope::id),
1315 group.spans.live_extent(),
1316 group.spans.scan_extent(),
1317 )
1318 }
1319 kind if kind.matches(EntryClass::Value, EntryVisibility::Hidden) => {
1320 "HiddenValue".to_string()
1321 }
1322 kind if kind.matches(EntryClass::Node, EntryVisibility::Hidden) => {
1323 let (_, id, generation) =
1324 self.table.storage.node_at(index).expect("hidden node");
1325 format!("HiddenNode(id={id:?}, gen={generation})")
1326 }
1327 EntryKind::Occupied { .. } => "Occupied".to_string(),
1328 EntryKind::Unused => "Unused".to_string(),
1329 };
1330 slots.push((index, desc));
1331 }
1332 slots
1333 }
1334
1335 #[cfg(test)]
1336 fn collect_descendant_scopes(
1337 &self,
1338 start: usize,
1339 end: usize,
1340 current_scope: ScopeId,
1341 ) -> Vec<RecomposeScope> {
1342 let mut scopes = Vec::new();
1343 let mut seen: crate::collections::map::HashMap<ScopeId, ()> =
1344 crate::collections::map::HashMap::default();
1345
1346 for index in start..end {
1347 let Some(scope) = self.table.storage.live_group_scope(index).cloned() else {
1348 continue;
1349 };
1350 if scope.id() == current_scope || seen.insert(scope.id(), ()).is_some() {
1351 continue;
1352 }
1353 scopes.push(scope);
1354 }
1355
1356 scopes
1357 }
1358}
1359
impl SlotWriteSession<'_> {
    /// Begins a recompose pass at the group resolved from `anchor`, returning
    /// its `GroupId` only when the group's scope owner matches `owner`.
    /// Returns `None` when the anchor no longer resolves or ownership differs.
    ///
    /// NOTE(review): "recranpose" looks like a typo for "recompose" — rename
    /// together with all call sites in a follow-up change.
    pub(crate) fn start_recranpose_at_anchor(
        &mut self,
        anchor: AnchorId,
        owner: ScopeId,
    ) -> Option<GroupId> {
        let index = self.table.storage.resolve_anchor(anchor)?;
        if self.table.group_scope_owner(index) == Some(owner) {
            self.table.start_recompose_entry(self.state, index);
            Some(GroupId(index))
        } else {
            None
        }
    }

    /// Starts (or re-enters) a keyed group and guarantees it carries a
    /// `RecomposeScope`: an existing scope on the group is reused; otherwise
    /// `init_scope` is invoked once and the result is stored on the group.
    pub(crate) fn begin_scoped_group(
        &mut self,
        key: Key,
        init_scope: impl FnOnce() -> RecomposeScope,
    ) -> StartScopedGroup<GroupId> {
        let started = self
            .table
            .start_group_entry(self.lifecycle, self.state, key);
        let scope =
            if let Some(existing_scope) = self.table.group_scope_value(started.index).cloned() {
                existing_scope
            } else {
                let scope = init_scope();
                self.table
                    .storage
                    .set_group_scope(started.index, Some(scope.clone()));
                scope
            };
        StartScopedGroup {
            group: GroupId(started.index),
            anchor: self.table.storage.entry_anchor(started.index),
            scope,
            requires_recompose: started.requires_recompose,
        }
    }

    /// Closes the innermost open group for a compose pass.
    pub(crate) fn end_group(&mut self) {
        self.table.end_group_entry(self.state);
    }

    /// Closes the innermost open group for a recompose pass.
    pub(crate) fn end_recompose(&mut self) {
        self.table.end_recompose_entry(self.lifecycle, self.state);
    }

    /// Claims the value slot at the current cursor and returns its index,
    /// preferring reuse over allocation:
    /// - a live slot already holding a `T` is reused as-is;
    /// - a hidden slot holding a `T` is restored, then reused;
    /// - an occupied value slot of another type is overwritten with `init()`;
    /// - otherwise a fresh slot holding `init()` is inserted at the cursor.
    /// The first three cases are additionally gated by the current group's
    /// disallow-live-reuse policy.
    pub(crate) fn use_value_slot<T: 'static>(&mut self, init: impl FnOnce() -> T) -> usize {
        let cursor = self.state.cursor;
        let disallow_live_reuse = self.table.current_disallow_live_slot_reuse(self.state);
        // Start index of the innermost open group (if any); used to attribute
        // restored/overwritten entries to their owning group.
        let owner_index = self.state.group_stack.last().map(|frame| frame.start);

        match self.table.storage.entry_kind(cursor) {
            Some(kind)
                if kind.matches(EntryClass::Value, EntryVisibility::Live)
                    && !disallow_live_reuse
                    && self.table.storage.value_matches_type::<T>(cursor) =>
            {
                self.table.finish_slot_write_at(self.state, cursor)
            }
            Some(kind)
                if kind.matches(EntryClass::Value, EntryVisibility::Hidden)
                    && !disallow_live_reuse
                    && self.table.storage.value_matches_type::<T>(cursor) =>
            {
                self.table.restore_hidden_entry(owner_index, cursor);
                self.table.finish_slot_write_at(self.state, cursor)
            }
            Some(EntryKind::Occupied {
                class: EntryClass::Value,
                ..
            }) if !disallow_live_reuse => {
                self.table.overwrite_value_entry(
                    self.lifecycle,
                    owner_index,
                    cursor,
                    init(),
                    false,
                );
                self.table.finish_slot_write_at(self.state, cursor)
            }
            _ => {
                self.table.insert_value_entry(self.state, cursor, init());
                self.table.finish_slot_write_at(self.state, cursor)
            }
        }
    }

    /// Stores `Owned::new(init())` in a value slot on first visit and returns
    /// a clone of the stored handle on every visit.
    pub(crate) fn remember<T: 'static>(&mut self, init: impl FnOnce() -> T) -> Owned<T> {
        let index = self.use_value_slot(|| Owned::new(init()));
        self.table.read_value::<Owned<T>>(index).clone()
    }

    /// Records the node `(id, generation)` at the current cursor, mirroring
    /// `use_value_slot`'s reuse ladder for node entries:
    /// - same live node (id and generation) → just advance past it;
    /// - same hidden node → restore when the parent allows exact node reuse,
    ///   otherwise overwrite (when live reuse is allowed) or insert fresh;
    /// - any other occupied node slot → overwrite when live reuse is allowed;
    /// - otherwise insert a fresh node entry at the cursor.
    pub(crate) fn record_node(&mut self, id: NodeId, generation: u32) {
        let cursor = self.state.cursor;
        // Start index of the innermost open group (if any).
        let owner_index = self.state.group_stack.last().map(|frame| frame.start);
        match self.table.storage.node_at(cursor) {
            Some((kind, existing, existing_generation))
                if kind.matches(EntryClass::Node, EntryVisibility::Live)
                    && existing == id
                    && existing_generation == generation =>
            {
                let _ = self.table.finish_slot_write_at(self.state, cursor);
            }
            Some((kind, existing, existing_generation))
                if kind.matches(EntryClass::Node, EntryVisibility::Hidden)
                    && existing == id
                    && existing_generation == generation =>
            {
                if self
                    .table
                    .current_parent_allows_exact_node_reuse(self.state)
                {
                    self.table.restore_hidden_entry(owner_index, cursor);
                    let _ = self.table.finish_slot_write_at(self.state, cursor);
                } else if !self.table.current_disallow_live_slot_reuse(self.state) {
                    self.table.overwrite_node_entry(
                        self.lifecycle,
                        owner_index,
                        cursor,
                        id,
                        generation,
                        false,
                    );
                    let _ = self.table.finish_slot_write_at(self.state, cursor);
                } else {
                    self.table
                        .insert_node_entry(self.state, cursor, id, generation);
                    let _ = self.table.finish_slot_write_at(self.state, cursor);
                }
            }
            Some((
                EntryKind::Occupied {
                    class: EntryClass::Node,
                    ..
                },
                _,
                _,
            )) if !self.table.current_disallow_live_slot_reuse(self.state) => {
                self.table.overwrite_node_entry(
                    self.lifecycle,
                    owner_index,
                    cursor,
                    id,
                    generation,
                    false,
                );
                let _ = self.table.finish_slot_write_at(self.state, cursor);
            }
            _ => {
                self.table
                    .insert_node_entry(self.state, cursor, id, generation);
                let _ = self.table.finish_slot_write_at(self.state, cursor);
            }
        }
    }

    /// Returns the `(id, generation)` of the node at the cursor without
    /// advancing: live nodes always match; hidden nodes match only when the
    /// current parent allows exact node reuse.
    pub(crate) fn peek_node(&self) -> Option<(NodeId, u32)> {
        match self.table.storage.node_at(self.state.cursor) {
            Some((kind, id, generation))
                if kind.matches(EntryClass::Node, EntryVisibility::Live) =>
            {
                Some((id, generation))
            }
            Some((kind, id, generation))
                if kind.matches(EntryClass::Node, EntryVisibility::Hidden)
                    && self
                        .table
                        .current_parent_allows_exact_node_reuse(self.state) =>
            {
                Some((id, generation))
            }
            _ => None,
        }
    }

    /// Advances the cursor after a node was consumed via `peek_node`: a
    /// reusable hidden node at the cursor is restored first, then
    /// direct-child-group tracking is cleared and group bounds are updated.
    pub(crate) fn advance_after_node_read(&mut self) {
        if self
            .table
            .preserved_hidden_node_at_cursor(self.state, self.state.cursor)
            .is_some()
        {
            let owner_index = self.state.group_stack.last().map(|frame| frame.start);
            self.table
                .restore_hidden_entry(owner_index, self.state.cursor);
        }
        self.table.clear_previous_direct_child_group(self.state);
        self.state.cursor += 1;
        self.table.update_group_bounds(self.state);
    }

    /// Skips past the innermost open group without replaying its contents.
    pub(crate) fn skip_current_group(&mut self) {
        self.table.skip_current(self.state);
    }

    /// Returns the ids of all live nodes inside the innermost open group.
    pub(crate) fn nodes_in_current_group(&self) -> Vec<NodeId> {
        self.table.node_ids_in_current_group(self.state)
    }

    /// Hides any slots between the cursor and the end of the innermost open
    /// group — or, when no group is open, between the cursor and the end of
    /// the table. Returns whether anything was hidden.
    pub(crate) fn finalize_current_group(&mut self) -> bool {
        let mut marked = false;
        if let Some((owner_start, group_end)) = self
            .state
            .group_stack
            .last()
            .map(|frame| (frame.start, frame.end.min(self.table.storage.len())))
        {
            if self.state.cursor < group_end
                && self.table.mark_range_as_hidden(
                    self.lifecycle,
                    self.state.cursor,
                    group_end,
                    Some(owner_start),
                )
            {
                marked = true;
            }
        } else if self.state.cursor < self.table.storage.len()
            && self.table.mark_range_as_hidden(
                self.lifecycle,
                self.state.cursor,
                self.table.storage.len(),
                None,
            )
        {
            marked = true;
        }

        marked
    }

    /// Unwinds every still-open group, finalizing and closing each according
    /// to the session's pass mode. In compose mode, one extra finalize runs
    /// with the stack empty to hide the table's remaining tail past the
    /// cursor. Returns whether anything was hidden during the pass.
    pub(crate) fn finalize_pass(&mut self) -> bool {
        let mut marked_hidden = false;
        while !self.state.group_stack.is_empty() {
            marked_hidden |= self.finalize_current_group();
            match self.mode {
                SlotPassMode::Compose => self.table.end_group_entry(self.state),
                SlotPassMode::Recompose => {
                    self.table.end_recompose_entry(self.lifecycle, self.state)
                }
            }
        }
        match self.mode {
            SlotPassMode::Compose => marked_hidden | self.finalize_current_group(),
            SlotPassMode::Recompose => marked_hidden,
        }
    }
}
1610
/// Test-only: starts a plain (scope-less) group with `key` and returns its id.
///
/// NOTE(review): uses a throwaway `SlotLifecycleCoordinator`, so any deferred
/// drops produced by `start_group_entry` are discarded — acceptable only in
/// tests.
#[cfg(test)]
pub(crate) fn begin_group_for_test(
    table: &mut SlotTable,
    state: &mut SlotWriteSessionState,
    key: Key,
) -> GroupId {
    let mut lifecycle = SlotLifecycleCoordinator::default();
    let started = table.start_group_entry(&mut lifecycle, state, key);
    // Drop any scope attached to the freshly started group.
    table.clear_group_scope(started.index);
    GroupId(started.index)
}
1622
/// Test-only: hides slots `start..end` on behalf of `owner_index`, returning
/// whether anything was hidden (delegates to `mark_range_as_hidden`).
#[cfg(test)]
pub(crate) fn hide_range_for_test(
    table: &mut SlotTable,
    lifecycle: &mut SlotLifecycleCoordinator,
    start: usize,
    end: usize,
    owner_index: Option<usize>,
) -> bool {
    table.mark_range_as_hidden(lifecycle, start, end, owner_index)
}
1633
/// Test-only: queues `(id, generation)` as an orphaned node with an invalid
/// anchor on the lifecycle coordinator.
#[cfg(test)]
pub(crate) fn queue_orphaned_node_for_test(
    lifecycle: &mut SlotLifecycleCoordinator,
    id: NodeId,
    generation: u32,
) {
    lifecycle.queue_orphaned_node(OrphanedNode::new(id, generation, AnchorId::INVALID));
}
1642
/// Test-only: drains and returns the coordinator's queued orphaned nodes.
#[cfg(test)]
pub(crate) fn drain_orphaned_node_ids_for_test(
    lifecycle: &mut SlotLifecycleCoordinator,
) -> Vec<OrphanedNode> {
    lifecycle.drain_orphaned_node_ids()
}
1649
/// Test-only: returns a snapshot of the orphaned nodes the coordinator has
/// preserved (see `NodeSlotState::PreservedGap` handling in
/// `drain_orphaned_nodes`).
#[cfg(test)]
pub(crate) fn preserved_orphaned_node_ids_for_test(
    lifecycle: &SlotLifecycleCoordinator,
) -> Vec<OrphanedNode> {
    lifecycle.preserved_orphaned_node_ids_snapshot()
}
1656
/// Test-only: compacts `table`, disposes the resulting deferred drops through
/// `lifecycle`, and trims the orphaned-node capacity to 32 entries.
#[cfg(test)]
pub(crate) fn compact_for_test(table: &mut SlotTable, lifecycle: &mut SlotLifecycleCoordinator) {
    let removed = table.compact();
    lifecycle.dispose_drops_reverse(removed);
    lifecycle.trim_orphaned_node_capacity(32);
}
1663
impl Default for SlotTable {
    /// Equivalent to [`SlotTable::new`].
    fn default() -> Self {
        Self::new()
    }
}