1#![allow(clippy::collapsible_match)]
11
12mod anchor_registry;
13mod pending;
14
15use crate::{
16 collections::map::HashMap,
17 slot_storage::{GroupId, StartGroup},
18 AnchorId, Key, NodeId, Owned, RecomposeScope, ScopeId,
19};
20use anchor_registry::{AnchorRegistry, GapMetadata};
21pub use pending::OrphanedNode;
22use pending::{OrphanedNodeIds, PendingSlotDrops};
23use std::any::{Any, TypeId};
24
/// An `Option<ScopeId>` packed into a single `u64`; `0` encodes `None`.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
struct PackedScopeId(u64);
27
28impl PackedScopeId {
29 fn new(scope: Option<ScopeId>) -> Self {
30 let packed = scope.map(|scope| scope as u64).unwrap_or(0);
31 Self(packed)
32 }
33
34 fn to_option(self) -> Option<ScopeId> {
35 match self.0 {
36 0 => None,
37 scope => Some(scope as ScopeId),
38 }
39 }
40}
41
/// Packs a slot span into the `u32` stored on `Slot::Group`; panics if the
/// span does not fit.
fn pack_slot_len(len: usize) -> u32 {
    u32::try_from(len).expect("slot length overflow")
}
45
/// Widens a stored group span back to `usize` (lossless on >=32-bit targets).
fn unpack_slot_len(len: u32) -> usize {
    len as usize
}
49
/// Gap-buffer-style slot storage: groups, type-erased values, node
/// references, and reusable gap cells, addressed through stable anchors.
pub struct SlotTable {
    // Flat storage; a Group slot's `len` spans itself plus its children.
    slots: Vec<Slot>,
    // Slots displaced during composition; dropped in bulk by the flush.
    pending_slot_drops: PendingSlotDrops,
    // Next slot index visited by the composition pass.
    cursor: usize,
    // Stack of currently-open groups.
    group_stack: Vec<GroupFrame>,
    // Maps anchor ids to positions, plus metadata remembered for gap slots.
    anchor_registry: AnchorRegistry,
    // Flag set by `start` and consumed by `take_last_start_was_gap`.
    last_start_was_gap: bool,
    // Node ids whose slots were gapped; surfaced as `OrphanedNode`s.
    orphaned_node_ids: OrphanedNodeIds,
    // Set when slots were gapped; presumably read by a compaction pass that
    // is not visible in this chunk — confirm.
    needs_compact: bool,
}
64
/// Length/capacity snapshot of every internal container, for debugging and
/// memory accounting; anchor/gap fields are filled by the anchor registry.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct SlotTableDebugStats {
    pub slots_len: usize,
    pub slots_cap: usize,
    pub pending_slot_drops_len: usize,
    pub pending_slot_drops_cap: usize,
    pub anchors_len: usize,
    pub anchors_cap: usize,
    pub gap_metadata_len: usize,
    pub gap_metadata_cap: usize,
    pub free_anchor_ids_len: usize,
    pub free_anchor_ids_cap: usize,
    pub group_stack_len: usize,
    pub group_stack_cap: usize,
    pub orphaned_node_ids_len: usize,
    pub orphaned_node_ids_cap: usize,
}
82
/// Per-concrete-type tally of value slots (count and summed inline payload
/// size), produced by `debug_value_type_counts`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SlotValueTypeDebugStat {
    pub type_name: &'static str,
    pub count: usize,
    pub inline_payload_bytes: usize,
}
89
/// One cell of the slot table.
enum Slot {
    /// Start of a group; its span covers `len` slots including this one.
    Group {
        key: Key,
        anchor: AnchorId,
        // Total span, packed (see `pack_slot_len`/`unpack_slot_len`).
        len: u32,
        // Owning recompose scope, packed (0 = none).
        scope: PackedScopeId,
        // Key used to restrict gap reuse across group boundaries.
        boundary_key: Key,
        // True once children of this group have been turned into gaps.
        has_gap_children: bool,
    },
    /// Type-erased user value.
    Value {
        anchor: AnchorId,
        data: Box<dyn SlotValue>,
    },
    /// Dedicated variant for `RecomposeScope` values (see `make_value_slot`).
    ScopeValue {
        anchor: AnchorId,
        scope: RecomposeScope,
    },
    /// Reference to a UI node, with a generation counter.
    Node {
        anchor: AnchorId,
        id: NodeId,
        gen: u32,
    },
    /// Unused cell; a valid `anchor` may carry preserved group/node metadata
    /// in the anchor registry, `AnchorId::INVALID` means a plain free cell.
    Gap { anchor: AnchorId },
}
118
119struct GroupFrame {
120 key: Key,
121 start: usize, end: usize, child_reuse: ChildReusePolicy,
124 fresh_body: bool,
125 gap_boundary_key: Key,
126}
127
/// Per-group state carried through a compaction pass.
/// NOTE(review): not referenced anywhere in this chunk; field meanings are
/// inferred from names — confirm against the compaction code.
struct GroupCompactionFrame {
    index: usize,
    end: usize,
    kept_before: usize,
}
133
/// Governs how children of the current group may reuse existing slots.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ChildReusePolicy {
    /// Unrestricted reuse of live slots and gap-preserved state.
    Normal,
    /// Parent was restored from a gap: reuse is restricted.
    ParentRestoredFromGap,
    /// Parent is a brand-new insertion: no exact live-slot reuse.
    FreshInsert,
}

impl ChildReusePolicy {
    /// Anything other than `Normal` restricts how children are reused.
    fn requires_restricted_reuse(self) -> bool {
        self != Self::Normal
    }

    /// Only `FreshInsert` forbids reusing an exactly-matching live slot.
    fn allows_exact_live_reuse(self) -> bool {
        self != Self::FreshInsert
    }
}

/// Policy handed to children of a group restored from a gap: fresh inserts
/// stay fresh, everything else becomes `ParentRestoredFromGap`.
fn restored_child_reuse(parent_reuse: ChildReusePolicy) -> ChildReusePolicy {
    match parent_reuse {
        ChildReusePolicy::FreshInsert => ChildReusePolicy::FreshInsert,
        ChildReusePolicy::Normal | ChildReusePolicy::ParentRestoredFromGap => {
            ChildReusePolicy::ParentRestoredFromGap
        }
    }
}
158
/// Type-erased storage for arbitrary payloads kept in `Value` slots, with
/// debug hooks exposing the concrete type name and inline size.
trait SlotValue: Any {
    fn as_any(&self) -> &dyn Any;
    fn as_any_mut(&mut self) -> &mut dyn Any;
    /// Moves the payload into a freshly allocated `Box<dyn SlotValue>`.
    fn rebox(self: Box<Self>) -> Box<dyn SlotValue>;
    fn debug_type_name(&self) -> &'static str;
    fn debug_inline_payload_bytes(&self) -> usize;
}

/// Newtype adapter giving any `T: Any` a `SlotValue` implementation.
struct TypedSlotValue<T: Any>(T);

impl<T: Any> SlotValue for TypedSlotValue<T> {
    fn as_any(&self) -> &dyn Any {
        let TypedSlotValue(inner) = self;
        inner
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        let TypedSlotValue(inner) = self;
        inner
    }

    fn rebox(self: Box<Self>) -> Box<dyn SlotValue> {
        // Moves the payload out of the old box into a new allocation.
        let inner = (*self).0;
        Box::new(TypedSlotValue(inner))
    }

    fn debug_type_name(&self) -> &'static str {
        std::any::type_name::<T>()
    }

    fn debug_inline_payload_bytes(&self) -> usize {
        std::mem::size_of::<T>()
    }
}
190
/// Lifecycle of a node's slot.
/// NOTE(review): variants are not consumed in this chunk; descriptions are
/// inferred from names and the gap preserved-node machinery — confirm.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum NodeSlotState {
    // Node slot is live in the table.
    Active,
    // Node slot was gapped but its id/generation are preserved for reuse.
    PreservedGap,
    // No record of the node remains.
    Missing,
}
197
/// Coarse slot classification returned by `Slot::kind`; `ScopeValue` slots
/// are reported as `Value`.
#[derive(Debug, PartialEq)]
enum SlotKind {
    Group,
    Value,
    Node,
    Gap,
}
205
206impl Slot {
207 fn kind(&self) -> SlotKind {
208 match self {
209 Slot::Group { .. } => SlotKind::Group,
210 Slot::Value { .. } => SlotKind::Value,
211 Slot::ScopeValue { .. } => SlotKind::Value,
212 Slot::Node { .. } => SlotKind::Node,
213 Slot::Gap { .. } => SlotKind::Gap,
214 }
215 }
216
217 fn anchor_id(&self) -> AnchorId {
219 match self {
220 Slot::Group { anchor, .. } => *anchor,
221 Slot::Value { anchor, .. } => *anchor,
222 Slot::ScopeValue { anchor, .. } => *anchor,
223 Slot::Node { anchor, .. } => *anchor,
224 Slot::Gap { anchor, .. } => *anchor,
225 }
226 }
227
228 fn as_value<T: 'static>(&self) -> &T {
229 match self {
230 Slot::Value { data, .. } => data
231 .as_any()
232 .downcast_ref::<T>()
233 .expect("slot value type mismatch"),
234 Slot::ScopeValue { scope, .. } => (scope as &dyn Any)
235 .downcast_ref::<T>()
236 .expect("slot value type mismatch"),
237 _ => panic!("slot is not a value"),
238 }
239 }
240
241 fn as_value_mut<T: 'static>(&mut self) -> &mut T {
242 match self {
243 Slot::Value { data, .. } => data
244 .as_any_mut()
245 .downcast_mut::<T>()
246 .expect("slot value type mismatch"),
247 Slot::ScopeValue { scope, .. } => (scope as &mut dyn Any)
248 .downcast_mut::<T>()
249 .expect("slot value type mismatch"),
250 _ => panic!("slot is not a value"),
251 }
252 }
253
254 fn deactivate_scope(&self) {
255 if let Slot::ScopeValue { scope, .. } = self {
256 scope.deactivate();
257 }
258 }
259}
260
impl Default for Slot {
    /// Placeholder produced by `std::mem::take` when a slot is moved out:
    /// an empty, anchorless group.
    fn default() -> Self {
        Slot::Group {
            key: 0,
            anchor: AnchorId::INVALID,
            len: 0,
            scope: PackedScopeId::default(),
            boundary_key: 0,
            has_gap_children: false,
        }
    }
}
273
274fn drop_slots_in_reverse(slots: &mut Vec<Slot>) {
275 let _teardown = crate::runtime::enter_state_teardown_scope();
276 while let Some(slot) = slots.pop() {
277 drop(slot);
278 }
279}
280
281impl SlotTable {
    /// Number of gap slots allocated on first use.
    const INITIAL_CAP: usize = 32;
    // LOCAL_GAP_SCAN: bounded right-scan window when hunting for a gap run.
    // NOTE(review): EAGER_COMPACT_SLOT_LEN and the FRACTIONAL_* thresholds
    // are not consumed in this chunk; they presumably tune the compaction
    // pass — confirm against that code.
    const LOCAL_GAP_SCAN: usize = 256; const EAGER_COMPACT_SLOT_LEN: usize = 1_024;
    const FRACTIONAL_COMPACT_GAP_THRESHOLD: usize = 256;
    const FRACTIONAL_COMPACT_RATIO_DIVISOR: usize = 4;
    /// Above this length, growth switches from doubling to additive chunks.
    const LARGE_GROWTH_THRESHOLD: usize = 32 * 1024;
    const LARGE_GROWTH_DIVISOR: usize = 4;
    /// Capacity floor retained by the pending-drop buffer between flushes.
    const MIN_RETAINED_PENDING_SLOT_DROPS_CAPACITY: usize = 4;
290
291 fn make_value_slot<T: 'static>(anchor: AnchorId, value: T) -> Slot {
292 if TypeId::of::<T>() == TypeId::of::<RecomposeScope>() {
293 let value: Box<dyn Any> = Box::new(value);
294 let scope = *value
295 .downcast::<RecomposeScope>()
296 .expect("recompose scope slot type mismatch");
297 Slot::ScopeValue { anchor, scope }
298 } else {
299 Slot::Value {
300 anchor,
301 data: Box::new(TypedSlotValue(value)),
302 }
303 }
304 }
305
306 fn rehouse_live_value_payloads(&mut self) {
307 for slot in &mut self.slots {
308 let moved = std::mem::take(slot);
309 *slot = match moved {
310 Slot::Value { anchor, data } => Slot::Value {
311 anchor,
312 data: data.rebox(),
313 },
314 other => other,
315 };
316 }
317 }
318
319 pub fn new() -> Self {
320 Self {
321 slots: Vec::new(),
322 pending_slot_drops: PendingSlotDrops::default(),
323 cursor: 0,
324 group_stack: Vec::new(),
325 anchor_registry: AnchorRegistry::default(),
326 last_start_was_gap: false,
327 orphaned_node_ids: OrphanedNodeIds::default(),
328 needs_compact: false,
329 }
330 }
331
    /// Replaces the slot at `index` and returns the old one, after clearing
    /// any gap metadata invalidated by the replacement.
    fn replace_slot_tracked(&mut self, index: usize, slot: Slot) -> Slot {
        self.clear_gap_metadata_for_replacement(index, &slot);
        std::mem::replace(&mut self.slots[index], slot)
    }
336
    /// Overwrites the slot at `index` (the old slot is dropped immediately),
    /// after clearing any gap metadata invalidated by the replacement.
    fn set_slot_tracked(&mut self, index: usize, slot: Slot) {
        self.clear_gap_metadata_for_replacement(index, &slot);
        self.slots[index] = slot;
    }
341
    /// Appends a slot at the tail; a new index has no gap metadata to clear.
    fn push_slot_tracked(&mut self, slot: Slot) {
        self.slots.push(slot);
    }
345
346 pub fn heap_bytes(&self) -> usize {
348 let slot_size = std::mem::size_of::<Slot>();
349 let slots_bytes = self.slots.capacity() * slot_size;
350 let pending_bytes = self.pending_slot_drops.capacity() * slot_size;
351 let group_stack_bytes = self.group_stack.capacity() * std::mem::size_of::<GroupFrame>();
352 let orphaned_node_ids_bytes =
353 self.orphaned_node_ids.capacity() * std::mem::size_of::<OrphanedNode>();
354 slots_bytes
355 + pending_bytes
356 + self.anchor_registry.debug_heap_bytes()
357 + group_stack_bytes
358 + orphaned_node_ids_bytes
359 }
360
361 pub fn debug_stats(&self) -> SlotTableDebugStats {
362 let mut stats = SlotTableDebugStats {
363 slots_len: self.slots.len(),
364 slots_cap: self.slots.capacity(),
365 pending_slot_drops_len: self.pending_slot_drops.len(),
366 pending_slot_drops_cap: self.pending_slot_drops.capacity(),
367 anchors_len: 0,
368 anchors_cap: 0,
369 gap_metadata_len: 0,
370 gap_metadata_cap: 0,
371 free_anchor_ids_len: 0,
372 free_anchor_ids_cap: 0,
373 group_stack_len: self.group_stack.len(),
374 group_stack_cap: self.group_stack.capacity(),
375 orphaned_node_ids_len: self.orphaned_node_ids.len(),
376 orphaned_node_ids_cap: self.orphaned_node_ids.capacity(),
377 };
378 self.anchor_registry.fill_debug_stats(&mut stats);
379 stats
380 }
381
382 pub fn current_group(&self) -> usize {
383 self.group_stack.last().map(|f| f.start).unwrap_or(0)
384 }
385
    /// Delegates to the registry: drops gap metadata that becomes stale when
    /// the slot at `index` is replaced by `new_slot`.
    fn clear_gap_metadata_for_replacement(&mut self, index: usize, new_slot: &Slot) {
        self.anchor_registry
            .clear_gap_metadata_for_replacement(&self.slots, index, new_slot);
    }
390
    /// Records what used to live at a gap (group key/scope/len, node, ...).
    fn set_gap_metadata(&mut self, anchor: AnchorId, metadata: GapMetadata) {
        self.anchor_registry.set_gap_metadata(anchor, metadata);
    }
394
    /// Group key remembered for a gap that used to be a group, if any.
    fn gap_group_key(&self, anchor: AnchorId) -> Option<Key> {
        self.anchor_registry.gap_group_key(anchor)
    }
398
    /// Boundary key remembered for a gap, if any.
    fn gap_boundary_key(&self, anchor: AnchorId) -> Option<Key> {
        self.anchor_registry.gap_boundary_key(anchor)
    }
402
    /// Packed scope remembered for a gap that used to be a group.
    fn gap_group_scope(&self, anchor: AnchorId) -> PackedScopeId {
        self.anchor_registry.gap_group_scope(anchor)
    }
406
    /// Packed group span remembered for a gap that used to be a group.
    fn gap_group_len(&self, anchor: AnchorId) -> u32 {
        self.anchor_registry.gap_group_len(anchor)
    }
410
    /// Node id and generation remembered for a gap that used to be a node.
    fn gap_preserved_node(&self, anchor: AnchorId) -> Option<(NodeId, u32)> {
        self.anchor_registry.gap_preserved_node(anchor)
    }
414
    /// Number of slots a gap logically spans (used to skip over it).
    fn gap_extent_for_anchor(&self, anchor: AnchorId) -> usize {
        self.anchor_registry.gap_extent_for_anchor(anchor)
    }
418
419 fn current_disallow_live_slot_reuse(&self) -> bool {
420 self.group_stack
421 .last()
422 .map(|frame| {
423 frame.fresh_body && matches!(frame.child_reuse, ChildReusePolicy::FreshInsert)
424 })
425 .unwrap_or(false)
426 }
427
428 fn current_parent_allows_exact_gap_node_reuse(&self) -> bool {
429 self.group_stack
430 .last()
431 .map(|frame| frame.child_reuse.allows_exact_live_reuse())
432 .unwrap_or(true)
433 }
434
435 fn inherited_gap_boundary_key(&self) -> Option<Key> {
436 self.group_stack.last().and_then(|frame| {
437 if frame.child_reuse.requires_restricted_reuse() {
438 Some(frame.gap_boundary_key)
439 } else {
440 None
441 }
442 })
443 }
444
445 fn next_gap_boundary_key(&self, key: Key, child_reuse: ChildReusePolicy) -> Key {
446 if child_reuse.requires_restricted_reuse() {
447 self.inherited_gap_boundary_key().unwrap_or(key)
448 } else {
449 key
450 }
451 }
452
    /// Boundary key of the innermost open group, if any group is open.
    fn current_parent_gap_boundary_key(&self) -> Option<Key> {
        self.group_stack.last().map(|frame| frame.gap_boundary_key)
    }
456
457 fn gap_matches_current_boundary(&self, boundary_key: Option<Key>) -> bool {
458 match (self.current_parent_gap_boundary_key(), boundary_key) {
459 (Some(current), Some(boundary)) => current == boundary,
460 (Some(_), None) => false,
461 (None, _) => true,
462 }
463 }
464
465 fn owner_gap_boundary_key(&self, owner_index: Option<usize>) -> Option<Key> {
466 let owner_index = owner_index?;
467 if let Some(frame) = self
468 .group_stack
469 .iter()
470 .rev()
471 .find(|frame| frame.start == owner_index)
472 {
473 return Some(frame.gap_boundary_key);
474 }
475
476 match self.slots.get(owner_index) {
477 Some(Slot::Group { boundary_key, .. }) => Some(*boundary_key),
478 Some(Slot::Gap { anchor }) => self
479 .gap_boundary_key(*anchor)
480 .or_else(|| self.gap_group_key(*anchor)),
481 _ => None,
482 }
483 }
484
    /// Group key at `index`, also recovering the key remembered by a gap
    /// that used to be a group; `None` for other slot kinds or out of range.
    pub fn group_key(&self, index: usize) -> Option<Key> {
        match self.slots.get(index) {
            Some(Slot::Group { key, .. }) => Some(*key),
            Some(Slot::Gap { anchor }) => self.gap_group_key(*anchor),
            _ => None,
        }
    }
492
493 fn ensure_capacity(&mut self) {
494 if self.slots.is_empty() {
495 self.slots.reserve_exact(Self::INITIAL_CAP);
496 self.append_gap_slots(Self::INITIAL_CAP);
497 } else if self.cursor == self.slots.len() {
498 self.grow_slots();
499 }
500 }
501
    /// Unconditionally turns the slot at `cursor` into an unanchored gap,
    /// deferring the drop of whatever was there.
    fn force_gap_here(&mut self, cursor: usize) {
        self.replace_slot_deferred(
            cursor,
            Slot::Gap {
                anchor: AnchorId::INVALID,
            },
        );
    }
512
513 fn find_right_gap_run(&self, from: usize, scan_limit: usize) -> Option<(usize, usize)> {
514 let end = (from + scan_limit).min(self.slots.len());
515 let mut i = from;
516 while i < end {
517 if let Some(Slot::Gap { anchor, .. }) = self.slots.get(i) {
518 if *anchor == AnchorId::INVALID {
519 let start = i;
520 let mut len = 1;
521 while i + len < end {
522 match self.slots.get(i + len) {
523 Some(Slot::Gap { anchor, .. }) if *anchor == AnchorId::INVALID => {
524 len += 1;
525 }
526 _ => break,
527 }
528 }
529 return Some((start, len));
530 }
531 }
532 i += 1;
533 }
534 None
535 }
536
    /// Finds the run of unanchored gap slots nearest the table's tail,
    /// returning `(start, len)`.
    /// NOTE(review): the reverse scan skips any non-gap slots at the very
    /// tail before it starts counting, so the returned run is the tail-MOST
    /// run, not necessarily one touching the tail. The caller
    /// (`preserve_terminal_group_block_at_tail`) appears to account for that
    /// via its `dest_start` overlap check — confirm.
    fn find_tail_gap_run(&self) -> Option<(usize, usize)> {
        let mut run_len = 0usize;
        let mut run_start = 0usize;
        let mut found = false;

        for index in (0..self.slots.len()).rev() {
            match self.slots.get(index) {
                Some(Slot::Gap { anchor, .. }) if *anchor == AnchorId::INVALID => {
                    run_start = index;
                    run_len += 1;
                    found = true;
                }
                // First non-gap after the run ends the scan.
                _ if found => break,
                _ => {}
            }
        }

        found.then_some((run_start, run_len))
    }
556
557 fn ensure_gap_at_local(&mut self, cursor: usize) {
558 if matches!(self.slots.get(cursor), Some(Slot::Gap { .. })) {
559 return;
560 }
561 self.ensure_capacity();
562
563 if let Some((run_start, run_len)) = self.find_right_gap_run(cursor, Self::LOCAL_GAP_SCAN) {
565 self.shift_group_frames(cursor, run_len as isize);
566 self.shift_anchor_positions_from(cursor, run_len as isize);
567 self.slots[cursor..run_start + run_len].rotate_right(run_len);
568 return;
569 }
570
571 let full_range = self.slots.len().saturating_sub(cursor);
574 if full_range > Self::LOCAL_GAP_SCAN {
575 if let Some((run_start, run_len)) = self.find_right_gap_run(cursor, full_range) {
576 self.shift_group_frames(cursor, run_len as isize);
577 self.shift_anchor_positions_from(cursor, run_len as isize);
578 self.slots[cursor..run_start + run_len].rotate_right(run_len);
579 return;
580 }
581 }
582
583 self.grow_slots();
585 let full_range = self.slots.len().saturating_sub(cursor);
586 if let Some((run_start, run_len)) = self.find_right_gap_run(cursor, full_range) {
587 self.shift_group_frames(cursor, run_len as isize);
588 self.shift_anchor_positions_from(cursor, run_len as isize);
589 self.slots[cursor..run_start + run_len].rotate_right(run_len);
590 return;
591 }
592
593 self.force_gap_here(cursor);
594 }
595
    /// Relocates the `len` slots starting at `start` (a displaced group
    /// block) into unanchored gap slots near the tail, leaving unanchored
    /// gaps behind. Grows the table until a destination run that does not
    /// overlap the source block exists.
    fn preserve_terminal_group_block_at_tail(&mut self, start: usize, len: usize) {
        if len == 0 {
            return;
        }

        // Lift the block out, leaving unanchored gaps in its place.
        let mut moved = Vec::with_capacity(len);
        for index in start..start + len {
            moved.push(self.replace_slot_tracked(
                index,
                Slot::Gap {
                    anchor: AnchorId::INVALID,
                },
            ));
        }
        // Positions changed wholesale; let the registry rebuild lazily.
        self.anchor_registry.mark_dirty();

        loop {
            let (run_start, run_len) = self.find_tail_gap_run().unwrap_or((self.slots.len(), 0));
            if run_len >= len {
                // Place the block at the far end of the run.
                let dest_start = run_start + run_len - len;
                if dest_start < start + len {
                    // Destination overlaps the source block; grow and retry.
                    self.grow_slots();
                    continue;
                }
                for (offset, slot) in moved.drain(..).enumerate() {
                    self.set_slot_tracked(dest_start + offset, slot);
                }
                return;
            }

            // No run big enough yet; growth appends gaps at the tail.
            self.grow_slots();
        }
    }
632
633 fn append_gap_slots(&mut self, count: usize) {
634 if count == 0 {
635 return;
636 }
637 for _ in 0..count {
638 self.slots.push(Slot::Gap {
639 anchor: AnchorId::INVALID,
640 });
641 }
642 }
643
644 fn grow_slots(&mut self) {
645 let old_len = self.slots.len();
646 let target_len = Self::next_slot_target_len(old_len);
647 let additional = target_len.saturating_sub(old_len);
648 if additional == 0 {
649 return;
650 }
651 self.slots.reserve_exact(additional);
652 self.append_gap_slots(additional);
653 }
654
655 fn next_slot_target_len(old_len: usize) -> usize {
656 if old_len < Self::INITIAL_CAP {
657 return Self::INITIAL_CAP;
658 }
659 if old_len < Self::LARGE_GROWTH_THRESHOLD {
660 return old_len.saturating_mul(2);
661 }
662
663 let incremental_growth = (old_len / Self::LARGE_GROWTH_DIVISOR).max(Self::INITIAL_CAP);
664 old_len.saturating_add(incremental_growth)
665 }
666
    /// Hands out a fresh (or recycled) anchor id from the registry.
    fn allocate_anchor(&mut self) -> AnchorId {
        self.anchor_registry.allocate_anchor()
    }
671
    /// Returns an anchor id to the registry for reuse.
    fn free_anchor(&mut self, anchor: AnchorId) {
        self.anchor_registry.free_anchor(anchor);
    }
675
    /// Replaces the slot at `index`, deferring the drop of the old slot.
    /// The old slot's scope is deactivated immediately, and its anchor is
    /// released unless the replacement keeps the same anchor.
    fn replace_slot_deferred(&mut self, index: usize, slot: Slot) {
        let old = self.replace_slot_tracked(index, slot);
        old.deactivate_scope();
        let old_anchor = old.anchor_id();
        let new_anchor = self.slots[index].anchor_id();
        if old_anchor != new_anchor {
            // NOTE(review): this can pass AnchorId::INVALID to free_anchor;
            // assumes the registry tolerates freeing the invalid id — confirm.
            self.free_anchor(old_anchor);
        }
        self.pending_slot_drops.push(old);
    }
687
    /// Converts the slot at `index` into a gap that keeps its anchor and
    /// remembers (in the anchor registry) what used to live there: group
    /// key/boundary/scope/len and, for node slots, the node id + generation.
    /// The displaced slot is queued for deferred drop; displaced nodes are
    /// also reported through `orphaned_node_ids`.
    fn replace_gap_slot_deferred(
        &mut self,
        index: usize,
        group_key: Option<Key>,
        boundary_key: Option<Key>,
        group_scope: PackedScopeId,
        group_len: u32,
    ) {
        let anchor = self.slots[index].anchor_id();
        // Carry forward a preserved node: either this slot is a live node,
        // or it is already a gap that remembered one.
        let preserved_node = match self.slots.get(index) {
            Some(Slot::Node { id, gen, .. }) => Some((*id, *gen)),
            Some(Slot::Gap { anchor }) => self.gap_preserved_node(*anchor),
            _ => None,
        };
        let old = self.replace_slot_tracked(index, Slot::Gap { anchor });
        self.set_gap_metadata(
            anchor,
            GapMetadata {
                group_key,
                boundary_key,
                group_scope,
                group_len,
                preserved_node,
            },
        );
        old.deactivate_scope();
        if let Slot::Node { id, gen, .. } = old {
            self.orphaned_node_ids
                .push(OrphanedNode::new(id, gen, anchor));
        }
        self.pending_slot_drops.push(old);
    }
720
    /// Drops all deferred slots (reverse push order), then trims the
    /// buffer's retained capacity.
    /// NOTE(review): `len()` is read *after* the clear, so `retained` is
    /// always the MIN_RETAINED constant if the clear empties the buffer —
    /// confirm whether the pre-clear length was intended here.
    fn flush_pending_slot_drops(&mut self) {
        self.pending_slot_drops.clear_and_drop_reverse();
        let retained = self
            .pending_slot_drops
            .len()
            .max(Self::MIN_RETAINED_PENDING_SLOT_DROPS_CAPACITY);
        self.pending_slot_drops.trim_retained_capacity(retained);
    }
729
    /// Records `anchor` as living at slot `position` in the registry.
    fn register_anchor(&mut self, anchor: AnchorId, position: usize) {
        self.anchor_registry.register_anchor(anchor, position);
    }
734
735 fn take_last_start_was_gap(&mut self) -> bool {
738 let was_gap = self.last_start_was_gap;
739 self.last_start_was_gap = false;
740 was_gap
741 }
742
    /// Current slot index of `anchor`, if the registry still tracks it.
    fn resolve_anchor(&self, anchor: AnchorId) -> Option<usize> {
        self.anchor_registry.resolve_anchor(anchor)
    }
747
    /// Turns `[start, end)` into gap slots, preserving group metadata so the
    /// content can be restored later; `owner_index` is the owning group's
    /// slot. Returns true if any slot changed.
    pub fn mark_range_as_gaps(
        &mut self,
        start: usize,
        end: usize,
        owner_index: Option<usize>,
    ) -> bool {
        self.mark_range_as_gaps_impl(start, end, owner_index, true)
    }
759
    /// Core gapping routine. With `preserve_group_metadata` the range is
    /// walked group-by-group and each gap remembers its group's
    /// key/scope/len (restorable later); without it every slot becomes a
    /// bare gap. Marks the owner group's `has_gap_children` and requests
    /// compaction when anything changed. Returns true if any slot changed.
    fn mark_range_as_gaps_impl(
        &mut self,
        start: usize,
        end: usize,
        owner_index: Option<usize>,
        preserve_group_metadata: bool,
    ) -> bool {
        let end = end.min(self.slots.len());
        let boundary_key = self.owner_gap_boundary_key(owner_index);
        let mut marked_any = false;

        // Simple mode: flatten everything in range to metadata-free gaps.
        if !preserve_group_metadata {
            for index in start..end {
                self.replace_gap_slot_deferred(
                    index,
                    None,
                    boundary_key,
                    PackedScopeId::default(),
                    0,
                );
                marked_any = true;
            }
            if marked_any {
                self.needs_compact = true;
                if let Some(idx) = owner_index {
                    if let Some(Slot::Group {
                        has_gap_children, ..
                    }) = self.slots.get_mut(idx)
                    {
                        *has_gap_children = true;
                    }
                }
            }
            return marked_any;
        }

        let mut i = start;

        while i < end {
            if i >= self.slots.len() {
                break;
            }

            // Capture the metadata to remember for the slot at `i` (a group,
            // or a gap that was previously a group); other kinds get zeros.
            let (group_len, group_key, group_scope) = {
                let slot = &self.slots[i];
                let (group_len, group_key, group_scope) = match slot {
                    Slot::Group {
                        len, key, scope, ..
                    } if preserve_group_metadata => (*len, Some(*key), *scope),
                    Slot::Group { len, .. } => (*len, None, PackedScopeId::default()),
                    Slot::Gap { anchor } if preserve_group_metadata => (
                        self.gap_group_len(*anchor),
                        self.gap_group_key(*anchor),
                        self.gap_group_scope(*anchor),
                    ),
                    Slot::Gap { anchor } => {
                        (self.gap_group_len(*anchor), None, PackedScopeId::default())
                    }
                    _ => (0, None, PackedScopeId::default()),
                };
                (group_len, group_key, group_scope)
            };

            self.replace_gap_slot_deferred(i, group_key, boundary_key, group_scope, group_len);
            marked_any = true;

            if group_len > 0 {
                // Gap the group's children as well, clipped to the range.
                let children_end = (i + unpack_slot_len(group_len)).min(end);
                for j in (i + 1)..children_end {
                    if j < self.slots.len() {
                        match &self.slots[j] {
                            Slot::Group {
                                key, scope, len, ..
                            } if preserve_group_metadata => {
                                self.replace_gap_slot_deferred(
                                    j,
                                    Some(*key),
                                    boundary_key,
                                    *scope,
                                    *len,
                                );
                                marked_any = true;
                            }
                            Slot::Group { .. } => {
                                self.replace_gap_slot_deferred(
                                    j,
                                    None,
                                    boundary_key,
                                    PackedScopeId::default(),
                                    0,
                                );
                                marked_any = true;
                            }
                            Slot::Gap { anchor } if preserve_group_metadata => {
                                self.replace_gap_slot_deferred(
                                    j,
                                    self.gap_group_key(*anchor),
                                    boundary_key,
                                    self.gap_group_scope(*anchor),
                                    self.gap_group_len(*anchor),
                                );
                                marked_any = true;
                            }
                            Slot::Gap { .. } => {
                                self.replace_gap_slot_deferred(
                                    j,
                                    None,
                                    boundary_key,
                                    PackedScopeId::default(),
                                    0,
                                );
                                marked_any = true;
                            }
                            Slot::Node { .. } => {
                                self.replace_gap_slot_deferred(
                                    j,
                                    None,
                                    boundary_key,
                                    PackedScopeId::default(),
                                    0,
                                );
                                marked_any = true;
                            }
                            Slot::ScopeValue { .. } => {
                                // Scope slots are only deactivated here, not
                                // gapped — presumably reclaimed by a later
                                // compaction pass; confirm.
                                self.slots[j].deactivate_scope();
                            }
                            Slot::Value { .. } => {}
                        }
                    }
                }
                // Skip over the whole group (always advance at least one).
                i = (i + unpack_slot_len(group_len)).max(i + 1);
            } else {
                i += 1;
            }
        }

        if marked_any {
            self.needs_compact = true;
            if let Some(idx) = owner_index {
                if let Some(Slot::Group {
                    has_gap_children, ..
                }) = self.slots.get_mut(idx)
                {
                    *has_gap_children = true;
                }
            }
        }
        marked_any
    }
913
914 pub fn get_group_scope(&self, index: usize) -> Option<ScopeId> {
915 let slot = self
916 .slots
917 .get(index)
918 .expect("get_group_scope: index out of bounds");
919 match slot {
920 Slot::Group { scope, .. } => scope.to_option(),
921 _ => None,
922 }
923 }
924
925 pub fn set_group_scope(&mut self, index: usize, scope: ScopeId) {
926 let slot = self
927 .slots
928 .get_mut(index)
929 .expect("set_group_scope: index out of bounds");
930 match slot {
931 Slot::Group {
932 scope: scope_opt, ..
933 } => {
934 *scope_opt = PackedScopeId::new(Some(scope));
937 }
938 _ => panic!("set_group_scope: slot at index is not a group"),
939 }
940 }
941
    /// Begins recomposition at the group addressed by `anchor`, but only if
    /// that group is still owned by scope `owner`; returns the group index.
    /// NOTE(review): "recranpose" looks like a typo for "recompose", but the
    /// name is public API and is kept for caller compatibility.
    pub fn start_recranpose_at_anchor(
        &mut self,
        anchor: AnchorId,
        owner: ScopeId,
    ) -> Option<usize> {
        let index = self.resolve_anchor(anchor)?;
        match self.slots.get(index) {
            Some(Slot::Group { scope, .. }) if scope.to_option() == Some(owner) => {
                self.start_recompose(index);
                Some(index)
            }
            _ => None,
        }
    }
956
957 #[cfg(test)]
958 pub fn find_group_index_by_scope(&self, scope: ScopeId) -> Option<usize> {
959 self.slots
960 .iter()
961 .enumerate()
962 .find_map(|(i, slot)| match slot {
963 Slot::Group { scope: id, .. } if id.to_option() == Some(scope) => Some(i),
964 _ => None,
965 })
966 }
967
    /// Test helper: begins recomposition at the first group owned by
    /// `scope`, returning its index. (Name typo mirrors the public API.)
    #[cfg(test)]
    pub fn start_recranpose_at_scope(&mut self, scope: ScopeId) -> Option<usize> {
        let index = self.find_group_index_by_scope(scope)?;
        self.start_recompose(index);
        Some(index)
    }
974
975 pub fn debug_dump_groups(&self) -> Vec<(usize, Key, Option<ScopeId>, usize)> {
976 self.slots
977 .iter()
978 .enumerate()
979 .filter_map(|(i, slot)| match slot {
980 Slot::Group {
981 key, len, scope, ..
982 } => Some((i, *key, scope.to_option(), unpack_slot_len(*len))),
983 _ => None,
984 })
985 .collect()
986 }
987
988 pub fn debug_dump_all_slots(&self) -> Vec<(usize, String)> {
989 self.slots
990 .iter()
991 .enumerate()
992 .map(|(i, slot)| {
993 let kind = match slot {
994 Slot::Group {
995 key, scope, len, ..
996 } => format!(
997 "Group(key={:?}, scope={:?}, len={})",
998 key,
999 scope.to_option(),
1000 unpack_slot_len(*len)
1001 ),
1002 Slot::Value { .. } => "Value".to_string(),
1003 Slot::ScopeValue { .. } => "ScopeValue".to_string(),
1004 Slot::Node { id, .. } => format!("Node(id={:?})", id),
1005 Slot::Gap { anchor } => {
1006 if let Some(key) = self.gap_group_key(*anchor) {
1007 format!(
1008 "Gap(was_group_key={:?}, scope={:?})",
1009 key,
1010 self.gap_group_scope(*anchor)
1011 )
1012 } else if let Some((id, gen)) = self.gap_preserved_node(*anchor) {
1013 format!("Gap(was_node_id={id:?}, gen={gen})")
1014 } else {
1015 "Gap".to_string()
1016 }
1017 }
1018 };
1019 (i, kind)
1020 })
1021 .collect()
1022 }
1023
1024 pub fn debug_value_type_counts(&self, limit: usize) -> Vec<SlotValueTypeDebugStat> {
1025 let mut counts: HashMap<&'static str, (usize, usize)> = HashMap::default();
1026 for slot in &self.slots {
1027 let Some((type_name, inline_payload_bytes)) = (match slot {
1028 Slot::Value { data, .. } => {
1029 Some((data.debug_type_name(), data.debug_inline_payload_bytes()))
1030 }
1031 Slot::ScopeValue { .. } => Some((
1032 std::any::type_name::<RecomposeScope>(),
1033 std::mem::size_of::<RecomposeScope>(),
1034 )),
1035 _ => None,
1036 }) else {
1037 continue;
1038 };
1039
1040 let entry = counts.entry(type_name).or_insert((0, 0));
1041 entry.0 += 1;
1042 entry.1 += inline_payload_bytes;
1043 }
1044
1045 let mut stats = counts
1046 .into_iter()
1047 .map(
1048 |(type_name, (count, inline_payload_bytes))| SlotValueTypeDebugStat {
1049 type_name,
1050 count,
1051 inline_payload_bytes,
1052 },
1053 )
1054 .collect::<Vec<_>>();
1055 stats.sort_by_key(|entry| std::cmp::Reverse(entry.count));
1056 stats.truncate(limit);
1057 stats
1058 }
1059
1060 fn update_group_bounds(&mut self) {
1061 for frame in &mut self.group_stack {
1062 if frame.end < self.cursor {
1063 frame.end = self.cursor;
1064 }
1065 }
1066 }
1067
    /// Recomputes every anchor's position from the current slot layout.
    fn rebuild_all_anchor_positions(&mut self) {
        self.anchor_registry.rebuild_all_positions(&self.slots);
    }
1073
    /// Adjusts open group frames after `delta` slots were inserted
    /// (positive) or removed (negative) at `index`.
    fn shift_group_frames(&mut self, index: usize, delta: isize) {
        if delta == 0 {
            return;
        }
        if delta > 0 {
            let delta = delta as usize;
            for frame in &mut self.group_stack {
                if frame.start >= index {
                    // Frame begins at/after the insertion point: both ends move.
                    frame.start += delta;
                    frame.end += delta;
                } else if frame.end >= index {
                    // Insertion landed inside the frame: only the end grows.
                    frame.end += delta;
                }
            }
        } else {
            let delta = (-delta) as usize;
            for frame in &mut self.group_stack {
                if frame.start >= index {
                    frame.start = frame.start.saturating_sub(delta);
                    frame.end = frame.end.saturating_sub(delta);
                } else if frame.end > index {
                    // NOTE(review): removal uses `end > index` while insertion
                    // uses `end >= index` — presumably intentional (a removal
                    // at exactly `end` lies outside the frame); confirm.
                    frame.end = frame.end.saturating_sub(delta);
                }
            }
        }
    }
1100
1101 pub fn start(&mut self, key: Key) -> usize {
1102 self.ensure_capacity();
1103
1104 let cursor = self.cursor;
1105 let parent_reuse = self
1106 .group_stack
1107 .last()
1108 .map(|frame| frame.child_reuse)
1109 .unwrap_or(ChildReusePolicy::Normal);
1110
1111 if let Some(Slot::Group {
1113 key: existing_key,
1114 len,
1115 boundary_key,
1116 has_gap_children,
1117 ..
1118 }) = self.slots.get(cursor)
1119 {
1120 if *existing_key == key && !*has_gap_children && parent_reuse.allows_exact_live_reuse()
1125 {
1126 let inherited_fresh_body = !matches!(parent_reuse, ChildReusePolicy::Normal);
1127 self.last_start_was_gap = inherited_fresh_body;
1128
1129 let frame = GroupFrame {
1130 key,
1131 start: cursor,
1132 end: cursor + unpack_slot_len(*len),
1133 child_reuse: parent_reuse,
1134 fresh_body: inherited_fresh_body,
1135 gap_boundary_key: *boundary_key,
1136 };
1137 self.group_stack.push(frame);
1138 self.cursor = cursor + 1;
1139 self.update_group_bounds();
1140 return cursor;
1141 }
1142 }
1143
1144 let restricted_reuse = parent_reuse.requires_restricted_reuse();
1145 let allow_exact_live_reuse = parent_reuse.allows_exact_live_reuse();
1146 let allow_live_search = matches!(parent_reuse, ChildReusePolicy::Normal);
1147
1148 self.last_start_was_gap = false;
1149 let cursor = self.cursor;
1150 debug_assert!(
1151 cursor <= self.slots.len(),
1152 "slot cursor {} out of bounds",
1153 cursor
1154 );
1155
1156 if cursor == self.slots.len() {
1157 self.grow_slots();
1158 }
1159
1160 debug_assert!(
1161 cursor < self.slots.len(),
1162 "slot cursor {} failed to grow",
1163 cursor
1164 );
1165
1166 if cursor > 0 && !matches!(self.slots.get(cursor), Some(Slot::Gap { .. })) {
1167 if let Some(Slot::Group { key: prev_key, .. }) = self.slots.get(cursor - 1) {
1168 if *prev_key == key {
1169 return self.insert_new_group_at_cursor(key, ChildReusePolicy::FreshInsert);
1170 }
1171 }
1172 }
1173
1174 if let Some(slot) = self.slots.get_mut(cursor) {
1176 if let Slot::Group {
1177 key: existing_key,
1178 len,
1179 boundary_key,
1180 has_gap_children,
1181 ..
1182 } = slot
1183 {
1184 if *existing_key == key && allow_exact_live_reuse {
1185 let group_len = *len;
1186 let had_gap_children = *has_gap_children;
1187 let gap_boundary_key = *boundary_key;
1188 if had_gap_children {
1189 *has_gap_children = false;
1190 }
1191
1192 let inherited_fresh_body = !matches!(parent_reuse, ChildReusePolicy::Normal);
1193 self.last_start_was_gap = inherited_fresh_body;
1194 let frame = GroupFrame {
1195 key,
1196 start: cursor,
1197 end: cursor + unpack_slot_len(group_len),
1198 child_reuse: parent_reuse,
1199 fresh_body: inherited_fresh_body,
1200 gap_boundary_key,
1201 };
1202 self.group_stack.push(frame);
1203 self.cursor = cursor + 1;
1204 self.update_group_bounds();
1205 return cursor;
1206 }
1207 }
1208 }
1209
1210 if let Some(Slot::Group {
1211 key: existing_key,
1212 anchor: _old_anchor,
1213 len: old_len,
1214 scope: old_scope,
1215 boundary_key: old_boundary_key,
1216 has_gap_children: _,
1217 }) = self.slots.get(cursor)
1218 {
1219 if *existing_key != key {
1220 let old_key = *existing_key;
1221 let old_len = unpack_slot_len(*old_len);
1222 let old_scope = old_scope.to_option();
1223 let old_boundary_key = *old_boundary_key;
1224 let parent_end = self
1225 .group_stack
1226 .last()
1227 .map(|frame| frame.end.min(self.slots.len()))
1228 .unwrap_or(self.slots.len());
1229 let preserve_at_tail = cursor + old_len == parent_end;
1230
1231 if old_len > 1 {
1232 let start = cursor + 1;
1233 let end = cursor + old_len;
1234 let _ = self.mark_range_as_gaps_impl(start, end, Some(cursor), false);
1235 }
1236
1237 self.replace_gap_slot_deferred(
1238 cursor,
1239 Some(old_key),
1240 Some(old_boundary_key),
1241 PackedScopeId::new(old_scope),
1242 pack_slot_len(old_len),
1243 );
1244
1245 if preserve_at_tail {
1246 self.preserve_terminal_group_block_at_tail(cursor, old_len);
1247 }
1248 }
1249 }
1250
1251 let reusable_gap = match self.slots.get(cursor) {
1253 Some(Slot::Gap { anchor }) => self.gap_group_key(*anchor).map(|gap_key| {
1254 (
1255 *anchor,
1256 gap_key,
1257 self.gap_boundary_key(*anchor),
1258 self.gap_group_scope(*anchor),
1259 self.gap_group_len(*anchor),
1260 )
1261 }),
1262 _ => None,
1263 };
1264 let parent_end = self
1265 .group_stack
1266 .last()
1267 .map(|frame| frame.end.min(self.slots.len()))
1268 .unwrap_or(self.slots.len());
1269
1270 if let Some((existing_anchor, gap_key, boundary_key, group_scope, group_len)) = reusable_gap
1271 {
1272 if gap_key == key
1273 && (self.gap_matches_current_boundary(boundary_key) || allow_live_search)
1274 {
1275 let mut search_index = cursor.saturating_add(unpack_slot_len(group_len).max(1));
1276 while search_index < parent_end {
1277 match self.slots.get(search_index) {
1278 Some(Slot::Group {
1279 key: existing_key, ..
1280 }) if *existing_key == key => {
1281 let group_len = match self.slots.get(search_index) {
1282 Some(Slot::Group { len, .. }) => unpack_slot_len(*len),
1283 _ => unreachable!("matched group must still be present"),
1284 };
1285 self.last_start_was_gap = false;
1286 let actual_len = group_len
1287 .max(1)
1288 .min(self.slots.len().saturating_sub(search_index));
1289 self.shift_group_frames(search_index, -(actual_len as isize));
1290 let moved: Vec<_> = self
1291 .slots
1292 .drain(search_index..search_index + actual_len)
1293 .collect();
1294 self.shift_group_frames(cursor, moved.len() as isize);
1295 self.slots.splice(cursor..cursor, moved);
1296 self.anchor_registry.mark_dirty();
1297 let gap_boundary_key = boundary_key
1298 .unwrap_or_else(|| self.next_gap_boundary_key(key, parent_reuse));
1299 let frame = GroupFrame {
1300 key,
1301 start: cursor,
1302 end: cursor + actual_len,
1303 child_reuse: ChildReusePolicy::Normal,
1304 fresh_body: false,
1305 gap_boundary_key,
1306 };
1307 self.group_stack.push(frame);
1308 self.cursor = cursor + 1;
1309 self.update_group_bounds();
1310 return cursor;
1311 }
1312 Some(Slot::Group { len, .. }) => {
1313 search_index =
1314 search_index.saturating_add(unpack_slot_len(*len).max(1));
1315 }
1316 Some(Slot::Gap { anchor }) => {
1317 search_index =
1318 search_index.saturating_add(self.gap_extent_for_anchor(*anchor));
1319 }
1320 Some(_) => search_index += 1,
1321 None => break,
1322 }
1323 }
1324
1325 let len = unpack_slot_len(group_len);
1326 let group_anchor = if existing_anchor.is_valid() {
1327 existing_anchor
1328 } else {
1329 self.allocate_anchor()
1330 };
1331 let preserved_scope = group_scope.to_option();
1332 let gap_boundary_key = if matches!(parent_reuse, ChildReusePolicy::FreshInsert) {
1333 self.next_gap_boundary_key(key, parent_reuse)
1334 } else {
1335 boundary_key.unwrap_or_else(|| self.next_gap_boundary_key(key, parent_reuse))
1336 };
1337 let child_reuse = if matches!(parent_reuse, ChildReusePolicy::FreshInsert) {
1338 ChildReusePolicy::FreshInsert
1339 } else {
1340 ChildReusePolicy::ParentRestoredFromGap
1341 };
1342
1343 self.set_slot_tracked(
1344 cursor,
1345 Slot::Group {
1346 key,
1347 anchor: group_anchor,
1348 len: pack_slot_len(len),
1349 scope: PackedScopeId::new(preserved_scope),
1350 boundary_key: gap_boundary_key,
1351 has_gap_children: false,
1352 },
1353 );
1354 self.register_anchor(group_anchor, cursor);
1355
1356 self.last_start_was_gap = true;
1357 let frame = GroupFrame {
1358 key,
1359 start: cursor,
1360 end: cursor + len,
1361 child_reuse,
1362 fresh_body: true,
1363 gap_boundary_key,
1364 };
1365 self.group_stack.push(frame);
1366 self.cursor = cursor + 1;
1367 self.update_group_bounds();
1368 return cursor;
1369 }
1370 }
1371
1372 let mut search_index = cursor;
1373 let mut found_group: Option<(usize, AnchorId, usize, Option<ScopeId>, Key, bool)> = None;
1374 const SEARCH_BUDGET: usize = 16;
1375 let search_budget = if restricted_reuse {
1376 parent_end.saturating_sub(cursor).max(SEARCH_BUDGET)
1377 } else {
1378 SEARCH_BUDGET
1379 };
1380 let mut scanned = 0usize;
1381 while search_index < parent_end && scanned < search_budget {
1382 scanned += 1;
1383 match self.slots.get(search_index) {
1384 Some(Slot::Group {
1385 key: existing_key,
1386 anchor,
1387 len,
1388 scope,
1389 boundary_key,
1390 ..
1391 }) => {
1392 if *existing_key == key && allow_live_search {
1393 found_group = Some((
1394 search_index,
1395 *anchor,
1396 unpack_slot_len(*len),
1397 scope.to_option(),
1398 *boundary_key,
1399 false,
1400 ));
1401 break;
1402 }
1403 search_index = search_index.saturating_add(unpack_slot_len(*len).max(1));
1404 }
1405 Some(Slot::Gap { anchor }) => {
1406 if self.gap_group_key(*anchor) == Some(key)
1407 && (self.gap_matches_current_boundary(self.gap_boundary_key(*anchor))
1408 || allow_live_search)
1409 {
1410 found_group = Some((
1411 search_index,
1412 *anchor,
1413 unpack_slot_len(self.gap_group_len(*anchor)),
1414 self.gap_group_scope(*anchor).to_option(),
1415 self.gap_boundary_key(*anchor)
1416 .unwrap_or_else(|| self.next_gap_boundary_key(key, parent_reuse)),
1417 true,
1418 ));
1419 break;
1420 }
1421 search_index = search_index.saturating_add(self.gap_extent_for_anchor(*anchor));
1422 }
1423 Some(_slot) => {
1424 search_index += 1;
1425 }
1426 None => break,
1427 }
1428 }
1429
1430 if let Some((
1431 found_index,
1432 group_anchor,
1433 group_len,
1434 preserved_scope,
1435 gap_boundary_key,
1436 reused_gap,
1437 )) = found_group
1438 {
1439 if reused_gap {
1440 self.set_slot_tracked(
1441 found_index,
1442 Slot::Group {
1443 key,
1444 anchor: group_anchor,
1445 len: pack_slot_len(group_len),
1446 scope: PackedScopeId::new(preserved_scope),
1447 boundary_key: gap_boundary_key,
1448 has_gap_children: false,
1449 },
1450 );
1451 }
1452
1453 let restored_from_preserved = reused_gap || found_index != cursor;
1454 let inherited_fresh_body = !matches!(parent_reuse, ChildReusePolicy::Normal);
1455 self.last_start_was_gap = restored_from_preserved || inherited_fresh_body;
1456 let group_extent = group_len.max(1);
1457 let available = self.slots.len().saturating_sub(found_index);
1458 let actual_len = group_extent.min(available);
1459 if actual_len > 0 {
1460 self.shift_group_frames(found_index, -(actual_len as isize));
1461 let moved: Vec<_> = self
1462 .slots
1463 .drain(found_index..found_index + actual_len)
1464 .collect();
1465 self.shift_group_frames(cursor, moved.len() as isize);
1466 self.slots.splice(cursor..cursor, moved);
1467 self.anchor_registry.mark_dirty();
1468 let child_reuse = if restored_from_preserved {
1469 restored_child_reuse(parent_reuse)
1470 } else {
1471 parent_reuse
1472 };
1473 let frame_gap_boundary_key =
1474 if reused_gap && matches!(parent_reuse, ChildReusePolicy::FreshInsert) {
1475 self.next_gap_boundary_key(key, parent_reuse)
1476 } else {
1477 gap_boundary_key
1478 };
1479 let frame = GroupFrame {
1480 key,
1481 start: cursor,
1482 end: cursor + actual_len,
1483 child_reuse,
1484 fresh_body: restored_from_preserved || inherited_fresh_body,
1485 gap_boundary_key: frame_gap_boundary_key,
1486 };
1487 self.group_stack.push(frame);
1488 self.cursor = cursor + 1;
1489 self.update_group_bounds();
1490 return cursor;
1491 } else {
1492 self.shift_group_frames(found_index, 0);
1493 }
1494 }
1495
1496 self.insert_new_group_at_cursor(key, ChildReusePolicy::FreshInsert)
1497 }
1498
    /// Inserts a brand-new, empty group header for `key` at the current cursor
    /// and pushes a matching `GroupFrame` so subsequent writes land inside it.
    ///
    /// Returns the slot index of the new group header. The header is written
    /// with `len: 0`; the real length is reconciled later in `end()`.
    fn insert_new_group_at_cursor(&mut self, key: Key, child_reuse: ChildReusePolicy) -> usize {
        self.ensure_capacity();

        let cursor = self.cursor;
        self.ensure_gap_at_local(cursor);

        if cursor < self.slots.len() {
            // In-table insertion: `ensure_gap_at_local` guarantees a gap slot
            // here, which we overwrite in place. Its old anchor is recycled
            // and a fresh anchor is claimed for the group header.
            debug_assert!(matches!(self.slots[cursor], Slot::Gap { .. }));
            if let Some(Slot::Gap { anchor: old_anchor }) = self.slots.get(cursor) {
                self.free_anchor(*old_anchor);
            }
            let group_anchor = self.allocate_anchor();
            self.set_slot_tracked(
                cursor,
                Slot::Group {
                    key,
                    anchor: group_anchor,
                    len: 0, // finalized in `end()` once the body has been emitted
                    scope: PackedScopeId::default(),
                    boundary_key: self.next_gap_boundary_key(key, child_reuse),
                    has_gap_children: false,
                },
            );
            self.register_anchor(group_anchor, cursor);
        } else {
            // Cursor is at the end of the table: append instead of overwriting.
            let group_anchor = self.allocate_anchor();
            self.push_slot_tracked(Slot::Group {
                key,
                anchor: group_anchor,
                len: 0,
                scope: PackedScopeId::default(),
                boundary_key: self.next_gap_boundary_key(key, child_reuse),
                has_gap_children: false,
            });
            self.register_anchor(group_anchor, cursor);
        }
        self.last_start_was_gap = false;
        self.cursor = cursor + 1;
        // NOTE(review): `next_gap_boundary_key` is invoked a second time here;
        // if it is stateful (e.g. a counter) the frame's key will differ from
        // the boundary_key stored in the slot above — confirm that is intended.
        self.group_stack.push(GroupFrame {
            key,
            start: cursor,
            end: self.cursor,
            child_reuse,
            fresh_body: true,
            gap_boundary_key: self.next_gap_boundary_key(key, child_reuse),
        });
        self.update_group_bounds();
        cursor
    }
    /// Shifts registered anchor positions from `start_slot` onward by `delta`
    /// slots, delegating entirely to the anchor registry.
    fn shift_anchor_positions_from(&mut self, start_slot: usize, delta: isize) {
        self.anchor_registry.shift_positions_from(start_slot, delta);
    }
1552 fn flush_anchors_if_dirty(&mut self) {
1553 if self.anchor_registry.take_dirty() {
1554 self.rebuild_all_anchor_positions();
1555 }
1556 }
    /// Closes the group most recently opened by `start`, reconciling the
    /// stored group length with how far the cursor actually advanced.
    pub fn end(&mut self) {
        if let Some(frame) = self.group_stack.pop() {
            let end = self.cursor;
            let mut grew = false;
            if let Some(slot) = self.slots.get_mut(frame.start) {
                debug_assert_eq!(
                    SlotKind::Group,
                    slot.kind(),
                    "slot kind mismatch at {}",
                    frame.start
                );
                if let Slot::Group {
                    key,
                    len,
                    has_gap_children,
                    ..
                } = slot
                {
                    debug_assert_eq!(*key, frame.key, "group key mismatch");
                    let new_len = end.saturating_sub(frame.start);
                    let old_len = unpack_slot_len(*len);
                    // A shorter body means trailing children were left behind
                    // inside the group's recorded extent.
                    if new_len < old_len {
                        *has_gap_children = true;
                    }
                    // Only shrink the stored length for drastic reductions
                    // (at least 4x smaller AND at least 64 slots dropped);
                    // otherwise keep the larger extent so preserved children
                    // within the old span remain addressable.
                    const SHRINK_MIN_DROP: usize = 64;
                    const SHRINK_RATIO: usize = 4;
                    if old_len > new_len
                        && old_len >= new_len.saturating_mul(SHRINK_RATIO)
                        && (old_len - new_len) >= SHRINK_MIN_DROP
                    {
                        *len = pack_slot_len(new_len);
                    } else {
                        grew = new_len > old_len;
                        *len = pack_slot_len(old_len.max(new_len));
                    }
                }
            }
            if grew {
                // Enclosing groups must be widened to cover the grown child.
                self.propagate_group_growth(frame.start, end);
            }
            // Keep the parent frame's end in sync with where this group ended.
            if let Some(parent) = self.group_stack.last_mut() {
                if parent.end < end {
                    parent.end = end;
                }
            }
        }
    }
1605
    /// Re-enters the existing group at `index` for targeted recomposition:
    /// pushes a frame with normal reuse policy and positions the cursor just
    /// inside the group.
    fn start_recompose(&mut self, index: usize) {
        if let Some(slot) = self.slots.get(index) {
            debug_assert_eq!(
                SlotKind::Group,
                slot.kind(),
                "slot kind mismatch at {}",
                index
            );
            if let Slot::Group {
                key,
                len,
                boundary_key,
                ..
            } = *slot
            {
                let frame = GroupFrame {
                    key,
                    start: index,
                    end: index + unpack_slot_len(len),
                    child_reuse: ChildReusePolicy::Normal,
                    fresh_body: false,
                    gap_boundary_key: boundary_key,
                };
                self.group_stack.push(frame);
                self.cursor = index + 1;
                // Skip the value/scope slot that immediately follows the group
                // header, so recomposition starts at the first real child.
                if self.cursor < self.slots.len()
                    && matches!(
                        self.slots.get(self.cursor),
                        Some(Slot::Value { .. } | Slot::ScopeValue { .. })
                    )
                {
                    self.cursor += 1;
                }
            }
        }
    }
1642
    /// Closes a group opened via `start_recompose`: gaps out any slots the
    /// recomposition no longer produced, or grows the group (and its
    /// ancestors) if it emitted more slots than before.
    pub fn end_recompose(&mut self) {
        if let Some(frame) = self.group_stack.pop() {
            let actual_end = self.cursor;
            if actual_end < frame.end {
                // Body shrank: convert the leftover tail into gaps owned by
                // this group, then release any deferred payload drops.
                let _ = self.mark_range_as_gaps(actual_end, frame.end, Some(frame.start));
                self.flush_pending_slot_drops();
            }
            if let Some(Slot::Group { len, .. }) = self.slots.get_mut(frame.start) {
                let actual_len = actual_end.saturating_sub(frame.start);
                if actual_len > unpack_slot_len(*len) {
                    *len = pack_slot_len(actual_len);
                    self.propagate_group_growth(frame.start, actual_end);
                }
            }
            self.cursor = actual_end;
        }
    }
1664
    /// Widens every group that starts before `child_start` and whose span
    /// covers the grown child, so enclosing groups extend at least to
    /// `new_end`.
    fn propagate_group_growth(&mut self, child_start: usize, new_end: usize) {
        let mut i = 0;
        while i < child_start {
            // Gap runs are skipped wholesale using their recorded extent.
            if let Some(Slot::Gap { anchor }) = self.slots.get(i) {
                i += self.gap_extent_for_anchor(*anchor);
                continue;
            }
            match self.slots.get_mut(i) {
                Some(Slot::Group { len, .. }) => {
                    let group_end = i + unpack_slot_len(*len);
                    if group_end > child_start {
                        // This group encloses the child: stretch it if it
                        // ends short of the new end, then descend into it
                        // one slot at a time to reach nested ancestors.
                        if group_end < new_end {
                            *len = pack_slot_len(new_end.saturating_sub(i));
                        }
                        i += 1;
                    } else {
                        // Disjoint group: hop over its whole extent
                        // (`max(1)` guards against zero-length headers).
                        i += unpack_slot_len(*len).max(1);
                    }
                }
                _ => {
                    i += 1;
                }
            }
        }
    }
1696
1697 pub fn skip_current(&mut self) {
1698 if let Some(frame) = self.group_stack.last() {
1699 self.cursor = frame.end.min(self.slots.len());
1700 }
1701 }
1702
1703 pub fn node_ids_in_current_group(&self) -> Vec<NodeId> {
1704 let Some(frame) = self.group_stack.last() else {
1705 return Vec::new();
1706 };
1707 let end = frame.end.min(self.slots.len());
1708 self.slots[frame.start..end]
1709 .iter()
1710 .filter_map(|slot| match slot {
1711 Slot::Node { id, .. } => Some(*id),
1712 _ => None,
1713 })
1714 .collect()
1715 }
1716
1717 pub fn descendant_scopes_in_current_group(
1718 &self,
1719 current_scope: ScopeId,
1720 ) -> Vec<RecomposeScope> {
1721 let Some(frame) = self.group_stack.last() else {
1722 return Vec::new();
1723 };
1724
1725 let end = frame.end.min(self.slots.len());
1726 let mut scopes = Vec::new();
1727 let mut seen = HashMap::default();
1728
1729 for slot in &self.slots[frame.start.saturating_add(1)..end] {
1730 let Slot::ScopeValue { scope, .. } = slot else {
1731 continue;
1732 };
1733
1734 if scope.id() == current_scope || seen.insert(scope.id(), ()).is_some() {
1735 continue;
1736 }
1737
1738 scopes.push(scope.clone());
1739 }
1740
1741 scopes
1742 }
1743
    /// Claims the slot under the cursor for a value of type `T`, reusing a
    /// live compatible slot when allowed, rehydrating a gap, or appending.
    ///
    /// `init` is only invoked when a new value must actually be created.
    /// Returns the index of the value slot.
    pub fn use_value_slot<T: 'static>(&mut self, init: impl FnOnce() -> T) -> usize {
        self.ensure_capacity();

        let cursor = self.cursor;
        let disallow_live_reuse = self.current_disallow_live_slot_reuse();
        debug_assert!(
            cursor <= self.slots.len(),
            "slot cursor {} out of bounds",
            cursor
        );

        if cursor < self.slots.len() {
            // A live slot is reusable when reuse is allowed and it already
            // holds a `T` — or holds a scope value while `T` is itself
            // `RecomposeScope` (scope slots are stored specially).
            let reusable_live_value_slot = !disallow_live_reuse
                && (matches!(
                    self.slots.get(cursor),
                    Some(Slot::Value { data, .. }) if data.as_any().is::<T>()
                ) || (TypeId::of::<T>() == TypeId::of::<RecomposeScope>()
                    && matches!(self.slots.get(cursor), Some(Slot::ScopeValue { .. }))));
            let allow_live_reuse = !disallow_live_reuse;
            // When live reuse is forbidden, force the slot into a gap first so
            // the gap-rehydration path below handles it.
            if !allow_live_reuse && !matches!(self.slots.get(cursor), Some(Slot::Gap { .. })) {
                self.ensure_gap_at_local(cursor);
            }

            if reusable_live_value_slot {
                // Fast path: keep the existing value; `init` is never called.
                self.cursor = cursor + 1;
                self.update_group_bounds();
                return cursor;
            }

            if let Some(Slot::Gap {
                anchor: old_anchor, ..
            }) = self.slots.get(cursor)
            {
                // Gap at the cursor: recycle its anchor and write a fresh value.
                let old_anchor = *old_anchor;
                self.free_anchor(old_anchor);
                let anchor = self.allocate_anchor();
                self.set_slot_tracked(cursor, Self::make_value_slot(anchor, init()));
                self.register_anchor(anchor, cursor);
                self.cursor = cursor + 1;
                self.update_group_bounds();
                return cursor;
            }

            // Incompatible live slot: replace it, deferring the drop of the
            // old payload.
            let anchor = self.allocate_anchor();
            self.replace_slot_deferred(cursor, Self::make_value_slot(anchor, init()));
            self.register_anchor(anchor, cursor);
            self.cursor = cursor + 1;
            self.update_group_bounds();
            return cursor;
        }

        // Cursor at end of table: append a new value slot.
        let anchor = self.allocate_anchor();
        let slot = Self::make_value_slot(anchor, init());
        self.push_slot_tracked(slot);
        self.register_anchor(anchor, cursor);
        self.cursor = cursor + 1;
        self.update_group_bounds();
        cursor
    }
1808
1809 pub fn read_value<T: 'static>(&self, idx: usize) -> &T {
1810 let slot = self
1811 .slots
1812 .get(idx)
1813 .unwrap_or_else(|| panic!("slot index {} out of bounds", idx));
1814 debug_assert_eq!(
1815 SlotKind::Value,
1816 slot.kind(),
1817 "slot kind mismatch at {}",
1818 idx
1819 );
1820 slot.as_value()
1821 }
1822
1823 pub fn read_value_mut<T: 'static>(&mut self, idx: usize) -> &mut T {
1824 let slot = self
1825 .slots
1826 .get_mut(idx)
1827 .unwrap_or_else(|| panic!("slot index {} out of bounds", idx));
1828 debug_assert_eq!(
1829 SlotKind::Value,
1830 slot.kind(),
1831 "slot kind mismatch at {}",
1832 idx
1833 );
1834 slot.as_value_mut()
1835 }
1836
1837 pub fn write_value<T: 'static>(&mut self, idx: usize, value: T) {
1838 if idx >= self.slots.len() {
1839 panic!("attempted to write slot {} out of bounds", idx);
1840 }
1841 let slot = &mut self.slots[idx];
1842 debug_assert_eq!(
1843 SlotKind::Value,
1844 slot.kind(),
1845 "slot kind mismatch at {}",
1846 idx
1847 );
1848 let anchor = slot.anchor_id();
1850 *slot = Self::make_value_slot(anchor, value);
1851 }
1852
1853 pub fn read_value_by_anchor<T: 'static>(&self, anchor: AnchorId) -> Option<&T> {
1856 let idx = self.resolve_anchor(anchor)?;
1857 Some(self.read_value(idx))
1858 }
1859
1860 pub fn read_value_mut_by_anchor<T: 'static>(&mut self, anchor: AnchorId) -> Option<&mut T> {
1862 let idx = self.resolve_anchor(anchor)?;
1863 Some(self.read_value_mut(idx))
1864 }
1865
1866 pub fn remember<T: 'static>(&mut self, init: impl FnOnce() -> T) -> Owned<T> {
1867 let index = self.use_value_slot(|| Owned::new(init()));
1868 self.read_value::<Owned<T>>(index).clone()
1869 }
1870
1871 pub fn remember_with_anchor<T: 'static>(
1874 &mut self,
1875 init: impl FnOnce() -> T,
1876 ) -> (usize, AnchorId) {
1877 let index = self.use_value_slot(|| Owned::new(init()));
1878 let anchor = self
1879 .slots
1880 .get(index)
1881 .map(|slot| slot.anchor_id())
1882 .unwrap_or(AnchorId::INVALID);
1883 (index, anchor)
1884 }
1885
    /// Records a node `(id, gen)` at the cursor, reusing the existing slot
    /// when it already holds the same node, rehydrating a matching preserved
    /// gap, or otherwise writing a fresh node slot.
    pub fn record_node(&mut self, id: NodeId, gen: u32) {
        self.ensure_capacity();

        let cursor = self.cursor;
        debug_assert!(
            cursor <= self.slots.len(),
            "slot cursor {} out of bounds",
            cursor
        );
        if cursor < self.slots.len() {
            // Fast path: exact same node already recorded here.
            if let Some(Slot::Node {
                id: existing,
                gen: existing_gen,
                ..
            }) = self.slots.get(cursor)
            {
                if *existing == id && *existing_gen == gen {
                    self.cursor = cursor + 1;
                    self.update_group_bounds();
                    return;
                }
            }
            // Gap that preserves exactly this node: rehydrate it, keeping the
            // gap's anchor when it is still valid so anchor identity survives.
            if let Some(Slot::Gap { anchor: old_anchor }) = self.slots.get(cursor) {
                let preserved = self.gap_preserved_node(*old_anchor);
                if self.current_parent_allows_exact_gap_node_reuse() && preserved == Some((id, gen))
                {
                    let anchor = if old_anchor.is_valid() {
                        *old_anchor
                    } else {
                        self.allocate_anchor()
                    };
                    self.set_slot_tracked(cursor, Slot::Node { anchor, id, gen });
                    self.register_anchor(anchor, cursor);
                    self.cursor = cursor + 1;
                    self.update_group_bounds();
                    return;
                }
            }

            // Any other gap: recycle its anchor and overwrite with the node.
            if let Some(Slot::Gap {
                anchor: old_anchor, ..
            }) = self.slots.get(cursor)
            {
                let old_anchor = *old_anchor;
                self.free_anchor(old_anchor);
                let anchor = self.allocate_anchor();
                self.set_slot_tracked(cursor, Slot::Node { anchor, id, gen });
                self.register_anchor(anchor, cursor);
                self.cursor = cursor + 1;
                self.update_group_bounds();
                return;
            }

            // Incompatible live slot: replace it, deferring the old payload's
            // drop.
            let anchor = self.allocate_anchor();
            self.replace_slot_deferred(cursor, Slot::Node { anchor, id, gen });
            self.register_anchor(anchor, cursor);
            self.cursor = cursor + 1;
            self.update_group_bounds();
            return;
        }

        // Cursor at end of table: append a new node slot.
        let anchor = self.allocate_anchor();
        let slot = Slot::Node { anchor, id, gen };
        self.push_slot_tracked(slot);
        self.register_anchor(anchor, cursor);
        self.cursor = cursor + 1;
        self.update_group_bounds();
    }
1958
1959 pub fn peek_node(&self) -> Option<(NodeId, u32)> {
1960 let cursor = self.cursor;
1961 debug_assert!(
1962 cursor <= self.slots.len(),
1963 "slot cursor {} out of bounds",
1964 cursor
1965 );
1966 match self.slots.get(cursor) {
1967 Some(Slot::Node { id, gen, .. }) => Some((*id, *gen)),
1968 Some(Slot::Gap { anchor }) if self.current_parent_allows_exact_gap_node_reuse() => {
1969 self.gap_preserved_node(*anchor)
1970 }
1971 Some(_slot) => None,
1972 None => None,
1973 }
1974 }
1975
1976 pub fn read_node(&mut self) -> Option<NodeId> {
1977 let cursor = self.cursor;
1978 debug_assert!(
1979 cursor <= self.slots.len(),
1980 "slot cursor {} out of bounds",
1981 cursor
1982 );
1983 let node = match self.slots.get(cursor) {
1984 Some(Slot::Node { id, .. }) => Some(*id),
1985 Some(_slot) => None,
1986 None => None,
1987 };
1988 if node.is_some() {
1989 self.cursor = cursor + 1;
1990 self.update_group_bounds();
1991 }
1992 node
1993 }
1994
1995 pub fn advance_after_node_read(&mut self) {
1996 let cursor = self.cursor;
1997 let preserved = match self.slots.get(cursor) {
1998 Some(Slot::Gap { anchor }) if self.current_parent_allows_exact_gap_node_reuse() => self
1999 .gap_preserved_node(*anchor)
2000 .map(|(id, gen)| (*anchor, id, gen)),
2001 _ => None,
2002 };
2003 if let Some((old_anchor, id, gen)) = preserved {
2004 let anchor = if old_anchor.is_valid() {
2005 old_anchor
2006 } else {
2007 self.allocate_anchor()
2008 };
2009 self.set_slot_tracked(cursor, Slot::Node { anchor, id, gen });
2010 self.register_anchor(anchor, cursor);
2011 }
2012 self.cursor += 1;
2013 self.update_group_bounds();
2014 }
2015
2016 pub fn reset(&mut self) {
2017 self.cursor = 0;
2018 self.group_stack.clear();
2019 }
2020
2021 pub fn step_back(&mut self) {
2024 debug_assert!(self.cursor > 0, "Cannot step back from cursor 0");
2025 self.cursor = self.cursor.saturating_sub(1);
2026 }
2027
    /// Marks every slot past the cursor (within the innermost open group, or
    /// the whole remaining table when no group is open) as a gap, then flushes
    /// any deferred payload drops.
    ///
    /// Returns `true` when at least one range was actually marked as gaps.
    pub fn trim_to_cursor(&mut self) -> bool {
        let mut marked = false;
        if let Some((owner_start, group_end)) = self
            .group_stack
            .last()
            .map(|frame| (frame.start, frame.end.min(self.slots.len())))
        {
            if self.cursor < group_end
                && self.mark_range_as_gaps(self.cursor, group_end, Some(owner_start))
            {
                marked = true;
            }

            // The open frame now ends where the cursor stopped.
            if let Some(frame) = self.group_stack.last_mut() {
                frame.end = self.cursor;
            }
        } else if self.cursor < self.slots.len() {
            // No open group: trim the tail of the entire table (no owner).
            if self.mark_range_as_gaps(self.cursor, self.slots.len(), None) {
                marked = true;
            }
        }
        self.flush_pending_slot_drops();

        marked
    }
2070
    /// Drains queued orphaned nodes in forward (FIFO) order, invoking
    /// `visitor` for each drained entry.
    pub fn drain_orphaned_node_ids_with(&mut self, visitor: impl FnMut(OrphanedNode)) {
        self.orphaned_node_ids.drain_forward(visitor);
    }
2075
2076 pub fn drain_orphaned_node_ids(&mut self) -> Vec<OrphanedNode> {
2077 let mut orphaned = Vec::with_capacity(self.orphaned_node_ids.len());
2078 self.drain_orphaned_node_ids_with(|node| orphaned.push(node));
2079 orphaned
2080 }
2081
    /// Puts an orphaned node back on the queue (e.g. after a visitor decided
    /// not to handle it yet).
    pub(crate) fn requeue_orphaned_node(&mut self, orphaned: OrphanedNode) {
        self.orphaned_node_ids.push(orphaned);
    }
2085
2086 pub(crate) fn orphaned_node_state(&self, orphaned: OrphanedNode) -> NodeSlotState {
2087 let Some(index) = self.resolve_anchor(orphaned.anchor) else {
2088 return NodeSlotState::Missing;
2089 };
2090 match self.slots.get(index) {
2091 Some(Slot::Node { id, gen, .. })
2092 if *id == orphaned.id && *gen == orphaned.generation =>
2093 {
2094 NodeSlotState::Active
2095 }
2096 Some(Slot::Gap { anchor })
2097 if self.gap_preserved_node(*anchor) == Some((orphaned.id, orphaned.generation)) =>
2098 {
2099 NodeSlotState::PreservedGap
2100 }
2101 _ => NodeSlotState::Missing,
2102 }
2103 }
2104
2105 #[cfg(test)]
2106 fn gap_metadata_at(&self, index: usize) -> Option<GapMetadata> {
2107 match self.slots.get(index) {
2108 Some(Slot::Gap { anchor }) => self.anchor_registry.gap_metadata_for_anchor(*anchor),
2109 _ => None,
2110 }
2111 }
2112
2113 #[cfg(test)]
2114 fn group_anchor_at(&self, index: usize) -> AnchorId {
2115 self.slots
2116 .get(index)
2117 .map(|slot| slot.anchor_id())
2118 .unwrap_or(AnchorId::INVALID)
2119 }
2120
    /// Test helper: enqueues an orphaned node with an invalid anchor so tests
    /// can exercise the orphan queue without building real slots.
    #[cfg(test)]
    pub(crate) fn push_orphaned_node_for_test(&mut self, id: NodeId, generation: u32) {
        self.orphaned_node_ids
            .push(OrphanedNode::new(id, generation, AnchorId::INVALID));
    }
2126
    /// Physically removes all gap slots from the table, fixing up group
    /// lengths and anchor positions.
    ///
    /// Passes, in order:
    /// 1. count gap slots (bailing out early if none, or if the heuristic in
    ///    `should_compact_gaps` says compaction is not yet worthwhile);
    /// 2. recompute each group's length in terms of *kept* slots;
    /// 3. unregister anchors owned by gap runs;
    /// 4. move surviving slots into a fresh, exactly-sized vector and drop the
    ///    removed gap slots in reverse order.
    pub fn compact(&mut self) {
        if !self.needs_compact {
            return;
        }

        let old_len = self.slots.len();
        if old_len == 0 {
            self.needs_compact = false;
            return;
        }

        // Pass 1: count the total extent of all gap runs.
        let mut gap_count = 0usize;
        let mut gap_scan = 0usize;
        while gap_scan < old_len {
            match &self.slots[gap_scan] {
                Slot::Gap { anchor } => {
                    // A gap's extent may cover several slots; clamp to table end.
                    let extent = self
                        .gap_extent_for_anchor(*anchor)
                        .min(old_len.saturating_sub(gap_scan));
                    gap_count = gap_count.saturating_add(extent);
                    gap_scan = gap_scan.saturating_add(extent);
                }
                _ => gap_scan += 1,
            }
        }
        if gap_count == 0 {
            self.needs_compact = false;
            return;
        }
        let new_len = old_len - gap_count;
        // Heuristic gate: deliberately leaves `needs_compact` set so a later
        // call can retry once enough gaps accumulate.
        if !Self::should_compact_gaps(old_len, gap_count, new_len) {
            return;
        }
        self.needs_compact = false;
        log::debug!(
            "compact: {} slots → {} (removing {} gaps, capacity {})",
            old_len,
            new_len,
            gap_count,
            self.slots.capacity()
        );

        // Pass 2: rewrite each group's length as the number of surviving
        // slots in its span. `new_len` is shadowed here as a running count of
        // kept slots.
        let mut group_stack = Vec::<GroupCompactionFrame>::new();
        let mut new_len = 0usize;

        let mut i = 0usize;
        while i < old_len {
            // Close any groups whose original span ends at or before `i`.
            while group_stack.last().is_some_and(|frame| frame.end <= i) {
                let frame = group_stack.pop().expect("group extent frame");
                if let Slot::Group {
                    len,
                    has_gap_children,
                    ..
                } = &mut self.slots[frame.index]
                {
                    *len = pack_slot_len(new_len.saturating_sub(frame.kept_before));
                    *has_gap_children = false;
                }
            }

            match &self.slots[i] {
                Slot::Gap { anchor } => {
                    // Gap runs are skipped and contribute nothing to `new_len`.
                    let extent = self
                        .gap_extent_for_anchor(*anchor)
                        .min(old_len.saturating_sub(i));
                    i = i.saturating_add(extent);
                    continue;
                }
                Slot::Group { len, .. } => {
                    group_stack.push(GroupCompactionFrame {
                        index: i,
                        end: i.saturating_add(unpack_slot_len(*len).max(1)).min(old_len),
                        kept_before: new_len,
                    });
                }
                Slot::Value { .. } | Slot::ScopeValue { .. } | Slot::Node { .. } => {}
            }

            new_len += 1;
            i += 1;
        }

        // Close groups still open when the scan reached the table end.
        while let Some(frame) = group_stack.pop() {
            if let Slot::Group {
                len,
                has_gap_children,
                ..
            } = &mut self.slots[frame.index]
            {
                *len = pack_slot_len(new_len.saturating_sub(frame.kept_before));
                *has_gap_children = false;
            }
        }

        // Pass 3: unregister every anchor that belongs to a slot inside a gap
        // run, so the registry never points at removed slots.
        let mut i = 0usize;
        while i < old_len {
            if let Slot::Gap { anchor } = &self.slots[i] {
                let extent = self
                    .gap_extent_for_anchor(*anchor)
                    .min(old_len.saturating_sub(i));
                for j in i..i + extent {
                    let anchor = self.slots[j].anchor_id();
                    if anchor.is_valid() {
                        self.anchor_registry.remove_position(anchor);
                    }
                }
                i = i.saturating_add(extent);
            } else {
                i += 1;
            }
        }

        // Pass 4: split slots into survivors (exactly-sized vector, shedding
        // peak capacity) and removed gap slots, which are dropped explicitly
        // in reverse order.
        let mut compacted = Vec::with_capacity(new_len);
        let mut removed = Vec::with_capacity(gap_count);
        let mut read = 0usize;
        while read < old_len {
            if let Slot::Gap { anchor } = &self.slots[read] {
                let extent = self
                    .gap_extent_for_anchor(*anchor)
                    .min(old_len.saturating_sub(read));
                for index in read..read + extent {
                    removed.push(std::mem::take(&mut self.slots[index]));
                }
                read = read.saturating_add(extent);
                continue;
            }
            compacted.push(std::mem::take(&mut self.slots[read]));
            read += 1;
        }
        debug_assert_eq!(compacted.len(), new_len);
        debug_assert_eq!(removed.len(), gap_count);
        self.slots = compacted;
        drop_slots_in_reverse(&mut removed);
        self.anchor_registry.clear_all_gap_metadata();
        self.rehouse_live_value_payloads();

        self.rebuild_anchor_positions();

        // Orphan queue capacity is also trimmed back to its baseline.
        self.orphaned_node_ids
            .trim_retained_capacity(OrphanedNodeIds::INITIAL_CAPACITY);
    }
2287
2288 fn should_compact_gaps(old_len: usize, gap_count: usize, new_len: usize) -> bool {
2289 if gap_count == 0 {
2290 return false;
2291 }
2292 if old_len <= Self::EAGER_COMPACT_SLOT_LEN {
2293 return true;
2294 }
2295 gap_count >= new_len
2296 || (gap_count >= Self::FRACTIONAL_COMPACT_GAP_THRESHOLD
2297 && gap_count.saturating_mul(Self::FRACTIONAL_COMPACT_RATIO_DIVISOR) >= old_len)
2298 }
2299
    /// Rebuilds the anchor registry's position map from the current slots.
    fn rebuild_anchor_positions(&mut self) {
        self.anchor_registry.rebuild_positions(&self.slots);
    }
2303}
2304
2305impl Default for SlotTable {
2306 fn default() -> Self {
2307 Self::new()
2308 }
2309}
2310
2311impl SlotTable {
2312 pub fn begin_group(&mut self, key: Key) -> StartGroup<GroupId> {
2313 let idx = SlotTable::start(self, key);
2314 let restored = SlotTable::take_last_start_was_gap(self);
2315 StartGroup {
2316 group: GroupId(idx),
2317 anchor: self.slots[idx].anchor_id(),
2318 restored_from_gap: restored,
2319 }
2320 }
2321
2322 pub fn end_group(&mut self) {
2323 SlotTable::end(self);
2324 }
2325
2326 pub fn skip_current_group(&mut self) {
2327 SlotTable::skip_current(self);
2328 }
2329
2330 pub fn nodes_in_current_group(&self) -> Vec<NodeId> {
2331 SlotTable::node_ids_in_current_group(self)
2332 }
2333
2334 pub fn finalize_current_group(&mut self) -> bool {
2335 SlotTable::trim_to_cursor(self)
2336 }
2337
2338 pub fn flush(&mut self) {
2339 SlotTable::flush_anchors_if_dirty(self);
2340 }
2341}
2342
impl Drop for SlotTable {
    fn drop(&mut self) {
        // Release deferred payload drops first, then tear down the remaining
        // live slots back-to-front via `drop_slots_in_reverse`.
        self.flush_pending_slot_drops();
        drop_slots_in_reverse(&mut self.slots);
    }
}
2349
2350#[cfg(test)]
2351mod tests {
2352 use super::{
2353 ChildReusePolicy, GapMetadata, GroupFrame, NodeSlotState, OrphanedNode, OrphanedNodeIds,
2354 Slot, SlotTable,
2355 };
2356 use crate::{AnchorId, Owned, RecomposeScope};
2357
    // Past LARGE_GROWTH_THRESHOLD the table should grow by a fixed increment
    // (10_240 per call here), not double, to bound peak memory.
    #[test]
    fn large_slot_tables_grow_incrementally_instead_of_doubling() {
        let mut table = SlotTable::new();
        table.append_gap_slots(SlotTable::LARGE_GROWTH_THRESHOLD);

        table.grow_slots();

        assert_eq!(table.slots.len(), 40_960);
        assert!(table.slots.capacity() >= table.slots.len());

        table.grow_slots();

        assert_eq!(table.slots.len(), 51_200);
        assert!(table.slots.capacity() >= table.slots.len());
    }
2373
    // Flushing a large backlog of deferred slot drops must also shrink the
    // backing buffer back toward its retained minimum capacity.
    #[test]
    fn flushing_large_pending_slot_drops_releases_excess_capacity() {
        let mut table = SlotTable::new();
        for _ in 0..4096 {
            table.pending_slot_drops.push(Slot::default());
        }

        table.flush_pending_slot_drops();

        let stats = table.debug_stats();
        assert_eq!(stats.pending_slot_drops_len, 0);
        assert!(
            stats.pending_slot_drops_cap <= SlotTable::MIN_RETAINED_PENDING_SLOT_DROPS_CAPACITY * 4,
            "pending slot drop capacity stayed too large after flush: {stats:?}",
        );
    }
2390
    // Draining a large orphan queue yields entries in FIFO order and trims
    // the queue's capacity back toward its initial size.
    #[test]
    fn draining_large_orphaned_node_ids_releases_excess_capacity() {
        let mut table = SlotTable::new();
        for node_id in 0..4096 {
            table
                .orphaned_node_ids
                .push(OrphanedNode::new(node_id, 0, AnchorId::INVALID));
        }

        let mut drained = Vec::new();
        table.drain_orphaned_node_ids_with(|node| drained.push(node));

        let stats = table.debug_stats();
        assert_eq!(stats.orphaned_node_ids_len, 0);
        assert_eq!(drained.len(), 4096);
        assert_eq!(drained[0], OrphanedNode::new(0, 0, AnchorId::INVALID));
        assert_eq!(drained[4095], OrphanedNode::new(4095, 0, AnchorId::INVALID));
        assert!(
            stats.orphaned_node_ids_cap <= OrphanedNodeIds::INITIAL_CAPACITY * 4,
            "orphaned node id capacity stayed too large after drain: {stats:?}",
        );
    }
2413
    // Compaction must move surviving value payloads into fresh allocations
    // (observable via the payload's address changing) while preserving value.
    #[test]
    fn compaction_rehouses_surviving_value_payloads() {
        let mut table = SlotTable::new();
        let survivor_idx = table.use_value_slot(|| String::from("survivor"));
        let survivor_before = table.read_value::<String>(survivor_idx) as *const String;

        for index in 0..4096 {
            table.use_value_slot(|| format!("drop-{index}"));
        }

        // Rewind past the survivor so everything after it becomes gaps.
        table.cursor = 1;
        assert!(table.trim_to_cursor(), "test setup should produce gaps");
        table.compact();

        let survivor_after = table.read_value::<String>(0);
        let survivor_after_ptr = survivor_after as *const String;
        assert_eq!(survivor_after, "survivor");
        assert_ne!(
            survivor_before, survivor_after_ptr,
            "compaction should move surviving value payloads into fresh boxes"
        );
    }
2436
    // After compaction, anchor storage should be proportional to surviving
    // slots, not to the table's historical peak.
    #[test]
    fn compaction_releases_peak_anchor_storage() {
        let mut table = SlotTable::new();
        table.use_value_slot(|| String::from("survivor"));
        for index in 0..4096 {
            table.use_value_slot(|| format!("drop-{index}"));
        }

        table.cursor = 1;
        assert!(table.trim_to_cursor(), "test setup should produce gaps");
        table.compact();

        let stats = table.debug_stats();
        assert!(
            stats.anchors_cap <= stats.slots_len.saturating_mul(4).max(8),
            "anchor storage stayed too large after compaction: {stats:?}",
        );
        assert!(
            stats.free_anchor_ids_cap <= 8,
            "free anchor id storage stayed too large after compaction: {stats:?}",
        );
    }
2459
2460 #[test]
2461 fn compaction_reclaims_large_fractional_gap_blocks() {
2462 let mut table = SlotTable::new();
2463
2464 for index in 0..1536 {
2465 table.use_value_slot(|| format!("slot-{index}"));
2466 }
2467
2468 table.cursor = 1024;
2469 assert!(
2470 table.trim_to_cursor(),
2471 "test setup should produce a large gap block"
2472 );
2473 table.compact();
2474
2475 assert_eq!(
2476 table.slots.len(),
2477 1024,
2478 "fractional large gap blocks should compact once hidden payloads become significant",
2479 );
2480 }
2481
    #[test]
    fn compaction_preserves_anchor_identity_for_shifted_slots() {
        let mut table = SlotTable::new();

        // First group: two throwaway values that will shrink away on the
        // second composition pass.
        table.reset();
        let first_group = table.start(1);
        table.set_group_scope(first_group, 11);
        table.use_value_slot(|| String::from("drop-a"));
        table.use_value_slot(|| String::from("drop-b"));
        table.end();

        // Second group: the surviving value, remembered with an anchor so its
        // identity can be tracked across compaction.
        let second_group = table.start(2);
        table.set_group_scope(second_group, 22);
        let (survivor_index, survivor_anchor) =
            table.remember_with_anchor(|| String::from("survivor"));
        table.end();
        table.flush_anchors_if_dirty();

        assert_eq!(
            table
                .read_value_by_anchor::<Owned<String>>(survivor_anchor)
                .map(|value| value.with(|text| text.clone())),
            Some(String::from("survivor"))
        );
        assert_eq!(table.resolve_anchor(survivor_anchor), Some(survivor_index));

        // Recompose the first group with an empty body so its old tail (the
        // two dropped values) turns into gaps ahead of the survivor.
        table.reset();
        let started = table
            .start_recranpose_at_anchor(table.group_anchor_at(first_group), 11)
            .expect("recompose scope should be found");
        assert_eq!(started, first_group);
        table.end_recompose();
        assert!(
            table.needs_compact,
            "shrinking the first group should create gaps"
        );

        table.compact();

        // The survivor's slot index shifts left, but the anchor must still
        // resolve to it and read back the same payload.
        let shifted_index = table
            .resolve_anchor(survivor_anchor)
            .expect("survivor anchor should still resolve after compaction");
        assert!(
            shifted_index < survivor_index,
            "survivor slot should move left when leading gaps are compacted"
        );
        assert_eq!(
            table
                .read_value_by_anchor::<Owned<String>>(survivor_anchor)
                .map(|value| value.with(|text| text.clone())),
            Some(String::from("survivor"))
        );

        // Mutable anchor access must also target the shifted slot.
        table
            .read_value_mut_by_anchor::<Owned<String>>(survivor_anchor)
            .expect("mutable anchor lookup should remain valid after compaction")
            .replace(String::from("survivor-updated"));
        assert_eq!(
            table
                .read_value::<Owned<String>>(shifted_index)
                .with(|text| text.clone()),
            "survivor-updated"
        );
    }
2546
    #[test]
    fn sibling_search_does_not_steal_nested_matching_group() {
        let mut table = SlotTable::new();
        let root_key = crate::hash_key(&"root");
        let target_key = crate::location_key("target", 10, 20);
        let container_key = crate::location_key("container", 11, 21);

        // Compose root > container > nested(target_key).
        table.reset();
        let root = table.start(root_key);
        table.set_group_scope(root, 1);

        let container = table.start(container_key);
        table.set_group_scope(container, 10);

        let nested = table.start(target_key);
        table.set_group_scope(nested, 22);
        table.use_value_slot(|| String::from("nested"));
        table.end();

        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        let root_end = match table.slots[root] {
            Slot::Group { len, .. } => root + super::unpack_slot_len(len),
            _ => panic!("root should remain a group"),
        };

        // Hand-build the root frame with the cursor on the container, then
        // start target_key as a DIRECT child of root.
        table.cursor = container;
        table.group_stack.push(GroupFrame {
            key: root_key,
            start: root,
            end: root_end,
            child_reuse: ChildReusePolicy::Normal,
            fresh_body: false,
            gap_boundary_key: root_key,
        });

        let inserted = table.start(target_key);

        // The only matching group is a grandchild; sibling search must insert
        // fresh at the cursor (scope None) instead of stealing it.
        assert_eq!(inserted, container);
        assert_eq!(
            table.get_group_scope(inserted),
            None,
            "search should not steal a nested descendant group",
        );
    }
2594
    #[test]
    fn explicit_keys_relocate_matching_sibling_groups() {
        let mut table = SlotTable::new();
        let root_key = crate::hash_key(&"root");
        let target_key = crate::hash_key(&"target");
        let other_key = crate::hash_key(&"other");

        // Compose root > [other, target] as siblings.
        table.reset();
        let root = table.start(root_key);
        table.set_group_scope(root, 1);

        let first = table.start(other_key);
        table.set_group_scope(first, 10);
        table.use_value_slot(|| String::from("other"));
        table.end();

        let later = table.start(target_key);
        table.set_group_scope(later, 22);
        table.use_value_slot(|| String::from("later"));
        table.end();

        table.end();
        table.flush_anchors_if_dirty();

        let root_end = match table.slots[root] {
            Slot::Group { len, .. } => root + super::unpack_slot_len(len),
            _ => panic!("root should remain a group"),
        };

        // Hand-build the root frame with the cursor on the first sibling,
        // then start target_key ahead of its previous position.
        table.cursor = first;
        table.group_stack.push(GroupFrame {
            key: root_key,
            start: root,
            end: root_end,
            child_reuse: ChildReusePolicy::Normal,
            fresh_body: false,
            gap_boundary_key: root_key,
        });

        let relocated = table.start(target_key);

        // The explicitly keyed sibling is relocated to the cursor position
        // and keeps its scope.
        assert_eq!(relocated, first);
        assert_eq!(
            table.get_group_scope(relocated),
            Some(22),
            "explicit keyed groups should preserve scope identity when reordered",
        );
    }
2643
    #[test]
    fn fresh_parent_does_not_restore_matching_child_from_replaced_subtree() {
        let mut table = SlotTable::new();
        let root_key = crate::hash_key(&"root");
        let old_parent_key = crate::hash_key(&"old-parent");
        let new_parent_key = crate::hash_key(&"new-parent");
        let shared_child_key = crate::hash_key(&"shared-child");

        // First composition: root > old-parent > shared-child.
        table.reset();
        let root = table.start(root_key);
        table.set_group_scope(root, 1);

        let old_parent = table.start(old_parent_key);
        table.set_group_scope(old_parent, 10);

        let old_child = table.start(shared_child_key);
        table.set_group_scope(old_child, 20);
        table.use_value_slot(|| String::from("old-child"));
        table.end();

        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        // Second composition replaces old-parent with new-parent; the child
        // key matches the replaced subtree's child but must not revive it.
        table.reset();
        let reused_root = table.start(root_key);
        assert_eq!(reused_root, root);

        let fresh_parent = table.start(new_parent_key);
        assert_eq!(fresh_parent, old_parent);

        let fresh_child = table.start(shared_child_key);
        let fresh_value = table.use_value_slot(|| String::from("fresh-child"));

        // Slot POSITIONS are reused, but scope and remembered values are not.
        assert_eq!(fresh_child, old_child);
        assert_eq!(
            table.get_group_scope(fresh_child),
            None,
            "a fresh parent must not restore a descendant from the subtree it replaced",
        );
        assert_eq!(
            fresh_value,
            old_child + 1,
            "fresh child should reuse the slot position but not the previous descendant value slot",
        );
        assert_eq!(
            table.read_value::<String>(fresh_value),
            "fresh-child",
            "fresh child must not inherit the replaced subtree's remembered values",
        );
    }
2695
    #[test]
    fn end_recompose_marks_shrunk_group_tail_as_gaps_for_compaction() {
        let mut table = SlotTable::new();

        // Initial composition: one group holding three value slots.
        table.reset();
        let group = table.start(100);
        table.set_group_scope(group, 7);
        let scope_slot = table.use_value_slot(|| String::from("scope"));
        let kept_slot = table.use_value_slot(|| String::from("keep"));
        let dropped_slot = table.use_value_slot(|| String::from("drop"));
        table.end();
        table.flush_anchors_if_dirty();

        assert_eq!(table.read_value::<String>(scope_slot), "scope");
        assert_eq!(table.read_value::<String>(kept_slot), "keep");
        assert_eq!(table.read_value::<String>(dropped_slot), "drop");

        // Recompose the group claiming only the "keep" slot; the trailing
        // "drop" slot is no longer reached before end_recompose().
        table.reset();
        let started = table
            .start_recranpose_at_anchor(table.group_anchor_at(group), 7)
            .expect("recompose scope should be found");
        assert_eq!(started, group);
        let reused_kept_slot = table.use_value_slot(|| String::from("keep"));
        assert_eq!(reused_kept_slot, kept_slot);
        table.end_recompose();

        assert!(
            table.needs_compact,
            "shrinking recompose should mark the old tail as gaps"
        );

        table.compact();

        // Only the group header and the two surviving values remain.
        assert_eq!(table.slots.len(), 3);
        assert_eq!(table.read_value::<String>(scope_slot), "scope");
        assert_eq!(table.read_value::<String>(kept_slot), "keep");
    }
2733
    #[test]
    fn marking_existing_nested_gap_preserves_child_group_metadata() {
        let mut table = SlotTable::new();

        // Compose root > parent > child(one value slot).
        table.reset();
        let root = table.start(1);
        table.set_group_scope(root, 1);

        let parent = table.start(2);
        table.set_group_scope(parent, 2);

        let child = table.start(3);
        table.set_group_scope(child, 3);
        table.use_value_slot(|| String::from("child"));
        table.end();

        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        let parent_end = match table.slots[parent] {
            Slot::Group { len, .. } => parent + super::unpack_slot_len(len),
            _ => panic!("parent should remain a group"),
        };

        // Gap the parent subtree twice; the second call exercises re-marking
        // a range that is already gaps.
        assert!(
            table.mark_range_as_gaps(parent, parent_end, Some(root)),
            "initial mark should convert the subtree to gaps",
        );
        assert!(
            table.mark_range_as_gaps(parent, parent_end, Some(root)),
            "re-marking an existing gap subtree should still preserve child metadata",
        );

        // Parent gap metadata: key 2, scope 2, group length 3
        // (parent header + child header + child value).
        assert!(
            matches!(table.slots[parent], Slot::Gap { .. }),
            "parent should be a preserved gap",
        );
        let parent_metadata = table
            .gap_metadata_at(parent)
            .expect("parent gap metadata should be preserved");
        assert_eq!(parent_metadata.group_key, Some(2));
        assert_eq!(parent_metadata.group_scope.to_option(), Some(2));
        assert_eq!(super::unpack_slot_len(parent_metadata.group_len), 3);

        // Child gap metadata must survive the repeated ancestor marking too.
        assert!(
            matches!(table.slots[child], Slot::Gap { .. }),
            "child should be a preserved gap",
        );
        let child_metadata = table
            .gap_metadata_at(child)
            .expect("child gap metadata should be preserved");
        assert_eq!(
            child_metadata.group_key,
            Some(3),
            "child gap metadata must survive repeated ancestor gap marking",
        );
        assert_eq!(child_metadata.group_scope.to_option(), Some(3));
        assert_eq!(super::unpack_slot_len(child_metadata.group_len), 2);
    }
2794
    #[test]
    fn start_recranpose_at_anchor_rejects_mismatched_scope_owner() {
        // NOTE(review): "recranpose" looks like a typo for "recompose" in the
        // method name defined elsewhere in this module; kept as-is to match.
        let mut table = SlotTable::new();
        let root_key = crate::hash_key(&"root");
        let group_key = crate::hash_key(&"group");

        // First composition: the group is owned by scope 10.
        table.reset();
        let root = table.start(root_key);
        table.set_group_scope(root, 1);

        let group = table.start(group_key);
        table.set_group_scope(group, 10);
        table.use_value_slot(|| String::from("first"));
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        let anchor = table.group_anchor_at(group);

        // Second composition reuses the same slot but hands ownership of the
        // group to scope 20.
        table.reset();
        let reused_root = table.start(root_key);
        assert_eq!(reused_root, root);
        let reused_group = table.start(group_key);
        assert_eq!(reused_group, group);
        table.set_group_scope(reused_group, 20);
        table.use_value_slot(|| String::from("second"));
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        // The stale owner (10) is rejected; the current owner (20) gets in.
        table.reset();
        assert_eq!(
            table.start_recranpose_at_anchor(anchor, 10),
            None,
            "a stale scope must not enter a live group that now belongs to a different owner",
        );
        assert_eq!(
            table.start_recranpose_at_anchor(anchor, 20),
            Some(group),
            "the current owner should still enter the group through its anchor",
        );
    }
2837
    #[test]
    fn gapped_group_preserves_child_value_slots_for_restore() {
        let mut table = SlotTable::new();

        // Compose root > group > value("persist").
        table.reset();
        let root = table.start(1);
        let group = table.start(2);
        let value_slot = table.use_value_slot(|| String::from("persist"));
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        let group_end = match table.slots[group] {
            Slot::Group { len, .. } => group + super::unpack_slot_len(len),
            _ => panic!("group should remain a group"),
        };

        // Hide the group behind a gap; its child value slot must stay intact.
        assert!(
            table.mark_range_as_gaps(group, group_end, Some(root)),
            "group should become hidden behind a gap",
        );
        assert!(
            matches!(table.slots[value_slot], Slot::Value { .. }),
            "group child value slots must survive while the parent group is hidden",
        );

        // Recomposing the same keys restores the group; the remembered value
        // wins over the fresh initializer.
        table.reset();
        let reused_root = table.start(1);
        assert_eq!(reused_root, root);
        let restored_group = table.start(2);
        assert_eq!(restored_group, group);
        let restored_slot = table.use_value_slot(|| String::from("new"));
        assert_eq!(restored_slot, value_slot);
        assert_eq!(table.read_value::<String>(restored_slot), "persist");
    }
2873
    #[test]
    fn compaction_drops_preserved_child_values_inside_hidden_gap_group() {
        let mut table = SlotTable::new();

        // Compose root > group > value("persist"); the subtree is about to be
        // discarded, so no anchor flush follows.
        table.reset();
        let root = table.start(1);
        let group = table.start(2);
        let _value_slot = table.use_value_slot(|| String::from("persist"));
        table.end();
        table.end();

        let group_end = match table.slots[group] {
            Slot::Group { len, .. } => group + super::unpack_slot_len(len),
            _ => panic!("group should remain a group"),
        };

        // Hide the group, then compact WITHOUT restoring it: the hidden
        // block, including its preserved child value, must disappear.
        assert!(
            table.mark_range_as_gaps(group, group_end, Some(root)),
            "group should become hidden behind a gap",
        );
        table.compact();

        assert_eq!(
            table.slots.len(),
            1,
            "compaction should remove the entire hidden group block, including preserved child values",
        );
        // Only the root group header survives, shrunk to length 1.
        match table.slots[0] {
            Slot::Group { len, .. } => assert_eq!(super::unpack_slot_len(len), 1),
            _ => panic!("root group should remain after compaction"),
        }
    }
2906
    #[test]
    fn restored_hidden_group_can_be_gapped_again_before_compaction() {
        let mut table = SlotTable::new();

        // Compose root > group > value("persist").
        table.reset();
        let root = table.start(1);
        let group = table.start(2);
        let value_slot = table.use_value_slot(|| String::from("persist"));
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        let group_anchor = table.group_anchor_at(group);
        let group_end = match table.slots[group] {
            Slot::Group { len, .. } => group + super::unpack_slot_len(len),
            _ => panic!("group should remain a group"),
        };

        // Hide the group once.
        assert!(
            table.mark_range_as_gaps(group, group_end, Some(root)),
            "group should become hidden behind a gap",
        );

        // Restore it by recomposing the same keys; the remembered value and
        // the original anchor both survive the round trip.
        table.reset();
        let reused_root = table.start(1);
        assert_eq!(reused_root, root);
        let restored_group = table.start(2);
        assert_eq!(restored_group, group);
        let restored_slot = table.use_value_slot(|| String::from("new"));
        assert_eq!(restored_slot, value_slot);
        assert_eq!(table.read_value::<String>(restored_slot), "persist");
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        assert_eq!(
            table.resolve_anchor(group_anchor),
            Some(group),
            "restored group should continue to own the original anchor before being hidden again",
        );

        // Re-read the length in case restoration changed the group's extent.
        let restored_group_end = match table.slots[group] {
            Slot::Group { len, .. } => group + super::unpack_slot_len(len),
            _ => panic!("restored group should remain live before re-gapping"),
        };

        // Hide the restored group again, this time compacting for real.
        assert!(
            table.mark_range_as_gaps(group, restored_group_end, Some(root)),
            "restored group should be able to hide behind a gap again immediately",
        );

        table.compact();

        assert_eq!(
            table.slots.len(),
            1,
            "compaction should remove the re-gapped restored group and its child values",
        );
        assert_eq!(
            table.resolve_anchor(group_anchor),
            None,
            "re-gapped group anchor should be released once compaction removes the hidden group",
        );
    }
2971
2972 #[test]
2973 fn orphaned_node_state_reports_preserved_gap_nodes() {
2974 let mut table = SlotTable::new();
2975 let anchor = AnchorId(1);
2976 table.slots.push(Slot::Gap { anchor });
2977 table.register_anchor(anchor, 0);
2978 table.set_gap_metadata(
2979 anchor,
2980 GapMetadata {
2981 preserved_node: Some((42, 7)),
2982 ..GapMetadata::default()
2983 },
2984 );
2985
2986 assert_eq!(
2987 table.orphaned_node_state(OrphanedNode::new(42, 7, anchor)),
2988 NodeSlotState::PreservedGap
2989 );
2990 }
2991
2992 #[test]
2993 fn orphaned_node_state_reports_restored_active_node() {
2994 let mut table = SlotTable::new();
2995 let anchor = AnchorId(1);
2996 table.slots.push(Slot::Gap { anchor });
2997 table.register_anchor(anchor, 0);
2998 table.set_gap_metadata(
2999 anchor,
3000 GapMetadata {
3001 preserved_node: Some((42, 7)),
3002 ..GapMetadata::default()
3003 },
3004 );
3005 table.set_slot_tracked(
3006 0,
3007 Slot::Node {
3008 anchor,
3009 id: 42,
3010 gen: 7,
3011 },
3012 );
3013
3014 assert_eq!(
3015 table.orphaned_node_state(OrphanedNode::new(42, 7, anchor)),
3016 NodeSlotState::Active
3017 );
3018 }
3019
3020 #[test]
3021 fn orphaned_node_state_reports_missing_after_anchor_is_gone() {
3022 let mut table = SlotTable::new();
3023 let anchor = AnchorId(1);
3024 table.slots.push(Slot::Node {
3025 anchor,
3026 id: 42,
3027 gen: 7,
3028 });
3029 table.register_anchor(anchor, 0);
3030 table.free_anchor(anchor);
3031
3032 assert_eq!(
3033 table.orphaned_node_state(OrphanedNode::new(42, 7, anchor)),
3034 NodeSlotState::Missing
3035 );
3036 }
3037
    #[test]
    fn exact_live_group_reuses_even_when_parent_restore_restricts_gap_search() {
        let mut table = SlotTable::new();

        // Compose root > child > grandchild with scopes 2 and 3.
        table.reset();
        let root = table.start(1);
        let child = table.start(2);
        table.set_group_scope(child, 2);
        let grandchild = table.start(3);
        table.set_group_scope(grandchild, 3);
        table.end();
        table.end();
        table.end();
        table.flush_anchors_if_dirty();

        // Simulate a parent that was just restored from a gap (restricted
        // child reuse), with the cursor exactly on a live matching group.
        table.reset();
        table.cursor = child;
        table.group_stack.push(GroupFrame {
            key: 1,
            start: root,
            end: grandchild + 1,
            child_reuse: ChildReusePolicy::ParentRestoredFromGap,
            fresh_body: true,
            gap_boundary_key: 1,
        });

        let reused_child = table.start(2);
        assert_eq!(
            reused_child, child,
            "restored parents must still reuse an exact live child group at the cursor",
        );
        assert_eq!(
            table.get_group_scope(reused_child),
            Some(2),
            "exact live group reuse must preserve the child scope",
        );

        // Once the child is reused exactly, its own descendants line up too.
        let reused_grandchild = table.start(3);
        assert_eq!(
            reused_grandchild, grandchild,
            "once the exact live child group is reused, its live descendants should stay aligned",
        );
        assert_eq!(table.get_group_scope(reused_grandchild), Some(3));
    }
3082
    #[test]
    fn exact_live_node_slot_reuses_even_when_parent_restore_restricts_other_live_slots() {
        let mut table = SlotTable::new();

        // Compose a root group containing one node slot.
        table.reset();
        let root = table.start(1);
        let node_slot = table.cursor;
        table.record_node(42, 7);
        table.end();
        table.flush_anchors_if_dirty();

        // Simulate a parent restored from a gap, cursor exactly on the node.
        table.reset();
        table.cursor = node_slot;
        table.group_stack.push(GroupFrame {
            key: 1,
            start: root,
            end: node_slot + 1,
            child_reuse: ChildReusePolicy::ParentRestoredFromGap,
            fresh_body: true,
            gap_boundary_key: 1,
        });

        assert_eq!(
            table.peek_node(),
            Some((42, 7)),
            "restored parents must still expose an exact live node slot at the cursor",
        );

        table.record_node(42, 7);

        // Re-recording the same node advances the cursor past the reused slot.
        assert_eq!(
            table.cursor,
            node_slot + 1,
            "recording the same node should reuse the existing slot instead of forcing a gap",
        );
        match table.slots.get(node_slot) {
            Some(Slot::Node { id, gen, .. }) => assert_eq!((*id, *gen), (42, 7)),
            _ => panic!("node slot should stay live after exact reuse"),
        }
    }
3123
    #[test]
    fn exact_live_value_slot_reuses_when_parent_is_restored_from_gap() {
        let mut table = SlotTable::new();

        // Compose a root group containing one remembered value.
        table.reset();
        let root = table.start(1);
        let value_slot = table.use_value_slot(|| String::from("persist"));
        table.end();
        table.flush_anchors_if_dirty();

        // Simulate a parent restored from a gap, cursor exactly on the value.
        table.reset();
        table.cursor = value_slot;
        table.group_stack.push(GroupFrame {
            key: 1,
            start: root,
            end: value_slot + 1,
            child_reuse: ChildReusePolicy::ParentRestoredFromGap,
            fresh_body: true,
            gap_boundary_key: 1,
        });

        // The remembered value wins over the fresh initializer.
        let reused_slot = table.use_value_slot(|| String::from("fresh"));
        assert_eq!(
            reused_slot, value_slot,
            "restored parents must reuse an exact live value slot at the cursor",
        );
        assert_eq!(
            table.read_value::<String>(reused_slot),
            "persist",
            "restored parents must keep their remembered value instead of recreating it",
        );
    }
3156
3157 #[test]
3158 fn marking_scope_slot_as_gap_deactivates_scope() {
3159 let runtime = crate::runtime::TestRuntime::new();
3160 let scope = RecomposeScope::new(runtime.handle());
3161 let mut table = SlotTable::new();
3162
3163 table.reset();
3164 let group = table.start(1);
3165 let _scope_slot = table.use_value_slot(|| scope.clone());
3166 table.end();
3167
3168 assert!(scope.is_active(), "scope should start active");
3169
3170 table.mark_range_as_gaps(group, group + 2, None);
3171
3172 assert!(
3173 !scope.is_active(),
3174 "scope stored in a slot must deactivate once that slot is converted into a gap"
3175 );
3176 }
3177}