// mtb_entity_slab/container/order_cached_list.rs
1//! Order-cached linked-list container for entity allocation. Maintains an optional
2//! “order cache” on each node.
3//!
//! ## Order representation
5//!
6//! The cache can be treated as either an exact position index (`OrderRepr::Exact`)
7//!  or a relative ordering hint (`OrderRepr::Relative`). The cache is only trusted
8//!  while the list considers it valid; once invalid, it will be rebuilt on demand.
9//!
10//! - **Exact mode:** the cache stays valid only when inserting at the end or
11//!   removing the last element; any other insertion/removal invalidates it.
12//!
//! - **Relative mode:** removals never invalidate the cache; insertions anywhere
//!   other than the end of the list still do. The cached values are not required
//!   to be a contiguous index, only increasing along the list.
15//!
16//! The representation is bound to the node type via `IOrderCachedListNodeID::ORDER_REPR`.
17//! It is a compile-time constant: you cannot change it at runtime, and you cannot create
18//! two `OrderCachedList`s with different `OrderRepr` for the same node type.
19
20use crate::{
21    EntityListError, EntityListIter, EntityListNodeHead, EntityListRange, EntityListRangeDebug,
22    EntityListRes, IBasicEntityListID, IDBoundAlloc, container::list_base::NodeOps,
23};
24use std::cell::Cell;
25
/// Representation of order cache in an order-cached list. This will
/// determine when the order cache is invalidated.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OrderRepr {
    /// The order exactly represents the position in the list. e.g.
    ///
    /// ```text
    /// List: (Head = 0) <-> A(1) <-> B(2) <-> C(3) <-> (Tail = usize::MAX)
    /// ```
    ///
    /// Invalidates the order cache on any insertion or removal except
    /// when inserting at the end or removing the last element.
    Exact,

    /// The order represents a relative position in the list, but not
    /// necessarily the exact index. e.g.
    ///
    /// ```text
    /// List: (Head = 0) <-> A(1) <-> B(3) <-> C(4) <-> (Tail = usize::MAX)
    /// ```
    ///
    /// Removals keep the order cache valid; insertions anywhere other than
    /// the end of the list still invalidate it (see `OrderCachedList::node_add_next`
    /// and `OrderCachedList::node_add_prev`).
    Relative,
}
48
/// An entity pointer list node that supports order-cached lists.
///
/// Implementors pick the cache-invalidation policy once, at compile time,
/// via [`Self::ORDER_REPR`], and provide storage for the cached order on
/// the entity object itself.
pub trait IOrderCachedListNodeID: IBasicEntityListID {
    /// Representation type of the order cache. Compile-time constant; it
    /// cannot differ between two lists of the same node type.
    const ORDER_REPR: OrderRepr;

    /// Load the order from the entity object.
    fn obj_load_order(obj: &Self::ObjectT) -> usize;

    /// Store the order into the entity object.
    fn obj_store_order(obj: &Self::ObjectT, order: usize);
}
60
/// Order-cached doubly-linked list.
pub struct OrderCachedList<I: IOrderCachedListNodeID> {
    /// Head sentinel node ID. Order is always `0` and does not represent a valid item.
    pub head: I,
    /// Tail sentinel node ID. Order is always `usize::MAX` and does not represent a valid item.
    pub tail: I,
    /// Combined length and order validity flag: the most-significant bit is
    /// the "order cache valid" flag, the remaining low bits are the length.
    len_flag: Cell<usize>,
}
70
71impl<I: IOrderCachedListNodeID> OrderCachedList<I> {
72    const VALID_BITMASK: usize = 1 << (usize::BITS - 1);
73    const LEN_MASK: usize = !Self::VALID_BITMASK;
74
75    /// Order value of the head sentinel.
76    pub const HEAD_ORDER: usize = 0;
77    /// Order value of the tail sentinel.
78    pub const TAIL_ORDER: usize = usize::MAX;
79    /// Maximum length supported by the list: `2^(usize::BITS - 1) - 1`.
80    pub const MAX_LEN: usize = Self::LEN_MASK;
81
82    /// Create a new empty order-cached list with given head and tail sentinels.
83    pub fn new(alloc: &IDBoundAlloc<I>) -> Self {
84        let head = I::new_sentinel(alloc);
85        let tail = I::new_sentinel(alloc);
86        head.set_next_id(alloc, Some(tail));
87        tail.set_prev_id(alloc, Some(head));
88        I::obj_store_order(head.deref_alloc(alloc), Self::HEAD_ORDER);
89        I::obj_store_order(tail.deref_alloc(alloc), Self::TAIL_ORDER);
90        Self {
91            head,
92            tail,
93            len_flag: Cell::new(0),
94        }
95    }
96
97    /// Get the length of the list.
98    pub fn len(&self) -> usize {
99        self.len_flag.get() & Self::LEN_MASK
100    }
101    /// Check if the list is empty.
102    pub fn is_empty(&self) -> bool {
103        self.len() == 0
104    }
105    /// try to get the front node ID, or None if the list is empty
106    #[inline]
107    pub fn get_front_id(&self, alloc: &IDBoundAlloc<I>) -> Option<I> {
108        let next = self.head.get_next_id(alloc)?;
109        if next == self.tail { None } else { Some(next) }
110    }
111    /// try to get the back node ID, or None if the list is empty
112    #[inline]
113    pub fn get_back_id(&self, alloc: &IDBoundAlloc<I>) -> Option<I> {
114        let prev = self.tail.get_prev_id(alloc)?;
115        if prev == self.head { None } else { Some(prev) }
116    }
117
118    /// Check whether the order cache is valid.
119    pub fn order_valid(&self) -> bool {
120        (self.len_flag.get() & Self::VALID_BITMASK) != 0
121    }
122
123    /// Invalidate the order cache.
124    pub fn invalidate_order(&self) {
125        self.len_flag.set(self.len_flag.get() & Self::LEN_MASK);
126    }
127
128    /// Rebuild the order cache regardless of its current validity.
129    pub fn rebuild_order(&self, alloc: &IDBoundAlloc<I>) {
130        let mut order = 1;
131        let mut curr_opt = self.head.get_next_id(alloc);
132        while let Some(curr) = curr_opt {
133            if curr == self.tail {
134                break;
135            }
136            I::obj_store_order(curr.deref_alloc(alloc), order);
137            order += 1;
138            curr_opt = curr.get_next_id(alloc);
139        }
140        // Now enable the valid flag
141        self.len_flag.set(self.len_flag.get() | Self::VALID_BITMASK);
142    }
143
144    /// Get the order of a node. If the order cache is invalid, it will be rebuilt first.
145    pub fn get_node_order(&self, alloc: &IDBoundAlloc<I>, node: I) -> usize {
146        if !self.order_valid() {
147            self.rebuild_order(alloc);
148        }
149        I::obj_load_order(node.deref_alloc(alloc))
150    }
151
152    /// Check whether node `a` comes before node `b` in the list.
153    pub fn node_comes_before(&self, alloc: &IDBoundAlloc<I>, a: I, b: I) -> bool {
154        let order_a = self.get_node_order(alloc, a);
155        let order_b = self.get_node_order(alloc, b);
156        order_a < order_b
157    }
158    /// Check whether node `a` comes after node `b` in the list.
159    pub fn node_comes_after(&self, alloc: &IDBoundAlloc<I>, a: I, b: I) -> bool {
160        let order_a = self.get_node_order(alloc, a);
161        let order_b = self.get_node_order(alloc, b);
162        order_a > order_b
163    }
164
165    /// Get the range covering all nodes in the order-cached list (excluding sentinels).
166    pub fn get_range(&self, alloc: &IDBoundAlloc<I>) -> EntityListRange<I> {
167        let front = self
168            .head
169            .get_next_id(alloc)
170            .expect("Broken order-cached list detected in get_range()");
171        EntityListRange {
172            start: front,
173            end: Some(self.tail),
174        }
175    }
176    /// Get the range covering all nodes including sentinels.
177    pub fn get_range_with_sentinels(&self) -> EntityListRange<I> {
178        EntityListRange {
179            start: self.head,
180            end: None,
181        }
182    }
183    /// Create a debug view for this range.
184    pub fn debug<'a>(&self, alloc: &'a IDBoundAlloc<I>) -> EntityListRangeDebug<'a, I> {
185        self.get_range(alloc).debug(alloc)
186    }
187    /// Create an iterator over the nodes in the order-cached list.
188    pub fn iter<'a>(&self, alloc: &'a IDBoundAlloc<I>) -> EntityListIter<'a, I> {
189        self.get_range(alloc).iter(alloc)
190    }
191
192    /// Create an iterator over all nodes including sentinels.
193    pub fn iter_with_sentinels<'a>(&self, alloc: &'a IDBoundAlloc<I>) -> EntityListIter<'a, I> {
194        self.get_range_with_sentinels().iter(alloc)
195    }
196
197    /// Apply a function to all nodes in the list (including sentinels).
198    pub fn forall_with_sentinel(
199        &self,
200        alloc: &IDBoundAlloc<I>,
201        mut f: impl FnMut(I, &I::ObjectT) -> EntityListRes<I>,
202    ) -> EntityListRes<I, ()> {
203        let mut curr = self.head;
204        loop {
205            f(curr, curr.deref_alloc(alloc))?;
206            if curr == self.tail {
207                break;
208            }
209            let next = curr.get_next_id(alloc).ok_or(EntityListError::ListBroken)?;
210            curr = next;
211        }
212        Ok(())
213    }
214
215    /// Unplug a node from the list.
216    ///
217    /// ### Signal invocation
218    ///
219    /// `node_unplug` will invoke `on_unplug` on `node` before modifying any links.
220    /// If the signal returns an error, the operation is aborted without modifying any links.
221    ///
222    /// ### Order maintaining
223    ///
224    /// If the order representation is `Exact`, the order cache will be invalidated unless the
225    /// unplugged node is the last node in the list. Otherwise, the order cache validity is left
226    /// unchanged.
227    pub fn node_unplug(&self, node: I, alloc: &IDBoundAlloc<I>) -> EntityListRes<I> {
228        if node == self.head || node == self.tail {
229            return Err(EntityListError::NodeIsSentinel);
230        }
231        node.on_unplug(alloc)?;
232        let node_obj = node.deref_alloc(alloc);
233
234        let Some(prev) = I::obj_get_prev_id(node_obj) else {
235            return Err(EntityListError::ListBroken);
236        };
237        let Some(next) = I::obj_get_next_id(node_obj) else {
238            return Err(EntityListError::ListBroken);
239        };
240
241        prev.set_next_id(alloc, Some(next));
242        next.set_prev_id(alloc, Some(prev));
243        I::obj_store_head(node_obj, EntityListNodeHead::none());
244        I::obj_store_order(node_obj, 0usize);
245
246        // Update order cache.
247        //
248        // When `Exact`: removing the last node keeps the cache valid; any other removal clears it.
249        // When `Relative`: just decrease the length part. `-1` does not affect the valid bit.
250        if I::ORDER_REPR == OrderRepr::Exact && next != self.tail {
251            self.len_flag.set(self.len() - 1);
252        } else {
253            self.len_flag.set(self.len_flag.get() - 1);
254        }
255        Ok(())
256    }
257
258    /// Add a new node after an existing node linked to `this` list.
259    ///
260    /// If the current node is linked to another list, then the operation is undefined.
261    /// You'll get a broken list without reporting an error.
262    ///
263    /// ### Signal invocation
264    ///
265    /// `node_add_next` will invoke `on_push_next` on `node` before modifying any links.
266    /// If the signal returns an error, the operation is aborted without modifying any links.
267    ///
268    /// ### Order maintaining
269    ///
270    /// The order cache will be invalidated unless the new node is added at the end of the list.
271    pub fn node_add_next(&self, node: I, new_node: I, alloc: &IDBoundAlloc<I>) -> EntityListRes<I> {
272        if node == new_node {
273            return Err(EntityListError::RepeatedNode);
274        }
275        if node == self.tail {
276            return Err(EntityListError::NodeIsSentinel);
277        }
278        node.on_push_next(new_node, alloc)?;
279
280        let node_obj = node.deref_alloc(alloc);
281        let new_node_obj = new_node.deref_alloc(alloc);
282
283        debug_assert!(
284            I::obj_is_attached(node_obj),
285            "Cannot add next to a detached node"
286        );
287        debug_assert!(
288            !I::obj_is_attached(new_node_obj),
289            "Cannot add a node that is already attached"
290        );
291
292        let Some(old_next) = I::obj_get_next_id(node_obj) else {
293            return Err(EntityListError::ListBroken);
294        };
295        I::obj_set_next_id(node_obj, Some(new_node));
296        I::obj_store_head(new_node_obj, EntityListNodeHead::from_id(node, old_next));
297        old_next.set_prev_id(alloc, Some(new_node));
298
299        debug_assert!(self.len() < Self::MAX_LEN, "List length overflow");
300        if old_next == self.tail && self.order_valid() {
301            I::obj_store_order(new_node_obj, I::obj_load_order(node_obj) + 1);
302            self.len_flag.set(self.len_flag.get() + 1);
303        } else {
304            // Invalidate order cache (use `len()` then `set` to clear valid bit)
305            self.len_flag.set(self.len() + 1);
306        }
307        Ok(())
308    }
309
310    /// Add a new node before an existing node linked to `this` list.
311    ///
312    /// If the current node is linked to another list, then the operation is undefined.
313    /// You'll get a broken list without reporting an error.
314    ///
315    /// ### Signal invocation
316    ///
317    /// `node_add_prev` will invoke `on_push_prev` on `node` before modifying any links.
318    /// If the signal returns an error, the operation is aborted without modifying any links.
319    ///
320    /// ### Order maintaining
321    ///
322    /// The order cache will be invalidated unless the `node` is the tail sentinel.
323    pub fn node_add_prev(&self, node: I, new_node: I, alloc: &IDBoundAlloc<I>) -> EntityListRes<I> {
324        if node == new_node {
325            return Err(EntityListError::RepeatedNode);
326        }
327        if node == self.head {
328            return Err(EntityListError::NodeIsSentinel);
329        }
330        node.on_push_prev(new_node, alloc)?;
331
332        let node_obj = node.deref_alloc(alloc);
333        let new_node_obj = new_node.deref_alloc(alloc);
334
335        debug_assert!(
336            I::obj_is_attached(node_obj),
337            "Cannot add prev to a detached node"
338        );
339        debug_assert!(
340            !I::obj_is_attached(new_node_obj),
341            "Cannot add a node that is already attached"
342        );
343        debug_assert!(self.len() < Self::MAX_LEN, "List length overflow");
344
345        let Some(old_prev) = I::obj_get_prev_id(node_obj) else {
346            return Err(EntityListError::ListBroken);
347        };
348        let old_prev_obj = old_prev.deref_alloc(alloc);
349
350        NodeOps::obj_set_prev_id(node_obj, Some(new_node));
351        I::obj_store_head(new_node_obj, EntityListNodeHead::from_id(old_prev, node));
352        NodeOps::obj_set_next_id(old_prev_obj, Some(new_node));
353
354        if node == self.tail && self.order_valid() {
355            let old_order = I::obj_load_order(old_prev_obj);
356            I::obj_store_order(new_node_obj, old_order + 1);
357            self.len_flag.set(self.len_flag.get() + 1);
358        } else {
359            // Invalidate order cache (use `len()` then `set` to clear valid bit)
360            self.len_flag.set(self.len() + 1);
361        }
362        Ok(())
363    }
364    /// Push a new node to the back of the list. Will not invalidate order cache if possible.
365    #[inline]
366    pub fn push_back_id(&self, new_node: I, alloc: &IDBoundAlloc<I>) -> EntityListRes<I> {
367        self.node_add_prev(self.tail, new_node, alloc)
368    }
369
370    /// Push a new node to the front of the list. Will invalidate order cache.
371    #[inline]
372    pub fn push_front_id(&self, new_node: I, alloc: &IDBoundAlloc<I>) -> EntityListRes<I> {
373        self.node_add_next(self.head, new_node, alloc)
374    }
375
376    /// Pop a node from the back of the list. Will not invalidate order cache if possible.
377    pub fn pop_back(&self, alloc: &IDBoundAlloc<I>) -> EntityListRes<I, I> {
378        let back_id = self.get_back_id(alloc).ok_or(EntityListError::EmptyList)?;
379        self.node_unplug(back_id, alloc)?;
380        Ok(back_id)
381    }
382
383    /// Pop a node from the front of the list. Will invalidate order cache if repr is `Exact`.
384    pub fn pop_front(&self, alloc: &IDBoundAlloc<I>) -> EntityListRes<I, I> {
385        let front_id = self.get_front_id(alloc).ok_or(EntityListError::EmptyList)?;
386        self.node_unplug(front_id, alloc)?;
387        Ok(front_id)
388    }
389}
390
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{IEntityAllocID, IPoliciedID, IndexedID, entity_id};

    // Test entity whose list uses the `Exact` order representation.
    #[derive(Debug, Clone)]
    #[entity_id(ExactInstID, policy = 256, backend = index)]
    struct ExactInst {
        head: Cell<EntityListNodeHead<ExactInstID>>,
        order: Cell<usize>,
        value: usize,
    }

    type ExactInstAlloc = IDBoundAlloc<ExactInstID>;

    impl ExactInst {
        // Build a detached instance carrying `value`.
        fn new(value: usize) -> Self {
            Self {
                head: Cell::new(EntityListNodeHead::none()),
                order: Cell::new(0),
                value,
            }
        }

        // Allocate an instance from `alloc` and return its ID.
        fn new_id(alloc: &IDBoundAlloc<ExactInstID>, value: usize) -> ExactInstID {
            ExactInstID::from_backend(IndexedID::allocate_from(alloc, Self::new(value)))
        }
    }

    impl IBasicEntityListID for ExactInstID {
        fn obj_load_head(obj: &Self::ObjectT) -> EntityListNodeHead<Self> {
            obj.head.get()
        }
        fn obj_store_head(obj: &Self::ObjectT, head: EntityListNodeHead<Self>) {
            obj.head.set(head);
        }

        // `value == usize::MAX` marks a sentinel in these tests.
        fn obj_is_sentinel(obj: &Self::ObjectT) -> bool {
            obj.value == usize::MAX
        }
        fn new_sentinel_obj() -> Self::ObjectT {
            ExactInst {
                head: Cell::new(EntityListNodeHead::none()),
                order: Cell::new(0),
                value: usize::MAX,
            }
        }

        // List signals are no-ops for the tests.
        fn on_push_prev(self, _: Self, _: &IDBoundAlloc<ExactInstID>) -> EntityListRes<Self> {
            Ok(())
        }
        fn on_push_next(self, _: Self, _: &IDBoundAlloc<ExactInstID>) -> EntityListRes<Self> {
            Ok(())
        }
        fn on_unplug(self, _: &IDBoundAlloc<ExactInstID>) -> EntityListRes<Self> {
            Ok(())
        }
    }

    impl IOrderCachedListNodeID for ExactInstID {
        const ORDER_REPR: OrderRepr = OrderRepr::Exact;

        fn obj_load_order(obj: &Self::ObjectT) -> usize {
            obj.order.get()
        }
        fn obj_store_order(obj: &Self::ObjectT, order: usize) {
            obj.order.set(order);
        }
    }

    // Test entity whose list uses the `Relative` order representation.
    #[derive(Debug, Clone)]
    #[entity_id(RelativeInstID, policy = 256, backend = index)]
    struct RelativeInst {
        head: Cell<EntityListNodeHead<RelativeInstID>>,
        order: Cell<usize>,
        value: usize,
    }

    type RelativeInstAlloc = IDBoundAlloc<RelativeInstID>;

    impl RelativeInst {
        // Build a detached instance carrying `value`.
        fn new(value: usize) -> Self {
            Self {
                head: Cell::new(EntityListNodeHead::none()),
                order: Cell::new(0),
                value,
            }
        }

        // Allocate an instance from `alloc` and return its ID.
        fn new_id(alloc: &IDBoundAlloc<RelativeInstID>, value: usize) -> RelativeInstID {
            type RelativeBackID = <RelativeInstID as IPoliciedID>::BackID;
            RelativeInstID::from_backend(RelativeBackID::allocate_from(alloc, Self::new(value)))
        }
    }

    impl IBasicEntityListID for RelativeInstID {
        fn obj_load_head(obj: &Self::ObjectT) -> EntityListNodeHead<Self> {
            obj.head.get()
        }
        fn obj_store_head(obj: &Self::ObjectT, head: EntityListNodeHead<Self>) {
            obj.head.set(head);
        }

        // `value == usize::MAX` marks a sentinel in these tests.
        fn obj_is_sentinel(obj: &Self::ObjectT) -> bool {
            obj.value == usize::MAX
        }
        fn new_sentinel_obj() -> Self::ObjectT {
            RelativeInst {
                head: Cell::new(EntityListNodeHead::none()),
                order: Cell::new(0),
                value: usize::MAX,
            }
        }

        // List signals are no-ops for the tests.
        fn on_push_prev(self, _: Self, _: &RelativeInstAlloc) -> EntityListRes<Self> {
            Ok(())
        }
        fn on_push_next(self, _: Self, _: &RelativeInstAlloc) -> EntityListRes<Self> {
            Ok(())
        }
        fn on_unplug(self, _: &RelativeInstAlloc) -> EntityListRes<Self> {
            Ok(())
        }
    }

    impl IOrderCachedListNodeID for RelativeInstID {
        const ORDER_REPR: OrderRepr = OrderRepr::Relative;

        fn obj_load_order(obj: &Self::ObjectT) -> usize {
            obj.order.get()
        }
        fn obj_store_order(obj: &Self::ObjectT, order: usize) {
            obj.order.set(order);
        }
    }

    // Assert that orders are exactly 1..=expected_len, in list order.
    fn assert_contiguous_orders<I: IOrderCachedListNodeID>(
        list: &OrderCachedList<I>,
        alloc: &IDBoundAlloc<I>,
        expected_len: usize,
    ) {
        let mut expected = 1;
        let mut count = 0;
        for (id, _) in list.iter(alloc) {
            let order = list.get_node_order(alloc, id);
            assert_eq!(order, expected, "Node order mismatch");
            expected += 1;
            count += 1;
        }
        assert_eq!(count, expected_len, "List length mismatch");
    }

    // Assert that orders strictly increase along the list (the weaker
    // guarantee the `Relative` representation provides after removals).
    fn assert_strictly_increasing_orders<I: IOrderCachedListNodeID>(
        list: &OrderCachedList<I>,
        alloc: &IDBoundAlloc<I>,
        expected_len: usize,
    ) {
        let mut last: Option<usize> = None;
        let mut count = 0;
        for (id, _) in list.iter(alloc) {
            let order = list.get_node_order(alloc, id);
            if let Some(prev) = last {
                assert!(order > prev, "Node order is not strictly increasing");
            }
            last = Some(order);
            count += 1;
        }
        assert_eq!(count, expected_len, "List length mismatch");
    }

    #[test]
    fn test_order_cached_list_exact_basic() {
        let alloc = ExactInstAlloc::new();
        let list = OrderCachedList::<ExactInstID>::new(&alloc);

        // Empty list behavior and error on popping from empty.
        assert!(list.is_empty());
        assert!(matches!(
            list.pop_back(&alloc),
            Err(EntityListError::EmptyList)
        ));

        // Push a few nodes and verify length is tracked.
        for i in 0..5 {
            let inst = ExactInst::new_id(&alloc, i);
            list.push_back_id(inst, &alloc).unwrap();
        }
        assert_eq!(list.len(), 5);

        // Rebuild cache and verify contiguous orders.
        list.rebuild_order(&alloc);
        assert!(list.order_valid());
        assert_contiguous_orders(&list, &alloc, 5);

        // Removing last keeps cache valid in Exact mode.
        list.pop_back(&alloc).unwrap();
        assert!(list.order_valid());
        assert_eq!(list.len(), 4);
        assert_contiguous_orders(&list, &alloc, 4);

        // Removing a middle node invalidates cache in Exact mode.
        let ids: Vec<_> = list.iter(&alloc).map(|(id, _)| id).collect();
        let mid = ids[1];
        list.node_unplug(mid, &alloc).unwrap();
        assert!(!list.order_valid());
        list.rebuild_order(&alloc);
        assert_contiguous_orders(&list, &alloc, 3);

        // Inserting not at end invalidates cache in Exact mode.
        let front = list.get_front_id(&alloc).unwrap();
        let new_id = ExactInst::new_id(&alloc, 99);
        list.node_add_next(front, new_id, &alloc).unwrap();
        assert!(!list.order_valid());
        list.rebuild_order(&alloc);
        assert_contiguous_orders(&list, &alloc, 4);

        // Sentinel misuse should return an error.
        let bad_new = ExactInst::new_id(&alloc, 100);
        assert!(matches!(
            list.node_add_next(list.tail, bad_new, &alloc),
            Err(EntityListError::NodeIsSentinel)
        ));
    }

    #[test]
    fn test_order_cached_list_relative_semantics() {
        let alloc = RelativeInstAlloc::new();
        let list = OrderCachedList::<RelativeInstID>::new(&alloc);

        // Initial push and rebuild gives contiguous orders.
        for i in 0..4 {
            let inst = RelativeInst::new_id(&alloc, i);
            list.push_back_id(inst, &alloc).unwrap();
        }
        list.rebuild_order(&alloc);
        assert!(list.order_valid());
        assert_contiguous_orders(&list, &alloc, 4);

        // Append keeps cache valid; orders remain contiguous here because the
        // appended node gets (old back order + 1).
        let new_id = RelativeInst::new_id(&alloc, 10);
        list.push_back_id(new_id, &alloc).unwrap();
        assert!(list.order_valid());
        assert_contiguous_orders(&list, &alloc, 5);

        // Middle removal keeps cache valid in Relative mode.
        let ids: Vec<_> = list.iter(&alloc).map(|(id, _)| id).collect();
        let mid = ids[2];
        list.node_unplug(mid, &alloc).unwrap();
        assert!(list.order_valid());
        assert_strictly_increasing_orders(&list, &alloc, 4);

        // Inserting not at end invalidates cache; rebuild restores contiguous orders.
        let front = list.get_front_id(&alloc).unwrap();
        let insert_id = RelativeInst::new_id(&alloc, 11);
        list.node_add_next(front, insert_id, &alloc).unwrap();
        assert!(!list.order_valid());
        list.rebuild_order(&alloc);
        assert_contiguous_orders(&list, &alloc, 5);
    }
}
650}