oxidd_manager_index/manager.rs

//! Index-Based Manager Implementation
//!
//! Abbreviations of generics:
//!
//! | Abbreviation | Meaning                           |
//! | ------------ | --------------------------------- |
//! | `N`          | Inner Node                        |
//! | `ET`         | Edge Tag                          |
//! | `TM`         | Terminal Manager                  |
//! | `R`          | Diagram Rules                     |
//! | `MD`         | Manager Data                      |
//! | `NC`         | Inner Node Type Constructor       |
//! | `TMC`        | Terminal Manager Type Constructor |
//! | `RC`         | Diagram Rules Type Constructor    |
//! | `OP`         | Operation                         |

use std::cell::{Cell, UnsafeCell};
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::iter::FusedIterator;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::{Acquire, Relaxed};
use std::sync::Arc;

use bitvec::vec::BitVec;
use crossbeam_utils::CachePadded;
use linear_hashtbl::raw::RawTable;
use parking_lot::{Condvar, Mutex, MutexGuard};
use rustc_hash::FxHasher;

use oxidd_core::function::EdgeOfFunc;
use oxidd_core::util::{AbortOnDrop, AllocResult, Borrowed, DropWith, GCContainer, OutOfMemory};
use oxidd_core::{DiagramRules, InnerNode, LevelNo, Tag};

use crate::node::NodeBase;
use crate::terminal_manager::TerminalManager;
use crate::util::{rwlock::RwLock, Invariant, TryLock};

// === Type Constructors =======================================================

/// Inner node type constructor
pub trait InnerNodeCons<ET: Tag> {
    type T<'id>: NodeBase + InnerNode<Edge<'id, Self::T<'id>, ET>> + Send + Sync;
}

/// Terminal manager type constructor
pub trait TerminalManagerCons<NC: InnerNodeCons<ET>, ET: Tag, const TERMINALS: usize>:
    Sized
{
    type TerminalNode: Send + Sync;
    type T<'id>: Send
        + Sync
        + TerminalManager<'id, NC::T<'id>, ET, TERMINALS, TerminalNode = Self::TerminalNode>;
}

/// Diagram rules type constructor
pub trait DiagramRulesCons<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    MDC: ManagerDataCons<NC, ET, TMC, Self, TERMINALS>,
    const TERMINALS: usize,
>: Sized
{
    type T<'id>: Send
        + Sync
        + DiagramRules<
            Edge<'id, NC::T<'id>, ET>,
            NC::T<'id>,
            <TMC::T<'id> as TerminalManager<'id, NC::T<'id>, ET, TERMINALS>>::TerminalNode,
        >;
}

/// Manager data type constructor
pub trait ManagerDataCons<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    RC: DiagramRulesCons<NC, ET, TMC, Self, TERMINALS>,
    const TERMINALS: usize,
>: Sized
{
    type T<'id>: Send
        + Sync
        + DropWith<Edge<'id, NC::T<'id>, ET>>
        + GCContainer<Manager<'id, NC::T<'id>, ET, TMC::T<'id>, RC::T<'id>, Self::T<'id>, TERMINALS>>;
}

// === Manager & Edges =========================================================

/// "Signals" used to communicate with the garbage collection thread
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum GCSignal {
    RunGc,
    Quit,
}

pub struct Store<'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    inner_nodes: Box<SlotSlice<'id, N, TERMINALS>>,
    manager: RwLock<Manager<'id, N, ET, TM, R, MD, TERMINALS>>,
    terminal_manager: TM,
    state: CachePadded<Mutex<SharedStoreState>>,
    gc_signal: (Mutex<GCSignal>, Condvar),
    workers: crate::workers::Workers,
}

unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Sync
    for Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
    ET: Tag + Send + Sync,
    TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
    MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
{
}

/// Size of a pre-allocation (number of nodes)
const CHUNK_SIZE: u32 = 64 * 1024;

/// State of automatic background garbage collection
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum GCState {
    /// Automatic garbage collection is disabled entirely
    Disabled,
    /// Initial state
    Init,
    /// Garbage collection has been triggered because the state was `Init` and
    /// the node count exceeded the high water mark
    ///
    /// The collecting thread sets this state back to `Init` if the node count
    /// reaches a value less than the low water mark.
    Triggered,
}
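
// For example (hypothetical values): with `gc_lwm == 100_000` and
// `gc_hwm == 1_000_000`, a garbage collection is triggered once the node count
// reaches one million, and the trigger is re-armed only after the count has
// dropped below 100,000 again.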

struct SharedStoreState {
    /// IDs of the next free slots
    ///
    /// Each element of the vector corresponds to a linked list of free slots.
    /// If a worker runs out of locally allocated free slots, it will first try
    /// to take a list of free slots from here. If this is not possible, a new
    /// chunk of uninitialized slots will be allocated. New entries to this
    /// vector are added during garbage collection. Each list should not be
    /// longer than `CHUNK_SIZE` (this is not a strict requirement, though).
    next_free: Vec<u32>,

    /// All slots in range `..allocated` are either initialized (a `node` or a
    /// `next_free` ID), or `uninit` and pre-allocated by a worker.
    ///
    /// Note that this is an index into the slice, so no `- TERMINALS` is needed
    /// to access the slot.
    allocated: u32,

    /// Eventually consistent count of inner nodes
    ///
    /// This is an `i64` and not a `u32` because of the following scenario:
    /// Worker A creates n nodes such that its `node_count_delta` becomes n - 1
    /// and the shared `node_count` is 1. At least two of the newly created
    /// nodes are solely referenced by a [`Function`]. An application thread
    /// drops these two functions. This will directly decrement the shared
    /// `node_count` by 1 twice (e.g., for n = 3, the shared count goes from 1
    /// to -1). If we used a `u32`, this would lead to an overflow.
    node_count: i64,

    /// Background garbage collection state (see [`GCState`])
    gc_state: GCState,
    /// Low water mark for background garbage collection (see [`GCState`])
    gc_lwm: u32,
    /// High water mark for background garbage collection (see [`GCState`])
    gc_hwm: u32,
}

#[repr(align(64))] // all fields on a single cache line
struct LocalStoreState {
    /// ID of the next free slot
    ///
    /// `0` means that there is no such slot so far. Either `initialized` can be
    /// incremented or we are out of memory. Since `0` is always a terminal (we
    /// require `TERMINALS >= 1`), there is no ambiguity.
    next_free: Cell<u32>,

    /// All slots in range `..initialized` are not `uninit`, i.e. either a
    /// `node` or a `next_free` ID. All slots until the next multiple of
    /// `CHUNK_SIZE` can be used by the local thread. So if
    /// `initialized % CHUNK_SIZE == 0` the worker needs to allocate a new chunk
    /// from the shared store state.
    ///
    /// Note that this is an index into the slice, so no `- TERMINALS` is needed
    /// to access the slot.
    initialized: Cell<u32>,

    /// Address of the associated `Store`
    ///
    /// Before using slots from this `LocalStoreState`, the implementation needs
    /// to ensure that the slots come from the respective `Store`.
    current_store: Cell<usize>,

    /// Local changes to the shared node counter
    ///
    /// In general, we synchronize with the shared counter when we request slots
    /// from the shared store state or when we return the `next_free` list. The
    /// latter happens once this counter reaches `-CHUNK_SIZE`.
    node_count_delta: Cell<i32>,
}

thread_local! {
    static LOCAL_STORE_STATE: LocalStoreState = const {
        LocalStoreState {
            next_free: Cell::new(0),
            initialized: Cell::new(0),
            current_store: Cell::new(0),
            node_count_delta: Cell::new(0),
        }
    };
}
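
// A minimal, self-contained sketch of the intrusive free-list scheme described
// above (a hypothetical simplification for illustration: plain `u32` slots
// instead of the `Slot<N>` union). Each free slot stores the ID of the next
// free slot; `0` terminates a list, which is unambiguous because IDs below
// `TERMINALS >= 1` always denote terminals.
#[cfg(test)]
mod free_list_sketch {
    #[test]
    fn lifo_reuse() {
        const TERMINALS: u32 = 1;
        // Slab of four slots; the slot at index `i` has ID `i + TERMINALS`.
        let mut slots = [0u32; 4];
        let mut head = 0u32; // empty free list

        // Free the slots with IDs 1, 2, 3 by prepending (like `free_slot`).
        for id in 1..=3u32 {
            slots[(id - TERMINALS) as usize] = head;
            head = id;
        }

        // Re-allocate (like `use_free_slot`): most recently freed slot first.
        let mut order = Vec::new();
        while head != 0 {
            let id = head;
            head = slots[(id - TERMINALS) as usize];
            order.push(id);
        }
        assert_eq!(order, [3, 2, 1]);
    }
}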

struct LocalStoreStateGuard<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>(
    &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
)
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>;

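/// A slot in the node store: either an initialized inner node (`node`), a link
/// in a free list (`next_free`), or uninitialized memory pre-allocated by a
/// worker (`uninit`).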
union Slot<N> {
    node: ManuallyDrop<N>,
    next_free: u32,
    #[allow(unused)]
    uninit: (),
}

#[repr(transparent)]
struct SlotSlice<'id, N, const TERMINALS: usize> {
    phantom: PhantomData<Invariant<'id>>,
    slots: [UnsafeCell<Slot<N>>],
}

/// Edge type, see [`oxidd_core::Edge`]
///
/// Internally, this is represented as a `u32` index.
#[repr(transparent)]
#[must_use]
pub struct Edge<'id, N, ET>(
    /// `node_index | edge_tag << (u32::BITS - Self::TAG_BITS)`
    u32,
    PhantomData<(Invariant<'id>, N, ET)>,
);

#[repr(C)]
pub struct Manager<'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    unique_table: Vec<Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>,
    data: ManuallyDrop<MD>,
    /// Pointer back to the store, obtained via [`Arc::as_ptr()`].
    ///
    /// Theoretically, we should be able to get the pointer from a `&Manager`
    /// reference, but this leads to provenance issues.
    store: *const Store<'id, N, ET, TM, R, MD, TERMINALS>,
    gc_count: AtomicU64,
    gc_ongoing: TryLock,
    reorder_count: u64,
}

/// Type "constructor" for the manager from `InnerNodeCons` etc.
type M<'id, NC, ET, TMC, RC, MDC, const TERMINALS: usize> = Manager<
    'id,
    <NC as InnerNodeCons<ET>>::T<'id>,
    ET,
    <TMC as TerminalManagerCons<NC, ET, TERMINALS>>::T<'id>,
    <RC as DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>>::T<'id>,
    <MDC as ManagerDataCons<NC, ET, TMC, RC, TERMINALS>>::T<'id>,
    TERMINALS,
>;

unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
        const TERMINALS: usize,
    > Send for Manager<'id, N, ET, TM, R, MD, TERMINALS>
{
}
unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
        const TERMINALS: usize,
    > Sync for Manager<'id, N, ET, TM, R, MD, TERMINALS>
{
}

// --- Edge Impls --------------------------------------------------------------

impl<N: NodeBase, ET: Tag> Edge<'_, N, ET> {
    const TAG_BITS: u32 = {
        let bits = usize::BITS - ET::MAX_VALUE.leading_zeros();
        assert!(bits <= 16, "Maximum value of edge tag is too large");
        bits
    };
    /// Shift amount to store/retrieve the tag from the most significant bits
    const TAG_SHIFT: u32 = (u32::BITS - Self::TAG_BITS) % 32;

    /// Mask corresponding to [`Self::TAG_BITS`]
    const TAG_MASK: u32 = ((1 << Self::TAG_BITS) - 1) << Self::TAG_SHIFT;

    /// SAFETY: The edge must be untagged.
    #[inline(always)]
    unsafe fn node_id_unchecked(&self) -> u32 {
        debug_assert_eq!(self.0 & Self::TAG_MASK, 0);
        self.0
    }

    /// Get the node ID, i.e. the edge value without any tags
    #[inline(always)]
    fn node_id(&self) -> usize {
        (self.0 & !Self::TAG_MASK) as usize
    }

    /// Returns `true` iff the edge's tag != 0
    #[inline(always)]
    fn is_tagged(&self) -> bool {
        self.0 & Self::TAG_MASK != 0
    }

    /// Get the raw representation of the edge (also called "edge value")
    #[inline(always)]
    pub fn raw(&self) -> u32 {
        self.0
    }

    /// Get an edge from a terminal ID
    ///
    /// # Safety
    ///
    /// `id` must be a terminal ID, i.e. `id < TERMINALS`, and the caller must
    /// update the reference count for the terminal accordingly.
    #[inline(always)]
    pub unsafe fn from_terminal_id(id: u32) -> Self {
        Self(id, PhantomData)
    }
}

impl<N, ET> PartialEq for Edge<'_, N, ET> {
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl<N, ET> Eq for Edge<'_, N, ET> {}

impl<N, ET> PartialOrd for Edge<'_, N, ET> {
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.0.cmp(&other.0))
    }
}

impl<N, ET> Ord for Edge<'_, N, ET> {
    #[inline(always)]
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

impl<N, ET> Hash for Edge<'_, N, ET> {
    #[inline(always)]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state);
    }
}

impl<N: NodeBase, ET: Tag> oxidd_core::Edge for Edge<'_, N, ET> {
    type Tag = ET;

    #[inline]
    fn borrowed(&self) -> Borrowed<Self> {
        Borrowed::new(Self(self.0, PhantomData))
    }

    #[inline]
    fn with_tag(&self, tag: Self::Tag) -> Borrowed<Self> {
        Borrowed::new(Self(
            (self.0 & !Self::TAG_MASK) | ((tag.as_usize() as u32) << Self::TAG_SHIFT),
            PhantomData,
        ))
    }

    #[inline]
    fn with_tag_owned(mut self, tag: Self::Tag) -> Self {
        self.0 = (self.0 & !Self::TAG_MASK) | ((tag.as_usize() as u32) << Self::TAG_SHIFT);
        self
    }

    #[inline]
    fn tag(&self) -> Self::Tag {
        ET::from_usize(((self.0 & Self::TAG_MASK) >> Self::TAG_SHIFT) as usize)
    }

    #[inline]
    fn node_id(&self) -> oxidd_core::NodeID {
        (self.0 & !Self::TAG_MASK) as oxidd_core::NodeID
    }
}
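
// A runnable sketch of the tag round-trip arithmetic implemented above, on
// plain `u32` values (standalone illustration; two tag bits assumed). For
// `TAG_BITS == 2`, `TAG_SHIFT` is 30 and `TAG_MASK` is `0xC000_0000`, so an
// edge to node 5 with tag 1 is stored as `5 | (1 << 30) == 0x4000_0005`.
#[cfg(test)]
mod tag_roundtrip_sketch {
    const TAG_BITS: u32 = 2;
    const TAG_SHIFT: u32 = u32::BITS - TAG_BITS;
    const TAG_MASK: u32 = ((1 << TAG_BITS) - 1) << TAG_SHIFT;

    #[test]
    fn roundtrip() {
        let (node_id, tag) = (5u32, 1u32);
        // `with_tag`: clear the old tag, then store the new one in the MSBs.
        let edge = (node_id & !TAG_MASK) | (tag << TAG_SHIFT);
        assert_eq!(edge, 0x4000_0005);
        // `node_id()` and `tag()` recover both components.
        assert_eq!(edge & !TAG_MASK, node_id);
        assert_eq!((edge & TAG_MASK) >> TAG_SHIFT, tag);
    }
}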

impl<N, ET> Drop for Edge<'_, N, ET> {
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        eprintln!(
            "`Edge`s must not be dropped. Use `Manager::drop_edge()`. Backtrace:\n{}",
            std::backtrace::Backtrace::capture()
        );

        #[cfg(feature = "static_leak_check")]
        {
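            // This call is unreachable as long as every `Edge` is passed to
            // `Manager::drop_edge()` instead of being dropped; if the optimizer
            // cannot prove that, linking fails with the message embedded in
            // `link_name` below.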
            extern "C" {
                #[link_name = "\n\n`Edge`s must not be dropped. Use `Manager::drop_edge()`.\n"]
                fn trigger() -> !;
            }
            unsafe { trigger() }
        }
    }
}

// --- SlotSlice Impl ----------------------------------------------------------

impl<'id, N: NodeBase, const TERMINALS: usize> SlotSlice<'id, N, TERMINALS> {
    /// Create a new slot slice for up to `capacity` nodes
    fn new_boxed(capacity: u32) -> Box<Self> {
        let mut vec: Vec<UnsafeCell<Slot<N>>> = Vec::with_capacity(capacity as usize);

        // SAFETY: The new length is equal to the capacity. All elements are
        // "initialized" as `Slot::uninit`.
        //
        // Clippy's `uninit_vec` lint is a bit too strict here. `Slot`s are
        // somewhat like `MaybeUninit`, but Clippy wants `MaybeUninit`.
        #[allow(clippy::uninit_vec)]
        unsafe {
            vec.set_len(capacity as usize)
        };

        let boxed = vec.into_boxed_slice();
        // SAFETY: `SlotSlice` has `repr(transparent)` and thus the same
        // representation as `[UnsafeCell<Slot<N>>]`.
        unsafe { std::mem::transmute(boxed) }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline]
    unsafe fn slot_pointer_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> *mut Slot<N> {
        // SAFETY: `edge` is untagged
        let id = unsafe { edge.node_id_unchecked() } as usize;
        debug_assert!(id >= TERMINALS, "`edge` must reference an inner node");
        debug_assert!(id - TERMINALS < self.slots.len());
        // SAFETY: Indices derived from edges pointing to inner nodes are always
        // in bounds.
        unsafe { self.slots.get_unchecked(id - TERMINALS).get() }
    }

    /// Panics if `edge` references a terminal node
    #[inline]
    fn inner_node<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> &N {
        let id = edge.node_id();
        assert!(id >= TERMINALS, "`edge` must reference an inner node");
        debug_assert!(id - TERMINALS < self.slots.len());
        // SAFETY:
        // - Indices derived from edges pointing to inner nodes are in bounds
        // - If an edge points to an inner node, the node's `Slot` is immutable and a
        //   properly initialized node
        unsafe { &(*self.slots.get_unchecked(id - TERMINALS).get()).node }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline(always)]
    unsafe fn inner_node_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> &N {
        // SAFETY: If an edge points to an inner node, the node's `Slot` is
        // immutable and a properly initialized node.
        unsafe { &(*self.slot_pointer_unchecked(edge)).node }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline(always)]
    unsafe fn clone_edge_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> Edge<'id, N, ET> {
        unsafe { self.inner_node_unchecked(edge) }.retain();
        Edge(edge.0, PhantomData)
    }
}

// --- Store Impls -------------------------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    const CHECK_TERMINALS: () = {
        assert!(
            usize::BITS >= u32::BITS,
            "This manager implementation assumes usize::BITS >= u32::BITS"
        );
        assert!(TERMINALS >= 1, "TERMINALS must be >= 1");
        assert!(
            TERMINALS <= u32::MAX as usize,
            "TERMINALS must fit into a u32"
        );
    };

    #[inline(always)]
    fn addr(&self) -> usize {
        // TODO: Use the respective strict provenance method once stable
        self as *const Self as usize
    }

    #[inline]
    fn prepare_local_state(
        &self,
    ) -> Option<LocalStoreStateGuard<'_, 'id, N, ET, TM, R, MD, TERMINALS>> {
        LOCAL_STORE_STATE.with(|state| {
            if state.current_store.get() == 0 {
                state.next_free.set(0);
                state.initialized.set(0);
                state.current_store.set(self.addr());
                debug_assert_eq!(state.node_count_delta.get(), 0);
                Some(LocalStoreStateGuard(self))
            } else {
                None
            }
        })
    }

    /// Add a node to the store. Does not perform any duplicate checks.
    ///
    /// Returns an [`OutOfMemory`] error if the store is full.
    #[inline]
    fn add_node(&self, node: N) -> AllocResult<[Edge<'id, N, ET>; 2]> {
        debug_assert_eq!(node.load_rc(Relaxed), 2);
        let res = LOCAL_STORE_STATE.with(|state| {
            let node_count_delta = if state.current_store.get() == self.addr() {
                let delta = state.node_count_delta.get() + 1;
                let id = state.next_free.get();
                if id != 0 {
                    // SAFETY: `id` is the ID of a free slot, we have exclusive access
                    let (next_free, slot) = unsafe { self.use_free_slot(id) };
                    state.next_free.set(next_free);
                    state.node_count_delta.set(delta);
                    return Ok((id, slot));
                }

                let index = state.initialized.get();
                if index % CHUNK_SIZE != 0 {
                    let slots = &self.inner_nodes.slots;
                    debug_assert!((index as usize) < slots.len());
                    // SAFETY: All slots in range
                    // `index..index.next_multiple_of(CHUNK_SIZE)` are
                    // uninitialized and pre-allocated, so we have exclusive
                    // access to them.
                    let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
                    state.initialized.set(index + 1);
                    state.node_count_delta.set(delta);
                    return Ok((index + TERMINALS as u32, slot));
                }

                state.node_count_delta.set(0);
                delta
            } else {
                1
            };

            self.get_slot_from_shared(node_count_delta)
        });
        match res {
            Ok((id, slot)) => {
                slot.node = ManuallyDrop::new(node);
                Ok([Edge(id, PhantomData), Edge(id, PhantomData)])
            }
            Err(OutOfMemory) => {
                node.drop_with(|e| self.drop_edge(e));
                Err(OutOfMemory)
            }
        }
    }

    /// Get the free slot with ID `id` and load the `next_free` ID
    ///
    /// SAFETY: `id` must be the ID of a free slot and the current thread must
    /// have exclusive access to it.
    #[inline(always)]
    unsafe fn use_free_slot(&self, id: u32) -> (u32, &mut Slot<N>) {
        debug_assert!(id as usize >= TERMINALS);
        let index = id as usize - TERMINALS;
        debug_assert!(index < self.inner_nodes.slots.len());
        // SAFETY: All next-free ids (>= TERMINALS) are valid, we have
        // exclusive access to the node.
        let slot = unsafe { &mut *self.inner_nodes.slots.get_unchecked(index).get() };
        // SAFETY: The slot is free.
        let next_free = unsafe { slot.next_free };
        (next_free, slot)
    }

    /// Get a slot from the shared state
    ///
    /// `delta` is added to the shared node count.
    ///
    /// Returns the ID of a free slot together with a mutable reference to it.
    #[cold]
    fn get_slot_from_shared(&self, delta: i32) -> AllocResult<(u32, &mut Slot<N>)> {
        LOCAL_STORE_STATE.with(|local| {
            let mut shared = self.state.lock();

            shared.node_count += delta as i64;
            if shared.gc_state == GCState::Init && shared.node_count >= shared.gc_hwm as i64 {
                shared.gc_state = GCState::Triggered;
                self.gc_signal.1.notify_one();
            }

            if local.current_store.get() == self.addr() {
                debug_assert_eq!(local.next_free.get(), 0);
                debug_assert_eq!(local.initialized.get() % CHUNK_SIZE, 0);

                if let Some(id) = shared.next_free.pop() {
                    // SAFETY: `id` is the ID of a free slot, we have exclusive access
                    let (next_free, slot) = unsafe { self.use_free_slot(id) };
                    local.next_free.set(next_free);
                    return Ok((id, slot));
                }

                let index = shared.allocated;
                let slots = &self.inner_nodes.slots;
                if (index as usize + CHUNK_SIZE as usize) < slots.len() {
                    shared.allocated = (index / CHUNK_SIZE + 1) * CHUNK_SIZE;
                    local.initialized.set(index + 1);
                } else if (index as usize) < slots.len() {
                    shared.allocated += 1;
                } else {
                    return Err(OutOfMemory);
                }
                // SAFETY: `index` is in bounds, the slot is uninitialized
                let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
                Ok((index + TERMINALS as u32, slot))
            } else {
                if let Some(id) = shared.next_free.pop() {
                    // SAFETY: `id` is the ID of a free slot, we have exclusive access
                    let (next_free, slot) = unsafe { self.use_free_slot(id) };
                    // Don't push an empty list: `0` would later be mistaken
                    // for a free slot ID.
                    if next_free != 0 {
                        shared.next_free.push(next_free);
                    }
                    return Ok((id, slot));
                }

                let index = shared.allocated;
                let slots = &self.inner_nodes.slots;
                if (index as usize) >= slots.len() {
                    return Err(OutOfMemory);
                }
                shared.allocated += 1;
                // SAFETY: `index` is in bounds, the slot is uninitialized
                let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
                Ok((index + TERMINALS as u32, slot))
            }
        })
    }

    /// Drop an edge that originates from the unique table.
    ///
    /// Edges from the unique table are untagged and point to inner nodes, so
    /// we need fewer case distinctions. Furthermore, we assume that the node's
    /// children are still present in the unique table (waiting for their
    /// revival or to be garbage collected), so we just decrement the children's
    /// reference counters. There is a debug assertion that checks this
    /// assumption.
    ///
    /// SAFETY: `edge` must be untagged and point to an inner node
    unsafe fn drop_unique_table_edge(&self, edge: Edge<'id, N, ET>) {
        // SAFETY (next 2): `edge` is untagged and points to an inner node
        let slot_ptr = unsafe { self.inner_nodes.slot_pointer_unchecked(&edge) };
        let id = unsafe { edge.node_id_unchecked() };
        std::mem::forget(edge);

        // SAFETY:
        // - `edge` points to an inner node
        // - We have shared access to nodes
        // - `edge` is forgotten.
        if unsafe { (*slot_ptr).node.release() } != 1 {
            return;
        }

        // We only have exclusive access to the other fields of node after the
        // fence. It synchronizes with the `NodeBase::release()` above (which is
        // guaranteed to have `Release` order).
        std::sync::atomic::fence(Acquire);

        // SAFETY: Now, we have exclusive access to the slot. It contains a
        // node. `id` is the ID of the slot.
        unsafe { self.free_slot(&mut *slot_ptr, id) };
    }

    /// Free `slot`, i.e. drop the node and add its ID (`id`) to the free list
    ///
    /// SAFETY: `slot` must contain a node. `id` is the ID of `slot`.
    #[inline]
    unsafe fn free_slot(&self, slot: &mut Slot<N>, id: u32) {
        // SAFETY: We don't use the node in `slot` again.
        unsafe { ManuallyDrop::take(&mut slot.node) }.drop_with(|edge| self.drop_edge(edge));

        LOCAL_STORE_STATE.with(|state| {
            if state.current_store.get() == self.addr() {
                slot.next_free = state.next_free.get();
                state.next_free.set(id);

                let delta = state.node_count_delta.get() - 1;
                if delta > -(CHUNK_SIZE as i32) {
                    state.node_count_delta.set(delta);
                } else {
                    let mut shared = self.state.lock();
                    shared.next_free.push(state.next_free.replace(0));
                    shared.node_count += state.node_count_delta.replace(0) as i64;
                }
            } else {
                #[cold]
                fn return_slot<N>(shared: &Mutex<SharedStoreState>, slot: &mut Slot<N>, id: u32) {
                    let mut shared = shared.lock();
                    slot.next_free = shared.next_free.pop().unwrap_or(0);
                    shared.next_free.push(id);
                    shared.node_count -= 1;
                }
                return_slot(&self.state, slot, id);
            }
        });
    }

    /// Drop an edge, assuming that it isn't the last one pointing to the
    /// referenced node
    ///
    /// This assumption is fulfilled in case the node is still stored in the
    /// unique table.
    ///
    /// There is a debug assertion that checks the aforementioned assumption. In
    /// release builds, this function will simply leak the node.
    #[inline]
    fn drop_edge(&self, edge: Edge<'id, N, ET>) {
        let id = edge.node_id();
        if id >= TERMINALS {
            // inner node
            let node = self.inner_nodes.inner_node(&edge);
            std::mem::forget(edge);
            // SAFETY: `edge` is forgotten
            let _old_rc = unsafe { node.release() };
            debug_assert!(_old_rc > 1);
        } else {
            std::mem::forget(edge);
            // SAFETY: `id` is a valid terminal ID
            unsafe { self.terminal_manager.release(id) };
        }
    }

    /// Clone `edge`
    ///
    /// `edge` may be tagged and point to an inner or a terminal node.
    #[inline]
    fn clone_edge(&self, edge: &Edge<'id, N, ET>) -> Edge<'id, N, ET> {
        let id = edge.node_id();
        if id >= TERMINALS {
            // inner node
            self.inner_nodes.inner_node(edge).retain();
        } else {
            // SAFETY: `id` is a valid terminal ID
            unsafe { self.terminal_manager.retain(id) };
        }
        Edge(edge.0, PhantomData)
    }
}
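
// A minimal standalone sketch of the release/acquire reference-counting
// pattern used in `drop_unique_table_edge` above (the same scheme `Arc` uses;
// the names here are illustrative only): the `Acquire` fence after the final
// `Release` decrement gives the last owner exclusive access.
#[cfg(test)]
mod rc_fence_sketch {
    use std::sync::atomic::Ordering::{Acquire, Release};
    use std::sync::atomic::{fence, AtomicU32};

    /// Returns `true` iff the caller released the last reference and thus has
    /// exclusive access afterwards
    fn release_last(rc: &AtomicU32) -> bool {
        if rc.fetch_sub(1, Release) != 1 {
            return false;
        }
        // Synchronizes with the `Release` decrements of all other owners.
        fence(Acquire);
        true
    }

    #[test]
    fn last_owner_wins() {
        let rc = AtomicU32::new(2);
        assert!(!release_last(&rc));
        assert!(release_last(&rc));
    }
}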

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop for Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    fn drop(&mut self) {
        let manager = self.manager.get_mut();
        // We don't care about reference counters from here on.
        // SAFETY: We don't use `manager.data` again.
        unsafe { ManuallyDrop::take(&mut manager.data) }.drop_with(std::mem::forget);

        if N::needs_drop() {
            let unique_table = std::mem::take(&mut manager.unique_table);
            for level in unique_table {
                for edge in level.into_inner() {
                    // SAFETY (next 2): `edge` is untagged and points to an
                    // inner node
                    let slot_ptr = unsafe { self.inner_nodes.slot_pointer_unchecked(&edge) };
                    std::mem::forget(edge);
                    // SAFETY: We have exclusive access to all nodes in the
                    // store. `edge` points to an inner node.
                    let node = unsafe { ManuallyDrop::take(&mut (*slot_ptr).node) };

                    // We don't care about reference counts anymore.
                    node.drop_with(std::mem::forget);
                }
            }
        }
    }
}

// --- LocalStoreStateGuard impl -----------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop
    for LocalStoreStateGuard<'_, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    #[inline]
    fn drop(&mut self) {
        #[cold]
        fn drop_slow<N>(
            slots: &[UnsafeCell<Slot<N>>],
            shared_state: &Mutex<SharedStoreState>,
            terminals: u32,
        ) {
            LOCAL_STORE_STATE.with(|local| {
                local.current_store.set(0);
                let start = local.initialized.get();
                let next_free = if start % CHUNK_SIZE != 0 {
                    // We cannot simply give an uninitialized chunk back. Hence,
                    // we prepend the slots to the free list.
                    debug_assert!(start <= u32::MAX - CHUNK_SIZE);
                    let end = (start / CHUNK_SIZE + 1) * CHUNK_SIZE;
                    let last_slot = &slots[(end - 1) as usize];
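                    // SAFETY (next 2): slots in `start..end` are pre-allocated
                    // by this thread (see `LocalStoreState::initialized`), so
                    // we have exclusive access to them.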
                    unsafe { &mut *last_slot.get() }.next_free = local.next_free.get();
                    for (slot, next_id) in slots[start as usize..(end - 1) as usize]
                        .iter()
                        .zip(start + terminals + 1..)
                    {
                        unsafe { &mut *slot.get() }.next_free = next_id;
                    }
                    start + terminals
                } else {
                    local.next_free.get()
                };

                let mut shared = shared_state.lock();
                if next_free != 0 {
                    shared.next_free.push(next_free);
                }
                shared.node_count += local.node_count_delta.replace(0) as i64;
            });
        }

        LOCAL_STORE_STATE.with(|local| {
            if self.0.addr() == local.current_store.get()
                && (local.next_free.get() != 0
                    || local.initialized.get() % CHUNK_SIZE != 0
                    || local.node_count_delta.get() != 0)
            {
                drop_slow(&self.0.inner_nodes.slots, &self.0.state, TERMINALS as u32);
            }
        });
    }
}

// --- Manager Impls -----------------------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    /// Get a reference to the store
    fn store(&self) -> &Store<'id, N, ET, TM, R, MD, TERMINALS> {
        // We can simply get the store pointer by subtracting the offset of
        // `Manager` in `Store`. The only issue is that this violates Rust's
        // (proposed) aliasing rules. Hence, we only provide a hint that the
        // store's address can be computed without loading the value.
        let offset = const {
            std::mem::offset_of!(Store<'static, N, ET, TM, R, MD, TERMINALS>, manager)
                + RwLock::<Self>::DATA_OFFSET
        };
        // SAFETY: The resulting pointer is in bounds of the `Store` allocation.
        if unsafe { (self as *const Self as *const u8).sub(offset) } != self.store as *const u8 {
            // SAFETY: The pointers above are equal after initialization of `self.store`.
            unsafe { std::hint::unreachable_unchecked() };
        }

        // SAFETY: After initialization, `self.store` always points to the
        // containing `Store`.
        unsafe { &*self.store }
    }
}
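
// A minimal standalone sketch of the field-offset arithmetic that `store()`
// relies on (illustrative types only; here the field pointer is derived from
// the container pointer, sidestepping the provenance concern mentioned above):
#[cfg(test)]
mod offset_sketch {
    use std::mem::offset_of;

    struct Outer {
        _pad: u64,
        field: u32,
    }

    #[test]
    fn field_offset_roundtrip() {
        let outer = Outer { _pad: 0, field: 7 };
        let outer_ptr: *const Outer = &outer;
        // Pointer to `outer.field`, derived from the container pointer:
        let field_ptr = unsafe { (outer_ptr as *const u8).add(offset_of!(Outer, field)) };
        // Subtracting the same offset recovers the container pointer.
        let recovered = unsafe { field_ptr.sub(offset_of!(Outer, field)) } as *const Outer;
        assert!(std::ptr::eq(recovered, outer_ptr));
        // SAFETY: `recovered` equals `outer_ptr`, and `outer` is still live.
        assert_eq!(unsafe { (*recovered).field }, 7);
    }
}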

unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> oxidd_core::Manager
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    R: oxidd_core::DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET>> + GCContainer<Self>,
{
    type Edge = Edge<'id, N, ET>;
    type EdgeTag = ET;
    type InnerNode = N;
    type Terminal = TM::TerminalNode;
    type TerminalRef<'a>
        = TM::TerminalNodeRef<'a>
    where
        Self: 'a;
    type Rules = R;

    type TerminalIterator<'a>
        = TM::Iterator<'a>
    where
        Self: 'a;
    type NodeSet = NodeSet;
    type LevelView<'a>
        = LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>
    where
        Self: 'a;
    type LevelIterator<'a>
        = LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
    where
        Self: 'a;

    #[inline]
    fn get_node(&self, edge: &Self::Edge) -> oxidd_core::Node<Self> {
        let store = self.store();
        let id = edge.node_id();
        if id >= TERMINALS {
            oxidd_core::Node::Inner(store.inner_nodes.inner_node(edge))
        } else {
            // SAFETY: `id` is a valid terminal ID
            oxidd_core::Node::Terminal(unsafe { store.terminal_manager.get_terminal(id) })
        }
    }

    #[inline]
    fn clone_edge(&self, edge: &Self::Edge) -> Self::Edge {
        self.store().clone_edge(edge)
    }

    #[inline]
    fn drop_edge(&self, edge: Self::Edge) {
        self.store().drop_edge(edge)
    }

    #[inline]
    fn num_inner_nodes(&self) -> usize {
        self.unique_table
            .iter()
            .map(|level| level.lock().len())
            .sum()
    }

    #[inline]
    fn approx_num_inner_nodes(&self) -> usize {
        let count = self.store().state.lock().node_count;
        if count < 0 {
            0
        } else {
            count as u64 as usize
        }
    }

    #[inline]
    fn num_levels(&self) -> LevelNo {
        self.unique_table.len() as LevelNo
    }

    #[inline]
    fn add_level(&mut self, f: impl FnOnce(LevelNo) -> Self::InnerNode) -> AllocResult<Self::Edge> {
        let store = self.store();
        let no = self.unique_table.len() as LevelNo;
        assert!(no != LevelNo::MAX, "Too many levels");
        let [e1, e2] = store.add_node(f(no))?;
        let mut set: LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS> = Default::default();
        set.insert(&store.inner_nodes, e1);
        self.unique_table.push(Mutex::new(set));
        Ok(e2)
    }

    #[inline(always)]
    fn level(&self, no: LevelNo) -> Self::LevelView<'_> {
        LevelView {
            store: self.store(),
            level: no,
            set: self.unique_table[no as usize].lock(),
        }
    }

    #[inline]
    fn levels(&self) -> Self::LevelIterator<'_> {
        LevelIter {
            store: self.store(),
            level_front: 0,
            level_back: self.unique_table.len() as LevelNo,
            it: self.unique_table.iter(),
        }
    }

    #[inline]
    fn get_terminal(&self, terminal: Self::Terminal) -> AllocResult<Self::Edge> {
        self.store().terminal_manager.get_edge(terminal)
    }

    #[inline]
    fn num_terminals(&self) -> usize {
        self.store().terminal_manager.len()
    }

    #[inline]
    fn terminals(&self) -> Self::TerminalIterator<'_> {
        self.store().terminal_manager.iter()
    }

    fn gc(&self) -> usize {
        if !self.gc_ongoing.try_lock() {
            // We don't want two concurrent garbage collections
            return 0;
        }
        self.gc_count.fetch_add(1, Relaxed);
        let guard = AbortOnDrop("Garbage collection panicked.");

        #[cfg(feature = "statistics")]
        eprintln!(
            "[oxidd-manager-index] garbage collection started at {}",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs()
        );

        self.data.pre_gc(self);

        let store = self.store();
        let mut collected = 0;
        for level in &self.unique_table {
            let mut level = level.lock();
            collected += level.len() as u32;
            // SAFETY: We prepared the garbage collection, hence there are no
            // "weak" edges.
            unsafe { level.gc(store) };
            collected -= level.len() as u32;
        }
        collected += store.terminal_manager.gc();

        // SAFETY: We called `pre_gc`, the garbage collection is done.
        unsafe { self.data.post_gc(self) };
        self.gc_ongoing.unlock();
        guard.defuse();

        #[cfg(feature = "statistics")]
        eprintln!(
            "[oxidd-manager-index] garbage collection finished at {}: removed {} nodes",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            collected,
        );
        collected as usize
    }

    fn reorder<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
        let guard = AbortOnDrop("Reordering panicked.");
        self.data.pre_gc(self);
        let res = f(self);
        // SAFETY: We called `pre_gc`, the reordering is done.
        unsafe { self.data.post_gc(self) };
        guard.defuse();
        // Depending on the reordering implementation, garbage collections are
        // performed, but not necessarily through `Self::gc`. So we increment
        // the GC count here.
        *self.gc_count.get_mut() += 1;
        self.reorder_count += 1;
        res
    }

    #[inline]
    fn gc_count(&self) -> u64 {
        self.gc_count.load(Relaxed)
    }

    #[inline]
    fn reorder_count(&self) -> u64 {
        self.reorder_count
    }
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> oxidd_core::HasWorkers
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
    ET: Tag + Send + Sync,
    TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
    R: oxidd_core::DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET>> + GCContainer<Self> + Send + Sync,
{
    type WorkerPool = crate::workers::Workers;

    #[inline]
    fn workers(&self) -> &Self::WorkerPool {
        &self.store().workers
    }
}

// === Unique Table ============================================================

/// The underlying data structure for level views
///
/// Conceptually, every [`LevelViewSet`] is tied to a [`Manager`]. It is a hash
/// table ensuring the uniqueness of nodes. However, it does not store nodes
/// internally, but edges referencing nodes. These edges are always untagged and
/// reference inner nodes.
///
/// Because a [`LevelViewSet`] on its own is not sufficient to drop nodes
/// properly, dropping the set simply leaks all contained edges, without
/// calling the `Edge` `Drop` implementation.
struct LevelViewSet<'id, N, ET, TM, R, MD, const TERMINALS: usize>(
    RawTable<Edge<'id, N, ET>, u32>,
    PhantomData<(TM, R, MD)>,
);
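
// A minimal, self-contained sketch of the hash-consing contract that
// `LevelViewSet` provides (a hypothetical simplification: a `HashMap` from
// node data to IDs instead of the index-based `RawTable` of edges):
// structurally equal nodes are created at most once per level.
#[cfg(test)]
mod hash_cons_sketch {
    use std::collections::HashMap;

    #[test]
    fn get_or_insert_is_idempotent() {
        // Key: (then-child ID, else-child ID); value: node ID.
        let mut table: HashMap<(u32, u32), u32> = HashMap::new();
        let mut next_id = 1u32; // ID 0 is reserved for a terminal
        let mut get_or_insert = |children: (u32, u32)| {
            *table.entry(children).or_insert_with(|| {
                let id = next_id;
                next_id += 1;
                id
            })
        };

        let a = get_or_insert((0, 0));
        let b = get_or_insert((0, 0)); // duplicate: no new node is created
        let c = get_or_insert((0, 1));
        assert_eq!(a, b);
        assert_ne!(a, c);
    }
}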

#[inline]
fn hash_node<N: NodeBase>(node: &N) -> u64 {
    let mut hasher = FxHasher::default();
    node.hash(&mut hasher);
    hasher.finish()
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    /// Get the number of nodes on this level
    #[inline]
    fn len(&self) -> usize {
        self.0.len()
    }

    /// Get an equality function for entries
    ///
    /// SAFETY: The returned function must be called on untagged edges
    /// referencing inner nodes only.
    #[inline]
    unsafe fn eq<'a>(
        nodes: &'a SlotSlice<'id, N, TERMINALS>,
        node: &'a N,
    ) -> impl Fn(&Edge<'id, N, ET>) -> bool + 'a {
        move |edge| unsafe { nodes.inner_node_unchecked(edge) == node }
    }

    /// Reserve space for `additional` nodes on this level
    #[inline]
    fn reserve(&mut self, additional: usize) {
        self.0.reserve(additional)
    }

    #[inline]
    fn get(&self, nodes: &SlotSlice<'id, N, TERMINALS>, node: &N) -> Option<&Edge<'id, N, ET>> {
        let hash = hash_node(node);
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.get(hash, unsafe { Self::eq(nodes, node) })
    }

    /// Insert the given edge, assuming that the referenced node is already
    /// stored in `nodes`.
    ///
    /// Returns `true` if the edge was inserted, `false` if it was already
    /// present.
    ///
    /// Panics if `edge` points to a terminal node. May furthermore panic if
    /// `edge` is tagged, depending on the configuration.
    #[inline]
    fn insert(&mut self, nodes: &SlotSlice<'id, N, TERMINALS>, edge: Edge<'id, N, ET>) -> bool {
        debug_assert!(!edge.is_tagged(), "`edge` must not be tagged");
        let node = nodes.inner_node(&edge);
        let hash = hash_node(node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(nodes, node) })
        {
            Ok(_) => {
                // We need to drop `edge`. This simply amounts to decrementing
                // the reference counter, since there is still one edge
                // referencing `node` stored in here.
                std::mem::forget(edge);
                // SAFETY: We forgot `edge`.
                let _old_rc = unsafe { node.release() };
                debug_assert!(_old_rc > 1);

                false
            }
            Err(slot) => {
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe { self.0.insert_in_slot_unchecked(hash, slot, edge) };
                true
            }
        }
    }

    /// Get an edge for `node`
    ///
    /// If `node` is not yet present in the hash table, `insert` is called to
    /// insert the node into `nodes`.
    #[inline]
    fn get_or_insert(
        &mut self,
        nodes: &SlotSlice<'id, N, TERMINALS>,
        node: N,
        insert: impl FnOnce(N) -> AllocResult<[Edge<'id, N, ET>; 2]>,
        drop: impl FnOnce(N),
    ) -> AllocResult<Edge<'id, N, ET>> {
        let hash = hash_node(&node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(nodes, &node) })
        {
            Ok(slot) => {
                drop(node);
                // SAFETY:
                // - `slot` was returned by `find_or_find_insert_slot`. We have exclusive access
                //   to the hash table and did not modify it in between.
                // - All edges in the table are untagged and refer to inner nodes.
                Ok(unsafe { nodes.clone_edge_unchecked(self.0.get_at_slot_unchecked(slot)) })
            }
            Err(slot) => {
                let [e1, e2] = insert(node)?;
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe { self.0.insert_in_slot_unchecked(hash, slot, e1) };
                Ok(e2)
            }
        }
    }

    /// Perform garbage collection, i.e. remove all nodes without references
    /// besides the internal edge
    ///
    /// SAFETY: There must not be any "weak" edges, i.e. edges where the
    /// reference count is not materialized (apply cache implementations exploit
    /// this).
    unsafe fn gc(&mut self, store: &Store<'id, N, ET, TM, R, MD, TERMINALS>) {
        let inner_nodes = &*store.inner_nodes;
        self.0.retain(
            |edge| {
                // SAFETY: All edges in unique tables are untagged and point to
                // inner nodes.
                unsafe { inner_nodes.inner_node_unchecked(edge) }.load_rc(Acquire) != 1
            },
            |edge| {
                // SAFETY (next 2): `edge` is untagged and points to an inner node
                let slot_ptr = unsafe { inner_nodes.slot_pointer_unchecked(&edge) };
                let id = unsafe { edge.node_id_unchecked() };
                std::mem::forget(edge);

                // SAFETY: Since `rc` is 1, this is the last reference. We use
                // `Acquire` order above and `Release` order when decrementing
                // reference counters, so we have exclusive node access now. It
                // contains a node. `id` is the ID of the slot.
                unsafe { store.free_slot(&mut *slot_ptr, id) };
            },
        );
    }

    /// Remove `node`
    ///
    /// Returns `Some(edge)` if `node` was present, `None` otherwise
    ///
    /// SAFETY: There must not be any "weak" edges, i.e. edges where the
    /// reference count is not materialized (apply cache implementations exploit
    /// this).
    #[inline]
    unsafe fn remove(
        &mut self,
        nodes: &SlotSlice<'id, N, TERMINALS>,
        node: &N,
    ) -> Option<Edge<'id, N, ET>> {
        let hash = hash_node(node);
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.remove_entry(hash, unsafe { Self::eq(nodes, node) })
    }

    /// Iterate over all edges pointing to nodes in the set
    #[inline]
    fn iter(&self) -> LevelViewIter<'_, 'id, N, ET> {
        LevelViewIter(self.0.iter())
    }

    /// Iterator that consumes all [`Edge`]s in the set
    #[inline]
    fn drain(&mut self) -> linear_hashtbl::raw::Drain<Edge<'id, N, ET>, u32> {
        self.0.drain()
    }
}

impl<N, ET, TM, R, MD, const TERMINALS: usize> Drop
    for LevelViewSet<'_, N, ET, TM, R, MD, TERMINALS>
{
    #[inline]
    fn drop(&mut self) {
        // If the nodes need drop, this is handled by the `Manager` `Drop` impl
        // or the `TakenLevelView` `Drop` impl, respectively.
        self.0.reset_no_drop();
    }
}

impl<N, ET, TM, R, MD, const TERMINALS: usize> Default
    for LevelViewSet<'_, N, ET, TM, R, MD, TERMINALS>
{
    #[inline]
    fn default() -> Self {
        Self(Default::default(), PhantomData)
    }
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> IntoIterator
    for LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>
{
    type Item = Edge<'id, N, ET>;

    type IntoIter = linear_hashtbl::raw::IntoIter<Edge<'id, N, ET>, u32>;

    fn into_iter(self) -> Self::IntoIter {
        let this = ManuallyDrop::new(self);
        // SAFETY: We move out of `this` (and forget `this`)
        let set = unsafe { std::ptr::read(&this.0) };
        set.into_iter()
    }
}
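
// A minimal standalone sketch of the "move a field out of a type that
// implements `Drop`" idiom used in `into_iter` above (illustrative types
// only): wrapping `self` in `ManuallyDrop` suppresses its destructor so that
// a field can be moved out with `ptr::read`.
#[cfg(test)]
mod move_out_sketch {
    use std::mem::ManuallyDrop;

    struct Wrapper(Vec<u32>);

    impl Drop for Wrapper {
        fn drop(&mut self) {
            unreachable!("suppressed by `ManuallyDrop` in `into_inner`");
        }
    }

    impl Wrapper {
        fn into_inner(self) -> Vec<u32> {
            let this = ManuallyDrop::new(self);
            // SAFETY: `this` is never used again and its destructor does not
            // run, so the field is moved out exactly once.
            unsafe { std::ptr::read(&this.0) }
        }
    }

    #[test]
    fn destructor_is_suppressed() {
        let v = Wrapper(vec![1, 2, 3]).into_inner();
        assert_eq!(v, [1, 2, 3]);
    }
}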

pub struct LevelViewIter<'a, 'id, N, ET>(linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET>, u32>);

impl<'a, 'id, N, ET> Iterator for LevelViewIter<'a, 'id, N, ET> {
    type Item = &'a Edge<'id, N, ET>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}

impl<N, ET> ExactSizeIterator for LevelViewIter<'_, '_, N, ET> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.0.len()
    }
}
impl<'a, 'id, N, ET> FusedIterator for LevelViewIter<'a, 'id, N, ET> where
    linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET>, u32>: FusedIterator
{
}

pub struct LevelView<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
    level: LevelNo,
    set: MutexGuard<'a, LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>,
}

unsafe impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
    oxidd_core::LevelView<Edge<'id, N, ET>, N> for LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    type Iterator<'b>
        = LevelViewIter<'b, 'id, N, ET>
    where
        Self: 'b,
        Edge<'id, N, ET>: 'b;

    type Taken = TakenLevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>;

    #[inline(always)]
    fn len(&self) -> usize {
        self.set.len()
    }

    #[inline(always)]
    fn level_no(&self) -> LevelNo {
        self.level
    }

    #[inline]
    fn reserve(&mut self, additional: usize) {
        self.set.reserve(additional);
    }

    #[inline]
    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET>> {
        self.set.get(&self.store.inner_nodes, node)
    }

    #[inline]
    fn insert(&mut self, edge: Edge<'id, N, ET>) -> bool {
        debug_assert!(!edge.is_tagged(), "`edge` should not be tagged");
        // No need to check if the node referenced by `edge` is stored in
        // `self.store` due to lifetime restrictions.
        let nodes = &self.store.inner_nodes;
        nodes.inner_node(&edge).assert_level_matches(self.level);
        self.set.insert(nodes, edge)
    }

    #[inline(always)]
    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET>> {
        node.assert_level_matches(self.level);
        // No need to check if the children of `node` are stored in `self.store`
        // due to lifetime restrictions.
        self.set.get_or_insert(
            &self.store.inner_nodes,
            node,
            |node| self.store.add_node(node),
            |node| node.drop_with(|edge| self.store.drop_edge(edge)),
        )
    }

    #[inline]
    unsafe fn gc(&mut self) {
        // SAFETY: Called from inside the closure of `Manager::reorder()`, hence
        // there are no "weak" edges.
        unsafe { self.set.gc(self.store) };
    }

    #[inline]
    unsafe fn remove(&mut self, node: &N) -> bool {
        // SAFETY: Called from inside the closure of `Manager::reorder()`, hence
        // there are no "weak" edges.
        match unsafe { self.set.remove(&self.store.inner_nodes, node) } {
            Some(edge) => {
                // SAFETY: `edge` is untagged and points to an inner node
                unsafe { self.store.drop_unique_table_edge(edge) };
                true
            }
            None => false,
        }
    }

    #[inline(always)]
    unsafe fn swap(&mut self, other: &mut Self) {
        std::mem::swap(&mut self.set, &mut other.set);
    }

    #[inline]
    fn iter(&self) -> Self::Iterator<'_> {
        self.set.iter()
    }

    #[inline(always)]
    fn take(&mut self) -> Self::Taken {
        TakenLevelView {
            store: self.store,
            level: self.level,
            set: std::mem::take(&mut self.set),
        }
    }
}

pub struct TakenLevelView<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
    level: LevelNo,
    set: LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>,
}

unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize>
    oxidd_core::LevelView<Edge<'id, N, ET>, N>
    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    type Iterator<'b>
        = LevelViewIter<'b, 'id, N, ET>
    where
        Self: 'b,
        Edge<'id, N, ET>: 'b;

    type Taken = Self;

    #[inline(always)]
    fn len(&self) -> usize {
        self.set.len()
    }

    #[inline(always)]
    fn level_no(&self) -> LevelNo {
        self.level
    }

    #[inline]
    fn reserve(&mut self, additional: usize) {
        self.set.reserve(additional);
    }

    #[inline]
    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET>> {
        self.set.get(&self.store.inner_nodes, node)
    }

    #[inline]
    fn insert(&mut self, edge: Edge<'id, N, ET>) -> bool {
        debug_assert!(!edge.is_tagged(), "`edge` should not be tagged");
        // No need to check if the node referenced by `edge` is stored in
        // `self.store` due to lifetime restrictions.
        let nodes = &self.store.inner_nodes;
        nodes.inner_node(&edge).assert_level_matches(self.level);
        self.set.insert(nodes, edge)
    }

    #[inline(always)]
    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET>> {
        node.assert_level_matches(self.level);
        // No need to check if the children of `node` are stored in `self.store`
        // due to lifetime restrictions.
        self.set.get_or_insert(
            &self.store.inner_nodes,
            node,
            |node| self.store.add_node(node),
            |node| node.drop_with(|edge| self.store.drop_edge(edge)),
        )
    }

    #[inline]
    unsafe fn gc(&mut self) {
        // SAFETY: Called from inside the closure of `Manager::reorder()`, hence
        // there are no "weak" edges.
        unsafe { self.set.gc(self.store) };
    }

    #[inline]
    unsafe fn remove(&mut self, node: &N) -> bool {
        // SAFETY: Called from inside the closure of `Manager::reorder()`, hence
        // there are no "weak" edges.
        match unsafe { self.set.remove(&self.store.inner_nodes, node) } {
1593            Some(edge) => {
1594                // SAFETY: `edge` is untagged and points to an inner node
1595                unsafe { self.store.drop_unique_table_edge(edge) };
1596                true
1597            }
1598            None => false,
1599        }
1600    }
1601
1602    #[inline(always)]
1603    unsafe fn swap(&mut self, other: &mut Self) {
1604        std::mem::swap(&mut self.set, &mut other.set);
1605    }
1606
1607    #[inline]
1608    fn iter(&self) -> Self::Iterator<'_> {
1609        self.set.iter()
1610    }
1611
1612    #[inline(always)]
1613    fn take(&mut self) -> Self::Taken {
1614        Self {
1615            store: self.store,
1616            level: self.level,
1617            set: std::mem::take(&mut self.set),
1618        }
1619    }
1620}
1621
1622impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop
1623    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, TERMINALS>
1624where
1625    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1626    ET: Tag,
1627    TM: TerminalManager<'id, N, ET, TERMINALS>,
1628    MD: DropWith<Edge<'id, N, ET>>,
1629{
1630    fn drop(&mut self) {
1631        for edge in self.set.drain() {
1632            // SAFETY: Edges in unique tables are untagged and point to inner
1633            // nodes.
1634            unsafe { self.store.drop_unique_table_edge(edge) }
1635        }
1636    }
1637}
1638
// --- LevelIter ---------------------------------------------------------------

pub struct LevelIter<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
    level_front: LevelNo,
    level_back: LevelNo,
    it: std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>,
}

impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize> Iterator
    for LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    type Item = LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match self.it.next() {
            Some(mutex) => {
                let level = self.level_front;
                self.level_front += 1;
                Some(LevelView {
                    store: self.store,
                    level,
                    set: mutex.lock(),
                })
            }
            None => None,
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.it.size_hint()
    }
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> ExactSizeIterator
    for LevelIter<'_, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    #[inline(always)]
    fn len(&self) -> usize {
        self.it.len()
    }
}
impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize> FusedIterator
    for LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
    std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>: FusedIterator,
{
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> DoubleEndedIterator
    for LevelIter<'_, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.it.next_back() {
            Some(mutex) => {
                self.level_back -= 1;
                Some(LevelView {
                    store: self.store,
                    level: self.level_back,
                    set: mutex.lock(),
                })
            }
            None => None,
        }
    }
}

// === ManagerRef ==============================================================

#[repr(transparent)]
pub struct ManagerRef<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
    const TERMINALS: usize,
>(
    Arc<
        Store<
            'static,
            NC::T<'static>,
            ET,
            TMC::T<'static>,
            RC::T<'static>,
            MDC::T<'static>,
            TERMINALS,
        >,
    >,
);

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Drop for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    fn drop(&mut self) {
        if Arc::strong_count(&self.0) == 2 {
            // This is the second-to-last reference. The last reference belongs
            // to the GC thread, so signal it to terminate.
            let gc_signal = &self.0.gc_signal;
            *gc_signal.0.lock() = GCSignal::Quit;
            gc_signal.1.notify_one();
        }
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    /// Convert `self` into a raw pointer, e.g. for usage in a foreign function
    /// interface.
    ///
    /// This method does not change any reference counters. To avoid a memory
    /// leak, use [`Self::from_raw()`] to convert the pointer back into a
    /// `ManagerRef`.
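    ///
    /// A minimal round-trip sketch (hypothetical: assumes `mref` is a
    /// `ManagerRef` of some concrete instantiation of this type):
    ///
    /// ```ignore
    /// let ptr = mref.into_raw();
    /// // ... pass `ptr` across the FFI boundary ...
    /// // SAFETY: `ptr` was obtained via `into_raw()` and is converted back
    /// // exactly once.
    /// let mref = unsafe { ManagerRef::from_raw(ptr) };
    /// ```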
    #[inline(always)]
    pub fn into_raw(self) -> *const std::ffi::c_void {
        let this = ManuallyDrop::new(self);
        // SAFETY: we move out of `this`
        Arc::into_raw(unsafe { std::ptr::read(&this.0) }) as _
    }

    /// Convert `raw` into a `ManagerRef`
    ///
    /// # Safety
    ///
    /// `raw` must have been obtained via [`Self::into_raw()`]. This function
    /// does not change any reference counters, so calling this function
    /// multiple times for the same pointer may lead to use-after-free bugs,
    /// depending on the usage of the returned `ManagerRef`.
    #[inline(always)]
    pub unsafe fn from_raw(raw: *const std::ffi::c_void) -> Self {
        // SAFETY: Invariants are upheld by the caller.
        Self(unsafe { Arc::from_raw(raw as _) })
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Clone for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > PartialEq for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.0, &other.0)
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Eq for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > PartialOrd for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Ord for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        let a = &*self.0 as *const Store<_, _, _, _, _, TERMINALS>;
        let b = &*other.0 as *const _;
        a.cmp(&b)
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Hash for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        let ptr = &*self.0 as *const Store<_, _, _, _, _, TERMINALS>;
        ptr.hash(state);
    }
}

impl<
        'a,
        'id,
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > From<&'a M<'id, NC, ET, TMC, RC, MDC, TERMINALS>>
    for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    fn from(manager: &'a M<'id, NC, ET, TMC, RC, MDC, TERMINALS>) -> Self {
        let ptr = manager as *const _ as *const M<'static, NC, ET, TMC, RC, MDC, TERMINALS>;
        // SAFETY:
        // - We just changed "identifier" lifetimes.
        // - The pointer was obtained via `Arc::into_raw()`, and since we have a
        //   `&Manager` reference, the counter is at least 1.
        unsafe {
            let manager = &*ptr;
            Arc::increment_strong_count(manager.store);
            ManagerRef(Arc::from_raw(manager.store))
        }
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > oxidd_core::ManagerRef for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    type Manager<'id> = M<'id, NC, ET, TMC, RC, MDC, TERMINALS>;

    fn with_manager_shared<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&Self::Manager<'id>) -> T,
    {
        let local_guard = self.0.prepare_local_state();
        let res = f(&self.0.manager.shared());
        drop(local_guard);
        res
    }

    fn with_manager_exclusive<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&mut Self::Manager<'id>) -> T,
    {
        let local_guard = self.0.prepare_local_state();
        let res = f(&mut self.0.manager.exclusive());
        drop(local_guard);
        res
    }
}
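
// A minimal usage sketch for the closure-based access above (hypothetical
// `mref: ManagerRef<...>`). The `for<'id>` bound ensures that edges and node
// references cannot escape the closure:
//
//     mref.with_manager_shared(|manager| {
//         oxidd_core::Manager::gc(manager)
//     });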

/// Create a new manager
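///
/// `inner_node_capacity` and `terminal_node_capacity` bound the number of
/// inner and terminal nodes, `threads` is the size of the associated worker
/// pool, and `data` becomes the manager's diagram-specific data.
///
/// A minimal call sketch (hypothetical: `NC`, `ET`, `TMC`, `RC`, and `MDC`
/// stand for concrete type constructors of some diagram kind, `data` for a
/// matching `MDC::T<'static>` value):
///
/// ```ignore
/// let mref = new_manager::<NC, ET, TMC, RC, MDC, 2>(
///     1 << 24, // inner node capacity (clamped, see below)
///     2,       // terminal node capacity
///     4,       // worker threads
///     data,
/// );
/// ```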
pub fn new_manager<
    NC: InnerNodeCons<ET> + 'static,
    ET: Tag + Send + Sync + 'static,
    TMC: TerminalManagerCons<NC, ET, TERMINALS> + 'static,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS> + 'static,
    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS> + 'static,
    const TERMINALS: usize,
>(
    inner_node_capacity: u32,
    terminal_node_capacity: u32,
    threads: u32,
    data: MDC::T<'static>,
) -> ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS> {
    // Evaluate a few constants for assertions
    let _ = Store::<
        'static,
        NC::T<'static>,
        ET,
        TMC::T<'static>,
        RC::T<'static>,
        MDC::T<'static>,
        TERMINALS,
    >::CHECK_TERMINALS;

    let max_inner_capacity = if usize::BITS > u32::BITS {
        ((1usize << (u32::BITS - Edge::<NC::T<'static>, ET>::TAG_BITS)) - TERMINALS) as u32
    } else {
        // 32-bit address space: every node consumes 16 bytes plus at least
        // 4 bytes in the unique table, and the apply cache requires space as
        // well. If we try to reserve space for more than `u32::MAX / 24`
        // nodes, we will most likely run out of memory.
        u32::MAX / 24
    };
    let inner_node_capacity = std::cmp::min(inner_node_capacity, max_inner_capacity);

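    // GC low/high water marks at 90% and 95% of the inner node capacity. If
    // the two coincide (i.e., `inner_node_capacity < 100`), automatic garbage
    // collection is disabled below.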
    let gc_lwm = inner_node_capacity / 100 * 90;
    let gc_hwm = inner_node_capacity / 100 * 95;

    let arc = Arc::new(Store {
        inner_nodes: SlotSlice::new_boxed(inner_node_capacity),
        state: CachePadded::new(Mutex::new(SharedStoreState {
            next_free: Vec::new(),
            allocated: 0,
            node_count: 0,
            gc_state: if gc_lwm < gc_hwm {
                GCState::Init
            } else {
                GCState::Disabled
            },
            gc_lwm,
            gc_hwm,
        })),
        manager: RwLock::new(Manager {
            unique_table: Vec::new(),
            data: ManuallyDrop::new(data),
            store: std::ptr::null(),
            gc_ongoing: TryLock::new(),
            gc_count: AtomicU64::new(0),
            reorder_count: 0,
        }),
        terminal_manager: TMC::T::<'static>::with_capacity(terminal_node_capacity),
        gc_signal: (Mutex::new(GCSignal::RunGc), Condvar::new()),
        workers: crate::workers::Workers::new(threads),
    });

    let mut manager = arc.manager.exclusive();
    manager.store = Arc::as_ptr(&arc);
    drop(manager);

    let store_addr = arc.addr();
    arc.workers.pool.spawn_broadcast(move |_| {
        // The workers are dedicated to this store.
        LOCAL_STORE_STATE.with(|state| state.current_store.set(store_addr))
    });

    // spell-checker:ignore mref
    let gc_mref: ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS> = ManagerRef(arc.clone());
    std::thread::Builder::new()
        .name("oxidd mi gc".to_string())
        .spawn(move || {
            // The worker is dedicated to this store.
            LOCAL_STORE_STATE.with(|state| state.current_store.set(store_addr));

            let store = &*gc_mref.0;
            loop {
                let mut lock = store.gc_signal.0.lock();
                store.gc_signal.1.wait(&mut lock);
                if *lock == GCSignal::Quit {
                    break;
                }
                drop(lock);

                // parking_lot `Condvar`s have no spurious wakeups, so run GC now.
                oxidd_core::ManagerRef::with_manager_shared(&gc_mref, |manager| {
                    oxidd_core::Manager::gc(manager);
                });

                let mut shared = store.state.lock();
                LOCAL_STORE_STATE.with(|local| {
                    if local.next_free.get() != 0 {
                        shared.node_count += local.node_count_delta.replace(0) as i64;
                        shared.next_free.push(local.next_free.replace(0));
                    }
                });

                if shared.node_count < shared.gc_lwm as i64 && shared.gc_state != GCState::Disabled
                {
                    shared.gc_state = GCState::Init;
                }
            }
        })
        .unwrap();

    ManagerRef(arc)
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag + Send + Sync,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > oxidd_core::HasWorkers for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
{
    type WorkerPool = crate::workers::Workers;

    #[inline]
    fn workers(&self) -> &Self::WorkerPool {
        &self.0.workers
    }
}

// === Function ================================================================

#[repr(C)]
pub struct Function<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
    const TERMINALS: usize,
> {
    store: ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>,
    edge: ManuallyDrop<Edge<'static, NC::T<'static>, ET>>,
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    /// Convert `self` into a raw pointer and edge value, e.g. for usage in a
    /// foreign function interface.
    ///
    /// This method does not change any reference counters. To avoid a memory
    /// leak, use [`Self::from_raw()`] to convert pointer and edge value back
    /// into a `Function`.
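    ///
    /// A minimal round-trip sketch (hypothetical: assumes `func` is a
    /// `Function` of some concrete instantiation of this type):
    ///
    /// ```ignore
    /// let (ptr, edge_val) = func.into_raw();
    /// // ... pass the pair across the FFI boundary ...
    /// // SAFETY: `ptr` and `edge_val` come from `into_raw()` and are
    /// // converted back exactly once.
    /// let func = unsafe { Function::from_raw(ptr, edge_val) };
    /// ```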
    #[inline(always)]
    pub fn into_raw(self) -> (*const std::ffi::c_void, u32) {
        let this = ManuallyDrop::new(self);
        // SAFETY: We forget `this`
        (
            unsafe { std::ptr::read(&this.store) }.into_raw(),
            this.edge.0,
        )
    }

    /// Convert `ptr` and `edge_val` into a `Function`
    ///
    /// # Safety
    ///
    /// `ptr` and `edge_val` must have been obtained via [`Self::into_raw()`].
    /// This function does not change any reference counters, so calling this
    /// function multiple times for the same pointer may lead to use-after-free
    /// bugs, depending on the usage of the returned `Function`.
    #[inline(always)]
    pub unsafe fn from_raw(ptr: *const std::ffi::c_void, edge_val: u32) -> Self {
        // SAFETY: Invariants are upheld by the caller.
        Self {
            store: unsafe { ManagerRef::from_raw(ptr) },
            edge: ManuallyDrop::new(Edge(edge_val, PhantomData)),
        }
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Drop for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn drop(&mut self) {
        // SAFETY: `self.edge` is never used again.
        let edge = unsafe { ManuallyDrop::take(&mut self.edge) };
        self.store.0.drop_edge(edge);
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Clone for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn clone(&self) -> Self {
        Self {
            store: self.store.clone(),
            edge: ManuallyDrop::new(self.store.0.clone_edge(&self.edge)),
        }
    }
}

impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > PartialEq for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.store == other.store && self.edge == other.edge
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Eq for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > PartialOrd for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Ord for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.store
            .cmp(&other.store)
            .then(self.edge.cmp(&other.edge))
    }
}
impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > Hash for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.store.hash(state);
        self.edge.hash(state);
    }
}

unsafe impl<
        NC: InnerNodeCons<ET>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
        const TERMINALS: usize,
    > oxidd_core::function::Function for Function<NC, ET, TMC, RC, MDC, TERMINALS>
{
    type Manager<'id> = M<'id, NC, ET, TMC, RC, MDC, TERMINALS>;

    type ManagerRef = ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>;

    #[inline]
    fn from_edge<'id>(manager: &Self::Manager<'id>, edge: EdgeOfFunc<'id, Self>) -> Self {
        #[allow(clippy::unnecessary_cast)] // this cast is necessary
        let ptr = manager as *const Self::Manager<'id> as *const Self::Manager<'static>;
        // SAFETY:
        // - We just changed "identifier" lifetimes.
        // - The pointer was obtained via `Arc::into_raw()`, and since we have a
        //   `&Manager` reference, the counter is at least 1.
        let store = unsafe {
            let manager = &*ptr;
            Arc::increment_strong_count(manager.store);
            ManagerRef(Arc::from_raw(manager.store))
        };
        // Avoid transmuting `edge` for changing lifetimes
        let id = edge.0;
        std::mem::forget(edge);
        Self {
            store,
            edge: ManuallyDrop::new(Edge(id, PhantomData)),
        }
    }

    #[inline]
    fn as_edge<'id>(&self, manager: &Self::Manager<'id>) -> &EdgeOfFunc<'id, Self> {
        assert!(
            crate::util::ptr_eq_untyped(self.store.0.manager.data_ptr(), manager),
            "This function does not belong to `manager`"
        );

        let ptr = &*self.edge as *const EdgeOfFunc<'static, Self> as *const EdgeOfFunc<'id, Self>;
        // SAFETY: Just changing lifetimes; we checked that `self.edge` belongs
        // to `manager` above.
        unsafe { &*ptr }
    }

    #[inline]
    fn into_edge<'id>(self, manager: &Self::Manager<'id>) -> EdgeOfFunc<'id, Self> {
        assert!(
            crate::util::ptr_eq_untyped(self.store.0.manager.data_ptr(), manager),
            "This function does not belong to `manager`"
        );
        // We only want to drop the store reference (but `Function` implements
        // `Drop`, so we cannot simply destructure `self`).
        let mut this = ManuallyDrop::new(self);
        let edge = Edge(this.edge.0, PhantomData);
        // SAFETY: we forget `self`
        unsafe { std::ptr::drop_in_place(&mut this.store) };
        edge
    }

    #[inline]
    fn manager_ref(&self) -> Self::ManagerRef {
        self.store.clone()
    }

    fn with_manager_shared<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
    {
        let local_guard = self.store.0.prepare_local_state();
        let res = f(&self.store.0.manager.shared(), &self.edge);
        drop(local_guard);
        res
    }

    fn with_manager_exclusive<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&mut Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
    {
        let local_guard = self.store.0.prepare_local_state();
        let res = f(&mut self.store.0.manager.exclusive(), &self.edge);
        drop(local_guard);
        res
    }
}

// === NodeSet =================================================================

/// Node set implementation using a [bit vector][BitVec]
///
/// Since nodes are stored in an array, we can use a single bit vector indexed
/// by node ID. This reduces space consumption dramatically and improves
/// performance.
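///
/// A usage sketch via the [`oxidd_core::util::NodeSet`] trait (hypothetical:
/// assumes `e` is an edge of this manager):
///
/// ```ignore
/// use oxidd_core::util::NodeSet as _;
///
/// let mut set = NodeSet::default();
/// assert!(set.insert(&e));  // newly inserted
/// assert!(!set.insert(&e)); // already present
/// assert!(set.contains(&e));
/// assert!(set.remove(&e) && set.len() == 0);
/// ```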
#[derive(Default, Clone)]
pub struct NodeSet {
    len: usize,
    data: BitVec,
}

impl PartialEq for NodeSet {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.len == other.len && self.data == other.data
    }
}
impl Eq for NodeSet {}

impl<'id, N: NodeBase, ET: Tag> oxidd_core::util::NodeSet<Edge<'id, N, ET>> for NodeSet {
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }

    #[inline]
    fn insert(&mut self, edge: &Edge<'id, N, ET>) -> bool {
        let index = edge.node_id();
        if index < self.data.len() {
            if self.data[index] {
                return false;
            }
        } else {
            self.data.resize((index + 1).next_power_of_two(), false);
        }
        self.data.set(index, true);
        self.len += 1;
        true
    }

    #[inline]
    fn contains(&self, edge: &Edge<'id, N, ET>) -> bool {
        let index = edge.node_id();
        if index < self.data.len() {
            self.data[index]
        } else {
            false
        }
    }

    #[inline]
    fn remove(&mut self, edge: &Edge<'id, N, ET>) -> bool {
        let index = edge.node_id();
        if index < self.data.len() && self.data[index] {
            self.len -= 1;
            self.data.set(index, false);
            true
        } else {
            false
        }
    }
}

// === Additional Trait Implementations ========================================

impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET>>,
        ET: Tag,
        TM: TerminalManager<'id, N, ET, TERMINALS>,
        R: DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
        MD: oxidd_core::HasApplyCache<Self, O> + GCContainer<Self> + DropWith<Edge<'id, N, ET>>,
        O: Copy,
        const TERMINALS: usize,
    > oxidd_core::HasApplyCache<Self, O> for Manager<'id, N, ET, TM, R, MD, TERMINALS>
{
    type ApplyCache = MD::ApplyCache;

    #[inline]
    fn apply_cache(&self) -> &Self::ApplyCache {
        self.data.apply_cache()
    }

    #[inline]
    fn apply_cache_mut(&mut self) -> &mut Self::ApplyCache {
        self.data.apply_cache_mut()
    }
}

impl<'id, T, N, ET, TM, R, MD, const TERMINALS: usize> AsRef<T>
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>> + AsRef<T>,
{
    #[inline(always)]
    fn as_ref(&self) -> &T {
        self.data.as_ref()
    }
}

impl<'id, T, N, ET, TM, R, MD, const TERMINALS: usize> AsMut<T>
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>> + AsMut<T>,
{
    #[inline(always)]
    fn as_mut(&mut self) -> &mut T {
        self.data.as_mut()
    }
}