oxidd_manager_index/
manager.rs

//! Index-Based Manager Implementation
//!
//! Abbreviations of generics:
//!
//! | Abbreviation | Meaning                           |
//! | ------------ | --------------------------------- |
//! | `N`          | Inner Node                        |
//! | `ET`         | Edge Tag                          |
//! | `TM`         | Terminal Manager                  |
//! | `R`          | Diagram Rules                     |
//! | `MD`         | Manager Data                      |
//! | `NC`         | Inner Node Type Constructor       |
//! | `TMC`        | Terminal Manager Type Constructor |
//! | `RC`         | Diagram Rules Type Constructor    |
//! | `OP`         | Operation                         |

use std::cell::{Cell, UnsafeCell};
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::iter::FusedIterator;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::ops::Range;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::{Acquire, Relaxed};
use std::sync::Arc;

use crossbeam_utils::CachePadded;
use derive_where::derive_where;
use fixedbitset::FixedBitSet;
use linear_hashtbl::raw::RawTable;
use parking_lot::{Condvar, Mutex, MutexGuard};
use rustc_hash::FxHasher;

use oxidd_core::error::{DuplicateVarName, OutOfMemory};
use oxidd_core::function::EdgeOfFunc;
use oxidd_core::util::{AbortOnDrop, AllocResult, Borrowed, DropWith, OnDrop, VarNameMap};
use oxidd_core::{DiagramRules, InnerNode, LevelNo, ManagerEventSubscriber, Tag, VarNo};

use crate::node::NodeBase;
use crate::terminal_manager::TerminalManager;
use crate::util::{rwlock::RwLock, Invariant, TryLock, VarLevelMap};

// === Type Constructors =======================================================

/// Inner node type constructor
pub trait InnerNodeCons<ET: Tag> {
    type T<'id>: NodeBase + InnerNode<Edge<'id, Self::T<'id>, ET>> + Send + Sync;
}

/// Terminal manager type constructor
pub trait TerminalManagerCons<NC: InnerNodeCons<ET>, ET: Tag, const TERMINALS: usize>:
    Sized
{
    type TerminalNode: Send + Sync;
    type T<'id>: Send
        + Sync
        + TerminalManager<'id, NC::T<'id>, ET, TERMINALS, TerminalNode = Self::TerminalNode>;
}

/// Diagram rules type constructor
pub trait DiagramRulesCons<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    MDC: ManagerDataCons<NC, ET, TMC, Self, TERMINALS>,
    const TERMINALS: usize,
>: Sized
{
    type T<'id>: Send
        + Sync
        + DiagramRules<
            Edge<'id, NC::T<'id>, ET>,
            NC::T<'id>,
            <TMC::T<'id> as TerminalManager<'id, NC::T<'id>, ET, TERMINALS>>::TerminalNode,
        >;
}

/// Manager data type constructor
pub trait ManagerDataCons<
    NC: InnerNodeCons<ET>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
    RC: DiagramRulesCons<NC, ET, TMC, Self, TERMINALS>,
    const TERMINALS: usize,
>: Sized
{
    type T<'id>: Send
        + Sync
        + DropWith<Edge<'id, NC::T<'id>, ET>>
        + ManagerEventSubscriber<
            Manager<'id, NC::T<'id>, ET, TMC::T<'id>, RC::T<'id>, Self::T<'id>, TERMINALS>,
        >;
}

// === Manager & Edges =========================================================

/// "Signals" used to communicate with the garbage collection thread
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum GCSignal {
    RunGc,
    Quit,
}

pub struct Store<'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    inner_nodes: Box<SlotSlice<'id, N, TERMINALS>>,
    manager: RwLock<Manager<'id, N, ET, TM, R, MD, TERMINALS>>,
    terminal_manager: TM,
    state: CachePadded<Mutex<SharedStoreState>>,
    gc_signal: (Mutex<GCSignal>, Condvar),
    workers: crate::workers::Workers,
}

unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Sync
    for Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
    ET: Tag + Send + Sync,
    TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
    MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
{
}

/// Size of a pre-allocation (number of nodes)
const CHUNK_SIZE: u32 = 64 * 1024;

/// State of automatic background garbage collection
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum GCState {
    /// Automatic garbage collection is disabled entirely
    Disabled,
    /// Initial state
    Init,
    /// Garbage collection has been triggered because the state was `Init` and
    /// the node count exceeded the high water mark
    ///
    /// The collecting thread sets this state back to `Init` if the node count
    /// reaches a value less than the low water mark.
    Triggered,
}
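
/// A minimal sketch (for illustration only, not used by the implementation)
/// of the water-mark hysteresis described on [`GCState`]: the state flips to
/// `Triggered` once the node count exceeds the high water mark and back to
/// `Init` once it falls below the low water mark; `Disabled` never changes.
#[allow(dead_code)]
fn gc_state_step(state: GCState, node_count: i64, lwm: u32, hwm: u32) -> GCState {
    match state {
        GCState::Init if node_count >= hwm as i64 => GCState::Triggered,
        GCState::Triggered if node_count < lwm as i64 => GCState::Init,
        other => other,
    }
}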

struct SharedStoreState {
    /// IDs of the next free slots
    ///
    /// Each element of the vector corresponds to a linked list of free slots.
    /// If a worker runs out of locally allocated free slots, it will first try
    /// to take a list of free slots from here. If this is not possible, a new
    /// chunk of uninitialized slots will be allocated. New entries to this
    /// vector are added during garbage collection. Each list should not be
    /// longer than `CHUNK_SIZE` (this is not a strict requirement, though).
    next_free: Vec<u32>,

    /// All slots in range `..allocated` are either initialized (a `node` or a
    /// `next_free` ID), or `uninit` and pre-allocated by a worker.
    ///
    /// Note that this is an index into the slice, so no `- TERMINALS` is needed
    /// to access the slot.
    allocated: u32,

    /// Eventually consistent count of inner nodes
    ///
    /// This is an `i64` and not a `u32` because of the following scenario:
    /// Worker A creates n nodes such that its `node_count_delta` becomes n - 1
    /// and the shared `node_count` is 1. At least two of the newly created
    /// nodes are solely referenced by a [`Function`]. An application thread
    /// drops these two functions. This will directly decrement the shared
    /// `node_count` by 1 twice. If we used a `u32`, this would lead to an
    /// overflow.
    node_count: i64,

    /// Background garbage collection state (see [`GCState`])
    gc_state: GCState,
    /// Low water mark for background garbage collection (see [`GCState`])
    gc_lwm: u32,
    /// High water mark for background garbage collection (see [`GCState`])
    gc_hwm: u32,
}
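
// Illustration (a sketch, not part of the implementation): the free lists
// reachable from `SharedStoreState::next_free` are intrusive singly linked
// lists threaded through the `Slot`s themselves. Walking one list looks
// roughly like this, where `slot_next_free` is a hypothetical accessor for
// the `next_free` field of the slot with the given ID:
//
//     let mut id = shared.next_free.pop().unwrap_or(0);
//     while id != 0 {
//         // ... hand slot `id` out for reuse ...
//         id = slot_next_free(id); // `0` terminates the list
//     }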

#[repr(align(64))] // all fields on a single cache line
struct LocalStoreState {
    /// ID of the next free slot
    ///
    /// `0` means that there is no such slot so far; in that case, either
    /// `initialized` can be incremented or we are out of memory. Since `0` is
    /// always a terminal (we require `TERMINALS >= 1`), there is no ambiguity.
    next_free: Cell<u32>,

    /// All slots in range `..initialized` are not `uninit`, i.e., either a
    /// `node` or a `next_free` ID. All slots until the next multiple of
    /// `CHUNK_SIZE` can be used by the local thread. So if
    /// `initialized % CHUNK_SIZE == 0`, the worker needs to allocate a new
    /// chunk from the shared store state.
    ///
    /// Note that this is an index into the slice, so no `- TERMINALS` is needed
    /// to access the slot.
    initialized: Cell<u32>,

    /// Address of the associated `Store`
    ///
    /// Before using slots from this `LocalStoreState`, the implementation needs
    /// to ensure that the slots come from the respective `Store`.
    current_store: Cell<usize>,

    /// Local changes to the shared node counter
    ///
    /// In general, we synchronize with the shared counter when we request slots
    /// from the shared manager or when we return the `next_free` list. The
    /// latter happens if this counter reaches `-CHUNK_SIZE`.
    node_count_delta: Cell<i32>,
}

thread_local! {
    static LOCAL_STORE_STATE: LocalStoreState = const {
        LocalStoreState {
            next_free: Cell::new(0),
            initialized: Cell::new(0),
            current_store: Cell::new(0),
            node_count_delta: Cell::new(0),
        }
    };
}

struct LocalStoreStateGuard<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>(
    &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
)
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>;

/// Node slot. We distinguish three states, corresponding to the variants:
/// "containing a node," "free," and "uninitialized."
union Slot<N> {
    node: ManuallyDrop<N>,
    /// ID of the next free slot or `0` if there is none. Subtract `TERMINALS`
    /// to get the `SlotSlice` index.
    next_free: u32,
    #[allow(unused)]
    uninit: (),
}
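
// Lifecycle of a slot (illustration): `uninit` (freshly pre-allocated) ->
// `node` (written by `Store::add_node`) -> `next_free` (linked into a free
// list by `Store::free_slot`) -> `node` again once the free slot is reused.
// A slot never returns to the `uninit` state.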

#[repr(transparent)]
struct SlotSlice<'id, N, const TERMINALS: usize> {
    phantom: PhantomData<Invariant<'id>>,
    slots: [UnsafeCell<Slot<N>>],
}

/// Edge type, see [`oxidd_core::Edge`]
///
/// Internally, this is represented as a `u32` index.
#[repr(transparent)]
#[must_use]
#[derive_where(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Edge<'id, N, ET>(
    /// `node_index | edge_tag << (u32::BITS - Self::TAG_BITS)`
    u32,
    #[derive_where(skip)] PhantomData<(Invariant<'id>, N, ET)>,
);

#[repr(C)]
pub struct Manager<'id, N, ET, TM, R, MD, const TERMINALS: usize>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    unique_table: Vec<Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>,
    var_level_map: VarLevelMap,
    var_name_map: VarNameMap,
    data: ManuallyDrop<MD>,
    /// Pointer back to the store, obtained via [`Arc::as_ptr()`].
    ///
    /// Theoretically, we should be able to get the pointer from a `&Manager`
    /// reference, but this leads to provenance issues.
    store: *const Store<'id, N, ET, TM, R, MD, TERMINALS>,
    gc_count: AtomicU64,
    reorder_count: u64,
    gc_ongoing: TryLock,
    reorder_gc_prepared: bool,
}

/// Type "constructor" for the manager from `InnerNodeCons` etc.
type M<'id, NC, ET, TMC, RC, MDC, const TERMINALS: usize> = Manager<
    'id,
    <NC as InnerNodeCons<ET>>::T<'id>,
    ET,
    <TMC as TerminalManagerCons<NC, ET, TERMINALS>>::T<'id>,
    <RC as DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>>::T<'id>,
    <MDC as ManagerDataCons<NC, ET, TMC, RC, TERMINALS>>::T<'id>,
    TERMINALS,
>;

unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
        const TERMINALS: usize,
    > Send for Manager<'id, N, ET, TM, R, MD, TERMINALS>
{
}
unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET>> + Send + Sync,
        const TERMINALS: usize,
    > Sync for Manager<'id, N, ET, TM, R, MD, TERMINALS>
{
}

// --- Edge Impls --------------------------------------------------------------

impl<N: NodeBase, ET: Tag> Edge<'_, N, ET> {
    const TAG_BITS: u32 = {
        let bits = usize::BITS - ET::MAX_VALUE.leading_zeros();
        assert!(bits <= 16, "Maximum value of edge tag is too large");
        bits
    };
    /// Shift amount to store/retrieve the tag from the most significant bits
    const TAG_SHIFT: u32 = (u32::BITS - Self::TAG_BITS) % 32;

    /// Mask corresponding to [`Self::TAG_BITS`]
    const TAG_MASK: u32 = ((1 << Self::TAG_BITS) - 1) << Self::TAG_SHIFT;
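
    // Worked example (illustrative, assuming an edge tag type with
    // `MAX_VALUE == 2`): `TAG_BITS == 2`, `TAG_SHIFT == 30`, and
    // `TAG_MASK == 0xC000_0000`. An edge to node 5 with tag 1 is stored as
    // `5 | (1 << 30) == 0x4000_0005`; `node_id()` masks the tag off again:
    // `0x4000_0005 & !TAG_MASK == 5`.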

    /// SAFETY: The edge must be untagged.
    #[inline(always)]
    unsafe fn node_id_unchecked(&self) -> u32 {
        debug_assert_eq!(self.0 & Self::TAG_MASK, 0);
        self.0
    }

    /// Get the node ID, i.e., the edge value without any tags
    #[inline(always)]
    fn node_id(&self) -> usize {
        (self.0 & !Self::TAG_MASK) as usize
    }

    /// Returns `true` iff the edge's tag != 0
    #[inline(always)]
    fn is_tagged(&self) -> bool {
        self.0 & Self::TAG_MASK != 0
    }

    /// Get the raw representation of the edge (also called "edge value")
    #[inline(always)]
    pub fn raw(&self) -> u32 {
        self.0
    }

    /// Get an edge from a terminal ID
    ///
    /// # Safety
    ///
    /// `id` must be a terminal ID, i.e., `id < TERMINALS`, and the caller must
    /// update the reference count for the terminal accordingly.
    #[inline(always)]
    pub unsafe fn from_terminal_id(id: u32) -> Self {
        Self(id, PhantomData)
    }
}

impl<N: NodeBase, ET: Tag> oxidd_core::Edge for Edge<'_, N, ET> {
    type Tag = ET;

    #[inline]
    fn borrowed(&self) -> Borrowed<'_, Self> {
        Borrowed::new(Self(self.0, PhantomData))
    }

    #[inline]
    fn with_tag(&self, tag: Self::Tag) -> Borrowed<'_, Self> {
        Borrowed::new(Self(
            (self.0 & !Self::TAG_MASK) | ((tag.as_usize() as u32) << Self::TAG_SHIFT),
            PhantomData,
        ))
    }

    #[inline]
    fn with_tag_owned(mut self, tag: Self::Tag) -> Self {
        self.0 = (self.0 & !Self::TAG_MASK) | ((tag.as_usize() as u32) << Self::TAG_SHIFT);
        self
    }

    #[inline]
    fn tag(&self) -> Self::Tag {
        ET::from_usize(((self.0 & Self::TAG_MASK) >> Self::TAG_SHIFT) as usize)
    }

    #[inline]
    fn node_id(&self) -> oxidd_core::NodeID {
        (self.0 & !Self::TAG_MASK) as oxidd_core::NodeID
    }
}

impl<N, ET> Drop for Edge<'_, N, ET> {
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        eprintln!(
            "`Edge`s must not be dropped. Use `Manager::drop_edge()`. Backtrace:\n{}",
            std::backtrace::Backtrace::capture()
        );

        #[cfg(feature = "static_leak_check")]
        {
            extern "C" {
                #[link_name = "\n\n`Edge`s must not be dropped. Use `Manager::drop_edge()`.\n"]
                fn trigger() -> !;
            }
            unsafe { trigger() }
        }
    }
}
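
// Note on the `static_leak_check` block above: declaring an `extern` function
// whose `link_name` is a human-readable message and calling it turns a
// reachable `drop` into a link-time assertion. If the optimizer cannot prove
// that no `Edge` is ever dropped, the call survives, the symbol stays
// undefined, and the linker error surfaces the message.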

// --- SlotSlice Impl ----------------------------------------------------------

impl<'id, N: NodeBase, const TERMINALS: usize> SlotSlice<'id, N, TERMINALS> {
    /// Create a new slot slice for up to `capacity` nodes
    fn new_boxed(capacity: u32) -> Box<Self> {
        let mut vec: Vec<UnsafeCell<Slot<N>>> = Vec::with_capacity(capacity as usize);

        // SAFETY: The new length is equal to the capacity. All elements are
        // "initialized" as `Slot::uninit`.
        //
        // Clippy's `uninit_vec` lint is a bit too strict here. `Slot`s are
        // somewhat like `MaybeUninit`, but Clippy wants `MaybeUninit`.
        #[allow(clippy::uninit_vec)]
        unsafe {
            vec.set_len(capacity as usize)
        };

        let boxed = vec.into_boxed_slice();
        // SAFETY: `SlotSlice` has `repr(transparent)` and thus the same
        // representation as `[UnsafeCell<Slot<N>>]`.
        unsafe { std::mem::transmute(boxed) }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline]
    unsafe fn slot_pointer_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> *mut Slot<N> {
        // SAFETY: `edge` is untagged
        let id = unsafe { edge.node_id_unchecked() } as usize;
        debug_assert!(id >= TERMINALS, "`edge` must reference an inner node");
        debug_assert!(id - TERMINALS < self.slots.len());
        // SAFETY: Indices derived from edges pointing to inner nodes are always
        // in bounds.
        unsafe { self.slots.get_unchecked(id - TERMINALS).get() }
    }

    /// Panics if `edge` references a terminal node
    #[inline]
    fn inner_node<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> &N {
        let id = edge.node_id();
        assert!(id >= TERMINALS, "`edge` must reference an inner node");
        debug_assert!(id - TERMINALS < self.slots.len());
        // SAFETY:
        // - Indices derived from edges pointing to inner nodes are in bounds
        // - If an edge points to an inner node, the node's `Slot` is immutable and a
        //   properly initialized node
        unsafe { &(*self.slots.get_unchecked(id - TERMINALS).get()).node }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline(always)]
    unsafe fn inner_node_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> &N {
        // SAFETY: If an edge points to an inner node, the node's `Slot` is
        // immutable and a properly initialized node.
        unsafe { &(*self.slot_pointer_unchecked(edge)).node }
    }

    /// SAFETY: `edge` must be untagged and reference an inner node
    #[inline(always)]
    unsafe fn clone_edge_unchecked<ET: Tag>(&self, edge: &Edge<'id, N, ET>) -> Edge<'id, N, ET> {
        unsafe { self.inner_node_unchecked(edge) }.retain();
        Edge(edge.0, PhantomData)
    }
}

// --- Store Impls -------------------------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    const CHECK_TERMINALS: () = {
        assert!(TERMINALS >= 1, "TERMINALS must be >= 1");
        assert!(
            TERMINALS <= u32::MAX as usize,
            "TERMINALS must fit into a u32"
        );
    };

    #[inline(always)]
    fn addr(&self) -> usize {
        // TODO: Use `pointer::addr()` once we raise the MSRV to 1.84
        self as *const Self as usize
    }

    #[inline]
    fn prepare_local_state(
        &self,
    ) -> Option<LocalStoreStateGuard<'_, 'id, N, ET, TM, R, MD, TERMINALS>> {
        LOCAL_STORE_STATE.with(|state| {
            if state.current_store.get() == 0 {
                state.next_free.set(0);
                state.initialized.set(0);
                state.current_store.set(self.addr());
                debug_assert_eq!(state.node_count_delta.get(), 0);
                Some(LocalStoreStateGuard(self))
            } else {
                None
            }
        })
    }

    /// Add a node to the store. Does not perform any duplicate checks.
    ///
    /// Returns `Err(OutOfMemory)` if the store is full.
    #[inline]
    fn add_node(&self, node: N) -> AllocResult<[Edge<'id, N, ET>; 2]> {
        debug_assert_eq!(node.load_rc(Relaxed), 2);
        let res = LOCAL_STORE_STATE.with(|state| {
            let node_count_delta = if state.current_store.get() == self.addr() {
                let delta = state.node_count_delta.get() + 1;
                let id = state.next_free.get();
                if id != 0 {
                    // SAFETY: `id` is the ID of a free slot, we have exclusive access
                    let (next_free, slot) = unsafe { self.use_free_slot(id) };
                    state.next_free.set(next_free);
                    state.node_count_delta.set(delta);
                    return Ok((id, slot));
                }

                let index = state.initialized.get();
                if index % CHUNK_SIZE != 0 {
                    let slots = &self.inner_nodes.slots;
                    debug_assert!((index as usize) < slots.len());
                    // SAFETY: All slots in range
                    // `index..index.next_multiple_of(CHUNK_SIZE)` are
                    // uninitialized and pre-allocated, so we have exclusive
                    // access to them.
                    let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
                    state.initialized.set(index + 1);
                    state.node_count_delta.set(delta);
                    return Ok((index + TERMINALS as u32, slot));
                }

                state.node_count_delta.set(0);
                delta
            } else {
                1
            };

            self.get_slot_from_shared(state, node_count_delta)
        });
        match res {
            Ok((id, slot)) => {
                slot.node = ManuallyDrop::new(node);
                Ok([Edge(id, PhantomData), Edge(id, PhantomData)])
            }
            Err(OutOfMemory) => {
                node.drop_with(|e| self.drop_edge(e));
                Err(OutOfMemory)
            }
        }
    }

    /// Get the free slot with ID `id` and load the `next_free` ID
    ///
    /// SAFETY: `id` must be the ID of a free slot and the current thread must
    /// have exclusive access to it.
    #[inline(always)]
    #[allow(clippy::mut_from_ref)]
    unsafe fn use_free_slot(&self, id: u32) -> (u32, &mut Slot<N>) {
        debug_assert!(id as usize >= TERMINALS);
        let index = id as usize - TERMINALS;
        debug_assert!(index < self.inner_nodes.slots.len());
        // SAFETY: All next-free IDs (>= TERMINALS) are valid, we have
        // exclusive access to the node.
        let slot = unsafe { &mut *self.inner_nodes.slots.get_unchecked(index).get() };
        // SAFETY: The slot is free.
        let next_free = unsafe { slot.next_free };
        (next_free, slot)
    }

    /// Get a slot from the shared state
    ///
    /// `delta` is added to the shared node count.
    ///
    /// Unless out of memory, this method returns the ID of a free or
    /// uninitialized slot as well as a reference pointing to that slot.
    #[cold]
    #[allow(clippy::mut_from_ref)]
    fn get_slot_from_shared(
        &self,
        local: &LocalStoreState,
        delta: i32,
    ) -> AllocResult<(u32, &mut Slot<N>)> {
        let mut shared = self.state.lock();

        shared.node_count += delta as i64;
        if shared.gc_state == GCState::Init && shared.node_count >= shared.gc_hwm as i64 {
            shared.gc_state = GCState::Triggered;
            self.gc_signal.1.notify_one();
        }

        if local.current_store.get() == self.addr() {
            debug_assert_eq!(local.next_free.get(), 0);
            debug_assert_eq!(local.initialized.get() % CHUNK_SIZE, 0);

            if let Some(id) = shared.next_free.pop() {
                // SAFETY: `id` is the ID of a free slot, we have exclusive access
                let (next_free, slot) = unsafe { self.use_free_slot(id) };
                local.next_free.set(next_free);
                return Ok((id, slot));
            }

            let index = shared.allocated;
            let slots = &self.inner_nodes.slots;
            if (index as usize + CHUNK_SIZE as usize) < slots.len() {
                shared.allocated = (index / CHUNK_SIZE + 1) * CHUNK_SIZE;
                local.initialized.set(index + 1);
            } else if (index as usize) < slots.len() {
                shared.allocated += 1;
            } else {
                return Err(OutOfMemory);
            }
            // SAFETY: `index` is in bounds, the slot is uninitialized
            let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
            Ok((index + TERMINALS as u32, slot))
        } else {
            if let Some(id) = shared.next_free.pop() {
                // SAFETY: `id` is the ID of a free slot, we have exclusive access
                let (next_free, slot) = unsafe { self.use_free_slot(id) };
                if next_free != 0 {
                    shared.next_free.push(next_free);
                }
                return Ok((id, slot));
            }

            let index = shared.allocated;
            let slots = &self.inner_nodes.slots;
            if (index as usize) >= slots.len() {
                return Err(OutOfMemory);
            }
            shared.allocated += 1;
            // SAFETY: `index` is in bounds, the slot is uninitialized
            let slot = unsafe { &mut *slots.get_unchecked(index as usize).get() };
            Ok((index + TERMINALS as u32, slot))
        }
    }

    /// Drop an edge that originates from the unique table.
    ///
    /// Edges from the unique table are untagged and point to inner nodes, so
    /// we need fewer case distinctions. Furthermore, we assume that the node's
    /// children are still present in the unique table (waiting for their
    /// revival or to be garbage collected), so we just decrement the children's
    /// reference counters. There is a debug assertion that checks this
    /// assumption.
    ///
    /// SAFETY: `edge` must be untagged and point to an inner node
    unsafe fn drop_unique_table_edge(&self, edge: Edge<'id, N, ET>) {
        // SAFETY (next 2): `edge` is untagged and points to an inner node
        let slot_ptr = unsafe { self.inner_nodes.slot_pointer_unchecked(&edge) };
        let id = unsafe { edge.node_id_unchecked() };
        std::mem::forget(edge);

        // SAFETY:
        // - `edge` points to an inner node
        // - We have shared access to nodes
        // - `edge` is forgotten.
        if unsafe { (*slot_ptr).node.release() } != 1 {
            return;
        }

        // We only have exclusive access to the other fields of the node after
        // the fence. It synchronizes with the `NodeBase::release()` above
        // (which is guaranteed to have `Release` order).
        std::sync::atomic::fence(Acquire);

        // SAFETY: Now, we have exclusive access to the slot. It contains a
        // node. `id` is the ID of the slot.
        unsafe { self.free_slot(&mut *slot_ptr, id) };
    }

    /// Free `slot`, i.e., drop the node and add its ID (`id`) to the free list
    ///
    /// SAFETY: `slot` must contain a node. `id` is the ID of `slot`.
    #[inline]
    unsafe fn free_slot(&self, slot: &mut Slot<N>, id: u32) {
        debug_assert!(id as usize >= TERMINALS);
        // SAFETY: We don't use the node in `slot` again.
        unsafe { ManuallyDrop::take(&mut slot.node) }.drop_with(|edge| self.drop_edge(edge));

        LOCAL_STORE_STATE.with(|state| {
            if state.current_store.get() == self.addr() {
                slot.next_free = state.next_free.get();
                state.next_free.set(id);

                let delta = state.node_count_delta.get() - 1;
                if delta > -(CHUNK_SIZE as i32) {
                    state.node_count_delta.set(delta);
                } else {
                    let mut shared = self.state.lock();
                    shared.next_free.push(state.next_free.replace(0));
                    shared.node_count += state.node_count_delta.replace(0) as i64;
                }
            } else {
                #[cold]
                fn return_slot<N>(shared: &Mutex<SharedStoreState>, slot: &mut Slot<N>, id: u32) {
                    let mut shared = shared.lock();
                    slot.next_free = shared.next_free.pop().unwrap_or(0);
                    shared.next_free.push(id);
                    shared.node_count -= 1;
                }
                return_slot(&self.state, slot, id);
            }
        });
    }

    /// Drop an edge, assuming that it isn't the last one pointing to the
    /// referenced node
    ///
    /// This assumption is fulfilled in case the node is still stored in the
    /// unique table.
    ///
    /// There is a debug assertion that checks the aforementioned assumption. In
    /// release builds, this function will simply leak the node.
    #[inline]
    fn drop_edge(&self, edge: Edge<'id, N, ET>) {
        let id = edge.node_id();
        if id >= TERMINALS {
            // inner node
            let node = self.inner_nodes.inner_node(&edge);
            std::mem::forget(edge);
            // SAFETY: `edge` is forgotten
            let _old_rc = unsafe { node.release() };
            debug_assert!(_old_rc > 1);
        } else {
            std::mem::forget(edge);
            // SAFETY: `id` is a valid terminal ID and `edge` is forgotten
            unsafe { self.terminal_manager.release(id) };
        }
    }

    /// Clone `edge`
    ///
    /// `edge` may be tagged and point to an inner or a terminal node.
    #[inline]
    fn clone_edge(&self, edge: &Edge<'id, N, ET>) -> Edge<'id, N, ET> {
        let id = edge.node_id();
        if id >= TERMINALS {
            // inner node
            self.inner_nodes.inner_node(edge).retain();
        } else {
            // SAFETY: `id` is a valid terminal ID
            unsafe { self.terminal_manager.retain(id) };
        }
        Edge(edge.0, PhantomData)
    }
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop for Store<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    fn drop(&mut self) {
        let manager = self.manager.get_mut();
        // We don't care about reference counters from here on.
        // SAFETY: We don't use `manager.data` again.
        unsafe { ManuallyDrop::take(&mut manager.data) }.drop_with(std::mem::forget);

        if N::needs_drop() {
            let unique_table = std::mem::take(&mut manager.unique_table);
            for level in unique_table {
                for edge in level.into_inner() {
                    // SAFETY (next 2): `edge` is untagged and points to an
                    // inner node
                    let slot_ptr = unsafe { self.inner_nodes.slot_pointer_unchecked(&edge) };
                    std::mem::forget(edge);
                    // SAFETY: We have exclusive access to all nodes in the
                    // store. `edge` points to an inner node.
                    let node = unsafe { ManuallyDrop::take(&mut (*slot_ptr).node) };

                    // We don't care about reference counts anymore.
                    node.drop_with(std::mem::forget);
                }
            }
        }
    }
}

// --- LocalStoreStateGuard impl -----------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop
    for LocalStoreStateGuard<'_, 'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    #[inline]
    fn drop(&mut self) {
        #[cold]
        fn drop_slow<N>(
            slots: &[UnsafeCell<Slot<N>>],
            shared_state: &Mutex<SharedStoreState>,
            terminals: u32,
        ) {
            LOCAL_STORE_STATE.with(|local| {
                local.current_store.set(0);
                let start = local.initialized.get();
                let next_free = if start % CHUNK_SIZE != 0 {
                    // We cannot simply give an uninitialized chunk back. Hence,
                    // we prepend the slots to the free list.
                    debug_assert!(start <= u32::MAX - CHUNK_SIZE);
                    let end = (start / CHUNK_SIZE + 1) * CHUNK_SIZE;
                    let last_slot = &slots[(end - 1) as usize];
                    // SAFETY (next 2): We have exclusive access to the (uninitialized) slots in the
                    // range from `local.initialized` up to the next multiple of `CHUNK_SIZE`.
                    unsafe { &mut *last_slot.get() }.next_free = local.next_free.get();
                    for (slot, next_id) in slots[start as usize..(end - 1) as usize]
                        .iter()
                        .zip(start + terminals + 1..)
                    {
                        unsafe { &mut *slot.get() }.next_free = next_id;
                    }
                    start + terminals
                } else {
                    local.next_free.get()
                };

                let mut shared = shared_state.lock();
                if next_free != 0 {
                    shared.next_free.push(next_free);
                }
                shared.node_count += local.node_count_delta.replace(0) as i64;
            });
        }

        LOCAL_STORE_STATE.with(|local| {
            if self.0.addr() == local.current_store.get()
                && (local.next_free.get() != 0
                    || local.initialized.get() % CHUNK_SIZE != 0
                    || local.node_count_delta.get() != 0)
            {
                drop_slow(&self.0.inner_nodes.slots, &self.0.state, TERMINALS as u32);
            }
        });
    }
}

// --- Manager Impls -----------------------------------------------------------

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    R: oxidd_core::DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET>> + ManagerEventSubscriber<Self>,
{
    /// Get a reference to the store
    #[inline]
    fn store(&self) -> &Store<'id, N, ET, TM, R, MD, TERMINALS> {
        // We can simply get the store pointer by subtracting the offset of
        // `Manager` in `Store`. The only issue is that this violates Rust's
        // (proposed) aliasing rules. Hence, we only provide a hint that the
        // store's address can be computed without loading the value.
        let offset = const {
            std::mem::offset_of!(Store<'static, N, ET, TM, R, MD, TERMINALS>, manager)
                + RwLock::<Self>::DATA_OFFSET
        };
        // SAFETY:
        // - The pointer resulting from the subtraction is in bounds of the `Store`
        //   allocation.
        // - The pointers are equal after initialization of `self.store`.
        unsafe {
            std::hint::assert_unchecked(std::ptr::eq(
                std::ptr::from_ref(self).cast::<u8>().sub(offset).cast(),
                self.store,
            ))
        };

        // SAFETY: After initialization, `self.store` always points to the
        // containing `Store`.
        unsafe { &*self.store }
    }

    /// Initialize the manager data
    fn init(&mut self) {
        self.data.init(self);
        MD::init_mut(self);
    }
}
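
// `store()` above is an instance of the "container-of" pattern: recover a
// pointer to the containing struct from a reference to one of its fields by
// subtracting the field's offset. A minimal standalone sketch of the idea
// (with made-up types, not part of this module):
//
//     struct Container { header: u64, field: u32 }
//     fn container_of(field: &u32) -> *const Container {
//         let offset = std::mem::offset_of!(Container, field);
//         // SAFETY requirement: `field` must really be `Container::field`
//         unsafe { std::ptr::from_ref(field).cast::<u8>().sub(offset).cast() }
//     }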

unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> oxidd_core::Manager
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    R: oxidd_core::DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET>> + ManagerEventSubscriber<Self>,
{
    type Edge = Edge<'id, N, ET>;
    type EdgeTag = ET;
    type InnerNode = N;
    type Terminal = TM::TerminalNode;
    type TerminalRef<'a>
        = TM::TerminalNodeRef<'a>
    where
        Self: 'a;
    type Rules = R;

    type TerminalIterator<'a>
        = TM::Iterator<'a>
    where
        Self: 'a;
    type NodeSet = NodeSet;
    type LevelView<'a>
        = LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>
    where
        Self: 'a;
    type LevelIterator<'a>
        = LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
    where
        Self: 'a;

    #[inline]
    fn get_node(&self, edge: &Self::Edge) -> oxidd_core::Node<'_, Self> {
        let store = self.store();
        let id = edge.node_id();
        if id >= TERMINALS {
            oxidd_core::Node::Inner(store.inner_nodes.inner_node(edge))
        } else {
            // SAFETY: `id` is a valid terminal ID
            oxidd_core::Node::Terminal(unsafe { store.terminal_manager.get_terminal(id) })
        }
    }

    #[inline]
    fn clone_edge(&self, edge: &Self::Edge) -> Self::Edge {
        self.store().clone_edge(edge)
    }

    #[inline]
    fn drop_edge(&self, edge: Self::Edge) {
        self.store().drop_edge(edge)
    }

    #[track_caller]
    fn try_remove_node(&self, edge: Edge<'id, N, ET>, level: LevelNo) -> bool {
        let id = edge.node_id();
        let store = self.store();
        if id < TERMINALS {
            std::mem::forget(edge);
            // SAFETY: `id` is a valid terminal ID and `edge` is forgotten
            unsafe { store.terminal_manager.release(id) };
            debug_assert_eq!(level, LevelNo::MAX, "`level` does not match");
            return false;
        }

        // inner node
        let node = store.inner_nodes.inner_node(&edge);
        std::mem::forget(edge);
        // SAFETY: `edge` is forgotten
        let old_rc = unsafe { node.release() };

        debug_assert_ne!(level, LevelNo::MAX, "`level` does not match");
        debug_assert!(
            (level as usize) < self.unique_table.len(),
            "`level` out of range"
        );
        debug_assert!(node.check_level(|l| l == level), "`level` does not match");
        debug_assert!(old_rc > 1);

        if old_rc != 2 || !self.reorder_gc_prepared {
            return false;
        }

        let Some(set) = self.unique_table.get(level as usize) else {
            return false;
        };
        let mut set = set.lock();

        // Read the reference count again: Another thread may have created an
        // edge between our `node.release()` call and `set.lock()`.
        let rc = node.load_rc(Acquire);
        debug_assert_ne!(rc, 0);
        if rc != 1 {
            return false;
        }

        // SAFETY: we checked that `reorder_gc_prepared` is true above
        let Some(edge) = (unsafe { set.remove(&store.inner_nodes, node) }) else {
            return false;
        };

        // SAFETY (next 2): `edge` is untagged and points to an inner node
        let slot_ptr = unsafe { store.inner_nodes.slot_pointer_unchecked(&edge) };
        let id = unsafe { edge.node_id_unchecked() };
        std::mem::forget(edge);

        // SAFETY: Since `rc` is 1, this is the last reference. We use `Acquire`
        // order above and `Release` order when decrementing reference counters,
        // so we have exclusive node access now. The slot contains a node. `id`
        // is the ID of the slot.
        unsafe { store.free_slot(&mut *slot_ptr, id) };

        true
    }

    #[inline]
    fn num_inner_nodes(&self) -> usize {
        self.unique_table
            .iter()
            .map(|level| level.lock().len())
            .sum()
    }

    #[inline]
    fn approx_num_inner_nodes(&self) -> usize {
        let count = self.store().state.lock().node_count;
        if count < 0 {
            0
        } else {
            count as u64 as usize
        }
    }

    #[inline(always)]
    fn num_levels(&self) -> LevelNo {
        self.unique_table.len() as LevelNo
    }

    #[inline(always)]
    fn num_named_vars(&self) -> VarNo {
        self.var_name_map.named_count() as VarNo
    }

    #[track_caller]
    fn add_vars(&mut self, additional: VarNo) -> Range<VarNo> {
        let len = self.unique_table.len() as VarNo;
        let new_len = len.checked_add(additional).expect("too many variables");
        let range = len..new_len;

        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        self.unique_table
            .resize_with(new_len as usize, || Mutex::new(LevelViewSet::default()));
        self.var_level_map.extend(additional);
        self.var_name_map.add_unnamed(additional);

        debug_assert_eq!(new_len as usize, self.unique_table.len());
        debug_assert_eq!(new_len as usize, self.var_level_map.len());
        debug_assert_eq!(new_len, self.var_name_map.len());

        self.data.post_reorder(self);
        MD::post_reorder_mut(self);

        range
    }

    #[track_caller]
    fn add_named_vars<S: Into<String>>(
        &mut self,
        names: impl IntoIterator<Item = S>,
    ) -> Result<Range<VarNo>, DuplicateVarName> {
        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        let len = self.var_name_map.len();
        let mut on_drop = OnDrop::new(self, |this| {
            // This block is executed whenever `on_drop` gets dropped, i.e.,
            // even if iterating over `names` or converting a value of type `S`
            // into a `String` panics. This way, we ensure that the manager's
            // state remains consistent.
            let new_len = this.var_name_map.len();
            this.unique_table
                .resize_with(new_len as usize, || Mutex::new(LevelViewSet::default()));
            this.var_level_map.extend((new_len - len) as VarNo);

            debug_assert_eq!(new_len as usize, this.unique_table.len());
            debug_assert_eq!(new_len as usize, this.var_level_map.len());
            debug_assert_eq!(new_len, this.var_name_map.len());

            this.data.post_reorder(this);
            MD::post_reorder_mut(this);
        });

        let mut names = names.into_iter();
        let range = on_drop.data_mut().var_name_map.add_named(names.by_ref())?;
        drop(on_drop);

        if names.next().is_some() {
            // important: panic only after dropping `on_drop`
            panic!("too many variables");
        }

        Ok(range)
    }
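
    // The `OnDrop` guard in `add_named_vars` above is a drop-guard pattern for
    // panic safety: its closure runs when the guard goes out of scope, whether
    // control flow leaves normally (via `drop(on_drop)`), through `?`, or by
    // unwinding, so the unique table and the variable maps are resized to
    // consistent lengths in every case.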

    #[track_caller]
    fn add_named_vars_from_map(
        &mut self,
        map: VarNameMap,
    ) -> Result<Range<VarNo>, DuplicateVarName> {
        if !self.var_name_map.is_empty() {
            return self.add_named_vars(map.into_names_iter());
        }

        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        let n = map.len();
        self.unique_table
            .resize_with(n as usize, || Mutex::new(LevelViewSet::default()));
        self.var_level_map.extend(n);
        self.var_name_map = map;

        debug_assert_eq!(n as usize, self.unique_table.len());
        debug_assert_eq!(n as usize, self.var_level_map.len());
        debug_assert_eq!(n, self.var_name_map.len());

        self.data.post_reorder(self);
        MD::post_reorder_mut(self);

        Ok(0..n)
    }

    #[track_caller]
    #[inline(always)]
    fn var_name(&self, var: VarNo) -> &str {
        self.var_name_map.var_name(var)
    }

    #[track_caller]
    #[inline(always)]
    fn set_var_name(
        &mut self,
        var: VarNo,
        name: impl Into<String>,
    ) -> Result<(), DuplicateVarName> {
        self.var_name_map.set_var_name(var, name)
    }

    #[inline(always)]
    fn name_to_var(&self, name: impl AsRef<str>) -> Option<VarNo> {
        self.var_name_map.name_to_var(name)
    }

    #[track_caller]
    #[inline(always)]
    fn var_to_level(&self, var: VarNo) -> LevelNo {
        self.var_level_map.var_to_level(var)
    }

    #[track_caller]
    #[inline(always)]
    fn level_to_var(&self, level: LevelNo) -> VarNo {
        self.var_level_map.level_to_var(level)
    }

    #[track_caller]
    #[inline(always)]
    fn level(&self, no: LevelNo) -> Self::LevelView<'_> {
        LevelView {
            store: self.store(),
            var_level_map: &self.var_level_map,
            allow_node_removal: self.reorder_gc_prepared,
            level: no,
            set: self.unique_table[no as usize].lock(),
        }
    }

    #[inline]
    fn levels(&self) -> Self::LevelIterator<'_> {
        LevelIter {
            store: self.store(),
            var_level_map: &self.var_level_map,
            allow_node_removal: self.reorder_gc_prepared,
            level_front: 0,
            level_back: self.unique_table.len() as LevelNo,
            it: self.unique_table.iter(),
        }
    }

    #[inline]
    fn get_terminal(&self, terminal: Self::Terminal) -> AllocResult<Self::Edge> {
        self.store().terminal_manager.get_edge(terminal)
    }

    #[inline]
    fn num_terminals(&self) -> usize {
        self.store().terminal_manager.len()
    }

    #[inline]
    fn terminals(&self) -> Self::TerminalIterator<'_> {
        self.store().terminal_manager.iter()
    }

    fn gc(&self) -> usize {
        if !self.gc_ongoing.try_lock() {
            // We don't want two concurrent garbage collections
            return 0;
        }
        self.gc_count.fetch_add(1, Relaxed);
        let guard = AbortOnDrop("Garbage collection panicked.");

        #[cfg(feature = "statistics")]
        eprintln!(
            "[oxidd-manager-index] garbage collection started at {}",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs()
        );

        if !self.reorder_gc_prepared {
            self.data.pre_gc(self);
        }

        let store = self.store();
        let mut collected = 0;
        for level in &self.unique_table {
            let mut level = level.lock();
            collected += level.len() as u32;
            // SAFETY: We prepared the garbage collection, hence there are no
            // "weak" edges.
            unsafe { level.gc(store) };
            collected -= level.len() as u32;
        }
        collected += store.terminal_manager.gc();

        if !self.reorder_gc_prepared {
            // SAFETY: We called `pre_gc`, the garbage collection is done.
            unsafe { self.data.post_gc(self) };
        }
        self.gc_ongoing.unlock();
        guard.defuse();

        #[cfg(feature = "statistics")]
        eprintln!(
            "[oxidd-manager-index] garbage collection finished at {}: removed {} nodes",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            collected,
        );
        collected as usize
    }

    fn reorder<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
        if self.reorder_gc_prepared {
            // Reordering was already prepared (nested `reorder` call).
            // Important: We must return here to not finalize the reordering
            // before the parent `reorder` closure returns.
            return f(self);
        }
        let guard = AbortOnDrop("Reordering panicked.");

        self.data.pre_gc(self);
        self.reorder_gc_prepared = true;
        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        let res = f(self);

        self.data.post_reorder(self);
        MD::post_reorder_mut(self);
        self.reorder_gc_prepared = false;
        // SAFETY: We called `pre_gc`, the reordering is done.
        unsafe { self.data.post_gc(self) };

        guard.defuse();
        // Depending on the reordering implementation, garbage collections are
        // performed, but not necessarily through `Self::gc`. So we increment
        // the GC count here.
        *self.gc_count.get_mut() += 1;
        self.reorder_count += 1;
        res
    }

    #[inline]
    fn gc_count(&self) -> u64 {
        self.gc_count.load(Relaxed)
    }

    #[inline]
    fn reorder_count(&self) -> u64 {
        self.reorder_count
    }
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> oxidd_core::HasWorkers
    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>> + Send + Sync,
    ET: Tag + Send + Sync,
    TM: TerminalManager<'id, N, ET, TERMINALS> + Send + Sync,
    R: oxidd_core::DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET>> + ManagerEventSubscriber<Self> + Send + Sync,
{
    type WorkerPool = crate::workers::Workers;

    #[inline]
    fn workers(&self) -> &Self::WorkerPool {
        &self.store().workers
    }
}

// === Unique Table ============================================================

/// The underlying data structure for level views
///
/// Conceptually, every [`LevelViewSet`] is tied to a [`Manager`]. It is a hash
/// table ensuring the uniqueness of nodes. However, it does not store nodes
/// internally, but edges referencing nodes. These edges are always untagged and
/// reference inner nodes.
///
/// Because a [`LevelViewSet`] on its own is not sufficient to drop nodes
/// properly, dropping it will simply leak all contained edges without calling
/// the `Edge`s' `Drop` implementation.
struct LevelViewSet<'id, N, ET, TM, R, MD, const TERMINALS: usize>(
    RawTable<Edge<'id, N, ET>, u32>,
    PhantomData<(TM, R, MD)>,
);

#[inline]
fn hash_node<N: NodeBase>(node: &N) -> u64 {
    let mut hasher = FxHasher::default();
    node.hash(&mut hasher);
    hasher.finish()
}

impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, TERMINALS>,
    MD: DropWith<Edge<'id, N, ET>>,
{
    /// Get the number of nodes on this level
    #[inline]
    fn len(&self) -> usize {
        self.0.len()
    }

    /// Get an equality function for entries
    ///
    /// SAFETY: The returned function must be called on untagged edges
    /// referencing inner nodes only.
    #[inline]
    unsafe fn eq<'a>(
        nodes: &'a SlotSlice<'id, N, TERMINALS>,
        node: &'a N,
    ) -> impl Fn(&Edge<'id, N, ET>) -> bool + 'a {
        move |edge| unsafe { nodes.inner_node_unchecked(edge) == node }
    }

    /// Reserve space for `additional` nodes on this level
    #[inline]
    fn reserve(&mut self, additional: usize) {
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.reserve(additional)
    }

    #[inline]
    fn get(&self, nodes: &SlotSlice<'id, N, TERMINALS>, node: &N) -> Option<&Edge<'id, N, ET>> {
        let hash = hash_node(node);
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.get(hash, unsafe { Self::eq(nodes, node) })
    }

    /// Insert the given edge, assuming that the referenced node is already
    /// stored in `nodes`.
    ///
    /// Returns `true` if the edge was inserted, `false` if it was already
    /// present.
    ///
    /// Panics if `edge` points to a terminal node. May furthermore panic if
    /// `edge` is tagged, depending on the configuration.
    #[inline]
    fn insert(&mut self, nodes: &SlotSlice<'id, N, TERMINALS>, edge: Edge<'id, N, ET>) -> bool {
        debug_assert!(!edge.is_tagged(), "`edge` must not be tagged");
        let node = nodes.inner_node(&edge);
        let hash = hash_node(node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(nodes, node) })
        {
            Ok(_) => {
                // We need to drop `edge`. This simply amounts to decrementing
                // the reference counter, since there is still one edge
                // referencing `node` stored in here.
                std::mem::forget(edge);
                // SAFETY: We forgot `edge`.
                let _old_rc = unsafe { node.release() };
                debug_assert!(_old_rc > 1);

                false
            }
            Err(slot) => {
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe { self.0.insert_in_slot_unchecked(hash, slot, edge) };
                true
            }
        }
    }

    /// Get an edge for `node`
    ///
    /// If `node` is not yet present in the hash table, `insert` is called to
    /// insert the node into `nodes`.
    #[inline]
    fn get_or_insert(
        &mut self,
        nodes: &SlotSlice<'id, N, TERMINALS>,
        node: N,
        insert: impl FnOnce(N) -> AllocResult<[Edge<'id, N, ET>; 2]>,
        drop: impl FnOnce(N),
    ) -> AllocResult<Edge<'id, N, ET>> {
        let hash = hash_node(&node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(nodes, &node) })
        {
            Ok(slot) => {
                drop(node);
                // SAFETY:
                // - `slot` was returned by `find_or_find_insert_slot`. We have exclusive access
                //   to the hash table and did not modify it in between.
                // - All edges in the table are untagged and refer to inner nodes.
                Ok(unsafe { nodes.clone_edge_unchecked(self.0.get_at_slot_unchecked(slot)) })
            }
            Err(slot) => {
                let [e1, e2] = insert(node)?;
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe { self.0.insert_in_slot_unchecked(hash, slot, e1) };
                Ok(e2)
            }
        }
    }
1479
1480    /// Perform garbage collection, i.e., remove all nodes whose only
1481    /// remaining reference is the edge stored internally in this set
1482    ///
1483    /// SAFETY: There must not be any "weak" edges, i.e., edges where the
1484    /// reference count is not materialized (apply cache implementations exploit
1485    /// this).
1486    unsafe fn gc(&mut self, store: &Store<'id, N, ET, TM, R, MD, TERMINALS>) {
1487        let inner_nodes = &*store.inner_nodes;
1488        self.0.retain(
1489            |edge| {
1490                // SAFETY: All edges in unique tables are untagged and point to
1491                // inner nodes.
1492                unsafe { inner_nodes.inner_node_unchecked(edge) }.load_rc(Acquire) != 1
1493            },
1494            |edge| {
1495                // SAFETY (next 2): `edge` is untagged and points to an inner node
1496                let slot_ptr = unsafe { inner_nodes.slot_pointer_unchecked(&edge) };
1497                let id = unsafe { edge.node_id_unchecked() };
1498                std::mem::forget(edge);
1499
1500                // SAFETY: Since `rc` is 1, this is the last reference. We use
1501                // `Acquire` order above and `Release` order when decrementing
1502                // reference counters, so we have exclusive node access now. The
1503                // slot contains a node. `id` is the ID of the slot.
1504                unsafe { store.free_slot(&mut *slot_ptr, id) };
1505            },
1506        );
1507    }
1508
1509    /// Remove `node`
1510    ///
1511    /// Returns `Some(edge)` if `node` was present, `None` otherwise
1512    ///
1513    /// SAFETY: There must not be any "weak" edges, i.e., edges where the
1514    /// reference count is not materialized (apply cache implementations exploit
1515    /// this).
1516    #[inline]
1517    unsafe fn remove(
1518        &mut self,
1519        nodes: &SlotSlice<'id, N, TERMINALS>,
1520        node: &N,
1521    ) -> Option<Edge<'id, N, ET>> {
1522        let hash = hash_node(node);
1523        // SAFETY: The hash table only contains untagged edges referencing inner
1524        // nodes.
1525        self.0.remove_entry(hash, unsafe { Self::eq(nodes, node) })
1526    }
1527
1528    /// Iterate over all edges pointing to nodes in the set
1529    #[inline]
1530    fn iter(&self) -> LevelViewIter<'_, 'id, N, ET> {
1531        LevelViewIter(self.0.iter())
1532    }
1533
1534    /// Iterator that consumes all [`Edge`]s in the set
1535    #[inline]
1536    fn drain(&mut self) -> linear_hashtbl::raw::Drain<'_, Edge<'id, N, ET>, u32> {
1537        self.0.drain()
1538    }
1539}
1540
1541impl<N, ET, TM, R, MD, const TERMINALS: usize> Drop
1542    for LevelViewSet<'_, N, ET, TM, R, MD, TERMINALS>
1543{
1544    #[inline]
1545    fn drop(&mut self) {
1546        // If the nodes need to be dropped, this is handled by the `Manager`
1547        // `Drop` impl or the `TakenLevelView` `Drop` impl, respectively.
1548        self.0.reset_no_drop();
1549    }
1550}
1551
1552impl<N, ET, TM, R, MD, const TERMINALS: usize> Default
1553    for LevelViewSet<'_, N, ET, TM, R, MD, TERMINALS>
1554{
1555    #[inline]
1556    fn default() -> Self {
1557        Self(Default::default(), PhantomData)
1558    }
1559}
1560
1561impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> IntoIterator
1562    for LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>
1563{
1564    type Item = Edge<'id, N, ET>;
1565
1566    type IntoIter = linear_hashtbl::raw::IntoIter<Edge<'id, N, ET>, u32>;
1567
1568    fn into_iter(self) -> Self::IntoIter {
1569        let this = ManuallyDrop::new(self);
1570        // SAFETY: We move out of `this` (and forget `this`)
1571        let set = unsafe { std::ptr::read(&this.0) };
1572        set.into_iter()
1573    }
1574}
1575
1576pub struct LevelViewIter<'a, 'id, N, ET>(linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET>, u32>);
1577
1578impl<'a, 'id, N, ET> Iterator for LevelViewIter<'a, 'id, N, ET> {
1579    type Item = &'a Edge<'id, N, ET>;
1580
1581    #[inline]
1582    fn next(&mut self) -> Option<Self::Item> {
1583        self.0.next()
1584    }
1585
1586    #[inline]
1587    fn size_hint(&self) -> (usize, Option<usize>) {
1588        self.0.size_hint()
1589    }
1590}
1591
1592impl<N, ET> ExactSizeIterator for LevelViewIter<'_, '_, N, ET> {
1593    #[inline(always)]
1594    fn len(&self) -> usize {
1595        self.0.len()
1596    }
1597}
1598impl<'a, 'id, N, ET> FusedIterator for LevelViewIter<'a, 'id, N, ET> where
1599    linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET>, u32>: FusedIterator
1600{
1601}
1602
1603pub struct LevelView<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
1604where
1605    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1606    ET: Tag,
1607    TM: TerminalManager<'id, N, ET, TERMINALS>,
1608    MD: DropWith<Edge<'id, N, ET>>,
1609{
1610    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
1611    var_level_map: &'a VarLevelMap,
1612    /// SAFETY invariant: If set to true, a garbage collection is prepared
1613    /// (i.e., there are no "weak" edges).
1614    allow_node_removal: bool,
1615    level: LevelNo,
1616    set: MutexGuard<'a, LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>,
1617}
1618
1619unsafe impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
1620    oxidd_core::LevelView<Edge<'id, N, ET>, N> for LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>
1621where
1622    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1623    ET: Tag,
1624    TM: TerminalManager<'id, N, ET, TERMINALS>,
1625    MD: DropWith<Edge<'id, N, ET>>,
1626{
1627    type Iterator<'b>
1628        = LevelViewIter<'b, 'id, N, ET>
1629    where
1630        Self: 'b,
1631        Edge<'id, N, ET>: 'b;
1632
1633    type Taken = TakenLevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>;
1634
1635    #[inline(always)]
1636    fn len(&self) -> usize {
1637        self.set.len()
1638    }
1639
1640    #[inline(always)]
1641    fn level_no(&self) -> LevelNo {
1642        self.level
1643    }
1644
1645    #[inline]
1646    fn reserve(&mut self, additional: usize) {
1647        self.set.reserve(additional);
1648    }
1649
1650    #[inline]
1651    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET>> {
1652        self.set.get(&self.store.inner_nodes, node)
1653    }
1654
1655    #[inline]
1656    fn insert(&mut self, edge: Edge<'id, N, ET>) -> bool {
1657        debug_assert!(!edge.is_tagged(), "`edge` should not be tagged");
1658        // No need to check if the node referenced by `edge` is stored in
1659        // `self.store` due to lifetime restrictions.
1660        let nodes = &self.store.inner_nodes;
1661        nodes.inner_node(&edge).assert_level_matches(self.level);
1662        self.set.insert(nodes, edge)
1663    }
1664
1665    #[inline(always)]
1666    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET>> {
1667        node.assert_level_matches(self.level);
1668        // No need to check if the children of `node` are stored in `self.store`
1669        // due to lifetime restrictions.
1670        self.set.get_or_insert(
1671            &self.store.inner_nodes,
1672            node,
1673            |node| self.store.add_node(node),
1674            |node| node.drop_with(|edge| self.store.drop_edge(edge)),
1675        )
1676    }
1677
1678    #[inline]
1679    fn gc(&mut self) {
1680        if self.allow_node_removal {
1681            // SAFETY: By invariant, node removal is allowed.
1682            unsafe { self.set.gc(self.store) };
1683        }
1684    }
1685
1686    #[inline]
1687    fn remove(&mut self, node: &N) -> bool {
1688        if !self.allow_node_removal {
1689            return false;
1690        }
1691        // SAFETY: By invariant, node removal is allowed.
1692        match unsafe { self.set.remove(&self.store.inner_nodes, node) } {
1693            Some(edge) => {
1694                // SAFETY: `edge` is untagged and points to an inner node
1695                unsafe { self.store.drop_unique_table_edge(edge) };
1696                true
1697            }
1698            None => false,
1699        }
1700    }
1701
1702    #[inline]
1703    unsafe fn swap(&mut self, other: &mut Self) {
1704        self.var_level_map.swap_levels(self.level, other.level);
1705        std::mem::swap(&mut *self.set, &mut *other.set);
1706    }
1707
1708    #[inline]
1709    fn iter(&self) -> Self::Iterator<'_> {
1710        self.set.iter()
1711    }
1712
1713    #[inline(always)]
1714    fn take(&mut self) -> Self::Taken {
1715        TakenLevelView {
1716            store: self.store,
1717            var_level_map: self.var_level_map,
1718            allow_node_removal: self.allow_node_removal,
1719            level: self.level,
1720            set: std::mem::take(&mut self.set),
1721        }
1722    }
1723}
1724
1725pub struct TakenLevelView<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
1726where
1727    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1728    ET: Tag,
1729    TM: TerminalManager<'id, N, ET, TERMINALS>,
1730    MD: DropWith<Edge<'id, N, ET>>,
1731{
1732    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
1733    var_level_map: &'a VarLevelMap,
1734    /// SAFETY invariant: If set to true, a garbage collection is prepared
1735    /// (i.e., there are no "weak" edges).
1736    ///
1737    /// Note that due to lifetime restrictions there is no way to have a
1738    /// `TakenLevelView { allow_node_removal: true, .. }` when the associated
1739    /// `Manager` has `reorder_gc_prepared` set to `false`.
1740    allow_node_removal: bool,
1741    level: LevelNo,
1742    set: LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>,
1743}
1744
1745unsafe impl<'id, N, ET, TM, R, MD, const TERMINALS: usize>
1746    oxidd_core::LevelView<Edge<'id, N, ET>, N>
1747    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, TERMINALS>
1748where
1749    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1750    ET: Tag,
1751    TM: TerminalManager<'id, N, ET, TERMINALS>,
1752    MD: DropWith<Edge<'id, N, ET>>,
1753{
1754    type Iterator<'b>
1755        = LevelViewIter<'b, 'id, N, ET>
1756    where
1757        Self: 'b,
1758        Edge<'id, N, ET>: 'b;
1759
1760    type Taken = Self;
1761
1762    #[inline(always)]
1763    fn len(&self) -> usize {
1764        self.set.len()
1765    }
1766
1767    #[inline(always)]
1768    fn level_no(&self) -> LevelNo {
1769        self.level
1770    }
1771
1772    #[inline]
1773    fn reserve(&mut self, additional: usize) {
1774        self.set.reserve(additional);
1775    }
1776
1777    #[inline]
1778    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET>> {
1779        self.set.get(&self.store.inner_nodes, node)
1780    }
1781
1782    #[inline]
1783    fn insert(&mut self, edge: Edge<'id, N, ET>) -> bool {
1784        debug_assert!(!edge.is_tagged(), "`edge` should not be tagged");
1785        // No need to check if the node referenced by `edge` is stored in
1786        // `self.store` due to lifetime restrictions.
1787        let nodes = &self.store.inner_nodes;
1788        nodes.inner_node(&edge).assert_level_matches(self.level);
1789        self.set.insert(nodes, edge)
1790    }
1791
1792    #[inline(always)]
1793    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET>> {
1794        node.assert_level_matches(self.level);
1795        // No need to check if the children of `node` are stored in `self.store`
1796        // due to lifetime restrictions.
1797        self.set.get_or_insert(
1798            &self.store.inner_nodes,
1799            node,
1800            |node| self.store.add_node(node),
1801            |node| node.drop_with(|edge| self.store.drop_edge(edge)),
1802        )
1803    }
1804
1805    #[inline]
1806    fn gc(&mut self) {
1807        if self.allow_node_removal {
1808            // SAFETY: By invariant, node removal is allowed.
1809            unsafe { self.set.gc(self.store) };
1810        }
1811    }
1812
1813    #[inline]
1814    fn remove(&mut self, node: &N) -> bool {
1815        if !self.allow_node_removal {
1816            return false;
1817        }
1818        // SAFETY: By invariant, node removal is allowed.
1819        match unsafe { self.set.remove(&self.store.inner_nodes, node) } {
1820            Some(edge) => {
1821                // SAFETY: `edge` is untagged and points to an inner node
1822                unsafe { self.store.drop_unique_table_edge(edge) };
1823                true
1824            }
1825            None => false,
1826        }
1827    }
1828
1829    #[inline(always)]
1830    unsafe fn swap(&mut self, other: &mut Self) {
1831        self.var_level_map.swap_levels(self.level, other.level);
1832        std::mem::swap(&mut self.set, &mut other.set);
1833    }
1834
1835    #[inline]
1836    fn iter(&self) -> Self::Iterator<'_> {
1837        self.set.iter()
1838    }
1839
1840    #[inline(always)]
1841    fn take(&mut self) -> Self::Taken {
1842        Self {
1843            store: self.store,
1844            var_level_map: self.var_level_map,
1845            allow_node_removal: self.allow_node_removal,
1846            level: self.level,
1847            set: std::mem::take(&mut self.set),
1848        }
1849    }
1850}
1851
1852impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> Drop
1853    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, TERMINALS>
1854where
1855    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1856    ET: Tag,
1857    TM: TerminalManager<'id, N, ET, TERMINALS>,
1858    MD: DropWith<Edge<'id, N, ET>>,
1859{
1860    fn drop(&mut self) {
1861        for edge in self.set.drain() {
1862            // SAFETY: Edges in unique tables are untagged and point to inner
1863            // nodes.
1864            unsafe { self.store.drop_unique_table_edge(edge) }
1865        }
1866    }
1867}
1868
1869// --- LevelIter ---------------------------------------------------------------
1870
1871pub struct LevelIter<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize>
1872where
1873    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1874    ET: Tag,
1875    TM: TerminalManager<'id, N, ET, TERMINALS>,
1876    MD: DropWith<Edge<'id, N, ET>>,
1877{
1878    store: &'a Store<'id, N, ET, TM, R, MD, TERMINALS>,
1879    var_level_map: &'a VarLevelMap,
1880    /// SAFETY invariant: If set to true, a garbage collection is prepared
1881    /// (i.e., there are no "weak" edges).
1882    allow_node_removal: bool,
1883    level_front: LevelNo,
1884    level_back: LevelNo,
1885    it: std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>,
1886}
1887
1888impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize> Iterator
1889    for LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
1890where
1891    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1892    ET: Tag,
1893    TM: TerminalManager<'id, N, ET, TERMINALS>,
1894    MD: DropWith<Edge<'id, N, ET>>,
1895{
1896    type Item = LevelView<'a, 'id, N, ET, TM, R, MD, TERMINALS>;
1897
1898    #[inline]
1899    fn next(&mut self) -> Option<Self::Item> {
1900        let mutex = self.it.next()?;
1901        let level = self.level_front;
1902        self.level_front += 1;
1903        Some(LevelView {
1904            store: self.store,
1905            var_level_map: self.var_level_map,
1906            allow_node_removal: self.allow_node_removal,
1907            level,
1908            set: mutex.lock(),
1909        })
1910    }
1911
1912    #[inline]
1913    fn size_hint(&self) -> (usize, Option<usize>) {
1914        self.it.size_hint()
1915    }
1916}
1917
1918impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> ExactSizeIterator
1919    for LevelIter<'_, 'id, N, ET, TM, R, MD, TERMINALS>
1920where
1921    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1922    ET: Tag,
1923    TM: TerminalManager<'id, N, ET, TERMINALS>,
1924    MD: DropWith<Edge<'id, N, ET>>,
1925{
1926    #[inline(always)]
1927    fn len(&self) -> usize {
1928        self.it.len()
1929    }
1930}
1931impl<'a, 'id, N, ET, TM, R, MD, const TERMINALS: usize> FusedIterator
1932    for LevelIter<'a, 'id, N, ET, TM, R, MD, TERMINALS>
1933where
1934    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1935    ET: Tag,
1936    TM: TerminalManager<'id, N, ET, TERMINALS>,
1937    MD: DropWith<Edge<'id, N, ET>>,
1938    std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, TERMINALS>>>: FusedIterator,
1939{
1940}
1941
1942impl<'id, N, ET, TM, R, MD, const TERMINALS: usize> DoubleEndedIterator
1943    for LevelIter<'_, 'id, N, ET, TM, R, MD, TERMINALS>
1944where
1945    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
1946    ET: Tag,
1947    TM: TerminalManager<'id, N, ET, TERMINALS>,
1948    MD: DropWith<Edge<'id, N, ET>>,
1949{
1950    fn next_back(&mut self) -> Option<Self::Item> {
1951        let mutex = self.it.next_back()?;
1952        self.level_back -= 1;
1953        Some(LevelView {
1954            store: self.store,
1955            var_level_map: self.var_level_map,
1956            allow_node_removal: self.allow_node_removal,
1957            level: self.level_back,
1958            set: mutex.lock(),
1959        })
1960    }
1961}
1962
1963// === ManagerRef ==============================================================
1964
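/// Shared, owning reference to a manager, i.e., a strong `Arc` reference to
/// the underlying `Store`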
1965#[repr(transparent)]
1966#[derive_where(Clone)]
1967pub struct ManagerRef<
1968    NC: InnerNodeCons<ET>,
1969    ET: Tag,
1970    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
1971    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
1972    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
1973    const TERMINALS: usize,
1974>(
1975    Arc<
1976        Store<
1977            'static,
1978            NC::T<'static>,
1979            ET,
1980            TMC::T<'static>,
1981            RC::T<'static>,
1982            MDC::T<'static>,
1983            TERMINALS,
1984        >,
1985    >,
1986);
1987
1988impl<
1989        NC: InnerNodeCons<ET>,
1990        ET: Tag,
1991        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
1992        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
1993        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
1994        const TERMINALS: usize,
1995    > Drop for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
1996{
1997    fn drop(&mut self) {
1998        if Arc::strong_count(&self.0) == 2 {
1999            // This is the second-to-last reference. The last reference
2000            // belongs to the GC thread. Terminate it.
2001            let gc_signal = &self.0.gc_signal;
2002            *gc_signal.0.lock() = GCSignal::Quit;
2003            gc_signal.1.notify_one();
2004        }
2005    }
2006}
2007
2008impl<
2009        NC: InnerNodeCons<ET>,
2010        ET: Tag,
2011        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2012        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2013        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2014        const TERMINALS: usize,
2015    > ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2016{
2017    /// Convert `self` into a raw pointer, e.g., for use in a foreign function
2018    /// interface.
2019    ///
2020    /// This method does not change any reference counters. To avoid a memory
2021    /// leak, use [`Self::from_raw()`] to convert the pointer back into a
2022    /// `ManagerRef`.
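    ///
    /// A minimal round-trip sketch (marked `ignore` since the concrete type
    /// parameters depend on the instantiated diagram kind):
    ///
    /// ```ignore
    /// let raw: *const std::ffi::c_void = manager_ref.into_raw();
    /// // ... pass `raw` across the FFI boundary ...
    /// // SAFETY: `raw` was obtained via `into_raw()` and is converted back
    /// // exactly once.
    /// let manager_ref = unsafe { ManagerRef::from_raw(raw) };
    /// ```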
2023    #[inline(always)]
2024    pub fn into_raw(self) -> *const std::ffi::c_void {
2025        let this = ManuallyDrop::new(self);
2026        // SAFETY: we move out of `this`
2027        Arc::into_raw(unsafe { std::ptr::read(&this.0) }).cast()
2028    }
2029
2030    /// Convert `raw` into a `ManagerRef`
2031    ///
2032    /// # Safety
2033    ///
2034    /// `raw` must have been obtained via [`Self::into_raw()`]. This function
2035    /// does not change any reference counters, so calling this function
2036    /// multiple times for the same pointer may lead to use-after-free bugs
2037    /// depending on the usage of the returned `ManagerRef`.
2038    #[inline(always)]
2039    pub unsafe fn from_raw(raw: *const std::ffi::c_void) -> Self {
2040        // SAFETY: Invariants are upheld by the caller.
2041        Self(unsafe { Arc::from_raw(raw.cast()) })
2042    }
2043}
2044
2045impl<
2046        NC: InnerNodeCons<ET>,
2047        ET: Tag,
2048        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2049        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2050        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2051        const TERMINALS: usize,
2052    > PartialEq for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2053{
2054    #[inline]
2055    fn eq(&self, other: &Self) -> bool {
2056        Arc::ptr_eq(&self.0, &other.0)
2057    }
2058}
2059impl<
2060        NC: InnerNodeCons<ET>,
2061        ET: Tag,
2062        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2063        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2064        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2065        const TERMINALS: usize,
2066    > Eq for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2067{
2068}
2069impl<
2070        NC: InnerNodeCons<ET>,
2071        ET: Tag,
2072        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2073        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2074        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2075        const TERMINALS: usize,
2076    > PartialOrd for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2077{
2078    #[inline]
2079    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2080        Some(self.cmp(other))
2081    }
2082}
2083impl<
2084        NC: InnerNodeCons<ET>,
2085        ET: Tag,
2086        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2087        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2088        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2089        const TERMINALS: usize,
2090    > Ord for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2091{
2092    #[inline]
2093    fn cmp(&self, other: &Self) -> Ordering {
2094        std::ptr::from_ref(&*self.0).cmp(&std::ptr::from_ref(&*other.0))
2095    }
2096}
2097impl<
2098        NC: InnerNodeCons<ET>,
2099        ET: Tag,
2100        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2101        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2102        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2103        const TERMINALS: usize,
2104    > Hash for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2105{
2106    #[inline]
2107    fn hash<H: Hasher>(&self, state: &mut H) {
2108        std::ptr::from_ref(&*self.0).hash(state);
2109    }
2110}
2111
2112impl<
2113        'a,
2114        'id,
2115        NC: InnerNodeCons<ET>,
2116        ET: Tag,
2117        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2118        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2119        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2120        const TERMINALS: usize,
2121    > From<&'a M<'id, NC, ET, TMC, RC, MDC, TERMINALS>>
2122    for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2123{
2124    fn from(manager: &'a M<'id, NC, ET, TMC, RC, MDC, TERMINALS>) -> Self {
2125        let ptr: *const M<'static, NC, ET, TMC, RC, MDC, TERMINALS> =
2126            std::ptr::from_ref(manager).cast();
2127        // SAFETY:
2128        // - We just changed "identifier" lifetimes.
2129        // - The pointer was obtained via `Arc::as_ptr()`, and since we have a
2130        //   `&Manager` reference, the counter is at least 1.
2131        ManagerRef(unsafe {
2132            let manager = &*ptr;
2133            Arc::increment_strong_count(manager.store);
2134            Arc::from_raw(manager.store)
2135        })
2136    }
2137}
2138
2139impl<
2140        NC: InnerNodeCons<ET>,
2141        ET: Tag,
2142        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2143        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2144        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2145        const TERMINALS: usize,
2146    > oxidd_core::ManagerRef for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2147{
2148    type Manager<'id> = M<'id, NC, ET, TMC, RC, MDC, TERMINALS>;
2149
2150    fn with_manager_shared<F, T>(&self, f: F) -> T
2151    where
2152        F: for<'id> FnOnce(&Self::Manager<'id>) -> T,
2153    {
2154        let local_guard = self.0.prepare_local_state();
2155        let res = f(&self.0.manager.shared());
2156        drop(local_guard);
2157        res
2158    }
2159
2160    fn with_manager_exclusive<F, T>(&self, f: F) -> T
2161    where
2162        F: for<'id> FnOnce(&mut Self::Manager<'id>) -> T,
2163    {
2164        let local_guard = self.0.prepare_local_state();
2165        let res = f(&mut self.0.manager.exclusive());
2166        drop(local_guard);
2167        res
2168    }
2169}
2170
2171/// Create a new manager
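///
/// `inner_node_capacity` and `terminal_node_capacity` bound the numbers of
/// inner and terminal nodes, respectively, and `threads` is the size of the
/// associated worker pool.
///
/// A usage sketch (marked `ignore`; `MyNodeCons`, `MyTag`, `MyTMC`,
/// `MyRulesCons`, and `MyMDC` are hypothetical type constructors for some
/// concrete diagram kind):
///
/// ```ignore
/// let mref: ManagerRef<MyNodeCons, MyTag, MyTMC, MyRulesCons, MyMDC, 2> =
///     new_manager(1 << 24, 2, 4, my_manager_data);
/// oxidd_core::ManagerRef::with_manager_shared(&mref, |manager| {
///     // create variables, apply operations, etc.
/// });
/// ```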
2172pub fn new_manager<
2173    NC: InnerNodeCons<ET> + 'static,
2174    ET: Tag + Send + Sync + 'static,
2175    TMC: TerminalManagerCons<NC, ET, TERMINALS> + 'static,
2176    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS> + 'static,
2177    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS> + 'static,
2178    const TERMINALS: usize,
2179>(
2180    inner_node_capacity: u32,
2181    terminal_node_capacity: u32,
2182    threads: u32,
2183    data: MDC::T<'static>,
2184) -> ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS> {
2185    // Evaluate constant for assertions
2186    let () = Store::<
2187        'static,
2188        NC::T<'static>,
2189        ET,
2190        TMC::T<'static>,
2191        RC::T<'static>,
2192        MDC::T<'static>,
2193        TERMINALS,
2194    >::CHECK_TERMINALS;
2195
2196    let max_inner_capacity = if usize::BITS > u32::BITS {
2197        ((1usize << (u32::BITS - Edge::<NC::T<'static>, ET>::TAG_BITS)) - TERMINALS) as u32
2198    } else {
2199        // 32-bit address space. Every node consumes 16 bytes plus at least
2200        // 4 bytes in the unique table. Furthermore, there is the apply cache.
2201        // If we try to reserve space for more than `u32::MAX / 24` nodes, we
2202        // will most likely run out of memory.
2203        u32::MAX / 24
2204    };
2205    let inner_node_capacity = std::cmp::min(inner_node_capacity, max_inner_capacity);
2206
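    // Low and high water marks for garbage collection at 90 % and 95 % of the
    // inner node capacity, respectively. The GC thread spawned below resets
    // `gc_state` to `Init` once the node count has dropped below the low
    // water mark again. For capacities below 100, the two marks coincide due
    // to the integer division, and garbage collection is disabled.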
2207    let gc_lwm = inner_node_capacity / 100 * 90;
2208    let gc_hwm = inner_node_capacity / 100 * 95;
2209
2210    let arc = Arc::new(Store {
2211        inner_nodes: SlotSlice::new_boxed(inner_node_capacity),
2212        state: CachePadded::new(Mutex::new(SharedStoreState {
2213            next_free: Vec::new(),
2214            allocated: 0,
2215            node_count: 0,
2216            gc_state: if gc_lwm < gc_hwm {
2217                GCState::Init
2218            } else {
2219                GCState::Disabled
2220            },
2221            gc_lwm,
2222            gc_hwm,
2223        })),
2224        manager: RwLock::new(Manager {
2225            unique_table: Vec::new(),
2226            var_level_map: VarLevelMap::new(),
2227            var_name_map: VarNameMap::new(),
2228            data: ManuallyDrop::new(data),
2229            store: std::ptr::null(),
2230            gc_count: AtomicU64::new(0),
2231            reorder_count: 0,
2232            gc_ongoing: TryLock::new(),
2233            reorder_gc_prepared: false,
2234        }),
2235        terminal_manager: TMC::T::<'static>::with_capacity(terminal_node_capacity),
2236        gc_signal: (Mutex::new(GCSignal::RunGc), Condvar::new()),
2237        workers: crate::workers::Workers::new(threads),
2238    });
2239
2240    let mut manager = arc.manager.exclusive();
2241    manager.store = Arc::as_ptr(&arc);
2242    drop(manager);
2243
2244    let store_addr = arc.addr();
2245    arc.workers.pool.spawn_broadcast(move |_| {
2246        // The workers are dedicated to this store.
2247        LOCAL_STORE_STATE.with(|state| state.current_store.set(store_addr))
2248    });
2249
2250    // spell-checker:ignore mref
2251    let gc_mref: ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS> = ManagerRef(arc.clone());
2252    std::thread::Builder::new()
2253        .name("oxidd mi gc".to_string())
2254        .spawn(move || {
2255            // The worker is dedicated to this store.
2256            LOCAL_STORE_STATE.with(|state| state.current_store.set(store_addr));
2257
2258            let store = &*gc_mref.0;
2259            loop {
2260                let mut lock = store.gc_signal.0.lock();
2261                store.gc_signal.1.wait(&mut lock);
2262                if *lock == GCSignal::Quit {
2263                    break;
2264                }
2265                drop(lock);
2266
2267                // parking_lot `Condvar`s have no spurious wakeups -> run gc now
2268                oxidd_core::ManagerRef::with_manager_shared(&gc_mref, |manager| {
2269                    oxidd_core::Manager::gc(manager);
2270                });
2271
2272                let mut shared = store.state.lock();
2273                LOCAL_STORE_STATE.with(|local| {
2274                    if local.next_free.get() != 0 {
2275                        shared.node_count += local.node_count_delta.replace(0) as i64;
2276                        shared.next_free.push(local.next_free.replace(0));
2277                    }
2278                });
2279
2280                if shared.node_count < shared.gc_lwm as i64 && shared.gc_state != GCState::Disabled
2281                {
2282                    shared.gc_state = GCState::Init;
2283                }
2284            }
2285        })
2286        .unwrap();
2287
2288    // initialize the manager data
2289    let local_guard = arc.prepare_local_state();
2290    arc.manager.exclusive().init();
2291    drop(local_guard);
2292
2293    ManagerRef(arc)
2294}
2295
2296impl<
2297        NC: InnerNodeCons<ET>,
2298        ET: Tag + Send + Sync,
2299        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2300        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2301        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2302        const TERMINALS: usize,
2303    > oxidd_core::HasWorkers for ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>
2304{
2305    type WorkerPool = crate::workers::Workers;
2306
2307    #[inline]
2308    fn workers(&self) -> &Self::WorkerPool {
2309        &self.0.workers
2310    }
2311}
2312
2313// === Function ================================================================
2314
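/// Function in some manager, i.e., a [`ManagerRef`] paired with an owned
/// [`Edge`] into that manager's node store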
2315#[repr(C)]
2316#[derive_where(PartialEq, Eq, PartialOrd, Ord, Hash)]
2317pub struct Function<
2318    NC: InnerNodeCons<ET>,
2319    ET: Tag,
2320    TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2321    RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2322    MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2323    const TERMINALS: usize,
2324> {
2325    store: ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>,
2326    edge: ManuallyDrop<Edge<'static, NC::T<'static>, ET>>,
2327}
2328
2329impl<
2330        NC: InnerNodeCons<ET>,
2331        ET: Tag,
2332        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2333        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2334        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2335        const TERMINALS: usize,
2336    > Function<NC, ET, TMC, RC, MDC, TERMINALS>
2337{
2338    /// Convert `self` into a raw pointer and edge value, e.g., for use in a
2339    /// foreign function interface.
2340    ///
2341    /// This method does not change any reference counters. To avoid a memory
2342    /// leak, use [`Self::from_raw()`] to convert pointer and edge value back
2343    /// into a `Function`.
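    ///
    /// A minimal round-trip sketch (marked `ignore`; the concrete type
    /// parameters are placeholders):
    ///
    /// ```ignore
    /// let (ptr, edge_val) = func.into_raw();
    /// // ... pass the pair across the FFI boundary ...
    /// // SAFETY: the pair was obtained via `into_raw()` and is converted
    /// // back exactly once.
    /// let func = unsafe { Function::from_raw(ptr, edge_val) };
    /// ```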
2344    #[inline(always)]
2345    pub fn into_raw(self) -> (*const std::ffi::c_void, u32) {
2346        let this = ManuallyDrop::new(self);
2347        // SAFETY: We forget `this`
2348        (
2349            unsafe { std::ptr::read(&this.store) }.into_raw(),
2350            this.edge.0,
2351        )
2352    }
2353
2354    /// Convert `ptr` and `edge_val` into a `Function`
2355    ///
2356    /// # Safety
2357    ///
2358    /// `ptr` and `edge_val` must have been obtained via [`Self::into_raw()`].
2359    /// This function does not change any reference counters, so calling this
2360    /// function multiple times for the same pointer may lead to use-after-free
2361    /// bugs depending on the usage of the returned `Function`.
2362    #[inline(always)]
2363    pub unsafe fn from_raw(ptr: *const std::ffi::c_void, edge_val: u32) -> Self {
2364        // SAFETY: Invariants are upheld by the caller.
2365        Self {
2366            store: unsafe { ManagerRef::from_raw(ptr) },
2367            edge: ManuallyDrop::new(Edge(edge_val, PhantomData)),
2368        }
2369    }
2370}
2371
2372impl<
2373        NC: InnerNodeCons<ET>,
2374        ET: Tag,
2375        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2376        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2377        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2378        const TERMINALS: usize,
2379    > Drop for Function<NC, ET, TMC, RC, MDC, TERMINALS>
2380{
2381    #[inline]
2382    fn drop(&mut self) {
2383        // SAFETY: `self.edge` is never used again.
2384        let edge = unsafe { ManuallyDrop::take(&mut self.edge) };
2385        self.store.0.drop_edge(edge);
2386    }
2387}
2388
2389impl<
2390        NC: InnerNodeCons<ET>,
2391        ET: Tag,
2392        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2393        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2394        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2395        const TERMINALS: usize,
2396    > Clone for Function<NC, ET, TMC, RC, MDC, TERMINALS>
2397{
2398    #[inline]
2399    fn clone(&self) -> Self {
2400        Self {
2401            store: self.store.clone(),
2402            edge: ManuallyDrop::new(self.store.0.clone_edge(&self.edge)),
2403        }
2404    }
2405}
2406
2407unsafe impl<
2408        NC: InnerNodeCons<ET>,
2409        ET: Tag,
2410        TMC: TerminalManagerCons<NC, ET, TERMINALS>,
2411        RC: DiagramRulesCons<NC, ET, TMC, MDC, TERMINALS>,
2412        MDC: ManagerDataCons<NC, ET, TMC, RC, TERMINALS>,
2413        const TERMINALS: usize,
2414    > oxidd_core::function::Function for Function<NC, ET, TMC, RC, MDC, TERMINALS>
2415{
2416    const REPR_ID: &str = "<none>";
2417
2418    type Manager<'id> = M<'id, NC, ET, TMC, RC, MDC, TERMINALS>;
2419
2420    type ManagerRef = ManagerRef<NC, ET, TMC, RC, MDC, TERMINALS>;
2421
2422    #[inline]
2423    fn from_edge<'id>(manager: &Self::Manager<'id>, edge: EdgeOfFunc<'id, Self>) -> Self {
2424        let ptr: *const Self::Manager<'static> = std::ptr::from_ref(manager).cast();
2425        // SAFETY:
2426        // - We just changed "identifier" lifetimes.
2427        // - The pointer was obtained via `Arc::as_ptr()`, and since we have a
2428        //   `&Manager` reference, the counter is at least 1.
2429        let store = ManagerRef(unsafe {
2430            let manager = &*ptr;
2431            Arc::increment_strong_count(manager.store);
2432            Arc::from_raw(manager.store)
2433        });
2434        // Avoid transmuting `edge` just to change lifetimes
2435        let id = edge.0;
2436        std::mem::forget(edge);
2437        Self {
2438            store,
2439            edge: ManuallyDrop::new(Edge(id, PhantomData)),
2440        }
2441    }
2442
2443    #[inline]
2444    fn as_edge<'id>(&self, manager: &Self::Manager<'id>) -> &EdgeOfFunc<'id, Self> {
2445        assert!(
2446            std::ptr::eq(self.store.0.manager.data_ptr().cast(), manager),
2447            "This function does not belong to `manager`"
2448        );
2449
2450        let ptr: *const EdgeOfFunc<'id, Self> = std::ptr::from_ref(&*self.edge).cast();
2451        // SAFETY: Just changing lifetimes; we checked that `self.edge` belongs
2452        // to `manager` above.
2453        unsafe { &*ptr }
2454    }
2455
2456    #[inline]
2457    fn into_edge<'id>(self, manager: &Self::Manager<'id>) -> EdgeOfFunc<'id, Self> {
2458        assert!(
2459            std::ptr::eq(self.store.0.manager.data_ptr().cast(), manager),
2460            "This function does not belong to `manager`"
2461        );
2462        // We only want to drop the store ref (but `Function` implements `Drop`,
2463        // so we cannot destructure `self`).
2464        let mut this = ManuallyDrop::new(self);
2465        let edge = Edge(this.edge.0, PhantomData);
2466        // SAFETY: we forget `self`
2467        unsafe { std::ptr::drop_in_place(&mut this.store) };
2468        edge
2469    }
2470
2471    #[inline]
2472    fn manager_ref(&self) -> Self::ManagerRef {
2473        self.store.clone()
2474    }
2475
2476    fn with_manager_shared<F, T>(&self, f: F) -> T
2477    where
2478        F: for<'id> FnOnce(&Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
2479    {
2480        let local_guard = self.store.0.prepare_local_state();
2481        let res = f(&self.store.0.manager.shared(), &self.edge);
2482        drop(local_guard);
2483        res
2484    }
2485
2486    fn with_manager_exclusive<F, T>(&self, f: F) -> T
2487    where
2488        F: for<'id> FnOnce(&mut Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
2489    {
2490        let local_guard = self.store.0.prepare_local_state();
2491        let res = f(&mut self.store.0.manager.exclusive(), &self.edge);
2492        drop(local_guard);
2493        res
2494    }
2495}
2496
2497// === NodeSet =================================================================
2498
2499/// Node set implementation using a [bit set][FixedBitSet]
2500///
2501/// Since nodes are stored in an array, we can use a single bit vector. This
2502/// reduces space consumption dramatically and improves performance.
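///
/// A usage sketch (marked `ignore`; `edge` stands for any [`Edge`] belonging
/// to the manager the set is used with):
///
/// ```ignore
/// use oxidd_core::util::NodeSet as _;
///
/// let mut set = NodeSet::default();
/// assert!(set.insert(&edge)); // newly inserted
/// assert!(!set.insert(&edge)); // already present
/// assert!(set.contains(&edge));
/// assert!(set.remove(&edge));
/// assert!(!set.contains(&edge));
/// ```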
2503#[derive(Default, Clone, PartialEq, Eq)]
2504pub struct NodeSet {
2505    len: usize,
2506    data: FixedBitSet,
2507}
2508
2509impl<'id, N: NodeBase, ET: Tag> oxidd_core::util::NodeSet<Edge<'id, N, ET>> for NodeSet {
2510    #[inline(always)]
2511    fn len(&self) -> usize {
2512        self.len
2513    }
2514
2515    #[inline]
2516    fn insert(&mut self, edge: &Edge<'id, N, ET>) -> bool {
2517        let index = edge.node_id();
2518        if index < self.data.len() && self.data.contains(index) {
2519            return false;
2520        }
2521        self.data.grow_and_insert(index);
2522        self.len += 1;
2523        true
2524    }
2525
2526    #[inline]
2527    fn contains(&self, edge: &Edge<'id, N, ET>) -> bool {
2528        let index = edge.node_id();
2529        if index < self.data.len() {
2530            self.data.contains(index)
2531        } else {
2532            false
2533        }
2534    }
2535
2536    #[inline]
2537    fn remove(&mut self, edge: &Edge<'id, N, ET>) -> bool {
2538        let index = edge.node_id();
2539        if index < self.data.len() && self.data.contains(index) {
2540            self.len -= 1;
2541            self.data.remove(index);
2542            true
2543        } else {
2544            false
2545        }
2546    }
2547}
2548
2549// === Additional Trait Implementations ========================================
2550
2551impl<
2552        'id,
2553        N: NodeBase + InnerNode<Edge<'id, N, ET>>,
2554        ET: Tag,
2555        TM: TerminalManager<'id, N, ET, TERMINALS>,
2556        R: DiagramRules<Edge<'id, N, ET>, N, TM::TerminalNode>,
2557        MD: oxidd_core::HasApplyCache<Self, O>
2558            + ManagerEventSubscriber<Self>
2559            + DropWith<Edge<'id, N, ET>>,
2560        O: Copy,
2561        const TERMINALS: usize,
2562    > oxidd_core::HasApplyCache<Self, O> for Manager<'id, N, ET, TM, R, MD, TERMINALS>
2563{
2564    type ApplyCache = MD::ApplyCache;
2565
2566    #[inline]
2567    fn apply_cache(&self) -> &Self::ApplyCache {
2568        self.data.apply_cache()
2569    }
2570
2571    #[inline]
2572    fn apply_cache_mut(&mut self) -> &mut Self::ApplyCache {
2573        self.data.apply_cache_mut()
2574    }
2575}
2576
2577impl<'id, T, N, ET, TM, R, MD, const TERMINALS: usize> AsRef<T>
2578    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
2579where
2580    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
2581    ET: Tag,
2582    TM: TerminalManager<'id, N, ET, TERMINALS>,
2583    MD: DropWith<Edge<'id, N, ET>> + AsRef<T>,
2584{
2585    #[inline(always)]
2586    fn as_ref(&self) -> &T {
2587        self.data.as_ref()
2588    }
2589}
2590
2591impl<'id, T, N, ET, TM, R, MD, const TERMINALS: usize> AsMut<T>
2592    for Manager<'id, N, ET, TM, R, MD, TERMINALS>
2593where
2594    N: NodeBase + InnerNode<Edge<'id, N, ET>>,
2595    ET: Tag,
2596    TM: TerminalManager<'id, N, ET, TERMINALS>,
2597    MD: DropWith<Edge<'id, N, ET>> + AsMut<T>,
2598{
2599    #[inline(always)]
2600    fn as_mut(&mut self) -> &mut T {
2601        self.data.as_mut()
2602    }
2603}