// oxidd_manager_pointer/manager.rs

1//! Pointer-Based Manager Implementation
2//!
3//! Abbreviations of generics:
4//!
5//! | Abbreviation | Meaning                           |
6//! | ------------ | --------------------------------- |
7//! | `N`          | Inner Node                        |
8//! | `ET`         | Edge Tag                          |
9//! | `TM`         | Terminal Manager                  |
10//! | `R`          | Diagram Rules                     |
11//! | `MD`         | Manager Data                      |
12//! | `NC`         | Inner Node Type Constructor       |
13//! | `TMC`        | Terminal Manager Type Constructor |
14//! | `RC`         | Diagram Rules Type Constructor    |
15//! | `OP`         | Operation                         |
16
17use std::collections::HashMap;
18use std::hash::{BuildHasherDefault, Hasher};
19use std::iter::FusedIterator;
20use std::marker::PhantomData;
21use std::mem::{align_of, ManuallyDrop};
22use std::ops::Range;
23use std::ptr::{addr_of, addr_of_mut, NonNull};
24use std::sync::atomic::AtomicU64;
25use std::sync::atomic::Ordering::{Acquire, Relaxed};
26
27use arcslab::{ArcSlab, ArcSlabRef, AtomicRefCounted, ExtHandle, IntHandle};
28use derive_where::derive_where;
29use fixedbitset::FixedBitSet;
30use linear_hashtbl::raw::RawTable;
31use parking_lot::Mutex;
32use parking_lot::MutexGuard;
33use rustc_hash::FxHasher;
34
35use oxidd_core::error::DuplicateVarName;
36use oxidd_core::function::EdgeOfFunc;
37use oxidd_core::util::{AbortOnDrop, AllocResult, Borrowed, DropWith, OnDrop, VarNameMap};
38use oxidd_core::{
39    DiagramRules, HasApplyCache, InnerNode, LevelNo, ManagerEventSubscriber, Node, Tag, VarNo,
40};
41
42use crate::node::NodeBase;
43use crate::terminal_manager::TerminalManager;
44use crate::util::{rwlock::RwLock, Invariant, TryLock, VarLevelMap};
45
46// === Type Constructors =======================================================
47
/// Inner node type constructor
///
/// Maps the invariant manager brand `'id` to the concrete inner node type.
pub trait InnerNodeCons<ET: Tag, const TAG_BITS: u32> {
    /// Inner node type for the manager identified by `'id`
    type T<'id>: NodeBase + InnerNode<Edge<'id, Self::T<'id>, ET, TAG_BITS>>;
}
52
/// Terminal manager type constructor
///
/// Maps the invariant manager brand `'id` to the concrete terminal manager
/// type. The terminal node type itself is independent of the brand.
pub trait TerminalManagerCons<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    RC: DiagramRulesCons<NC, ET, Self, MDC, PAGE_SIZE, TAG_BITS>,
    MDC: ManagerDataCons<NC, ET, Self, RC, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>: Sized
{
    /// Terminal node type, shared across all `'id` brands
    type TerminalNode;
    /// Terminal manager type for the manager identified by `'id`
    type T<'id>: TerminalManager<
        'id,
        NC::T<'id>,
        ET,
        MDC::T<'id>,
        PAGE_SIZE,
        TAG_BITS,
        TerminalNode = Self::TerminalNode,
    >;
}
74
/// Diagram rules type constructor
///
/// Maps the invariant manager brand `'id` to the concrete [`DiagramRules`]
/// implementation for the edge, inner node, and terminal node types.
pub trait DiagramRulesCons<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, Self, MDC, PAGE_SIZE, TAG_BITS>,
    MDC: ManagerDataCons<NC, ET, TMC, Self, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>: Sized
{
    /// Diagram rules type for the manager identified by `'id`
    type T<'id>: DiagramRules<
        Edge<'id, NC::T<'id>, ET, TAG_BITS>,
        NC::T<'id>,
        <TMC::T<'id> as TerminalManager<'id, NC::T<'id>, ET, MDC::T<'id>, PAGE_SIZE, TAG_BITS>>::TerminalNode,
    >;
}
91
/// Manager data type constructor
///
/// Maps the invariant manager brand `'id` to the user-supplied manager data
/// type. The data must know how to drop any edges it holds (`DropWith`) and
/// gets notified about manager events (e.g. reordering) via
/// [`ManagerEventSubscriber`].
pub trait ManagerDataCons<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, RC, Self, PAGE_SIZE, TAG_BITS>,
    RC: DiagramRulesCons<NC, ET, TMC, Self, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>: Sized
{
    /// Manager data type for the manager identified by `'id`
    type T<'id>: DropWith<Edge<'id, NC::T<'id>, ET, TAG_BITS>>
        + ManagerEventSubscriber<
            Manager<
                'id,
                NC::T<'id>,
                ET,
                TMC::T<'id>,
                RC::T<'id>,
                Self::T<'id>,
                PAGE_SIZE,
                TAG_BITS,
            >,
        >;
}
116
117// === Manager & Edges =========================================================
118
/// Data stored alongside the inner nodes in the backing `ArcSlab`
///
/// Layout note: `from_manager_ptr()` and `from_terminal_manager_ptr()` use
/// `offset_of!` on this struct to recover the `StoreInner` pointer from a
/// field pointer, so field placement is semantically relevant.
pub struct StoreInner<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    // The manager itself, guarded by a reader-writer lock
    manager: RwLock<Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>,
    // Storage/bookkeeping for terminal nodes
    terminal_manager: TM,
    // Worker threads (presumably for parallel operations — see
    // `crate::workers`; TODO confirm)
    workers: crate::workers::Workers,
}
130
/// Edge: a tagged pointer to either an inner node or a terminal node
///
/// The `PhantomData<Invariant<'id>>` brands the edge with its manager's
/// invariant lifetime, preventing edges of different managers from being
/// mixed up at compile time.
#[repr(transparent)]
#[must_use]
#[derive_where(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Edge<'id, N, ET, const TAG_BITS: u32>(
    /// Points to an `InnerNode` (if `ptr & (1 << TAG_BITS) == 0`) or a terminal
    /// node (if `ptr & (1 << TAG_BITS) != 0`)
    ///
    /// SAFETY invariant: The pointer (as integer) is `>= 1 << (TAG_BITS + 1)`
    /// (i.e. the pointer with all tags removed is still non-null)
    NonNull<()>,
    #[derive_where(skip)] PhantomData<(Invariant<'id>, N, ET)>,
);
143
// SAFETY: An `Edge` is a (tagged) pointer to a node of type `N` (or a
// terminal managed by the `TerminalManager`). The bounds require the
// pointee-related types `N` and `ET` to be `Send + Sync`, so transferring
// the edge to another thread cannot violate their thread-safety.
unsafe impl<N: Send + Sync, ET: Send + Sync, const TAG_BITS: u32> Send
    for Edge<'_, N, ET, TAG_BITS>
{
}
// SAFETY: `&Edge` only allows reading the pointer and accessing the pointee
// through shared references; sound under the same bounds as `Send` above.
unsafe impl<N: Send + Sync, ET: Send + Sync, const TAG_BITS: u32> Sync
    for Edge<'_, N, ET, TAG_BITS>
{
}
152
/// The manager: unique table, variable bookkeeping, and user data
///
/// `#[repr(C)]` keeps the layout predictable; the manager is embedded in a
/// `StoreInner` inside an `ArcSlab` and pointers are recovered via field
/// offsets (see `StoreInner::from_manager_ptr()`).
#[repr(C)]
pub struct Manager<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    // Unique table: one individually locked node set per level
    unique_table: Vec<Mutex<LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>>,
    // Mapping between variable numbers and levels
    var_level_map: VarLevelMap,
    // Mapping between variable numbers and names
    var_name_map: VarNameMap,
    // User-defined manager data; wrapped in `ManuallyDrop` because it is
    // dropped explicitly (via `DropWith`) in `Drop::drop`
    data: ManuallyDrop<MD>,
    // Back-pointer to the enclosing `StoreInner` (see `store_inner_ptr()`)
    store_inner: *const StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>,
    // Presumably the number of garbage collections run so far — initialized
    // to 0 in `init_in`; incremented elsewhere (not visible here)
    gc_count: AtomicU64,
    // Presumably the number of reorderings run so far (see `gc_count`)
    reorder_count: u64,
    // Ensures at most one garbage collection runs at a time
    gc_ongoing: TryLock,
    // If set, `try_remove_node` may remove dead nodes from the unique table
    reorder_gc_prepared: bool,
    phantom: PhantomData<(TM, R)>,
}
172
/// Shorthand: apply the type constructors `NC`, `TMC`, `RC`, and `MDC` to
/// the brand `'id` to obtain the concrete [`Manager`] type
type M<'id, NC, ET, TMC, RC, MDC, const PAGE_SIZE: usize, const TAG_BITS: u32> = Manager<
    'id,
    <NC as InnerNodeCons<ET, TAG_BITS>>::T<'id>,
    ET,
    <TMC as TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>>::T<'id>,
    <RC as DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>>::T<'id>,
    <MDC as ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>>::T<'id>,
    PAGE_SIZE,
    TAG_BITS,
>;
183
// SAFETY: All owned contents of `Manager` (`N`, `ET`, `TM`, `MD`) are
// required to be `Send + Sync` by the bounds below. The only non-auto-Send
// field is the raw `store_inner` back-pointer, which refers to the
// allocation that contains this very manager and therefore moves between
// threads together with it. NOTE(review): `R` is only used via `PhantomData`
// here — confirm that no `R` value is ever stored.
unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + Send + Sync,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Send for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
{
}
// SAFETY: see the `Send` impl above; shared access only exposes the fields
// through shared references (plus the internal locks).
unsafe impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>> + Send + Sync,
        ET: Tag + Send + Sync,
        TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS> + Send + Sync,
        R,
        MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + Send + Sync,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Sync for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
{
}
208
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> Drop
    for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    fn drop(&mut self) {
        if !N::needs_drop() {
            // There is no need to free nodes, so we can just forget all edges
            // and let the `ArcSlab` deallocate its pages.
            // SAFETY: we are in `drop`, so `self.data` is never used again
            unsafe { ManuallyDrop::take(&mut self.data) }.drop_with(std::mem::forget);
        } else {
            // Drop the user data first; it may still hold edges into the
            // unique table, so its edges are released before the table's.
            // SAFETY: we are in `drop`, so `self.data` is never used again
            unsafe { ManuallyDrop::take(&mut self.data) }.drop_with(|edge| {
                if edge.is_inner() {
                    // SAFETY: `edge` points to an inner node
                    unsafe { edge.drop_inner() };
                } else {
                    TM::drop_edge(edge);
                }
            });

            // The unique table holds the last reference to each remaining
            // node (see `drop_from_unique_table`); dropping these edges
            // actually frees the nodes.
            let unique_table = std::mem::take(&mut self.unique_table);
            for level in unique_table {
                for edge in level.into_inner() {
                    // SAFETY: unique table edges are untagged and point to
                    // inner nodes; the type parameters match
                    unsafe { Self::drop_from_unique_table(edge) };
                }
            }
        }
    }
}
241
/// Shared, reference-counted handle to a manager and its node store
///
/// NOTE(review): the `'id` brand is instantiated with `'static` here, erasing
/// the per-manager lifetime; presumably it is re-branded with a fresh
/// invariant lifetime whenever the manager is entered — confirm at the use
/// sites.
#[repr(transparent)]
#[derive_where(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ManagerRef<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
    MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>(
    ArcSlabRef<
        NC::T<'static>,
        StoreInner<
            'static,
            NC::T<'static>,
            ET,
            TMC::T<'static>,
            RC::T<'static>,
            MDC::T<'static>,
            PAGE_SIZE,
            TAG_BITS,
        >,
        PAGE_SIZE,
    >,
);
268
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    /// Initialize the `StoreInner` at `slot` field by field, in place
    ///
    /// # Safety
    ///
    /// `slot` must be valid for writes of `Self`. The pointee is treated as
    /// uninitialized: existing field values are overwritten without being
    /// dropped.
    unsafe fn init_in(slot: *mut Self, data: MD, threads: u32) {
        // The manager starts without any levels/variables; `store_inner`
        // records the back-pointer used by `Manager::store_inner_ptr()`.
        let data = RwLock::new(Manager {
            unique_table: Vec::new(),
            var_level_map: VarLevelMap::new(),
            var_name_map: VarNameMap::new(),
            data: ManuallyDrop::new(data),
            store_inner: slot,
            gc_count: AtomicU64::new(0),
            reorder_count: 0,
            gc_ongoing: TryLock::new(),
            reorder_gc_prepared: false,
            phantom: PhantomData,
        });
        // Write each field via `addr_of_mut!` to avoid creating references
        // to (partially) uninitialized memory.
        unsafe { std::ptr::write(addr_of_mut!((*slot).manager), data) };

        unsafe { TM::new_in(addr_of_mut!((*slot).terminal_manager)) };

        let workers = crate::workers::Workers::new(threads);
        unsafe { std::ptr::write(addr_of_mut!((*slot).workers), workers) };
    }
}
298
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    /// Recover the `StoreInner` pointer from a pointer to its
    /// `terminal_manager` field by subtracting the field offset
    #[inline(always)]
    fn from_terminal_manager_ptr(ptr: *const TM) -> *const Self {
        let byte_offset = const { std::mem::offset_of!(Self, terminal_manager) as isize };
        // SAFETY: For all uses of this function, `ptr` points to a terminal
        // manager contained in a `StoreInner` allocation
        unsafe { ptr.byte_offset(-byte_offset) }.cast()
    }

    /// Recover the `StoreInner` pointer from a pointer to its `manager`
    /// field by subtracting the field offset
    #[inline(always)]
    fn from_manager_ptr(
        ptr: *const RwLock<Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>,
    ) -> *const Self {
        let byte_offset = const { std::mem::offset_of!(Self, manager) as isize };
        // SAFETY: For all uses of this function, `ptr` points to the manager
        // field contained in a `StoreInner` allocation
        unsafe { ptr.byte_offset(-byte_offset) }.cast()
    }
}
325
/// Add a node to `store` and return the corresponding `Edge`
///
/// Both returned edges point to the newly inserted node. NOTE(review):
/// presumably `ArcSlab::add_item` initializes the node's reference count so
/// that it covers both edges — confirm against `arcslab`.
///
/// The caller should ensure that no node is inserted twice
#[inline]
fn add_node<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>(
    store: &ArcSlab<N, StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>, PAGE_SIZE>,
    node: N,
) -> AllocResult<[Edge<'id, N, ET, TAG_BITS>; 2]>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    // Convert the handle into a raw, untagged pointer; ownership of the
    // reference(s) transfers to the returned edges.
    let ptr = IntHandle::into_raw(store.add_item(node)).cast();
    Ok([Edge(ptr, PhantomData), Edge(ptr, PhantomData)])
}
343
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    /// Get the pointer to `StoreInner` for this manager
    ///
    /// This method must not be called during the initialization of store and
    /// manager. After initialization, the returned pointer is safe to
    /// dereference.
    #[inline(always)]
    fn store_inner_ptr(&self) -> *const StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS> {
        let store_inner = self.store_inner;
        // We can simply get the store pointer by subtracting the offset of
        // `Manager` in `Store`. The only issue is that this violates Rust's
        // (proposed) aliasing rules. Hence, we only provide a hint that the
        // store's address can be computed without loading the value.
        let offset_ptr = StoreInner::from_manager_ptr(RwLock::from_data_ptr(self));
        // SAFETY: after initialization, the pointers are equal
        unsafe { std::hint::assert_unchecked(std::ptr::eq(store_inner, offset_ptr)) };
        store_inner
    }

    /// Get the node store / `ArcSlab` for this manager
    ///
    /// Actually, this is the `ArcSlab`, in which this manager is stored. This
    /// means that it is only safe to call this method in case this `Manager`
    /// is embedded in an `ArcSlab<N, StoreInner<..>, PAGE_SIZE>`. But this
    /// holds by construction.
    ///
    /// This method must not be called during the initialization of store and
    /// manager.
    #[inline(always)]
    fn store(
        &self,
    ) -> &ArcSlab<N, StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>, PAGE_SIZE> {
        let ptr = ArcSlab::from_data_ptr(self.store_inner_ptr());
        // SAFETY: After initialization, the pointer is guaranteed to be valid
        unsafe { &*ptr }
    }

    /// Get a pointer to the terminal manager
    ///
    /// After initialization of store and manager, the returned pointer is
    /// safe to dereference (see `store_inner_ptr()`).
    #[inline]
    fn terminal_manager(&self) -> *const TM {
        let store_inner = self.store_inner_ptr();
        // SAFETY: `addr_of!` only computes the field address, no dereference
        unsafe { addr_of!((*store_inner).terminal_manager) }
    }

    /// Drop the given edge
    ///
    /// This is just a wrapper around [`Edge::drop_from_unique_table`] with
    /// fewer type parameters.
    ///
    /// SAFETY: `edge` must be untagged and point to an inner node
    #[inline(always)]
    unsafe fn drop_from_unique_table(edge: Edge<'id, N, ET, TAG_BITS>) {
        // SAFETY: `edge` is untagged and points to an inner node, the type
        // parameters match
        unsafe { edge.drop_from_unique_table::<TM, R, MD, PAGE_SIZE>() }
    }
}
405
impl<'id, N: NodeBase, ET: Tag, const TAG_BITS: u32> Edge<'id, N, ET, TAG_BITS> {
    /// Mask corresponding to `TAG_BITS`
    const TAG_MASK: usize = {
        assert!(ET::MAX_VALUE < (1 << TAG_BITS), "`TAG_BITS` is too small");
        // The mask we compute below has potentially fewer bits set than
        // `(1 << TAG_BITS) - 1`. If `ET::MAX_VALUE + 1` is a power of two,
        // all bit patterns after applying the mask actually correspond to
        // valid bit patterns for the `ET`, so the compiler may optimize an
        // `assert!(value <= Self::MAX_VALUE)` in the `Tag::from_usize()`
        // implementation away, which would otherwise be required for safety.
        (ET::MAX_VALUE + 1).next_power_of_two() - 1
    };

    /// `TAG_BITS` (for the user-defined edge tag) plus 1 for the distinction
    /// between inner and terminal nodes
    const ALL_TAG_BITS: u32 = {
        // The node alignment must leave enough zero low bits for all tags.
        assert!(
            align_of::<N>() >= 1 << (TAG_BITS + 1),
            "`TAG_BITS` is too large"
        );
        TAG_BITS + 1
    };

    /// Mask corresponding to `Self::ALL_TAG_BITS`
    const ALL_TAG_MASK: usize = (1 << Self::ALL_TAG_BITS) - 1;

    /// Get the underlying (possibly tagged) pointer
    #[inline(always)]
    pub fn as_ptr(&self) -> NonNull<()> {
        self.0
    }

    /// Create an edge from a raw pointer
    ///
    /// This method does neither modify the pointer nor change reference
    /// counters in any way. The user is responsible for pointer tagging etc.
    /// Hence, using this method is dangerous. Still, it is required by
    /// `TerminalManager` implementations.
    ///
    /// # Safety
    ///
    /// `ptr` must be tagged accordingly.
    ///
    /// If the pointer is tagged as referring to an inner node, then it must
    /// actually point to an inner node stored in the manager associated with
    /// the `'id` brand. Furthermore, the caller must have ownership of one
    /// reference (in terms of the reference count).
    ///
    /// If the pointer is tagged as referring to a terminal node, then the
    /// pointer must be valid as defined by the `TerminalManager`
    /// implementation of the manager associated with the `'id` brand.
    #[inline(always)]
    pub unsafe fn from_ptr(ptr: NonNull<()>) -> Self {
        Self(ptr, PhantomData)
    }

    /// Get the address portion of the underlying pointer
    #[inline(always)]
    pub fn addr(&self) -> usize {
        sptr::Strict::addr(self.0.as_ptr())
    }

    /// Get the inner node referenced by this edge
    ///
    /// # Safety
    ///
    /// `self` must be untagged and point to an inner node
    #[inline]
    unsafe fn inner_node_unchecked(&self) -> &N {
        debug_assert_eq!(self.addr() & Self::ALL_TAG_MASK, 0);
        let ptr: NonNull<N> = self.0.cast();
        // SAFETY: by the caller's contract, `ptr` refers to a live inner node
        unsafe { ptr.as_ref() }
    }

    /// Drop an edge pointing to an inner node, assuming that it isn't the last
    /// one pointing to that node
    ///
    /// This assumption is fulfilled in case the node is still stored in the
    /// unique table.
    ///
    /// There is a debug assertion that checks the aforementioned assumption. In
    /// release builds, this function will simply leak the node.
    ///
    /// SAFETY: `self` must point to an inner node
    #[inline]
    unsafe fn drop_inner(self) {
        debug_assert!(self.is_inner());
        let ptr: NonNull<N> = self.all_untagged_ptr().cast();
        std::mem::forget(self);
        // SAFETY: `self` points to an inner node and by the type invariant, we
        // have shared access. Also, `self` is forgotten now.
        let _old_rc = unsafe { ptr.as_ref().release() };
        debug_assert!(_old_rc > 1);
    }

    /// Drop an edge from the unique table
    ///
    /// Dropping an edge from the unique table corresponds to dropping the last
    /// reference.
    ///
    /// # Safety
    ///
    /// - `self` must be untagged and point to an inner node
    /// - `TM`, `R`, `MD` and `PAGE_SIZE` must be the types/values this edge has
    ///   been created with
    #[inline]
    unsafe fn drop_from_unique_table<TM, R, MD, const PAGE_SIZE: usize>(self)
    where
        N: InnerNode<Self>,
        TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
        MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
    {
        let handle: IntHandle<
            'id,
            N,
            Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>,
            PAGE_SIZE,
        > = {
            debug_assert_eq!(self.addr() & Self::ALL_TAG_MASK, 0);
            let ptr: NonNull<N> = self.0.cast();
            std::mem::forget(self);
            // SAFETY: By the type invariant, `ptr` was created from an
            // `IntHandle`, the caller ensures that the type/const arguments
            // of `IntHandle` match. Due to lifetime restrictions the `ArcSlab`
            // outlives the `IntHandle` we create.
            unsafe { IntHandle::from_raw(ptr) }
        };
        // Recursively release the node's children when the node is freed.
        IntHandle::drop_with(handle, |node| {
            node.drop_with(|edge| {
                if edge.is_inner() {
                    // SAFETY: `edge` points to an inner node
                    unsafe { edge.drop_inner() };
                } else {
                    TM::drop_edge(edge);
                }
            })
        })
    }

    /// Forcibly drop the last edge, i.e., one that comes from the unique table
    ///
    /// # Safety
    ///
    /// - `self` must be untagged and point to an inner node
    /// - `self` must be the last reference to the node. Beware of relaxed
    ///   memory (e.g., use [`Acquire`] ordering to check that `this` is the
    ///   last reference).
    /// - `TM`, `R`, `MD` and `PAGE_SIZE` must be the types/values this edge has
    ///   been created with
    #[inline]
    unsafe fn force_drop<TM, R, MD, const PAGE_SIZE: usize>(self)
    where
        N: InnerNode<Self>,
        TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
        MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
    {
        let handle: IntHandle<
            'id,
            N,
            Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>,
            PAGE_SIZE,
        > = {
            debug_assert_eq!(self.addr() & Self::ALL_TAG_MASK, 0);
            let ptr: NonNull<N> = self.0.cast();
            std::mem::forget(self);
            // SAFETY: By the type invariant, `ptr` was created from an
            // `IntHandle`, the caller ensures that the type/const arguments
            // of `IntHandle` match. Due to lifetime restrictions the `ArcSlab`
            // outlives the `IntHandle` we create.
            unsafe { IntHandle::from_raw(ptr) }
        };
        // SAFETY: `handle` is the last reference
        unsafe { IntHandle::force_into_inner(handle) }.drop_with(|edge| {
            if edge.is_inner() {
                // SAFETY: `edge` points to an inner node
                unsafe { edge.drop_inner() };
            } else {
                TM::drop_edge(edge);
            }
        })
    }

    /// Clone an edge pointing to an inner node
    ///
    /// SAFETY: `self` must be untagged and point to an inner node
    #[inline]
    unsafe fn clone_inner_unchecked(&self) -> Self {
        // SAFETY: guaranteed by the caller; `retain()` accounts for the copy
        unsafe { self.inner_node_unchecked() }.retain();
        Self(self.0, PhantomData)
    }

    /// Returns `true` if this edge points to an inner node
    #[inline]
    pub fn is_inner(&self) -> bool {
        (self.addr() & (1 << TAG_BITS)) == 0
    }

    /// Get the underlying pointer with all tag bits set to 0
    #[inline]
    fn all_untagged_ptr(&self) -> NonNull<()> {
        let ptr = sptr::Strict::map_addr(self.0.as_ptr(), |p| p & !Self::ALL_TAG_MASK);
        // SAFETY: the (tagged) pointer is `>= (1 << ALL_TAG_BITS)`
        unsafe { NonNull::new_unchecked(ptr) }
    }

    /// Get the underlying pointer tagged with `tag`
    #[inline]
    fn retag_ptr(&self, tag: ET) -> NonNull<()> {
        let tag_val = tag.as_usize();
        debug_assert!(tag_val <= ET::MAX_VALUE);
        // Note that we assert `ET::MAX_VALUE <= Self::TAG_MASK` during the computation
        // of `Self::TAG_MASK`
        let ptr = sptr::Strict::map_addr(self.0.as_ptr(), |p| (p & !Self::TAG_MASK) | tag_val);
        // SAFETY: even an untagged pointer is non-null
        unsafe { NonNull::new_unchecked(ptr) }
    }
}
622
impl<N, ET, const TAG_BITS: u32> Drop for Edge<'_, N, ET, TAG_BITS> {
    // Dropping an `Edge` directly is always a bug: reference counts must be
    // adjusted through `Manager::drop_edge()`. This impl only reports the
    // error (and, with the feature below, turns it into a build failure).
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        eprintln!(
            "`Edge`s must not be dropped. Use `Manager::drop_edge()`. Backtrace:\n{}",
            std::backtrace::Backtrace::capture()
        );

        #[cfg(feature = "static_leak_check")]
        {
            // Reference an extern symbol whose "name" is a readable error
            // message: if this drop is reachable, linking fails and prints
            // the message, catching leaked `Edge` drops at build time.
            extern "C" {
                #[link_name = "\n\n`Edge`s must not be dropped. Use `Manager::drop_edge()`.\n"]
                fn trigger() -> !;
            }
            unsafe { trigger() }
        }
    }
}
642
643unsafe impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> oxidd_core::Manager
644    for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
645where
646    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
647    ET: Tag,
648    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
649    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
650    MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + ManagerEventSubscriber<Self>,
651{
    // Associated type plumbing: forward to the generic parameters and the
    // helper types defined in this module / the terminal manager.
    type Edge = Edge<'id, N, ET, TAG_BITS>;
    type EdgeTag = ET;
    type InnerNode = N;
    type Terminal = TM::TerminalNode;
    type TerminalRef<'a>
        = TM::TerminalNodeRef<'a>
    where
        Self: 'a;
    type Rules = R;

    type TerminalIterator<'a>
        = TM::Iterator<'a>
    where
        Self: 'a;

    type NodeSet = NodeSet<PAGE_SIZE, TAG_BITS>;

    type LevelView<'a>
        = LevelView<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
    where
        Self: 'a;
    type LevelIterator<'a>
        = LevelIter<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
    where
        Self: 'a;
677
    /// Resolve `edge` to a [`Node`]: either a reference to the inner node or
    /// a terminal reference obtained from the terminal manager
    #[inline]
    fn get_node(&self, edge: &Self::Edge) -> Node<'_, Self> {
        if edge.is_inner() {
            let ptr: NonNull<Self::InnerNode> = edge.all_untagged_ptr().cast();
            // SAFETY: dereferencing untagged edges pointing to inner nodes is safe
            Node::Inner(unsafe { ptr.as_ref() })
        } else {
            // SAFETY: after initialization, the terminal manager pointer is
            // valid (see `terminal_manager()`)
            let terminal_manager = unsafe { &*self.terminal_manager() };
            Node::Terminal(terminal_manager.deref_edge(edge))
        }
    }
689
    /// Clone `edge`, incrementing the referenced node's reference count (or
    /// delegating to the terminal manager for terminal edges)
    #[inline]
    fn clone_edge(&self, edge: &Self::Edge) -> Self::Edge {
        if edge.is_inner() {
            let ptr: NonNull<Self::InnerNode> = edge.all_untagged_ptr().cast();
            // SAFETY: dereferencing untagged edges pointing to inner nodes is safe
            unsafe { ptr.as_ref() }.retain();
            // Note: the copy keeps the original tag bits of `edge`
            Edge(edge.0, PhantomData)
        } else {
            TM::clone_edge(edge)
        }
    }
701
    /// Drop `edge`, decrementing the referenced node's reference count (or
    /// delegating to the terminal manager for terminal edges)
    #[inline]
    fn drop_edge(&self, edge: Self::Edge) {
        if edge.is_inner() {
            // SAFETY: `edge` points to an inner node
            unsafe { edge.drop_inner() };
        } else {
            TM::drop_edge(edge);
        }
    }
711
    /// Drop `edge` and, if the referenced inner node at `level` is then only
    /// referenced by the unique table (and `reorder_gc_prepared` is set),
    /// remove the node from the unique table and free it.
    ///
    /// Returns `true` iff the node was actually removed and freed.
    #[track_caller]
    fn try_remove_node(&self, edge: Self::Edge, level: LevelNo) -> bool {
        // Terminal edges are never stored in the unique table; just drop.
        if !edge.is_inner() {
            TM::drop_edge(edge);
            debug_assert_eq!(level, LevelNo::MAX, "`level` does not match");
            return false;
        }

        let node_ptr: NonNull<Self::InnerNode> = edge.all_untagged_ptr().cast();
        std::mem::forget(edge);
        // SAFETY: `node_ptr` points to an inner node and by the type invariant
        // of `Edge`, we have shared access.
        let node = unsafe { node_ptr.as_ref() };
        // SAFETY: `edge` is forgotten
        let old_rc = unsafe { node.release() };

        debug_assert_ne!(level, LevelNo::MAX, "`level` does not match");
        debug_assert!(
            (level as usize) < self.unique_table.len(),
            "`level` out of range"
        );
        debug_assert!(node.check_level(|l| l == level), "`level` does not match");
        debug_assert!(old_rc > 1);

        // Only the unique table reference may remain, and removal is only
        // permitted while prepared for reordering/GC.
        if old_rc != 2 || !self.reorder_gc_prepared {
            return false;
        }

        let Some(set) = self.unique_table.get(level as usize) else {
            return false;
        };
        let mut set = set.lock();

        // Read the reference count again: Another thread may have created an
        // edge between our `node.release()` call and `set.lock()`.
        let rc = node.load_rc(Acquire);
        debug_assert_ne!(rc, 0);
        if rc != 1 {
            return false;
        }

        // SAFETY: we checked that `reorder_gc_prepared` is true above
        let Some(edge) = (unsafe { set.remove(node) }) else {
            return false;
        };

        // SAFETY: Since `rc` is 1, this is the last reference. We use `Acquire`
        // order above and `Release` order when decrementing reference counters,
        // so we have exclusive node access now. Additionally, `edge` is
        // untagged and points to an inner node. The type/const arguments match
        // the ones from the manager.
        unsafe { edge.force_drop::<TM, R, MD, PAGE_SIZE>() };

        true
    }
767
    /// Number of inner nodes currently stored in the backing `ArcSlab`
    #[inline]
    fn num_inner_nodes(&self) -> usize {
        self.store().num_items()
    }
772
    /// Number of levels, i.e. the length of the unique table
    #[inline(always)]
    fn num_levels(&self) -> LevelNo {
        self.unique_table.len() as LevelNo
    }
777
    /// Number of variables that have been assigned a name
    #[inline(always)]
    fn num_named_vars(&self) -> VarNo {
        self.var_name_map.named_count() as VarNo
    }
782
783    #[track_caller]
784    fn add_vars(&mut self, additional: VarNo) -> Range<VarNo> {
785        let len = self.unique_table.len() as VarNo;
786        let new_len = len.checked_add(additional).expect("too many variables");
787        let range = len as VarNo..new_len as VarNo;
788
789        self.data.pre_reorder(self);
790        MD::pre_reorder_mut(self);
791
792        self.unique_table
793            .resize_with(new_len as usize, || Mutex::new(LevelViewSet::default()));
794        self.var_level_map.extend(additional);
795        self.var_name_map.add_unnamed(additional);
796
797        debug_assert_eq!(new_len as usize, self.unique_table.len());
798        debug_assert_eq!(new_len as usize, self.var_level_map.len());
799        debug_assert_eq!(new_len, self.var_name_map.len());
800
801        self.data.post_reorder(self);
802        MD::post_reorder_mut(self);
803
804        range
805    }
806
    /// Add one named variable per element of `names`, returning the range of
    /// new variable numbers
    ///
    /// Returns an error on a duplicate name. NOTE(review): in the error (and
    /// panic) case, the `OnDrop` handler below resizes the unique table and
    /// level map to match whatever was committed to `var_name_map`, keeping
    /// the manager consistent.
    #[track_caller]
    fn add_named_vars<S: Into<String>>(
        &mut self,
        names: impl IntoIterator<Item = S>,
    ) -> Result<Range<VarNo>, DuplicateVarName> {
        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        let len = self.var_name_map.len();
        let mut on_drop = OnDrop::new(self, |this| {
            // This block is executed whenever `on_drop` gets dropped, i.e.,
            // even if iterating over `names` or converting a value of type `S`
            // into a `String` panics. This way, we ensure that the manager's
            // state remains consistent.
            let new_len = this.var_name_map.len();
            this.unique_table
                .resize_with(new_len as usize, || Mutex::new(LevelViewSet::default()));
            this.var_level_map.extend((new_len - len) as VarNo);

            debug_assert_eq!(new_len as usize, this.unique_table.len());
            debug_assert_eq!(new_len as usize, this.var_level_map.len());
            debug_assert_eq!(new_len, this.var_name_map.len());

            this.data.post_reorder(this);
            MD::post_reorder_mut(this);
        });

        let mut names = names.into_iter();
        let range = on_drop.data_mut().var_name_map.add_named(names.by_ref())?;
        drop(on_drop);

        // `add_named` may stop early when the variable count limit is
        // reached; a remaining element means there were too many names.
        if names.next().is_some() {
            // important: panic only after dropping `on_drop`
            panic!("too many variables");
        }

        Ok(range)
    }
845
846    #[track_caller]
847    fn add_named_vars_from_map(
848        &mut self,
849        map: VarNameMap,
850    ) -> Result<Range<VarNo>, DuplicateVarName> {
851        if !self.var_name_map.is_empty() {
852            return self.add_named_vars(map.into_names_iter());
853        }
854
855        self.data.pre_reorder(self);
856        MD::pre_reorder_mut(self);
857
858        let n = map.len();
859        self.unique_table
860            .resize_with(n as usize, || Mutex::new(LevelViewSet::default()));
861        self.var_level_map.extend(n);
862        self.var_name_map = map;
863
864        debug_assert_eq!(n as usize, self.unique_table.len());
865        debug_assert_eq!(n as usize, self.var_level_map.len());
866        debug_assert_eq!(n, self.var_name_map.len());
867
868        self.data.post_reorder(self);
869        MD::post_reorder_mut(self);
870
871        Ok(0..n)
872    }
873
    /// Get the name of variable `var` (delegates to the variable name map)
    #[track_caller]
    #[inline(always)]
    fn var_name(&self, var: VarNo) -> &str {
        self.var_name_map.var_name(var)
    }
879
    /// Assign `name` to variable `var`, failing if the name is already taken
    #[track_caller]
    #[inline(always)]
    fn set_var_name(
        &mut self,
        var: VarNo,
        name: impl Into<String>,
    ) -> Result<(), DuplicateVarName> {
        self.var_name_map.set_var_name(var, name)
    }
889
    /// Look up the variable number for `name`, if such a variable exists
    #[inline(always)]
    fn name_to_var(&self, name: impl AsRef<str>) -> Option<VarNo> {
        self.var_name_map.name_to_var(name)
    }
894
    /// Map variable `var` to its current level in the variable order
    #[track_caller]
    #[inline(always)]
    fn var_to_level(&self, var: VarNo) -> LevelNo {
        self.var_level_map.var_to_level(var)
    }
900
    /// Map `level` back to the variable currently placed at that level
    #[track_caller]
    #[inline(always)]
    fn level_to_var(&self, level: LevelNo) -> VarNo {
        self.var_level_map.level_to_var(level)
    }
906
    /// Acquire a view of level `no`, locking that level's mutex
    ///
    /// Panics (via indexing) if `no` is out of range.
    #[track_caller]
    #[inline(always)]
    fn level(&self, no: LevelNo) -> Self::LevelView<'_> {
        LevelView {
            store: self.store(),
            var_level_map: &self.var_level_map,
            // node removal is only sound while a GC/reorder is prepared
            allow_node_removal: self.reorder_gc_prepared,
            level: no,
            set: self.unique_table[no as usize].lock(),
        }
    }
918
    /// Iterate over all levels from top (level 0) to bottom
    ///
    /// Each level's mutex is locked lazily when the iterator yields it.
    #[inline]
    fn levels(&self) -> Self::LevelIterator<'_> {
        LevelIter {
            store: self.store(),
            var_level_map: &self.var_level_map,
            allow_node_removal: self.reorder_gc_prepared,
            level_front: 0,
            level_back: self.unique_table.len() as LevelNo,
            it: self.unique_table.iter(),
        }
    }
930
    /// Get an edge for the given terminal value
    #[inline]
    fn get_terminal(&self, terminal: Self::Terminal) -> AllocResult<Self::Edge> {
        // SAFETY(review): `self.terminal_manager()` is assumed valid for the
        // manager's lifetime, matching the other uses in this impl — confirm
        unsafe { TM::get(self.terminal_manager(), terminal) }
    }
935
    /// Get the number of terminal nodes currently stored
    #[inline]
    fn num_terminals(&self) -> usize {
        // SAFETY(review): `self.terminal_manager()` is assumed valid for the
        // manager's lifetime, matching the other uses in this impl — confirm
        let terminal_manager = unsafe { &*self.terminal_manager() };
        terminal_manager.len()
    }
941
    /// Iterate over the stored terminal nodes
    #[inline]
    fn terminals(&self) -> Self::TerminalIterator<'_> {
        // SAFETY(review): `self.terminal_manager()` is assumed valid for the
        // manager's lifetime, matching the other uses in this impl — confirm
        unsafe { TM::iter(self.terminal_manager()) }
    }
946
    /// Perform a garbage collection and return the number of removed inner
    /// and terminal nodes
    ///
    /// Returns 0 immediately if another garbage collection is already in
    /// progress.
    fn gc(&self) -> usize {
        if !self.gc_ongoing.try_lock() {
            // We don't want multiple garbage collections at the same time.
            return 0;
        }
        self.gc_count.fetch_add(1, Relaxed);
        // A panic in the middle of a GC would leave the node store in an
        // inconsistent state, so abort instead of unwinding.
        let guard = AbortOnDrop("Garbage collection panicked.");
        if !self.reorder_gc_prepared {
            // Not inside `reorder()`: prepare the GC (remove "weak" edges)
            self.data.pre_gc(self);
        }

        let mut collected = 0;
        for level in &self.unique_table {
            let mut level = level.lock();
            // count removals as the difference in level size
            collected += level.len();
            // SAFETY: We prepared the garbage collection, hence there are no
            // "weak" edges.
            unsafe { level.gc() };
            collected -= level.len();
        }
        // SAFETY(review): terminal manager pointer assumed valid for the
        // manager's lifetime — matches the other uses in this impl
        collected += unsafe { &*self.terminal_manager() }.gc();

        if !self.reorder_gc_prepared {
            // SAFETY: We called `pre_gc`, the garbage collection is done.
            unsafe { self.data.post_gc(self) };
        }
        self.gc_ongoing.unlock();
        guard.defuse();
        collected
    }
977
    /// Prepare for reordering (allowing node removal inside), run `f`, and
    /// finalize the reordering afterwards
    ///
    /// Nested calls simply run `f` without re-preparing or finalizing.
    fn reorder<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
        if self.reorder_gc_prepared {
            // Reordering was already prepared (nested `reorder` call).
            // Important: We must return here to not finalize the reordering
            // before the parent `reorder` closure returns.
            return f(self);
        }
        // A panic during reordering would leave the manager inconsistent, so
        // abort instead of unwinding.
        let guard = AbortOnDrop("Reordering panicked.");

        self.data.pre_gc(self);
        self.reorder_gc_prepared = true;
        self.data.pre_reorder(self);
        MD::pre_reorder_mut(self);

        let res = f(self);

        self.data.post_reorder(self);
        MD::post_reorder_mut(self);
        self.reorder_gc_prepared = false;
        // SAFETY: We called `pre_gc()` and the reordering is done.
        unsafe { self.data.post_gc(self) };

        guard.defuse();
        // Depending on the reordering implementation, garbage collections are
        // performed, but not necessarily through `Self::gc`. So we increment
        // the GC count here.
        *self.gc_count.get_mut() += 1;
        self.reorder_count += 1;
        res
    }
1008
    /// Get the number of garbage collections performed so far
    #[inline]
    fn gc_count(&self) -> u64 {
        self.gc_count.load(Relaxed)
    }
1013
    /// Get the number of (outermost) `reorder` invocations so far
    #[inline]
    fn reorder_count(&self) -> u64 {
        self.reorder_count
    }
1018}
1019
/// Expose the worker pool stored alongside the node store's data
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> oxidd_core::HasWorkers
    for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>> + Send + Sync,
    ET: Tag + Send + Sync,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS> + Send + Sync,
    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + ManagerEventSubscriber<Self> + Send + Sync,
{
    type WorkerPool = crate::workers::Workers;

    #[inline]
    fn workers(&self) -> &Self::WorkerPool {
        &self.store().data().workers
    }
}
1036
/// Double-ended iterator over all levels of a [`Manager`]'s unique table,
/// yielding a locked [`LevelView`] per level
pub struct LevelIter<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    store: &'a ArcSlab<N, StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>, PAGE_SIZE>,
    var_level_map: &'a VarLevelMap,
    /// SAFETY invariant: If set to true, a garbage collection is prepared
    /// (i.e., there are no "weak" edges).
    allow_node_removal: bool,
    /// Level number of the next item yielded by `next()`
    level_front: LevelNo,
    /// One past the level number of the next item yielded by `next_back()`
    level_back: LevelNo,
    it: std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>>,
}
1053
1054impl<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> Iterator
1055    for LevelIter<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
1056where
1057    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
1058    ET: Tag,
1059    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
1060    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
1061    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
1062{
1063    type Item = LevelView<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>;
1064
1065    #[inline]
1066    fn next(&mut self) -> Option<Self::Item> {
1067        let mutex = self.it.next()?;
1068        let level = self.level_front;
1069        self.level_front += 1;
1070        Some(LevelView {
1071            store: self.store,
1072            var_level_map: self.var_level_map,
1073            allow_node_removal: self.allow_node_removal,
1074            level,
1075            set: mutex.lock(),
1076        })
1077    }
1078
1079    #[inline]
1080    fn size_hint(&self) -> (usize, Option<usize>) {
1081        self.it.size_hint()
1082    }
1083}
1084
/// The number of remaining levels is always known exactly
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> ExactSizeIterator
    for LevelIter<'_, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    #[inline]
    fn len(&self) -> usize {
        self.it.len()
    }
}
1099
/// Fused iff the underlying slice iterator is fused (it is, in practice)
impl<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> FusedIterator
    for LevelIter<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
    std::slice::Iter<'a, Mutex<LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>>:
        FusedIterator,
{
}
1112
1113impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> DoubleEndedIterator
1114    for LevelIter<'_, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
1115where
1116    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
1117    ET: Tag,
1118    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
1119    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
1120    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
1121{
1122    #[inline]
1123    fn next_back(&mut self) -> Option<Self::Item> {
1124        let mutex = self.it.next_back()?;
1125        self.level_back -= 1;
1126        Some(LevelView {
1127            store: self.store,
1128            var_level_map: self.var_level_map,
1129            allow_node_removal: self.allow_node_removal,
1130            level: self.level_back,
1131            set: mutex.lock(),
1132        })
1133    }
1134}
1135
/// Tagged-pointer edge operations: the tag lives in the pointer's low bits
impl<N: NodeBase, ET: Tag, const TAG_BITS: u32> oxidd_core::Edge for Edge<'_, N, ET, TAG_BITS> {
    type Tag = ET;

    #[inline]
    fn borrowed(&self) -> Borrowed<'_, Self> {
        // A borrowed edge shares the pointer without touching the ref count
        Borrowed::new(Self(self.0, PhantomData))
    }

    #[inline]
    fn with_tag(&self, tag: Self::Tag) -> Borrowed<'_, Self> {
        Borrowed::new(Self(self.retag_ptr(tag), PhantomData))
    }

    #[inline]
    fn with_tag_owned(mut self, tag: Self::Tag) -> Self {
        self.0 = self.retag_ptr(tag);
        self
    }

    #[inline]
    fn tag(&self) -> Self::Tag {
        // Extract the tag from the low pointer bits
        ET::from_usize(self.addr() & Self::TAG_MASK)
    }

    #[inline]
    fn node_id(&self) -> oxidd_core::NodeID {
        // The node ID is the pointer address with all tag bits cleared
        self.addr() & !Self::ALL_TAG_MASK
    }
}
1165
1166// === Level Views =============================================================
1167
/// The underlying data structure for level views
///
/// Conceptually, every [`LevelViewSet`] is tied to a [`Manager`]. It is a hash
/// table ensuring the uniqueness of nodes. However, it does not store nodes
/// internally, but edges referencing nodes. These edges are always untagged and
/// reference inner nodes.
///
/// Because a [`LevelViewSet`] on its own is not sufficient to drop nodes
/// accordingly, this will simply leak all contained edges, not calling the
/// `Edge`'s `Drop` implementation.
struct LevelViewSet<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>(
    // invariant: contains only untagged edges referencing inner nodes
    RawTable<Edge<'id, N, ET, TAG_BITS>>,
    // `TM`, `R`, and `MD` are only used by the methods, not stored
    PhantomData<(TM, R, MD)>,
);
1182
1183#[inline]
1184fn hash_node<N: NodeBase>(node: &N) -> u64 {
1185    let mut hasher = FxHasher::default();
1186    node.hash(&mut hasher);
1187    hasher.finish()
1188}
1189
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    /// Get the number of nodes on this level
    #[inline]
    fn len(&self) -> usize {
        self.0.len()
    }

    /// Get an equality function for entries
    ///
    /// SAFETY: The returned function must be called on untagged edges
    /// referencing inner nodes only.
    #[inline]
    unsafe fn eq(node: &N) -> impl Fn(&Edge<'id, N, ET, TAG_BITS>) -> bool + '_ {
        // SAFETY: by this function's contract, `edge` is untagged and points
        // to an inner node
        move |edge| unsafe { edge.inner_node_unchecked() == node }
    }

    /// Reserve space for `additional` nodes on this level
    #[inline]
    fn reserve(&mut self, additional: usize) {
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.reserve(additional)
    }

    /// Look up the edge referencing a node equal to `node`, if present
    #[inline]
    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET, TAG_BITS>> {
        let hash = hash_node(node);
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.get(hash, unsafe { Self::eq(node) })
    }

    /// Insert the given edge, assuming that the referenced node is already
    /// stored in `nodes`.
    ///
    /// Returns `true` if the edge was inserted, `false` if it was already
    /// present.
    ///
    /// Panics if `edge` points to a terminal node. May furthermore panic if
    /// `edge` is tagged, depending on the configuration.
    #[inline]
    fn insert(&mut self, edge: Edge<'id, N, ET, TAG_BITS>) -> bool {
        assert_eq!(
            edge.addr() & Edge::<N, ET, TAG_BITS>::ALL_TAG_MASK,
            0,
            "can only insert untagged edges pointing to inner nodes"
        );
        // prevent `edge`'s `Drop` from running; ownership is handled manually
        let edge = ManuallyDrop::new(edge);
        // SAFETY: asserted above that `edge` is untagged and points to an
        // inner node
        let node = unsafe { edge.inner_node_unchecked() };
        let hash = hash_node(node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(node) })
        {
            Ok(_) => {
                // We need to drop `edge`. This simply amounts to decrementing
                // the reference counter, since there is still one edge
                // referencing `node` stored in here.

                // SAFETY: We forget `edge`.
                let _old_rc = unsafe { node.release() };
                debug_assert!(_old_rc > 1);

                false
            }
            Err(slot) => {
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe {
                    self.0
                        .insert_in_slot_unchecked(hash, slot, ManuallyDrop::into_inner(edge))
                };
                true
            }
        }
    }

    /// Get an edge for `node`
    ///
    /// If `node` is not yet present in the hash table, `insert` is called to
    /// insert the node into `nodes`.
    #[inline]
    fn get_or_insert(
        &mut self,
        node: N,
        insert: impl FnOnce(N) -> AllocResult<[Edge<'id, N, ET, TAG_BITS>; 2]>,
    ) -> AllocResult<Edge<'id, N, ET, TAG_BITS>> {
        let hash = hash_node(&node);
        // SAFETY (next 2): The hash table only contains untagged edges
        // referencing inner nodes.
        match self
            .0
            .find_or_find_insert_slot(hash, unsafe { Self::eq(&node) })
        {
            Ok(slot) => {
                // An equal node already exists: dispose of the duplicate
                // `node`, releasing its children edges
                node.drop_with(|edge| {
                    if edge.is_inner() {
                        // SAFETY: `edge` points to an inner node
                        unsafe { edge.drop_inner() };
                    } else {
                        TM::drop_edge(edge);
                    }
                });
                // SAFETY:
                // - `slot` was returned by `find_or_find_insert_slot`. We have exclusive access
                //   to the hash table and did not modify it in between.
                // - All edges in the table are untagged and refer to inner nodes.
                Ok(unsafe { self.0.get_at_slot_unchecked(slot).clone_inner_unchecked() })
            }
            Err(slot) => {
                // `insert` stores `node` and returns two edges: one kept in
                // the table (e1), one handed to the caller (e2)
                let [e1, e2] = insert(node)?;
                // SAFETY: `slot` was returned by `find_or_find_insert_slot`.
                // We have exclusive access to the hash table and did not modify
                // it in between.
                unsafe { self.0.insert_in_slot_unchecked(hash, slot, e1) };
                Ok(e2)
            }
        }
    }

    /// Perform garbage collection, i.e. remove all nodes without references
    /// besides the internal edge
    ///
    /// SAFETY: There must not be any "weak" edges, i.e. edges where the
    /// reference count is not materialized (apply cache implementations exploit
    /// this).
    unsafe fn gc(&mut self) {
        self.0.retain(
            // keep: some external reference besides the table's own edge
            |edge| {
                // SAFETY: All edges in unique tables are untagged and point to
                // inner nodes.
                unsafe { edge.inner_node_unchecked() }.load_rc(Acquire) != 1
            },
            // drop handler for removed entries
            |edge| {
                // SAFETY: Since `rc` is 1, this is the last reference. We use
                // `Acquire` order above and `Release` order when decrementing
                // reference counters, so we have exclusive node access now.
                // Additionally, `edge` is untagged and points to an inner node.
                // The type/const arguments match the ones from the manager.
                unsafe { edge.force_drop::<TM, R, MD, PAGE_SIZE>() };
            },
        );
    }

    /// Remove `node`
    ///
    /// Returns `Some(edge)` if `node` was present, `None` otherwise
    ///
    /// SAFETY: There must not be any "weak" edges, i.e. edges where the
    /// reference count is not materialized (apply cache implementations exploit
    /// this).
    #[inline]
    unsafe fn remove(&mut self, node: &N) -> Option<Edge<'id, N, ET, TAG_BITS>> {
        let hash = hash_node(node);
        // SAFETY: The hash table only contains untagged edges referencing inner
        // nodes.
        self.0.remove_entry(hash, unsafe { Self::eq(node) })
    }

    /// Iterate over all edges pointing to nodes in the set
    #[inline]
    fn iter(&self) -> LevelViewIter<'_, 'id, N, ET, TAG_BITS> {
        LevelViewIter(self.0.iter())
    }

    /// Iterator that consumes all [`Edge`]s in the set
    #[inline]
    fn drain(&mut self) -> linear_hashtbl::raw::Drain<'_, Edge<'id, N, ET, TAG_BITS>> {
        self.0.drain()
    }
}
1371
/// Dropping the set leaks the contained edges on purpose (see the type docs);
/// the actual node cleanup happens elsewhere.
impl<N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> Drop
    for LevelViewSet<'_, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
{
    #[inline]
    fn drop(&mut self) {
        // If the nodes need drop, this is handled by the `Manager` `Drop` impl
        // or the `TakenLevelView` `Drop` impl, respectively.
        self.0.reset_no_drop();
    }
}
1382
1383impl<N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> Default
1384    for LevelViewSet<'_, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
1385{
1386    #[inline]
1387    fn default() -> Self {
1388        Self(Default::default(), PhantomData)
1389    }
1390}
1391
/// Consume the set, yielding its edges by value
///
/// Bypasses the leaking `Drop` impl so that ownership of the edges is actually
/// transferred to the iterator.
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> IntoIterator
    for LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
{
    type Item = Edge<'id, N, ET, TAG_BITS>;

    type IntoIter = linear_hashtbl::raw::IntoIter<Edge<'id, N, ET, TAG_BITS>>;

    fn into_iter(self) -> Self::IntoIter {
        // wrap in `ManuallyDrop` so `Self::drop` does not run
        let this = ManuallyDrop::new(self);
        // SAFETY: We move out of `this` (and forget `this`)
        let set = unsafe { std::ptr::read(&this.0) };
        set.into_iter()
    }
}
1406
/// Level view provided by the unique table
///
/// Holds the lock of a single level's hash set for its lifetime.
pub struct LevelView<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    store: &'a ArcSlab<N, StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>, PAGE_SIZE>,
    var_level_map: &'a VarLevelMap,
    /// SAFETY invariant: If set to true, a garbage collection is prepared
    /// (i.e., there are no "weak" edges).
    allow_node_removal: bool,
    /// Level number this view refers to
    level: LevelNo,
    /// Lock guard for the level's node set
    set: MutexGuard<'a, LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>,
}
1423
// SAFETY(review): `oxidd_core::LevelView` is an unsafe trait; this impl is
// assumed to uphold its contract (untagged edges to inner nodes on the
// correct level) — the assertions below enforce the visible parts of it.
unsafe impl<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    oxidd_core::LevelView<Edge<'id, N, ET, TAG_BITS>, N>
    for LevelView<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>
        + ManagerEventSubscriber<Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>,
{
    type Iterator<'b>
        = LevelViewIter<'b, 'id, N, ET, TAG_BITS>
    where
        Self: 'b;

    type Taken = TakenLevelView<'a, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>;

    #[inline(always)]
    fn len(&self) -> usize {
        self.set.len()
    }

    #[inline(always)]
    fn level_no(&self) -> LevelNo {
        self.level
    }

    #[inline]
    fn reserve(&mut self, additional: usize) {
        self.set.reserve(additional)
    }

    #[inline]
    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET, TAG_BITS>> {
        self.set.get(node)
    }

    #[inline]
    fn insert(&mut self, edge: Edge<'id, N, ET, TAG_BITS>) -> bool {
        // enforce the set's invariant: untagged edges to inner nodes only
        assert_eq!(
            edge.addr() & Edge::<N, ET, TAG_BITS>::ALL_TAG_MASK,
            0,
            "can only insert untagged edges pointing to inner nodes"
        );
        // SAFETY: asserted above that `edge` is untagged and points to an
        // inner node
        unsafe { edge.inner_node_unchecked() }.assert_level_matches(self.level);
        self.set.insert(edge)
    }

    #[inline(always)]
    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET, TAG_BITS>> {
        node.assert_level_matches(self.level);
        // No need to check if the children of `node` are stored in `self.store`
        // due to lifetime restrictions.
        LevelViewSet::get_or_insert(&mut *self.set, node, |node| add_node(self.store, node))
    }

    #[inline]
    fn gc(&mut self) {
        // silently a no-op unless node removal is allowed
        if self.allow_node_removal {
            // SAFETY: By invariant, node removal is allowed.
            unsafe { self.set.gc() };
        }
    }

    #[inline]
    fn remove(&mut self, node: &N) -> bool {
        // silently a no-op unless node removal is allowed
        if !self.allow_node_removal {
            return false;
        }
        // SAFETY: By invariant, node removal is allowed.
        match unsafe { self.set.remove(node) } {
            Some(edge) => {
                // SAFETY: `edge` is untagged, the type parameters match
                unsafe { edge.drop_from_unique_table::<TM, R, MD, PAGE_SIZE>() };
                true
            }
            None => false,
        }
    }

    #[inline]
    unsafe fn swap(&mut self, other: &mut Self) {
        // exchange the node sets of the two levels and record the new
        // variable placement in the var↔level map
        self.var_level_map.swap_levels(self.level, other.level);
        std::mem::swap(&mut *self.set, &mut *other.set);
    }

    #[inline]
    fn iter(&self) -> Self::Iterator<'_> {
        self.set.iter()
    }

    #[inline(always)]
    fn take(&mut self) -> Self::Taken {
        // move the set out, leaving an empty one behind (the lock stays held)
        TakenLevelView {
            store: self.store,
            var_level_map: self.var_level_map,
            allow_node_removal: self.allow_node_removal,
            level: self.level,
            set: std::mem::take(&mut self.set),
        }
    }
}
1527
/// Owned level view provided by the unique table
///
/// Unlike [`LevelView`], this owns its node set (taken out of the table) and
/// drops the contained edges when it is dropped.
pub struct TakenLevelView<'a, 'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    store: &'a ArcSlab<N, StoreInner<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>, PAGE_SIZE>,
    var_level_map: &'a VarLevelMap,
    /// SAFETY invariant: If set to true, a garbage collection is prepared
    /// (i.e., there are no "weak" edges).
    ///
    /// Note that due to lifetime restrictions there is no way to have a
    /// `TakenLevelView { allow_node_removal: true, .. }` when the associated
    /// `Manager` has `reorder_gc_prepared` set to `false`.
    allow_node_removal: bool,
    /// Level number this view was taken from
    level: LevelNo,
    /// Owned node set (no lock guard; the set was moved out)
    set: LevelViewSet<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>,
}
1548
// SAFETY(review): `oxidd_core::LevelView` is an unsafe trait; this impl
// mirrors the `LevelView` impl above, operating on the owned set instead of
// a lock guard, and is assumed to uphold the same contract.
unsafe impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32>
    oxidd_core::LevelView<Edge<'id, N, ET, TAG_BITS>, N>
    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>
        + ManagerEventSubscriber<Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>>,
{
    type Iterator<'b>
        = LevelViewIter<'b, 'id, N, ET, TAG_BITS>
    where
        Self: 'b;

    type Taken = Self;

    #[inline(always)]
    fn len(&self) -> usize {
        self.set.len()
    }

    #[inline(always)]
    fn level_no(&self) -> LevelNo {
        self.level
    }

    #[inline]
    fn reserve(&mut self, additional: usize) {
        self.set.reserve(additional)
    }

    #[inline]
    fn get(&self, node: &N) -> Option<&Edge<'id, N, ET, TAG_BITS>> {
        self.set.get(node)
    }

    #[inline]
    fn insert(&mut self, edge: Edge<'id, N, ET, TAG_BITS>) -> bool {
        // enforce the set's invariant: untagged edges to inner nodes only
        assert_eq!(
            edge.addr() & Edge::<N, ET, TAG_BITS>::ALL_TAG_MASK,
            0,
            "can only insert untagged edges pointing to inner nodes"
        );
        // SAFETY: asserted above that `edge` is untagged and points to an
        // inner node
        unsafe { edge.inner_node_unchecked() }.assert_level_matches(self.level);
        self.set.insert(edge)
    }

    #[inline(always)]
    fn get_or_insert(&mut self, node: N) -> AllocResult<Edge<'id, N, ET, TAG_BITS>> {
        node.assert_level_matches(self.level);
        // No need to check if the children of `node` are stored in `self.store`
        // due to lifetime restrictions.
        self.set
            .get_or_insert(node, |node| add_node(self.store, node))
    }

    #[inline]
    fn gc(&mut self) {
        // silently a no-op unless node removal is allowed
        if self.allow_node_removal {
            // SAFETY: By invariant, node removal is allowed.
            unsafe { self.set.gc() };
        }
    }

    #[inline]
    fn remove(&mut self, node: &N) -> bool {
        // silently a no-op unless node removal is allowed
        if !self.allow_node_removal {
            return false;
        }
        // SAFETY: Called from inside the closure of `Manager::reorder()`, hence
        // there are no "weak" edges.
        match unsafe { self.set.remove(node) } {
            Some(edge) => {
                // SAFETY: `edge` is untagged, the type parameters match
                unsafe { edge.drop_from_unique_table::<TM, R, MD, PAGE_SIZE>() };
                true
            }
            None => false,
        }
    }

    #[inline]
    unsafe fn swap(&mut self, other: &mut Self) {
        // exchange the node sets of the two levels and record the new
        // variable placement in the var↔level map
        self.var_level_map.swap_levels(self.level, other.level);
        std::mem::swap(&mut self.set, &mut other.set);
    }

    #[inline]
    fn iter(&self) -> Self::Iterator<'_> {
        self.set.iter()
    }

    #[inline(always)]
    fn take(&mut self) -> Self::Taken {
        // move the set out, leaving an empty one behind
        TakenLevelView {
            store: self.store,
            var_level_map: self.var_level_map,
            allow_node_removal: self.allow_node_removal,
            level: self.level,
            set: std::mem::take(&mut self.set),
        }
    }
}
1654
/// Dropping a taken level view drops all edges it still owns (in contrast to
/// `LevelViewSet`'s leaking `Drop` impl)
impl<'id, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> Drop
    for TakenLevelView<'_, 'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
where
    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
    ET: Tag,
    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
    MD: DropWith<Edge<'id, N, ET, TAG_BITS>>,
{
    fn drop(&mut self) {
        for edge in self.set.drain() {
            // SAFETY: `edge` is untagged and points to an inner node, the type
            // parameters match
            unsafe { edge.drop_from_unique_table::<TM, R, MD, PAGE_SIZE>() };
        }
    }
}
1671
1672// --- Level Views: Iterator ---------------------------------------------------
1673
1674/// Iterator over entries (as [`Edge`]s) of a level view
1675pub struct LevelViewIter<'a, 'id, N, ET, const TAG_BITS: u32>(
1676    linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET, TAG_BITS>>,
1677);
1678
/// Thin delegation to the underlying raw-table iterator
impl<'a, 'id, InnerNode, ET, const TAG_BITS: u32> Iterator
    for LevelViewIter<'a, 'id, InnerNode, ET, TAG_BITS>
{
    type Item = &'a Edge<'id, InnerNode, ET, TAG_BITS>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
1694
/// The number of remaining entries is always known exactly
impl<N, ET, const TAG_BITS: u32> ExactSizeIterator for LevelViewIter<'_, '_, N, ET, TAG_BITS> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.0.len()
    }
}
// Fused exactly when the underlying raw-table iterator is fused.
impl<'a, 'id, N, ET, const TAG_BITS: u32> FusedIterator for LevelViewIter<'a, 'id, N, ET, TAG_BITS> where
    linear_hashtbl::raw::Iter<'a, Edge<'id, N, ET, TAG_BITS>, u32>: FusedIterator
{
}
1705
1706// === Node Set ================================================================
1707
/// Node set implementation using [bit sets][FixedBitSet]
///
/// Since nodes are stored on large pages, we can use one bit vector per page.
/// This reduces space consumption dramatically and increases the performance.
#[derive(Clone, PartialEq, Eq, Default)]
pub struct NodeSet<const PAGE_SIZE: usize, const TAG_BITS: u32> {
    // Total number of nodes in the set (i.e., the sum of set bits over all
    // pages); kept in sync by `insert`/`remove`
    len: usize,
    // Maps a page index (derived from the node pointer, see `page_offset`) to
    // one bit per node slot on that page
    data: HashMap<usize, FixedBitSet, BuildHasherDefault<FxHasher>>,
}
1717
impl<const PAGE_SIZE: usize, const TAG_BITS: u32> NodeSet<PAGE_SIZE, TAG_BITS> {
    /// Number of node slots per page
    ///
    /// NOTE(review): presumably each node occupies `1 << (TAG_BITS + 1)` bytes
    /// (hence the `TAG_BITS + 1` shift) — confirm against the node layout.
    const NODES_PER_PAGE: usize = PAGE_SIZE >> (TAG_BITS + 1);

    /// Map `edge` to its page index and the node's offset within that page
    #[inline]
    fn page_offset<InnerNode, ET>(edge: &Edge<'_, InnerNode, ET, TAG_BITS>) -> (usize, usize) {
        // Discard the tag bits of the pointer to obtain an address-derived
        // node ID, then split it into page and in-page offset.
        let node_id = sptr::Strict::addr(edge.0.as_ptr()) >> TAG_BITS;
        let page = node_id / Self::NODES_PER_PAGE;
        let offset = node_id % Self::NODES_PER_PAGE;
        (page, offset)
    }
}
1729
1730impl<'id, InnerNode, ET, const PAGE_SIZE: usize, const TAG_BITS: u32>
1731    oxidd_core::util::NodeSet<Edge<'id, InnerNode, ET, TAG_BITS>> for NodeSet<PAGE_SIZE, TAG_BITS>
1732{
1733    #[inline(always)]
1734    fn len(&self) -> usize {
1735        self.len
1736    }
1737
1738    fn insert(&mut self, edge: &Edge<'id, InnerNode, ET, TAG_BITS>) -> bool {
1739        let (page, offset) = Self::page_offset(edge);
1740        match self.data.entry(page) {
1741            std::collections::hash_map::Entry::Occupied(mut e) => {
1742                let page = e.get_mut();
1743                if page.contains(offset) {
1744                    false
1745                } else {
1746                    page.insert(offset);
1747                    self.len += 1;
1748                    true
1749                }
1750            }
1751            std::collections::hash_map::Entry::Vacant(e) => {
1752                let mut page = FixedBitSet::with_capacity(Self::NODES_PER_PAGE);
1753                page.insert(offset);
1754                e.insert(page);
1755                self.len += 1;
1756                true
1757            }
1758        }
1759    }
1760
1761    #[inline]
1762    fn contains(&self, edge: &Edge<'id, InnerNode, ET, TAG_BITS>) -> bool {
1763        let (page, offset) = Self::page_offset(edge);
1764        match self.data.get(&page) {
1765            Some(page) => page.contains(offset),
1766            None => false,
1767        }
1768    }
1769
1770    fn remove(&mut self, edge: &Edge<'id, InnerNode, ET, TAG_BITS>) -> bool {
1771        let (page, offset) = Self::page_offset(edge);
1772        match self.data.get_mut(&page) {
1773            Some(page) => {
1774                if page.contains(offset) {
1775                    page.remove(offset);
1776                    self.len -= 1;
1777                    true
1778                } else {
1779                    false
1780                }
1781            }
1782            None => false,
1783        }
1784    }
1785}
1786
1787// === `ManagerRef` & Creation =================================================
1788
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    /// Convert `self` into a raw pointer, e.g. for usage in a foreign function
    /// interface.
    ///
    /// This method does not change any reference counters. To avoid a memory
    /// leak, use [`Self::from_raw()`] to convert the pointer back into a
    /// `ManagerRef`.
    #[inline(always)]
    pub fn into_raw(self) -> *const std::ffi::c_void {
        // `into_raw` consumes the `ArcSlabRef` without dropping it, so the
        // reference count it owns is transferred to the returned pointer.
        ArcSlabRef::into_raw(self.0).as_ptr().cast()
    }

    /// Convert `raw` into a `ManagerRef`
    ///
    /// # Safety
    ///
    /// `raw` must have been obtained via [`Self::into_raw()`]. This function
    /// does not change any reference counters, so calling this function
    /// multiple times for the same pointer may lead to use after free bugs
    /// depending on the usage of the returned `ManagerRef`.
    #[inline(always)]
    pub unsafe fn from_raw(raw: *const std::ffi::c_void) -> Self {
        // Panic on null rather than constructing a dangling reference.
        let ptr = NonNull::new(raw.cast_mut().cast()).expect("expected a non-null pointer");
        // SAFETY: Invariants are upheld by the caller.
        Self(unsafe { ArcSlabRef::from_raw(ptr) })
    }
}
1825
impl<
        'a,
        'id,
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > From<&'a M<'id, NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>>
    for ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    #[inline]
    fn from(manager: &'a M<'id, NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>) -> Self {
        // Acquire a reference count for the new `ManagerRef`.
        manager.store().retain();
        // Recover the `ArcSlab` pointer from the manager's `StoreInner`.
        let store_ptr: *const ArcSlab<NC::T<'id>, _, PAGE_SIZE> =
            ArcSlab::<NC::T<'id>, _, PAGE_SIZE>::from_data_ptr(manager.store_inner_ptr());
        // Erase the brand lifetime: `ManagerRef` stores the `'static`
        // instantiation (`'id` only serves as a brand, cf. `Invariant`).
        let store_ptr: *mut ArcSlab<NC::T<'static>, _, PAGE_SIZE> = store_ptr.cast_mut().cast();
        // SAFETY: `store_ptr` is derived from a live manager's store and we
        // retained a reference count above.
        Self(unsafe { ArcSlabRef::from_raw(NonNull::new_unchecked(store_ptr)) })
    }
}
1848
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > oxidd_core::ManagerRef for ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    type Manager<'id> = M<'id, NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>;

    #[inline]
    fn with_manager_shared<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&Self::Manager<'id>) -> T,
    {
        // Hold the manager's shared (read) lock for the duration of `f`; the
        // guard temporary lives until the end of this statement.
        f(&*self.0.data().manager.shared())
    }

    #[inline]
    fn with_manager_exclusive<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&mut Self::Manager<'id>) -> T,
    {
        // Hold the manager's exclusive (write) lock for the duration of `f`.
        f(&mut *self.0.data().manager.exclusive())
    }
}
1877
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag + Sync + Send,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > oxidd_core::HasWorkers for ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
where
    NC::T<'static>: Send + Sync,
    TMC::T<'static>: Send + Sync,
    MDC::T<'static>: Send + Sync,
{
    type WorkerPool = crate::workers::Workers;

    /// Access the worker pool stored alongside the manager in `StoreInner`
    #[inline]
    fn workers(&self) -> &Self::WorkerPool {
        &self.0.data().workers
    }
}
1899
/// Create a new manager
///
/// `data` is the initial manager data; `threads` is forwarded to
/// `StoreInner::init_in()` (presumably the worker thread count, cf. the
/// `HasWorkers` impl — confirm against `StoreInner`).
pub fn new_manager<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
    MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>(
    data: MDC::T<'static>,
    threads: u32,
) -> ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS> {
    // Evaluate a few constants for assertions
    let _ = Edge::<'static, NC::T<'static>, ET, TAG_BITS>::TAG_MASK;
    let _ = Edge::<'static, NC::T<'static>, ET, TAG_BITS>::ALL_TAG_BITS;

    // SAFETY(review): assumes `StoreInner::init_in` fully initializes `slot`
    // as `ArcSlab::new_with` requires — confirm against its contract.
    let arc = unsafe { ArcSlab::new_with(|slot| StoreInner::init_in(slot, data, threads)) };

    // initialize the manager data
    {
        // The scope ensures the exclusive lock is released before returning.
        let guard = &mut arc.data().manager.exclusive();
        let manager = &mut *guard;
        MDC::T::<'static>::init(&manager.data, manager);
        MDC::T::<'static>::init_mut(manager);
    }

    ManagerRef(arc)
}
1929
1930// === Functions ===============================================================
1931
/// Function in a decision diagram: an edge that additionally owns a reference
/// count on the associated node store
///
/// `#[repr(transparent)]` guarantees the same representation as an [`Edge`];
/// `Function::as_edge` relies on this via `transmute`.
#[repr(transparent)]
#[derive_where(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Function<
    NC: InnerNodeCons<ET, TAG_BITS>,
    ET: Tag,
    TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
    RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
    MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
    const PAGE_SIZE: usize,
    const TAG_BITS: u32,
>(NonNull<()>, PhantomData<(NC, ET, TMC, RC, MDC)>);
1943
// SAFETY(review): a `Function` owns its node and store reference counts, so it
// is assumed to be safe to send across threads whenever all store components
// (nodes, terminal manager, manager data) are `Send + Sync` — confirm against
// the refcounting invariants.
unsafe impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Send for Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
where
    for<'id> NC::T<'id>: Send + Sync,
    for<'id> TMC::T<'id>: Send + Sync,
    for<'id> MDC::T<'id>: Send + Sync,
{
}
1959
// SAFETY(review): `&Function` only permits reading the pointer and cloning
// (which uses atomic reference counts); assumed sound under the same bounds as
// the `Send` impl — confirm against the refcounting invariants.
unsafe impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Sync for Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
where
    for<'id> NC::T<'id>: Send + Sync,
    for<'id> TMC::T<'id>: Send + Sync,
    for<'id> MDC::T<'id>: Send + Sync,
{
}
1975
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    /// Get a pointer to the `ArcSlab` store this function belongs to
    ///
    /// Does not modify any reference counters.
    #[inline]
    fn store(
        &self,
    ) -> NonNull<
        ArcSlab<
            NC::T<'static>,
            StoreInner<
                'static,
                NC::T<'static>,
                ET,
                TMC::T<'static>,
                RC::T<'static>,
                MDC::T<'static>,
                PAGE_SIZE,
                TAG_BITS,
            >,
            PAGE_SIZE,
        >,
    > {
        // View the stored pointer as an edge without taking ownership of a
        // reference count (hence `ManuallyDrop`).
        let edge: ManuallyDrop<Edge<'static, NC::T<'static>, ET, TAG_BITS>> =
            ManuallyDrop::new(Edge(self.0, PhantomData));
        if edge.is_inner() {
            // Inner nodes live inside the `ArcSlab`, so the slab can be
            // recovered from the node's (non-owning) handle.
            let ptr: NonNull<NC::T<'static>> = edge.all_untagged_ptr().cast();
            let handle = ManuallyDrop::new(unsafe { ExtHandle::from_raw(ptr) });
            NonNull::from(ExtHandle::slab(&*handle))
        } else {
            // Terminal: go from the terminal manager to the enclosing
            // `StoreInner` and from there to the `ArcSlab`.
            let ptr = ArcSlab::from_data_ptr(StoreInner::from_terminal_manager_ptr(
                TerminalManager::terminal_manager(&*edge).as_ptr(),
            ));
            unsafe { NonNull::new_unchecked(ptr.cast_mut()) }
        }
    }

    /// Convert `self` into a raw pointer, e.g. for usage in a foreign function
    /// interface.
    ///
    /// This method does not change any reference counters. To avoid a memory
    /// leak, use [`Self::from_raw()`] to convert the pointer back into a
    /// `Function`.
    #[inline(always)]
    pub fn into_raw(self) -> *const std::ffi::c_void {
        // `forget` suppresses `Drop`, transferring both reference counts
        // (node and store) to the returned pointer.
        let ptr = self.0;
        std::mem::forget(self);
        ptr.as_ptr().cast()
    }

    /// Convert `raw` into a `Function`
    ///
    /// # Safety
    ///
    /// `raw` must have been obtained via [`Self::into_raw()`]. This function
    /// does not change any reference counters, so calling this function
    /// multiple times for the same pointer may lead to use after free bugs
    /// depending on the usage of the returned `Function`.
    #[inline(always)]
    pub unsafe fn from_raw(raw: *const std::ffi::c_void) -> Self {
        // Panic on null rather than constructing a dangling `Function`.
        let ptr = NonNull::new(raw.cast_mut().cast()).expect("expected a non-null pointer");
        Self(ptr, PhantomData)
    }
}
2046
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Clone for Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    /// Clone by incrementing the node's (or terminal's) reference count as
    /// well as the store's; the raw pointer is simply copied.
    #[inline]
    fn clone(&self) -> Self {
        // Non-owning view of the edge (`ManuallyDrop` suppresses the drop).
        let mut edge: ManuallyDrop<Edge<'static, NC::T<'static>, ET, TAG_BITS>> =
            ManuallyDrop::new(Edge(self.0, PhantomData));
        if edge.is_inner() {
            // Strip the tag bits to obtain the actual node pointer.
            edge.0 = edge.all_untagged_ptr();
            // SAFETY: `edge` points to an inner node (checked above).
            unsafe { edge.inner_node_unchecked() }.retain();
        } else {
            // `clone_edge` increments the terminal's reference count; forget
            // the returned edge since we construct the raw copy below.
            std::mem::forget(TMC::T::<'static>::clone_edge(&*edge));
        }
        let store = self.store();
        // SAFETY: `store` is valid since `self` holds a reference count on it.
        unsafe { store.as_ref() }.retain();
        Self(self.0, PhantomData)
    }
}
2072
impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > Drop for Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    /// Release the node's (or terminal's) reference count, then the store's.
    fn drop(&mut self) {
        // Compute the store pointer before the edge is consumed below.
        let store = self.store();
        let edge: Edge<'static, NC::T<'static>, ET, TAG_BITS> = Edge(self.0, PhantomData);
        if edge.is_inner() {
            // SAFETY: `edge` points to an inner node
            unsafe { edge.drop_inner() };
        } else {
            TMC::T::<'static>::drop_edge(edge);
        }
        // SAFETY: We own a reference count, `store` is valid, and we do not
        // use the `store` pointer afterwards.
        unsafe { ArcSlab::release(store) };
    }
}
2097
unsafe impl<
        NC: InnerNodeCons<ET, TAG_BITS>,
        ET: Tag,
        TMC: TerminalManagerCons<NC, ET, RC, MDC, PAGE_SIZE, TAG_BITS>,
        RC: DiagramRulesCons<NC, ET, TMC, MDC, PAGE_SIZE, TAG_BITS>,
        MDC: ManagerDataCons<NC, ET, TMC, RC, PAGE_SIZE, TAG_BITS>,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > oxidd_core::function::Function for Function<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>
{
    const REPR_ID: &str = "<none>";

    type Manager<'id> =
        Manager<'id, NC::T<'id>, ET, TMC::T<'id>, RC::T<'id>, MDC::T<'id>, PAGE_SIZE, TAG_BITS>;

    type ManagerRef = ManagerRef<NC, ET, TMC, RC, MDC, PAGE_SIZE, TAG_BITS>;

    #[inline]
    fn from_edge<'id>(manager: &Self::Manager<'id>, edge: EdgeOfFunc<'id, Self>) -> Self {
        // Acquire a store reference count for the new `Function`; the edge's
        // node reference count is taken over as-is (`ManuallyDrop` suppresses
        // the edge's drop).
        manager.store().retain();
        Self(ManuallyDrop::new(edge).0, PhantomData)
    }

    #[inline]
    fn as_edge<'id>(&self, manager: &Self::Manager<'id>) -> &EdgeOfFunc<'id, Self> {
        // Ensure that this function actually belongs to the given manager.
        assert!(std::ptr::eq(self.store().as_ptr().cast(), manager.store()));
        // SAFETY: `Function` and `Edge` have the same representation
        unsafe { std::mem::transmute(self) }
    }

    #[inline]
    fn into_edge<'id>(self, manager: &Self::Manager<'id>) -> EdgeOfFunc<'id, Self> {
        let store = manager.store();
        // Ensure that this function actually belongs to the given manager.
        assert!(std::ptr::eq(self.store().as_ptr().cast(), store));
        // Give up the store reference count held by `self` (its `Drop` does
        // not run because of `ManuallyDrop` below) ...
        unsafe { ArcSlab::release(NonNull::from(store)) };
        // ... and move the node reference count into the returned edge.
        Edge(ManuallyDrop::new(self).0, PhantomData)
    }

    #[inline]
    fn manager_ref(&self) -> Self::ManagerRef {
        let store_ptr = self.store();
        // The new `ManagerRef` owns its own store reference count.
        unsafe { store_ptr.as_ref() }.retain();
        ManagerRef(unsafe { ArcSlabRef::from_raw(store_ptr) })
    }

    #[inline]
    fn with_manager_shared<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
    {
        // Non-owning view of the edge; no reference counters are touched.
        let edge = ManuallyDrop::new(Edge(self.0, PhantomData));
        let store_ptr = self.store();
        f(
            &*unsafe { store_ptr.as_ref() }.data().manager.shared(),
            &*edge,
        )
    }

    #[inline]
    fn with_manager_exclusive<F, T>(&self, f: F) -> T
    where
        F: for<'id> FnOnce(&mut Self::Manager<'id>, &EdgeOfFunc<'id, Self>) -> T,
    {
        // Non-owning view of the edge; no reference counters are touched.
        let edge = ManuallyDrop::new(Edge(self.0, PhantomData));
        let store_ptr = self.store();
        f(
            &mut *unsafe { store_ptr.as_ref() }.data().manager.exclusive(),
            &*edge,
        )
    }
}
2169
2170// === Additional Trait Implementations ========================================
2171
// Expose the apply cache of the manager data on the manager itself.
impl<
        'id,
        N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
        ET: Tag,
        TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
        R: DiagramRules<Edge<'id, N, ET, TAG_BITS>, N, TM::TerminalNode>,
        MD: HasApplyCache<Self, O>
            + ManagerEventSubscriber<Self>
            + DropWith<Edge<'id, N, ET, TAG_BITS>>,
        O: Copy,
        const PAGE_SIZE: usize,
        const TAG_BITS: u32,
    > HasApplyCache<Self, O> for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
{
    type ApplyCache = MD::ApplyCache;

    /// Shared access to the apply cache stored in the manager data
    #[inline]
    fn apply_cache(&self) -> &Self::ApplyCache {
        self.data.apply_cache()
    }

    /// Exclusive access to the apply cache stored in the manager data
    #[inline]
    fn apply_cache_mut(&mut self) -> &mut Self::ApplyCache {
        self.data.apply_cache_mut()
    }
}
2198
2199impl<'id, T, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> AsRef<T>
2200    for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
2201where
2202    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
2203    ET: Tag,
2204    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
2205    MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + AsRef<T>,
2206{
2207    #[inline(always)]
2208    fn as_ref(&self) -> &T {
2209        self.data.as_ref()
2210    }
2211}
2212
2213impl<'id, T, N, ET, TM, R, MD, const PAGE_SIZE: usize, const TAG_BITS: u32> AsMut<T>
2214    for Manager<'id, N, ET, TM, R, MD, PAGE_SIZE, TAG_BITS>
2215where
2216    N: NodeBase + InnerNode<Edge<'id, N, ET, TAG_BITS>>,
2217    ET: Tag,
2218    TM: TerminalManager<'id, N, ET, MD, PAGE_SIZE, TAG_BITS>,
2219    MD: DropWith<Edge<'id, N, ET, TAG_BITS>> + AsMut<T>,
2220{
2221    #[inline(always)]
2222    fn as_mut(&mut self) -> &mut T {
2223        self.data.as_mut()
2224    }
2225}