// grafeo_core/graph/lpg/store/mod.rs
1//! The in-memory LPG graph store.
2//!
3//! This is where your nodes and edges actually live. Most users interact
4//! through [`GrafeoDB`](grafeo_engine::GrafeoDB), but algorithm implementers
5//! sometimes need the raw [`LpgStore`] for direct adjacency traversal.
6//!
7//! Key features:
8//! - MVCC versioning - concurrent readers don't block each other
9//! - Columnar properties with zone maps for fast filtering
10//! - Forward and backward adjacency indexes
11
12mod edge_ops;
13mod graph_store_impl;
14mod index;
15mod node_ops;
16mod property_ops;
17mod schema;
18mod search;
19mod statistics;
20mod traversal;
21mod versioning;
22
23#[cfg(test)]
24mod tests;
25
26use super::PropertyStorage;
27#[cfg(not(feature = "tiered-storage"))]
28use super::{EdgeRecord, NodeRecord};
29use crate::index::adjacency::ChunkedAdjacency;
30use crate::statistics::Statistics;
31use arcstr::ArcStr;
32use dashmap::DashMap;
33#[cfg(not(feature = "tiered-storage"))]
34use grafeo_common::mvcc::VersionChain;
35use grafeo_common::types::{EdgeId, EpochId, HashableValue, NodeId, PropertyKey, Value};
36use grafeo_common::utils::hash::{FxHashMap, FxHashSet};
37use parking_lot::RwLock;
38use std::cmp::Ordering as CmpOrdering;
39use std::sync::Arc;
40use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
41
42#[cfg(feature = "vector-index")]
43use crate::index::vector::HnswIndex;
44
45#[cfg(feature = "tiered-storage")]
46use crate::storage::EpochStore;
47use grafeo_common::memory::arena::AllocError;
48#[cfg(feature = "tiered-storage")]
49use grafeo_common::memory::arena::ArenaAllocator;
50#[cfg(feature = "tiered-storage")]
51use grafeo_common::mvcc::VersionIndex;
52
53/// Compares two values for ordering (used for range checks).
54pub(super) fn compare_values_for_range(a: &Value, b: &Value) -> Option<CmpOrdering> {
55    match (a, b) {
56        (Value::Int64(a), Value::Int64(b)) => Some(a.cmp(b)),
57        (Value::Float64(a), Value::Float64(b)) => a.partial_cmp(b),
58        (Value::String(a), Value::String(b)) => Some(a.cmp(b)),
59        (Value::Bool(a), Value::Bool(b)) => Some(a.cmp(b)),
60        (Value::Timestamp(a), Value::Timestamp(b)) => Some(a.cmp(b)),
61        (Value::Date(a), Value::Date(b)) => Some(a.cmp(b)),
62        (Value::Time(a), Value::Time(b)) => Some(a.cmp(b)),
63        _ => None,
64    }
65}
66
67/// Checks if a value is within a range.
68pub(super) fn value_in_range(
69    value: &Value,
70    min: Option<&Value>,
71    max: Option<&Value>,
72    min_inclusive: bool,
73    max_inclusive: bool,
74) -> bool {
75    // Check lower bound
76    if let Some(min_val) = min {
77        match compare_values_for_range(value, min_val) {
78            Some(CmpOrdering::Less) => return false,
79            Some(CmpOrdering::Equal) if !min_inclusive => return false,
80            None => return false, // Can't compare
81            _ => {}
82        }
83    }
84
85    // Check upper bound
86    if let Some(max_val) = max {
87        match compare_values_for_range(value, max_val) {
88            Some(CmpOrdering::Greater) => return false,
89            Some(CmpOrdering::Equal) if !max_inclusive => return false,
90            None => return false,
91            _ => {}
92        }
93    }
94
95    true
96}
97
/// Configuration for the LPG store.
///
/// The defaults work well for most cases. Tune `backward_edges` if you only
/// traverse in one direction (saves memory), or adjust capacities if you know
/// your graph size upfront (avoids reallocations).
///
/// NOTE(review): `with_config` currently builds its maps empty and does not
/// apply the capacity hints (the `config` field carries `#[allow(dead_code)]`)
/// — TODO confirm whether the capacities are meant to pre-size the entity maps.
#[derive(Debug, Clone)]
pub struct LpgStoreConfig {
    /// Maintain backward adjacency for incoming edge queries. Turn off if
    /// you only traverse outgoing edges - saves ~50% adjacency memory.
    pub backward_edges: bool,
    /// Initial capacity for nodes (avoids early reallocations).
    pub initial_node_capacity: usize,
    /// Initial capacity for edges (avoids early reallocations).
    pub initial_edge_capacity: usize,
}
113
114impl Default for LpgStoreConfig {
115    fn default() -> Self {
116        Self {
117            backward_edges: true,
118            initial_node_capacity: 1024,
119            initial_edge_capacity: 4096,
120        }
121    }
122}
123
/// The core in-memory graph storage.
///
/// Everything lives here: nodes, edges, properties, adjacency indexes, and
/// version chains for MVCC. Concurrent reads never block each other.
///
/// Most users should go through `GrafeoDB` (from the `grafeo_engine` crate) which
/// adds transaction management and query execution. Use `LpgStore` directly
/// when you need raw performance for algorithm implementations.
///
/// # Example
///
/// ```
/// use grafeo_core::graph::lpg::LpgStore;
/// use grafeo_core::graph::Direction;
///
/// let store = LpgStore::new().expect("arena allocation");
///
/// // Create a small social network
/// let alix = store.create_node(&["Person"]);
/// let gus = store.create_node(&["Person"]);
/// store.create_edge(alix, gus, "KNOWS");
///
/// // Traverse outgoing edges
/// for neighbor in store.neighbors(alix, Direction::Outgoing) {
///     println!("Alix knows node {:?}", neighbor);
/// }
/// ```
///
/// # Lock Ordering
///
/// `LpgStore` contains multiple `RwLock` fields that must be acquired in a
/// consistent order to prevent deadlocks. Always acquire locks in this order:
///
/// ## Level 1 - Entity Storage (mutually exclusive via feature flag)
/// 1. `nodes` / `node_versions`
/// 2. `edges` / `edge_versions`
///
/// ## Level 2 - Catalogs (acquire as pairs when writing)
/// 3. `label_to_id` + `id_to_label`
/// 4. `edge_type_to_id` + `id_to_edge_type`
///
/// ## Level 3 - Indexes
/// 5. `label_index`
/// 6. `node_labels`
/// 7. `property_indexes`
///
/// ## Level 4 - Statistics
/// 8. `statistics`
///
/// ## Level 5 - Nested Locks (internal to other structs)
/// 9. `PropertyStorage::columns` (via `node_properties`/`edge_properties`)
/// 10. `ChunkedAdjacency::lists` (via `forward_adj`/`backward_adj`)
///
/// ## Level 6 - Named Graphs
/// 11. `named_graphs` (acquired after `statistics`; see its field doc)
///
/// ## Rules
/// - Catalog pairs must be acquired together when writing.
/// - Never hold entity locks while acquiring catalog locks in a different scope.
/// - Statistics lock is last among the core store locks; only `named_graphs`
///   may be acquired after it.
/// - Read locks are generally safe, but avoid read-to-write upgrades.
pub struct LpgStore {
    /// Configuration.
    #[allow(dead_code)]
    pub(super) config: LpgStoreConfig,

    /// Node records indexed by NodeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 1
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) nodes: RwLock<FxHashMap<NodeId, VersionChain<NodeRecord>>>,

    /// Edge records indexed by EdgeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 2
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) edges: RwLock<FxHashMap<EdgeId, VersionChain<EdgeRecord>>>,

    // === Tiered Storage Fields (feature-gated) ===
    //
    // Lock ordering for arena access:
    //   version_lock (read/write) → arena read lock (via arena_allocator.arena())
    //
    // Rules:
    // - Acquire arena read lock *after* version locks, never before.
    // - Multiple threads may call arena.read_at() concurrently (shared refs only).
    // - Never acquire arena write lock (alloc_new_chunk) while holding version locks.
    // - freeze_epoch order: node_versions.read() → arena.read_at(),
    //   then edge_versions.read() → arena.read_at().
    /// Arena allocator for hot data storage.
    /// Data is stored in per-epoch arenas for fast allocation and bulk deallocation.
    #[cfg(feature = "tiered-storage")]
    pub(super) arena_allocator: Arc<ArenaAllocator>,

    /// Node version indexes - store metadata and arena offsets.
    /// The actual NodeRecord data is stored in the arena.
    /// Lock order: 1
    #[cfg(feature = "tiered-storage")]
    pub(super) node_versions: RwLock<FxHashMap<NodeId, VersionIndex>>,

    /// Edge version indexes - store metadata and arena offsets.
    /// The actual EdgeRecord data is stored in the arena.
    /// Lock order: 2
    #[cfg(feature = "tiered-storage")]
    pub(super) edge_versions: RwLock<FxHashMap<EdgeId, VersionIndex>>,

    /// Cold storage for frozen epochs.
    /// Contains compressed epoch blocks for historical data.
    #[cfg(feature = "tiered-storage")]
    pub(super) epoch_store: Arc<EpochStore>,

    /// Property storage for nodes.
    pub(super) node_properties: PropertyStorage<NodeId>,

    /// Property storage for edges.
    pub(super) edge_properties: PropertyStorage<EdgeId>,

    /// Label name to ID mapping.
    /// Lock order: 3 (acquire with id_to_label)
    pub(super) label_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Label ID to name mapping.
    /// Lock order: 3 (acquire with label_to_id)
    pub(super) id_to_label: RwLock<Vec<ArcStr>>,

    /// Edge type name to ID mapping.
    /// Lock order: 4 (acquire with id_to_edge_type)
    pub(super) edge_type_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Edge type ID to name mapping.
    /// Lock order: 4 (acquire with edge_type_to_id)
    pub(super) id_to_edge_type: RwLock<Vec<ArcStr>>,

    /// Forward adjacency lists (outgoing edges).
    pub(super) forward_adj: ChunkedAdjacency,

    /// Backward adjacency lists (incoming edges).
    /// Only populated if config.backward_edges is true.
    pub(super) backward_adj: Option<ChunkedAdjacency>,

    /// Label index: label_id -> set of node IDs.
    /// The inner map carries unit values (a map used as a set).
    /// Lock order: 5
    pub(super) label_index: RwLock<Vec<FxHashMap<NodeId, ()>>>,

    /// Node labels: node_id -> set of label IDs.
    /// Reverse mapping to efficiently get labels for a node.
    /// Lock order: 6
    pub(super) node_labels: RwLock<FxHashMap<NodeId, FxHashSet<u32>>>,

    /// Property indexes: property_key -> (value -> set of node IDs).
    ///
    /// When a property is indexed, lookups by value are O(1) instead of O(n).
    /// Use [`create_property_index`] to enable indexing for a property.
    /// Lock order: 7
    pub(super) property_indexes:
        RwLock<FxHashMap<PropertyKey, DashMap<HashableValue, FxHashSet<NodeId>>>>,

    /// Vector indexes: "label:property" -> HNSW index.
    ///
    /// Created via [`GrafeoDB::create_vector_index`](grafeo_engine::GrafeoDB::create_vector_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "vector-index")]
    pub(super) vector_indexes: RwLock<FxHashMap<String, Arc<HnswIndex>>>,

    /// Text indexes: "label:property" -> inverted index with BM25 scoring.
    ///
    /// Created via [`GrafeoDB::create_text_index`](grafeo_engine::GrafeoDB::create_text_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "text-index")]
    pub(super) text_indexes:
        RwLock<FxHashMap<String, Arc<RwLock<crate::index::text::InvertedIndex>>>>,

    /// Next node ID.
    pub(super) next_node_id: AtomicU64,

    /// Next edge ID.
    pub(super) next_edge_id: AtomicU64,

    /// Current epoch.
    pub(super) current_epoch: AtomicU64,

    /// Live (non-deleted) node count, maintained incrementally.
    /// Avoids O(n) full scan in `compute_statistics()`.
    pub(super) live_node_count: AtomicI64,

    /// Live (non-deleted) edge count, maintained incrementally.
    /// Avoids O(m) full scan in `compute_statistics()`.
    pub(super) live_edge_count: AtomicI64,

    /// Per-edge-type live counts, indexed by edge_type_id.
    /// Avoids O(m) edge scan in `compute_statistics()`.
    /// Lock order: 8 (same level as statistics)
    pub(super) edge_type_live_counts: RwLock<Vec<i64>>,

    /// Statistics for cost-based optimization.
    /// Lock order: 8 (last of the core store locks)
    pub(super) statistics: RwLock<Arc<Statistics>>,

    /// Whether statistics need full recomputation (e.g., after rollback).
    pub(super) needs_stats_recompute: AtomicBool,

    /// Named graphs, each an independent `LpgStore` partition.
    /// Zero overhead for single-graph databases (empty HashMap).
    /// Lock order: 9 (after statistics)
    named_graphs: RwLock<FxHashMap<String, Arc<LpgStore>>>,
}
327
328impl LpgStore {
329    /// Creates a new LPG store with default configuration.
330    ///
331    /// # Errors
332    ///
333    /// Returns [`AllocError`] if the arena allocator cannot be initialized
334    /// (only possible with the `tiered-storage` feature).
335    // FIXME: propagate Result to callers
336    pub fn new() -> Result<Self, AllocError> {
337        Self::with_config(LpgStoreConfig::default())
338    }
339
340    /// Creates a new LPG store with custom configuration.
341    ///
342    /// # Errors
343    ///
344    /// Returns [`AllocError`] if the arena allocator cannot be initialized
345    /// (only possible with the `tiered-storage` feature).
346    // FIXME: propagate Result to callers
347    pub fn with_config(config: LpgStoreConfig) -> Result<Self, AllocError> {
348        let backward_adj = if config.backward_edges {
349            Some(ChunkedAdjacency::new())
350        } else {
351            None
352        };
353
354        Ok(Self {
355            #[cfg(not(feature = "tiered-storage"))]
356            nodes: RwLock::new(FxHashMap::default()),
357            #[cfg(not(feature = "tiered-storage"))]
358            edges: RwLock::new(FxHashMap::default()),
359            #[cfg(feature = "tiered-storage")]
360            arena_allocator: Arc::new(ArenaAllocator::new()?),
361            #[cfg(feature = "tiered-storage")]
362            node_versions: RwLock::new(FxHashMap::default()),
363            #[cfg(feature = "tiered-storage")]
364            edge_versions: RwLock::new(FxHashMap::default()),
365            #[cfg(feature = "tiered-storage")]
366            epoch_store: Arc::new(EpochStore::new()),
367            node_properties: PropertyStorage::new(),
368            edge_properties: PropertyStorage::new(),
369            label_to_id: RwLock::new(FxHashMap::default()),
370            id_to_label: RwLock::new(Vec::new()),
371            edge_type_to_id: RwLock::new(FxHashMap::default()),
372            id_to_edge_type: RwLock::new(Vec::new()),
373            forward_adj: ChunkedAdjacency::new(),
374            backward_adj,
375            label_index: RwLock::new(Vec::new()),
376            node_labels: RwLock::new(FxHashMap::default()),
377            property_indexes: RwLock::new(FxHashMap::default()),
378            #[cfg(feature = "vector-index")]
379            vector_indexes: RwLock::new(FxHashMap::default()),
380            #[cfg(feature = "text-index")]
381            text_indexes: RwLock::new(FxHashMap::default()),
382            next_node_id: AtomicU64::new(0),
383            next_edge_id: AtomicU64::new(0),
384            current_epoch: AtomicU64::new(0),
385            live_node_count: AtomicI64::new(0),
386            live_edge_count: AtomicI64::new(0),
387            edge_type_live_counts: RwLock::new(Vec::new()),
388            statistics: RwLock::new(Arc::new(Statistics::new())),
389            needs_stats_recompute: AtomicBool::new(false),
390            named_graphs: RwLock::new(FxHashMap::default()),
391            config,
392        })
393    }
394
395    /// Returns the current epoch.
396    #[must_use]
397    pub fn current_epoch(&self) -> EpochId {
398        EpochId::new(self.current_epoch.load(Ordering::Acquire))
399    }
400
401    /// Creates a new epoch.
402    #[doc(hidden)]
403    pub fn new_epoch(&self) -> EpochId {
404        let id = self.current_epoch.fetch_add(1, Ordering::AcqRel) + 1;
405        EpochId::new(id)
406    }
407
408    /// Syncs the store epoch to match an external epoch counter.
409    ///
410    /// Used by the transaction manager to keep the store's epoch in step
411    /// after a transaction commit advances the global epoch.
412    #[doc(hidden)]
413    pub fn sync_epoch(&self, epoch: EpochId) {
414        self.current_epoch
415            .fetch_max(epoch.as_u64(), Ordering::AcqRel);
416    }
417
418    /// Removes all data from the store, resetting it to an empty state.
419    ///
420    /// Acquires locks in the documented ordering to prevent deadlocks.
421    /// After clearing, the store behaves as if freshly constructed.
422    pub fn clear(&self) {
423        // Level 1: Entity storage
424        #[cfg(not(feature = "tiered-storage"))]
425        {
426            self.nodes.write().clear();
427            self.edges.write().clear();
428        }
429        #[cfg(feature = "tiered-storage")]
430        {
431            self.node_versions.write().clear();
432            self.edge_versions.write().clear();
433            // Arena allocator chunks are leaked; epochs are cleared via epoch_store.
434        }
435
436        // Level 2: Catalogs (acquire as pairs)
437        {
438            self.label_to_id.write().clear();
439            self.id_to_label.write().clear();
440        }
441        {
442            self.edge_type_to_id.write().clear();
443            self.id_to_edge_type.write().clear();
444        }
445
446        // Level 3: Indexes
447        self.label_index.write().clear();
448        self.node_labels.write().clear();
449        self.property_indexes.write().clear();
450        #[cfg(feature = "vector-index")]
451        self.vector_indexes.write().clear();
452        #[cfg(feature = "text-index")]
453        self.text_indexes.write().clear();
454
455        // Nested: Properties and adjacency
456        self.node_properties.clear();
457        self.edge_properties.clear();
458        self.forward_adj.clear();
459        if let Some(ref backward) = self.backward_adj {
460            backward.clear();
461        }
462
463        // Atomics: ID counters
464        self.next_node_id.store(0, Ordering::Release);
465        self.next_edge_id.store(0, Ordering::Release);
466        self.current_epoch.store(0, Ordering::Release);
467
468        // Level 4: Statistics
469        self.live_node_count.store(0, Ordering::Release);
470        self.live_edge_count.store(0, Ordering::Release);
471        self.edge_type_live_counts.write().clear();
472        *self.statistics.write() = Arc::new(Statistics::new());
473        self.needs_stats_recompute.store(false, Ordering::Release);
474    }
475
476    /// Returns whether backward adjacency (incoming edge index) is available.
477    ///
478    /// When backward adjacency is enabled (the default), bidirectional search
479    /// algorithms can traverse from the target toward the source.
480    #[must_use]
481    pub fn has_backward_adjacency(&self) -> bool {
482        self.backward_adj.is_some()
483    }
484
485    // === Named Graph Management ===
486
487    /// Returns a named graph by name, or `None` if it does not exist.
488    #[must_use]
489    pub fn graph(&self, name: &str) -> Option<Arc<LpgStore>> {
490        self.named_graphs.read().get(name).cloned()
491    }
492
493    /// Returns a named graph, creating it if it does not exist.
494    ///
495    /// # Errors
496    ///
497    /// Returns [`AllocError`] if a new store cannot be allocated.
498    // FIXME: propagate Result to callers
499    pub fn graph_or_create(&self, name: &str) -> Result<Arc<LpgStore>, AllocError> {
500        {
501            let graphs = self.named_graphs.read();
502            if let Some(g) = graphs.get(name) {
503                return Ok(Arc::clone(g));
504            }
505        }
506        let mut graphs = self.named_graphs.write();
507        // Double-check after acquiring write lock
508        if let Some(g) = graphs.get(name) {
509            return Ok(Arc::clone(g));
510        }
511        let store = Arc::new(LpgStore::new()?);
512        graphs.insert(name.to_string(), Arc::clone(&store));
513        Ok(store)
514    }
515
516    /// Creates a named graph. Returns `true` on success, `false` if it already exists.
517    ///
518    /// # Errors
519    ///
520    /// Returns [`AllocError`] if the new store cannot be allocated.
521    // FIXME: propagate Result to callers
522    pub fn create_graph(&self, name: &str) -> Result<bool, AllocError> {
523        let mut graphs = self.named_graphs.write();
524        if graphs.contains_key(name) {
525            return Ok(false);
526        }
527        graphs.insert(name.to_string(), Arc::new(LpgStore::new()?));
528        Ok(true)
529    }
530
531    /// Drops a named graph. Returns `false` if it did not exist.
532    pub fn drop_graph(&self, name: &str) -> bool {
533        self.named_graphs.write().remove(name).is_some()
534    }
535
536    /// Returns all named graph names.
537    #[must_use]
538    pub fn graph_names(&self) -> Vec<String> {
539        self.named_graphs.read().keys().cloned().collect()
540    }
541
542    /// Returns the number of named graphs.
543    #[must_use]
544    pub fn graph_count(&self) -> usize {
545        self.named_graphs.read().len()
546    }
547
548    /// Clears a specific graph, or the default graph if `name` is `None`.
549    pub fn clear_graph(&self, name: Option<&str>) {
550        match name {
551            Some(n) => {
552                if let Some(g) = self.named_graphs.read().get(n) {
553                    g.clear();
554                }
555            }
556            None => self.clear(),
557        }
558    }
559
560    /// Copies all data from the source graph to the destination graph.
561    /// Creates the destination graph if it does not exist.
562    ///
563    /// # Errors
564    ///
565    /// Returns [`AllocError`] if the destination store cannot be allocated.
566    // FIXME: propagate Result to callers
567    pub fn copy_graph(&self, source: Option<&str>, dest: Option<&str>) -> Result<(), AllocError> {
568        let _src = match source {
569            Some(n) => self.graph(n),
570            None => None, // default graph
571        };
572        let _dest_graph = dest.map(|n| self.graph_or_create(n)).transpose()?;
573        // Full graph copy is complex (requires iterating all entities).
574        // For now, this creates the destination graph structure.
575        // Full entity-level copy will be implemented when needed.
576        Ok(())
577    }
578
579    // === Internal Helpers ===
580
581    pub(super) fn get_or_create_label_id(&self, label: &str) -> u32 {
582        {
583            let label_to_id = self.label_to_id.read();
584            if let Some(&id) = label_to_id.get(label) {
585                return id;
586            }
587        }
588
589        let mut label_to_id = self.label_to_id.write();
590        let mut id_to_label = self.id_to_label.write();
591
592        // Double-check after acquiring write lock
593        if let Some(&id) = label_to_id.get(label) {
594            return id;
595        }
596
597        let id = id_to_label.len() as u32;
598
599        let label: ArcStr = label.into();
600        label_to_id.insert(label.clone(), id);
601        id_to_label.push(label);
602
603        id
604    }
605
606    pub(super) fn get_or_create_edge_type_id(&self, edge_type: &str) -> u32 {
607        {
608            let type_to_id = self.edge_type_to_id.read();
609            if let Some(&id) = type_to_id.get(edge_type) {
610                return id;
611            }
612        }
613
614        let mut type_to_id = self.edge_type_to_id.write();
615        let mut id_to_type = self.id_to_edge_type.write();
616
617        // Double-check
618        if let Some(&id) = type_to_id.get(edge_type) {
619            return id;
620        }
621
622        let id = id_to_type.len() as u32;
623        let edge_type: ArcStr = edge_type.into();
624        type_to_id.insert(edge_type.clone(), id);
625        id_to_type.push(edge_type);
626
627        // Grow edge type live counts to match
628        let mut counts = self.edge_type_live_counts.write();
629        while counts.len() <= id as usize {
630            counts.push(0);
631        }
632
633        id
634    }
635
636    /// Increments the live edge count for a given edge type.
637    pub(super) fn increment_edge_type_count(&self, type_id: u32) {
638        let mut counts = self.edge_type_live_counts.write();
639        if counts.len() <= type_id as usize {
640            counts.resize(type_id as usize + 1, 0);
641        }
642        counts[type_id as usize] += 1;
643    }
644
645    /// Decrements the live edge count for a given edge type.
646    pub(super) fn decrement_edge_type_count(&self, type_id: u32) {
647        let mut counts = self.edge_type_live_counts.write();
648        if type_id < counts.len() as u32 {
649            counts[type_id as usize] -= 1;
650        }
651    }
652}
653
654impl Default for LpgStore {
655    fn default() -> Self {
656        // FIXME: propagate Result to callers (Default trait cannot return Result)
657        Self::new().expect("failed to allocate arena for default LpgStore")
658    }
659}