Skip to main content

grafeo_core/graph/lpg/store/
mod.rs

//! The in-memory LPG graph store.
//!
//! This is where your nodes and edges actually live. Most users interact
//! through [`GrafeoDB`](grafeo_engine::GrafeoDB), but algorithm implementers
//! sometimes need the raw [`LpgStore`] for direct adjacency traversal.
//!
//! Key features:
//! - MVCC versioning - concurrent readers don't block each other
//! - Columnar properties with zone maps for fast filtering
//! - Forward and backward adjacency indexes

12mod edge_ops;
13mod graph_store_impl;
14mod index;
15mod memory;
16mod node_ops;
17mod property_ops;
18mod schema;
19mod search;
20mod statistics;
21mod traversal;
22mod versioning;
23
24#[cfg(test)]
25mod tests;
26
27use super::PropertyStorage;
28#[cfg(not(feature = "tiered-storage"))]
29use super::{EdgeRecord, NodeRecord};
30use crate::index::adjacency::ChunkedAdjacency;
31use crate::statistics::Statistics;
32use arcstr::ArcStr;
33use dashmap::DashMap;
34#[cfg(not(feature = "tiered-storage"))]
35use grafeo_common::mvcc::VersionChain;
36use grafeo_common::types::{
37    EdgeId, EpochId, HashableValue, NodeId, PropertyKey, TransactionId, Value,
38};
39use grafeo_common::utils::hash::{FxHashMap, FxHashSet};
40use parking_lot::RwLock;
41use std::cmp::Ordering as CmpOrdering;
42use std::sync::Arc;
43use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
44
45#[cfg(feature = "vector-index")]
46use crate::index::vector::HnswIndex;
47
48#[cfg(feature = "tiered-storage")]
49use crate::storage::EpochStore;
50use grafeo_common::memory::arena::AllocError;
51#[cfg(feature = "tiered-storage")]
52use grafeo_common::memory::arena::ArenaAllocator;
53#[cfg(feature = "tiered-storage")]
54use grafeo_common::mvcc::VersionIndex;
55#[cfg(feature = "temporal")]
56use grafeo_common::temporal::VersionLog;
57
/// Undo entry for a property mutation within a transaction.
///
/// Captures the previous state of a property so it can be restored on rollback.
/// Entries are accumulated per transaction in `property_undo_log` and replayed
/// in reverse order when the transaction rolls back.
#[derive(Debug, Clone)]
pub enum PropertyUndoEntry {
    /// A node property was changed or added.
    NodeProperty {
        /// The node that was modified.
        node_id: NodeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// An edge property was changed or added.
    EdgeProperty {
        /// The edge that was modified.
        edge_id: EdgeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// A label was added to a node.
    LabelAdded {
        /// The node that had a label added.
        node_id: NodeId,
        /// The label string that was added.
        label: String,
    },
    /// A label was removed from a node.
    LabelRemoved {
        /// The node that had a label removed.
        node_id: NodeId,
        /// The label string that was removed.
        label: String,
    },
    /// A node was deleted (for rollback restoration).
    NodeDeleted {
        /// The node that was deleted.
        node_id: NodeId,
        /// The labels the node had before deletion.
        labels: Vec<String>,
        /// The properties the node had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
    /// An edge was deleted (for rollback restoration).
    EdgeDeleted {
        /// The edge that was deleted.
        edge_id: EdgeId,
        /// The source node.
        src: NodeId,
        /// The destination node.
        dst: NodeId,
        /// The edge type name.
        edge_type: String,
        /// The properties the edge had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
}
118
119/// Compares two values for ordering (used for range checks).
120pub(super) fn compare_values_for_range(a: &Value, b: &Value) -> Option<CmpOrdering> {
121    match (a, b) {
122        (Value::Int64(a), Value::Int64(b)) => Some(a.cmp(b)),
123        (Value::Float64(a), Value::Float64(b)) => a.partial_cmp(b),
124        (Value::Int64(a), Value::Float64(b)) => (*a as f64).partial_cmp(b),
125        (Value::Float64(a), Value::Int64(b)) => a.partial_cmp(&(*b as f64)),
126        (Value::String(a), Value::String(b)) => Some(a.cmp(b)),
127        (Value::Bool(a), Value::Bool(b)) => Some(a.cmp(b)),
128        (Value::Timestamp(a), Value::Timestamp(b)) => Some(a.cmp(b)),
129        (Value::Date(a), Value::Date(b)) => Some(a.cmp(b)),
130        (Value::Time(a), Value::Time(b)) => Some(a.cmp(b)),
131        _ => None,
132    }
133}
134
135/// Checks if a value is within a range.
136pub(super) fn value_in_range(
137    value: &Value,
138    min: Option<&Value>,
139    max: Option<&Value>,
140    min_inclusive: bool,
141    max_inclusive: bool,
142) -> bool {
143    // Check lower bound
144    if let Some(min_val) = min {
145        match compare_values_for_range(value, min_val) {
146            Some(CmpOrdering::Less) => return false,
147            Some(CmpOrdering::Equal) if !min_inclusive => return false,
148            None => return false, // Can't compare
149            _ => {}
150        }
151    }
152
153    // Check upper bound
154    if let Some(max_val) = max {
155        match compare_values_for_range(value, max_val) {
156            Some(CmpOrdering::Greater) => return false,
157            Some(CmpOrdering::Equal) if !max_inclusive => return false,
158            None => return false,
159            _ => {}
160        }
161    }
162
163    true
164}
165
/// Configuration for the LPG store.
///
/// The defaults work well for most cases. Tune `backward_edges` if you only
/// traverse in one direction (saves memory), or adjust capacities if you know
/// your graph size upfront (avoids reallocations).
#[derive(Debug, Clone)]
pub struct LpgStoreConfig {
    /// Maintain backward adjacency for incoming edge queries. Turn off if
    /// you only traverse outgoing edges - saves ~50% adjacency memory.
    pub backward_edges: bool,
    /// Initial capacity for nodes (avoids early reallocations).
    pub initial_node_capacity: usize,
    /// Initial capacity for edges (avoids early reallocations).
    pub initial_edge_capacity: usize,
}
181
182impl Default for LpgStoreConfig {
183    fn default() -> Self {
184        Self {
185            backward_edges: true,
186            initial_node_capacity: 1024,
187            initial_edge_capacity: 4096,
188        }
189    }
190}
191
/// Bidirectional label name/ID registry.
///
/// Combines the name-to-ID and ID-to-name mappings behind a single lock,
/// reducing lock acquisitions on both the read path (`build_node`) and the
/// write path (`get_or_create_label_id`).
pub(super) struct LabelRegistry {
    /// Label name to numeric ID.
    name_to_id: FxHashMap<ArcStr, u32>,
    /// Numeric ID to label name (index = ID, so IDs are dense from 0).
    id_to_name: Vec<ArcStr>,
}
203
204impl LabelRegistry {
205    fn new() -> Self {
206        Self {
207            name_to_id: FxHashMap::default(),
208            id_to_name: Vec::new(),
209        }
210    }
211
212    /// Looks up an existing label ID by name.
213    pub(super) fn get_id(&self, name: &str) -> Option<u32> {
214        self.name_to_id.get(name).copied()
215    }
216
217    /// Returns the label name for a given ID.
218    pub(super) fn get_name(&self, id: u32) -> Option<&ArcStr> {
219        self.id_to_name.get(id as usize)
220    }
221
222    /// Returns or creates a label ID for the given name.
223    pub(super) fn get_or_create(&mut self, name: &str) -> u32 {
224        if let Some(&id) = self.name_to_id.get(name) {
225            return id;
226        }
227        let id = self.id_to_name.len() as u32;
228        let label: ArcStr = name.into();
229        self.name_to_id.insert(label.clone(), id);
230        self.id_to_name.push(label);
231        id
232    }
233
234    /// Returns the total number of distinct labels.
235    pub(super) fn len(&self) -> usize {
236        self.id_to_name.len()
237    }
238
239    /// Returns the ID-to-name slice for iteration.
240    pub(super) fn names(&self) -> &[ArcStr] {
241        &self.id_to_name
242    }
243
244    /// Clears all label mappings.
245    pub(super) fn clear(&mut self) {
246        self.name_to_id.clear();
247        self.id_to_name.clear();
248    }
249
250    /// Estimates heap memory usage in bytes.
251    pub(super) fn heap_bytes(&self) -> usize {
252        let map_bytes = self.name_to_id.capacity()
253            * (std::mem::size_of::<ArcStr>() + std::mem::size_of::<u32>());
254        let vec_bytes = self.id_to_name.capacity() * std::mem::size_of::<ArcStr>();
255        let string_bytes: usize = self.id_to_name.iter().map(|s| s.len()).sum();
256        map_bytes + vec_bytes + string_bytes
257    }
258}
259
/// The core in-memory graph storage.
///
/// Everything lives here: nodes, edges, properties, adjacency indexes, and
/// version chains for MVCC. Concurrent reads never block each other.
///
/// Most users should go through `GrafeoDB` (from the `grafeo_engine` crate) which
/// adds transaction management and query execution. Use `LpgStore` directly
/// when you need raw performance for algorithm implementations.
///
/// # Example
///
/// ```
/// use grafeo_core::graph::lpg::LpgStore;
/// use grafeo_core::graph::Direction;
///
/// let store = LpgStore::new().expect("arena allocation");
///
/// // Create a small social network
/// let alix = store.create_node(&["Person"]);
/// let gus = store.create_node(&["Person"]);
/// store.create_edge(alix, gus, "KNOWS");
///
/// // Traverse outgoing edges
/// for neighbor in store.neighbors(alix, Direction::Outgoing) {
///     println!("Alix knows node {:?}", neighbor);
/// }
/// ```
///
/// # Lock Ordering
///
/// `LpgStore` contains multiple `RwLock` fields that must be acquired in a
/// consistent order to prevent deadlocks. Always acquire locks in this order:
///
/// ## Level 1: Entity Storage (mutually exclusive via feature flag)
/// 1. `nodes` / `node_versions`
/// 2. `edges` / `edge_versions`
///
/// ## Level 2: Catalogs
/// 3. `label_registry`
/// 4. `edge_type_to_id` + `id_to_edge_type`
///
/// ## Level 3: Indexes
/// 5. `label_index`
/// 6. `node_labels`
/// 7. `property_indexes`
///
/// ## Level 4: Statistics
/// 8. `statistics`
///
/// ## Level 5: Nested Locks (internal to other structs)
/// 9. `PropertyStorage::columns` (via `node_properties`/`edge_properties`)
/// 10. `ChunkedAdjacency::lists` (via `forward_adj`/`backward_adj`)
///
/// ## Rules
/// - Never hold entity locks while acquiring catalog locks in a different scope.
/// - Statistics lock is always last.
/// - Read locks are generally safe, but avoid read-to-write upgrades.
pub struct LpgStore {
    /// Node records indexed by NodeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 1
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) nodes: RwLock<FxHashMap<NodeId, VersionChain<NodeRecord>>>,

    /// Edge records indexed by EdgeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 2
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) edges: RwLock<FxHashMap<EdgeId, VersionChain<EdgeRecord>>>,

    // === Tiered Storage Fields (feature-gated) ===
    //
    // Lock ordering for arena access:
    //   version_lock (read/write) → arena read lock (via arena_allocator.arena())
    //
    // Rules:
    // - Acquire arena read lock *after* version locks, never before.
    // - Multiple threads may call arena.read_at() concurrently (shared refs only).
    // - Never acquire arena write lock (alloc_new_chunk) while holding version locks.
    // - freeze_epoch order: node_versions.read() → arena.read_at(),
    //   then edge_versions.read() → arena.read_at().
    /// Arena allocator for hot data storage.
    /// Data is stored in per-epoch arenas for fast allocation and bulk deallocation.
    #[cfg(feature = "tiered-storage")]
    pub(super) arena_allocator: Arc<ArenaAllocator>,

    /// Node version indexes - store metadata and arena offsets.
    /// The actual NodeRecord data is stored in the arena.
    /// Lock order: 1
    #[cfg(feature = "tiered-storage")]
    pub(super) node_versions: RwLock<FxHashMap<NodeId, VersionIndex>>,

    /// Edge version indexes - store metadata and arena offsets.
    /// The actual EdgeRecord data is stored in the arena.
    /// Lock order: 2
    #[cfg(feature = "tiered-storage")]
    pub(super) edge_versions: RwLock<FxHashMap<EdgeId, VersionIndex>>,

    /// Cold storage for frozen epochs.
    /// Contains compressed epoch blocks for historical data.
    #[cfg(feature = "tiered-storage")]
    pub(super) epoch_store: Arc<EpochStore>,

    /// Property storage for nodes.
    pub(super) node_properties: PropertyStorage<NodeId>,

    /// Property storage for edges.
    pub(super) edge_properties: PropertyStorage<EdgeId>,

    /// Bidirectional label name/ID registry.
    /// Lock order: 3
    pub(super) label_registry: RwLock<LabelRegistry>,

    /// Edge type name to ID mapping.
    /// Lock order: 4 (acquire with id_to_edge_type)
    pub(super) edge_type_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Edge type ID to name mapping (index = ID).
    /// Lock order: 4 (acquire with edge_type_to_id)
    pub(super) id_to_edge_type: RwLock<Vec<ArcStr>>,

    /// Forward adjacency lists (outgoing edges).
    pub(super) forward_adj: ChunkedAdjacency,

    /// Backward adjacency lists (incoming edges).
    /// Only populated if config.backward_edges is true.
    pub(super) backward_adj: Option<ChunkedAdjacency>,

    /// Label index: label_id -> set of node IDs.
    /// The map-to-unit is used as a set here (indexed by label ID).
    /// Lock order: 5
    pub(super) label_index: RwLock<Vec<FxHashMap<NodeId, ()>>>,

    /// Node labels: node_id -> set of label IDs.
    /// Reverse mapping to efficiently get labels for a node.
    /// Lock order: 6
    #[cfg(not(feature = "temporal"))]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, FxHashSet<u32>>>,
    /// Versioned node labels: node_id -> version log of label sets.
    /// Lock order: 6
    #[cfg(feature = "temporal")]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, VersionLog<FxHashSet<u32>>>>,

    /// Property indexes: property_key -> (value -> set of node IDs).
    ///
    /// When a property is indexed, lookups by value are O(1) instead of O(n).
    /// Use [`create_property_index`] to enable indexing for a property.
    /// Lock order: 7
    pub(super) property_indexes:
        RwLock<FxHashMap<PropertyKey, DashMap<HashableValue, FxHashSet<NodeId>>>>,

    /// Vector indexes: "label:property" -> HNSW index.
    ///
    /// Created via [`GrafeoDB::create_vector_index`](grafeo_engine::GrafeoDB::create_vector_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "vector-index")]
    pub(super) vector_indexes: RwLock<FxHashMap<String, Arc<HnswIndex>>>,

    /// Text indexes: "label:property" -> inverted index with BM25 scoring.
    ///
    /// Created via [`GrafeoDB::create_text_index`](grafeo_engine::GrafeoDB::create_text_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "text-index")]
    pub(super) text_indexes:
        RwLock<FxHashMap<String, Arc<RwLock<crate::index::text::InvertedIndex>>>>,

    /// Next node ID to hand out.
    pub(super) next_node_id: AtomicU64,

    /// Next edge ID to hand out.
    pub(super) next_edge_id: AtomicU64,

    /// Current epoch.
    pub(super) current_epoch: AtomicU64,

    /// Live (non-deleted) node count, maintained incrementally.
    /// Avoids O(n) full scan in `compute_statistics()`.
    pub(super) live_node_count: AtomicI64,

    /// Live (non-deleted) edge count, maintained incrementally.
    /// Avoids O(m) full scan in `compute_statistics()`.
    pub(super) live_edge_count: AtomicI64,

    /// Per-edge-type live counts, indexed by edge_type_id.
    /// Avoids O(m) edge scan in `compute_statistics()`.
    /// Lock order: 8 (same level as statistics)
    pub(super) edge_type_live_counts: RwLock<Vec<i64>>,

    /// Statistics for cost-based optimization.
    /// Lock order: 8 (always last)
    pub(super) statistics: RwLock<Arc<Statistics>>,

    /// Whether statistics need full recomputation (e.g., after rollback).
    pub(super) needs_stats_recompute: AtomicBool,

    /// Named graphs, each an independent `LpgStore` partition.
    /// Zero overhead for single-graph databases (empty HashMap).
    /// Lock order: 9 (after statistics)
    named_graphs: RwLock<FxHashMap<String, Arc<LpgStore>>>,

    /// Undo log for property mutations within transactions.
    ///
    /// Maps transaction IDs to a list of undo entries that capture the
    /// previous property values. On rollback, entries are replayed in
    /// reverse order to restore properties. On commit, the entries are
    /// simply discarded.
    /// Lock order: 10 (after named_graphs, independent of other locks)
    property_undo_log: RwLock<FxHashMap<TransactionId, Vec<PropertyUndoEntry>>>,
}
468
469impl LpgStore {
470    /// Creates a new LPG store with default configuration.
471    ///
472    /// # Errors
473    ///
474    /// Returns [`AllocError`] if the arena allocator cannot be initialized
475    /// (only possible with the `tiered-storage` feature).
476    pub fn new() -> Result<Self, AllocError> {
477        Self::with_config(LpgStoreConfig::default())
478    }
479
480    /// Creates a new LPG store with custom configuration.
481    ///
482    /// # Errors
483    ///
484    /// Returns [`AllocError`] if the arena allocator cannot be initialized
485    /// (only possible with the `tiered-storage` feature).
486    pub fn with_config(config: LpgStoreConfig) -> Result<Self, AllocError> {
487        let backward_adj = if config.backward_edges {
488            Some(ChunkedAdjacency::new())
489        } else {
490            None
491        };
492
493        Ok(Self {
494            #[cfg(not(feature = "tiered-storage"))]
495            nodes: RwLock::new(FxHashMap::default()),
496            #[cfg(not(feature = "tiered-storage"))]
497            edges: RwLock::new(FxHashMap::default()),
498            #[cfg(feature = "tiered-storage")]
499            arena_allocator: Arc::new(ArenaAllocator::new()?),
500            #[cfg(feature = "tiered-storage")]
501            node_versions: RwLock::new(FxHashMap::default()),
502            #[cfg(feature = "tiered-storage")]
503            edge_versions: RwLock::new(FxHashMap::default()),
504            #[cfg(feature = "tiered-storage")]
505            epoch_store: Arc::new(EpochStore::new()),
506            node_properties: PropertyStorage::new(),
507            edge_properties: PropertyStorage::new(),
508            label_registry: RwLock::new(LabelRegistry::new()),
509            edge_type_to_id: RwLock::new(FxHashMap::default()),
510            id_to_edge_type: RwLock::new(Vec::new()),
511            forward_adj: ChunkedAdjacency::new(),
512            backward_adj,
513            label_index: RwLock::new(Vec::with_capacity(16)),
514            node_labels: RwLock::new(FxHashMap::default()),
515            property_indexes: RwLock::new(FxHashMap::default()),
516            #[cfg(feature = "vector-index")]
517            vector_indexes: RwLock::new(FxHashMap::default()),
518            #[cfg(feature = "text-index")]
519            text_indexes: RwLock::new(FxHashMap::default()),
520            next_node_id: AtomicU64::new(0),
521            next_edge_id: AtomicU64::new(0),
522            current_epoch: AtomicU64::new(0),
523            live_node_count: AtomicI64::new(0),
524            live_edge_count: AtomicI64::new(0),
525            edge_type_live_counts: RwLock::new(Vec::new()),
526            statistics: RwLock::new(Arc::new(Statistics::new())),
527            needs_stats_recompute: AtomicBool::new(false),
528            named_graphs: RwLock::new(FxHashMap::default()),
529            property_undo_log: RwLock::new(FxHashMap::default()),
530        })
531    }
532
533    /// Returns the current epoch.
534    #[must_use]
535    pub fn current_epoch(&self) -> EpochId {
536        EpochId::new(self.current_epoch.load(Ordering::Acquire))
537    }
538
539    /// Creates a new epoch.
540    #[doc(hidden)]
541    pub fn new_epoch(&self) -> EpochId {
542        let id = self.current_epoch.fetch_add(1, Ordering::AcqRel) + 1;
543        EpochId::new(id)
544    }
545
546    /// Syncs the store epoch to match an external epoch counter.
547    ///
548    /// Used by the transaction manager to keep the store's epoch in step
549    /// after a transaction commit advances the global epoch.
550    #[doc(hidden)]
551    pub fn sync_epoch(&self, epoch: EpochId) {
552        self.current_epoch
553            .fetch_max(epoch.as_u64(), Ordering::AcqRel);
554    }
555
556    /// Removes all data from the store, resetting it to an empty state.
557    ///
558    /// Acquires locks in the documented ordering to prevent deadlocks.
559    /// After clearing, the store behaves as if freshly constructed.
560    pub fn clear(&self) {
561        // Level 1: Entity storage
562        #[cfg(not(feature = "tiered-storage"))]
563        {
564            self.nodes.write().clear();
565            self.edges.write().clear();
566        }
567        #[cfg(feature = "tiered-storage")]
568        {
569            self.node_versions.write().clear();
570            self.edge_versions.write().clear();
571            // Arena allocator chunks are leaked; epochs are cleared via epoch_store.
572        }
573
574        // Level 2: Catalogs
575        self.label_registry.write().clear();
576        {
577            self.edge_type_to_id.write().clear();
578            self.id_to_edge_type.write().clear();
579        }
580
581        // Level 3: Indexes
582        self.label_index.write().clear();
583        self.node_labels.write().clear();
584        self.property_indexes.write().clear();
585        #[cfg(feature = "vector-index")]
586        self.vector_indexes.write().clear();
587        #[cfg(feature = "text-index")]
588        self.text_indexes.write().clear();
589
590        // Nested: Properties and adjacency
591        self.node_properties.clear();
592        self.edge_properties.clear();
593        self.forward_adj.clear();
594        if let Some(ref backward) = self.backward_adj {
595            backward.clear();
596        }
597
598        // Atomics: ID counters
599        self.next_node_id.store(0, Ordering::Release);
600        self.next_edge_id.store(0, Ordering::Release);
601        self.current_epoch.store(0, Ordering::Release);
602
603        // Level 4: Statistics
604        self.live_node_count.store(0, Ordering::Release);
605        self.live_edge_count.store(0, Ordering::Release);
606        self.edge_type_live_counts.write().clear();
607        *self.statistics.write() = Arc::new(Statistics::new());
608        self.needs_stats_recompute.store(false, Ordering::Release);
609
610        // Level 5: Undo log
611        self.property_undo_log.write().clear();
612    }
613
614    /// Returns whether backward adjacency (incoming edge index) is available.
615    ///
616    /// When backward adjacency is enabled (the default), bidirectional search
617    /// algorithms can traverse from the target toward the source.
618    #[must_use]
619    pub fn has_backward_adjacency(&self) -> bool {
620        self.backward_adj.is_some()
621    }
622
623    // === Named Graph Management ===
624
625    /// Returns a named graph by name, or `None` if it does not exist.
626    #[must_use]
627    pub fn graph(&self, name: &str) -> Option<Arc<LpgStore>> {
628        self.named_graphs.read().get(name).cloned()
629    }
630
631    /// Returns a named graph, creating it if it does not exist.
632    ///
633    /// # Errors
634    ///
635    /// Returns [`AllocError`] if a new store cannot be allocated.
636    pub fn graph_or_create(&self, name: &str) -> Result<Arc<LpgStore>, AllocError> {
637        {
638            let graphs = self.named_graphs.read();
639            if let Some(g) = graphs.get(name) {
640                return Ok(Arc::clone(g));
641            }
642        }
643        let mut graphs = self.named_graphs.write();
644        // Double-check after acquiring write lock
645        if let Some(g) = graphs.get(name) {
646            return Ok(Arc::clone(g));
647        }
648        let store = Arc::new(LpgStore::new()?);
649        graphs.insert(name.to_string(), Arc::clone(&store));
650        Ok(store)
651    }
652
653    /// Creates a named graph. Returns `true` on success, `false` if it already exists.
654    ///
655    /// # Errors
656    ///
657    /// Returns [`AllocError`] if the new store cannot be allocated.
658    pub fn create_graph(&self, name: &str) -> Result<bool, AllocError> {
659        let mut graphs = self.named_graphs.write();
660        if graphs.contains_key(name) {
661            return Ok(false);
662        }
663        graphs.insert(name.to_string(), Arc::new(LpgStore::new()?));
664        Ok(true)
665    }
666
667    /// Drops a named graph. Returns `false` if it did not exist.
668    pub fn drop_graph(&self, name: &str) -> bool {
669        self.named_graphs.write().remove(name).is_some()
670    }
671
672    /// Returns all named graph names.
673    #[must_use]
674    pub fn graph_names(&self) -> Vec<String> {
675        self.named_graphs.read().keys().cloned().collect()
676    }
677
678    /// Returns the number of named graphs.
679    #[must_use]
680    pub fn graph_count(&self) -> usize {
681        self.named_graphs.read().len()
682    }
683
684    /// Clears a specific graph, or the default graph if `name` is `None`.
685    pub fn clear_graph(&self, name: Option<&str>) {
686        match name {
687            Some(n) => {
688                if let Some(g) = self.named_graphs.read().get(n) {
689                    g.clear();
690                }
691            }
692            None => self.clear(),
693        }
694    }
695
696    /// Copies all data from the source graph to the destination graph.
697    /// Creates the destination graph if it does not exist.
698    ///
699    /// # Errors
700    ///
701    /// Returns [`AllocError`] if the destination store cannot be allocated.
702    pub fn copy_graph(&self, source: Option<&str>, dest: Option<&str>) -> Result<(), AllocError> {
703        let _src = match source {
704            Some(n) => self.graph(n),
705            None => None, // default graph
706        };
707        let _dest_graph = dest.map(|n| self.graph_or_create(n)).transpose()?;
708        // Full graph copy is complex (requires iterating all entities).
709        // For now, this creates the destination graph structure.
710        // Full entity-level copy will be implemented when needed.
711        Ok(())
712    }
713
714    // === Internal Helpers ===
715
716    pub(super) fn get_or_create_label_id(&self, label: &str) -> u32 {
717        if let Some(id) = self.label_registry.read().get_id(label) {
718            return id;
719        }
720        self.label_registry.write().get_or_create(label)
721    }
722
723    pub(super) fn get_or_create_edge_type_id(&self, edge_type: &str) -> u32 {
724        {
725            let type_to_id = self.edge_type_to_id.read();
726            if let Some(&id) = type_to_id.get(edge_type) {
727                return id;
728            }
729        }
730
731        let mut type_to_id = self.edge_type_to_id.write();
732        let mut id_to_type = self.id_to_edge_type.write();
733
734        // Double-check
735        if let Some(&id) = type_to_id.get(edge_type) {
736            return id;
737        }
738
739        let id = id_to_type.len() as u32;
740        let edge_type: ArcStr = edge_type.into();
741        type_to_id.insert(edge_type.clone(), id);
742        id_to_type.push(edge_type);
743
744        // Grow edge type live counts to match
745        let mut counts = self.edge_type_live_counts.write();
746        while counts.len() <= id as usize {
747            counts.push(0);
748        }
749
750        id
751    }
752
753    /// Increments the live edge count for a given edge type.
754    pub(super) fn increment_edge_type_count(&self, type_id: u32) {
755        let mut counts = self.edge_type_live_counts.write();
756        if counts.len() <= type_id as usize {
757            counts.resize(type_id as usize + 1, 0);
758        }
759        counts[type_id as usize] += 1;
760    }
761
762    /// Decrements the live edge count for a given edge type.
763    pub(super) fn decrement_edge_type_count(&self, type_id: u32) {
764        let mut counts = self.edge_type_live_counts.write();
765        if type_id < counts.len() as u32 {
766            counts[type_id as usize] -= 1;
767        }
768    }
769}