// grafeo_core/graph/lpg/store/mod.rs
1//! The in-memory LPG graph store.
2//!
3//! This is where your nodes and edges actually live. Most users interact
4//! through [`GrafeoDB`](grafeo_engine::GrafeoDB), but algorithm implementers
5//! sometimes need the raw [`LpgStore`] for direct adjacency traversal.
6//!
7//! Key features:
8//! - MVCC versioning - concurrent readers don't block each other
9//! - Columnar properties with zone maps for fast filtering
10//! - Forward and backward adjacency indexes
11
12mod edge_ops;
13mod graph_store_impl;
14mod index;
15mod memory;
16mod node_ops;
17mod property_ops;
18mod schema;
19mod search;
20mod statistics;
21mod traversal;
22mod versioning;
23
24#[cfg(test)]
25mod tests;
26
27use super::PropertyStorage;
28#[cfg(not(feature = "tiered-storage"))]
29use super::{EdgeRecord, NodeRecord};
30use crate::index::adjacency::ChunkedAdjacency;
31use crate::statistics::Statistics;
32use arcstr::ArcStr;
33use dashmap::DashMap;
34#[cfg(not(feature = "tiered-storage"))]
35use grafeo_common::mvcc::VersionChain;
36use grafeo_common::types::{
37    EdgeId, EpochId, HashableValue, NodeId, PropertyKey, TransactionId, Value,
38};
39use grafeo_common::utils::hash::{FxHashMap, FxHashSet};
40use parking_lot::RwLock;
41use std::cmp::Ordering as CmpOrdering;
42use std::sync::Arc;
43use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
44
45#[cfg(feature = "vector-index")]
46use crate::index::vector::HnswIndex;
47
48#[cfg(feature = "tiered-storage")]
49use crate::storage::EpochStore;
50use grafeo_common::memory::arena::AllocError;
51#[cfg(feature = "tiered-storage")]
52use grafeo_common::memory::arena::ArenaAllocator;
53#[cfg(feature = "tiered-storage")]
54use grafeo_common::mvcc::VersionIndex;
55#[cfg(feature = "temporal")]
56use grafeo_common::temporal::VersionLog;
57
/// Undo entry for a property mutation within a transaction.
///
/// Captures the previous state of a property so it can be restored on rollback.
/// Entries are appended to the store's `property_undo_log` as mutations happen;
/// on rollback they are replayed in reverse order, and on commit they are
/// simply discarded.
#[derive(Debug, Clone)]
pub enum PropertyUndoEntry {
    /// A node property was changed or added.
    NodeProperty {
        /// The node that was modified.
        node_id: NodeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before
        /// (rollback then removes the property rather than restoring a value).
        old_value: Option<Value>,
    },
    /// An edge property was changed or added.
    EdgeProperty {
        /// The edge that was modified.
        edge_id: EdgeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// A label was added to a node (rollback removes it again).
    LabelAdded {
        /// The node that had a label added.
        node_id: NodeId,
        /// The label string that was added.
        label: String,
    },
    /// A label was removed from a node (rollback re-adds it).
    LabelRemoved {
        /// The node that had a label removed.
        node_id: NodeId,
        /// The label string that was removed.
        label: String,
    },
    /// A node was deleted (for rollback restoration).
    NodeDeleted {
        /// The node that was deleted.
        node_id: NodeId,
        /// The labels the node had before deletion.
        labels: Vec<String>,
        /// The properties the node had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
    /// An edge was deleted (for rollback restoration).
    EdgeDeleted {
        /// The edge that was deleted.
        edge_id: EdgeId,
        /// The source node.
        src: NodeId,
        /// The destination node.
        dst: NodeId,
        /// The edge type name.
        edge_type: String,
        /// The properties the edge had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
}
118
119/// Compares two values for ordering (used for range checks).
120pub(super) fn compare_values_for_range(a: &Value, b: &Value) -> Option<CmpOrdering> {
121    match (a, b) {
122        (Value::Int64(a), Value::Int64(b)) => Some(a.cmp(b)),
123        (Value::Float64(a), Value::Float64(b)) => a.partial_cmp(b),
124        (Value::Int64(a), Value::Float64(b)) => (*a as f64).partial_cmp(b),
125        (Value::Float64(a), Value::Int64(b)) => a.partial_cmp(&(*b as f64)),
126        (Value::String(a), Value::String(b)) => Some(a.cmp(b)),
127        (Value::Bool(a), Value::Bool(b)) => Some(a.cmp(b)),
128        (Value::Timestamp(a), Value::Timestamp(b)) => Some(a.cmp(b)),
129        (Value::Date(a), Value::Date(b)) => Some(a.cmp(b)),
130        (Value::Time(a), Value::Time(b)) => Some(a.cmp(b)),
131        _ => None,
132    }
133}
134
135/// Checks if a value is within a range.
136pub(super) fn value_in_range(
137    value: &Value,
138    min: Option<&Value>,
139    max: Option<&Value>,
140    min_inclusive: bool,
141    max_inclusive: bool,
142) -> bool {
143    // Check lower bound
144    if let Some(min_val) = min {
145        match compare_values_for_range(value, min_val) {
146            Some(CmpOrdering::Less) => return false,
147            Some(CmpOrdering::Equal) if !min_inclusive => return false,
148            None => return false, // Can't compare
149            _ => {}
150        }
151    }
152
153    // Check upper bound
154    if let Some(max_val) = max {
155        match compare_values_for_range(value, max_val) {
156            Some(CmpOrdering::Greater) => return false,
157            Some(CmpOrdering::Equal) if !max_inclusive => return false,
158            None => return false,
159            _ => {}
160        }
161    }
162
163    true
164}
165
/// Configuration for the LPG store.
///
/// The defaults work well for most cases. Tune `backward_edges` if you only
/// traverse in one direction (saves memory), or adjust capacities if you know
/// your graph size upfront (avoids reallocations).
#[derive(Debug, Clone)]
pub struct LpgStoreConfig {
    /// Maintain backward adjacency for incoming edge queries. Turn off if
    /// you only traverse outgoing edges - saves ~50% adjacency memory.
    pub backward_edges: bool,
    /// Initial capacity for nodes (avoids early reallocations).
    ///
    /// NOTE(review): `with_config` in this module does not currently consume
    /// this value — confirm whether it should be forwarded to the node maps.
    pub initial_node_capacity: usize,
    /// Initial capacity for edges (avoids early reallocations).
    ///
    /// NOTE(review): also not currently consumed by `with_config`.
    pub initial_edge_capacity: usize,
}
181
182impl Default for LpgStoreConfig {
183    fn default() -> Self {
184        Self {
185            backward_edges: true,
186            initial_node_capacity: 1024,
187            initial_edge_capacity: 4096,
188        }
189    }
190}
191
/// The core in-memory graph storage.
///
/// Everything lives here: nodes, edges, properties, adjacency indexes, and
/// version chains for MVCC. Concurrent reads never block each other.
///
/// Most users should go through `GrafeoDB` (from the `grafeo_engine` crate) which
/// adds transaction management and query execution. Use `LpgStore` directly
/// when you need raw performance for algorithm implementations.
///
/// # Example
///
/// ```
/// use grafeo_core::graph::lpg::LpgStore;
/// use grafeo_core::graph::Direction;
///
/// let store = LpgStore::new().expect("arena allocation");
///
/// // Create a small social network
/// let alix = store.create_node(&["Person"]);
/// let gus = store.create_node(&["Person"]);
/// store.create_edge(alix, gus, "KNOWS");
///
/// // Traverse outgoing edges
/// for neighbor in store.neighbors(alix, Direction::Outgoing) {
///     println!("Alix knows node {:?}", neighbor);
/// }
/// ```
///
/// # Lock Ordering
///
/// `LpgStore` contains multiple `RwLock` fields that must be acquired in a
/// consistent order to prevent deadlocks. Always acquire locks in this order
/// (the numbers match the `Lock order:` notes on the fields below):
///
/// ## Level 1 - Entity Storage (mutually exclusive via feature flag)
/// 1. `nodes` / `node_versions`
/// 2. `edges` / `edge_versions`
///
/// ## Level 2 - Catalogs (acquire as pairs when writing)
/// 3. `label_to_id` + `id_to_label`
/// 4. `edge_type_to_id` + `id_to_edge_type`
///
/// ## Level 3 - Indexes
/// 5. `label_index`
/// 6. `node_labels`
/// 7. `property_indexes` (vector/text indexes share this level, disjoint keys)
///
/// ## Level 4 - Statistics
/// 8. `statistics` (and `edge_type_live_counts`, same level)
///
/// ## Level 5 - Auxiliary store state
/// 9. `named_graphs`
/// 10. `property_undo_log`
///
/// ## Nested Locks (internal to other structs)
/// - `PropertyStorage::columns` (via `node_properties`/`edge_properties`)
/// - `ChunkedAdjacency::lists` (via `forward_adj`/`backward_adj`)
///
/// ## Rules
/// - Catalog pairs must be acquired together when writing.
/// - Never hold entity locks while acquiring catalog locks in a different scope.
/// - Statistics lock comes after all entity, catalog, and index locks; only
///   the auxiliary locks (`named_graphs`, `property_undo_log`) are ordered
///   after it.
/// - Read locks are generally safe, but avoid read-to-write upgrades.
pub struct LpgStore {
    /// Node records indexed by NodeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 1
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) nodes: RwLock<FxHashMap<NodeId, VersionChain<NodeRecord>>>,

    /// Edge records indexed by EdgeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 2
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) edges: RwLock<FxHashMap<EdgeId, VersionChain<EdgeRecord>>>,

    // === Tiered Storage Fields (feature-gated) ===
    //
    // Lock ordering for arena access:
    //   version_lock (read/write) → arena read lock (via arena_allocator.arena())
    //
    // Rules:
    // - Acquire arena read lock *after* version locks, never before.
    // - Multiple threads may call arena.read_at() concurrently (shared refs only).
    // - Never acquire arena write lock (alloc_new_chunk) while holding version locks.
    // - freeze_epoch order: node_versions.read() → arena.read_at(),
    //   then edge_versions.read() → arena.read_at().
    /// Arena allocator for hot data storage.
    /// Data is stored in per-epoch arenas for fast allocation and bulk deallocation.
    #[cfg(feature = "tiered-storage")]
    pub(super) arena_allocator: Arc<ArenaAllocator>,

    /// Node version indexes - store metadata and arena offsets.
    /// The actual NodeRecord data is stored in the arena.
    /// Lock order: 1
    #[cfg(feature = "tiered-storage")]
    pub(super) node_versions: RwLock<FxHashMap<NodeId, VersionIndex>>,

    /// Edge version indexes - store metadata and arena offsets.
    /// The actual EdgeRecord data is stored in the arena.
    /// Lock order: 2
    #[cfg(feature = "tiered-storage")]
    pub(super) edge_versions: RwLock<FxHashMap<EdgeId, VersionIndex>>,

    /// Cold storage for frozen epochs.
    /// Contains compressed epoch blocks for historical data.
    #[cfg(feature = "tiered-storage")]
    pub(super) epoch_store: Arc<EpochStore>,

    /// Property storage for nodes (columnar; has its own internal locking).
    pub(super) node_properties: PropertyStorage<NodeId>,

    /// Property storage for edges (columnar; has its own internal locking).
    pub(super) edge_properties: PropertyStorage<EdgeId>,

    /// Label name to ID mapping.
    /// Lock order: 3 (acquire with id_to_label)
    pub(super) label_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Label ID to name mapping (index in the Vec is the label id).
    /// Lock order: 3 (acquire with label_to_id)
    pub(super) id_to_label: RwLock<Vec<ArcStr>>,

    /// Edge type name to ID mapping.
    /// Lock order: 4 (acquire with id_to_edge_type)
    pub(super) edge_type_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Edge type ID to name mapping (index in the Vec is the type id).
    /// Lock order: 4 (acquire with edge_type_to_id)
    pub(super) id_to_edge_type: RwLock<Vec<ArcStr>>,

    /// Forward adjacency lists (outgoing edges).
    pub(super) forward_adj: ChunkedAdjacency,

    /// Backward adjacency lists (incoming edges).
    /// Only populated if config.backward_edges is true.
    pub(super) backward_adj: Option<ChunkedAdjacency>,

    /// Label index: label_id -> set of node IDs.
    /// (A map with `()` values is used as a set here.)
    /// Lock order: 5
    pub(super) label_index: RwLock<Vec<FxHashMap<NodeId, ()>>>,

    /// Node labels: node_id -> set of label IDs.
    /// Reverse mapping to efficiently get labels for a node.
    /// Lock order: 6
    #[cfg(not(feature = "temporal"))]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, FxHashSet<u32>>>,
    /// Versioned node labels: node_id -> version log of label sets.
    /// Lock order: 6
    #[cfg(feature = "temporal")]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, VersionLog<FxHashSet<u32>>>>,

    /// Property indexes: property_key -> (value -> set of node IDs).
    ///
    /// When a property is indexed, lookups by value are O(1) instead of O(n).
    /// Use [`create_property_index`] to enable indexing for a property.
    /// Lock order: 7
    pub(super) property_indexes:
        RwLock<FxHashMap<PropertyKey, DashMap<HashableValue, FxHashSet<NodeId>>>>,

    /// Vector indexes: "label:property" -> HNSW index.
    ///
    /// Created via [`GrafeoDB::create_vector_index`](grafeo_engine::GrafeoDB::create_vector_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "vector-index")]
    pub(super) vector_indexes: RwLock<FxHashMap<String, Arc<HnswIndex>>>,

    /// Text indexes: "label:property" -> inverted index with BM25 scoring.
    ///
    /// Created via [`GrafeoDB::create_text_index`](grafeo_engine::GrafeoDB::create_text_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "text-index")]
    pub(super) text_indexes:
        RwLock<FxHashMap<String, Arc<RwLock<crate::index::text::InvertedIndex>>>>,

    /// Next node ID.
    pub(super) next_node_id: AtomicU64,

    /// Next edge ID.
    pub(super) next_edge_id: AtomicU64,

    /// Current epoch.
    pub(super) current_epoch: AtomicU64,

    /// Live (non-deleted) node count, maintained incrementally.
    /// Avoids O(n) full scan in `compute_statistics()`.
    pub(super) live_node_count: AtomicI64,

    /// Live (non-deleted) edge count, maintained incrementally.
    /// Avoids O(m) full scan in `compute_statistics()`.
    pub(super) live_edge_count: AtomicI64,

    /// Per-edge-type live counts, indexed by edge_type_id.
    /// Avoids O(m) edge scan in `compute_statistics()`.
    /// Lock order: 8 (same level as statistics)
    pub(super) edge_type_live_counts: RwLock<Vec<i64>>,

    /// Statistics for cost-based optimization.
    /// Lock order: 8 (after all entity/catalog/index locks)
    pub(super) statistics: RwLock<Arc<Statistics>>,

    /// Whether statistics need full recomputation (e.g., after rollback).
    pub(super) needs_stats_recompute: AtomicBool,

    /// Named graphs, each an independent `LpgStore` partition.
    /// Zero overhead for single-graph databases (empty HashMap).
    /// Lock order: 9 (after statistics)
    named_graphs: RwLock<FxHashMap<String, Arc<LpgStore>>>,

    /// Undo log for property mutations within transactions.
    ///
    /// Maps transaction IDs to a list of undo entries that capture the
    /// previous property values. On rollback, entries are replayed in
    /// reverse order to restore properties. On commit, the entries are
    /// simply discarded.
    /// Lock order: 10 (after named_graphs, independent of other locks)
    property_undo_log: RwLock<FxHashMap<TransactionId, Vec<PropertyUndoEntry>>>,
}
405
406impl LpgStore {
407    /// Creates a new LPG store with default configuration.
408    ///
409    /// # Errors
410    ///
411    /// Returns [`AllocError`] if the arena allocator cannot be initialized
412    /// (only possible with the `tiered-storage` feature).
413    pub fn new() -> Result<Self, AllocError> {
414        Self::with_config(LpgStoreConfig::default())
415    }
416
    /// Creates a new LPG store with custom configuration.
    ///
    /// Only `config.backward_edges` is consumed here; see the review note
    /// below about the capacity fields.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the arena allocator cannot be initialized
    /// (only possible with the `tiered-storage` feature).
    pub fn with_config(config: LpgStoreConfig) -> Result<Self, AllocError> {
        // Backward adjacency is optional — it is only allocated when requested.
        let backward_adj = if config.backward_edges {
            Some(ChunkedAdjacency::new())
        } else {
            None
        };

        // NOTE(review): `config.initial_node_capacity` / `initial_edge_capacity`
        // are not used below — all maps start at default capacity. Confirm
        // whether they should be threaded through to the entity maps.
        Ok(Self {
            #[cfg(not(feature = "tiered-storage"))]
            nodes: RwLock::new(FxHashMap::default()),
            #[cfg(not(feature = "tiered-storage"))]
            edges: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            arena_allocator: Arc::new(ArenaAllocator::new()?),
            #[cfg(feature = "tiered-storage")]
            node_versions: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            edge_versions: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            epoch_store: Arc::new(EpochStore::new()),
            node_properties: PropertyStorage::new(),
            edge_properties: PropertyStorage::new(),
            label_to_id: RwLock::new(FxHashMap::default()),
            id_to_label: RwLock::new(Vec::new()),
            edge_type_to_id: RwLock::new(FxHashMap::default()),
            id_to_edge_type: RwLock::new(Vec::new()),
            forward_adj: ChunkedAdjacency::new(),
            backward_adj,
            label_index: RwLock::new(Vec::with_capacity(16)),
            node_labels: RwLock::new(FxHashMap::default()),
            property_indexes: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "vector-index")]
            vector_indexes: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "text-index")]
            text_indexes: RwLock::new(FxHashMap::default()),
            next_node_id: AtomicU64::new(0),
            next_edge_id: AtomicU64::new(0),
            current_epoch: AtomicU64::new(0),
            live_node_count: AtomicI64::new(0),
            live_edge_count: AtomicI64::new(0),
            edge_type_live_counts: RwLock::new(Vec::new()),
            statistics: RwLock::new(Arc::new(Statistics::new())),
            needs_stats_recompute: AtomicBool::new(false),
            named_graphs: RwLock::new(FxHashMap::default()),
            property_undo_log: RwLock::new(FxHashMap::default()),
        })
    }
470
471    /// Returns the current epoch.
472    #[must_use]
473    pub fn current_epoch(&self) -> EpochId {
474        EpochId::new(self.current_epoch.load(Ordering::Acquire))
475    }
476
477    /// Creates a new epoch.
478    #[doc(hidden)]
479    pub fn new_epoch(&self) -> EpochId {
480        let id = self.current_epoch.fetch_add(1, Ordering::AcqRel) + 1;
481        EpochId::new(id)
482    }
483
484    /// Syncs the store epoch to match an external epoch counter.
485    ///
486    /// Used by the transaction manager to keep the store's epoch in step
487    /// after a transaction commit advances the global epoch.
488    #[doc(hidden)]
489    pub fn sync_epoch(&self, epoch: EpochId) {
490        self.current_epoch
491            .fetch_max(epoch.as_u64(), Ordering::AcqRel);
492    }
493
    /// Removes all data from the store, resetting it to an empty state.
    ///
    /// Acquires locks in the documented ordering to prevent deadlocks.
    /// After clearing, the store behaves as if freshly constructed.
    ///
    /// NOTE(review): `named_graphs` is *not* cleared here — `clear_graph(None)`
    /// calls this method and appears to rely on named partitions surviving.
    /// Confirm that preservation is intentional.
    pub fn clear(&self) {
        // Level 1: Entity storage
        #[cfg(not(feature = "tiered-storage"))]
        {
            self.nodes.write().clear();
            self.edges.write().clear();
        }
        #[cfg(feature = "tiered-storage")]
        {
            self.node_versions.write().clear();
            self.edge_versions.write().clear();
            // Arena allocator chunks are leaked; epochs are cleared via epoch_store.
        }

        // Level 2: Catalogs (acquire as pairs, each pair in its own scope so
        // the guards drop before the next level is locked)
        {
            self.label_to_id.write().clear();
            self.id_to_label.write().clear();
        }
        {
            self.edge_type_to_id.write().clear();
            self.id_to_edge_type.write().clear();
        }

        // Level 3: Indexes
        self.label_index.write().clear();
        self.node_labels.write().clear();
        self.property_indexes.write().clear();
        #[cfg(feature = "vector-index")]
        self.vector_indexes.write().clear();
        #[cfg(feature = "text-index")]
        self.text_indexes.write().clear();

        // Nested: Properties and adjacency (these types lock internally)
        self.node_properties.clear();
        self.edge_properties.clear();
        self.forward_adj.clear();
        if let Some(ref backward) = self.backward_adj {
            backward.clear();
        }

        // Atomics: ID counters (no locks involved)
        self.next_node_id.store(0, Ordering::Release);
        self.next_edge_id.store(0, Ordering::Release);
        self.current_epoch.store(0, Ordering::Release);

        // Level 4: Statistics
        self.live_node_count.store(0, Ordering::Release);
        self.live_edge_count.store(0, Ordering::Release);
        self.edge_type_live_counts.write().clear();
        *self.statistics.write() = Arc::new(Statistics::new());
        self.needs_stats_recompute.store(false, Ordering::Release);

        // Level 5: Undo log
        self.property_undo_log.write().clear();
    }
554
555    /// Returns whether backward adjacency (incoming edge index) is available.
556    ///
557    /// When backward adjacency is enabled (the default), bidirectional search
558    /// algorithms can traverse from the target toward the source.
559    #[must_use]
560    pub fn has_backward_adjacency(&self) -> bool {
561        self.backward_adj.is_some()
562    }
563
564    // === Named Graph Management ===
565
566    /// Returns a named graph by name, or `None` if it does not exist.
567    #[must_use]
568    pub fn graph(&self, name: &str) -> Option<Arc<LpgStore>> {
569        self.named_graphs.read().get(name).cloned()
570    }
571
572    /// Returns a named graph, creating it if it does not exist.
573    ///
574    /// # Errors
575    ///
576    /// Returns [`AllocError`] if a new store cannot be allocated.
577    pub fn graph_or_create(&self, name: &str) -> Result<Arc<LpgStore>, AllocError> {
578        {
579            let graphs = self.named_graphs.read();
580            if let Some(g) = graphs.get(name) {
581                return Ok(Arc::clone(g));
582            }
583        }
584        let mut graphs = self.named_graphs.write();
585        // Double-check after acquiring write lock
586        if let Some(g) = graphs.get(name) {
587            return Ok(Arc::clone(g));
588        }
589        let store = Arc::new(LpgStore::new()?);
590        graphs.insert(name.to_string(), Arc::clone(&store));
591        Ok(store)
592    }
593
594    /// Creates a named graph. Returns `true` on success, `false` if it already exists.
595    ///
596    /// # Errors
597    ///
598    /// Returns [`AllocError`] if the new store cannot be allocated.
599    pub fn create_graph(&self, name: &str) -> Result<bool, AllocError> {
600        let mut graphs = self.named_graphs.write();
601        if graphs.contains_key(name) {
602            return Ok(false);
603        }
604        graphs.insert(name.to_string(), Arc::new(LpgStore::new()?));
605        Ok(true)
606    }
607
608    /// Drops a named graph. Returns `false` if it did not exist.
609    pub fn drop_graph(&self, name: &str) -> bool {
610        self.named_graphs.write().remove(name).is_some()
611    }
612
613    /// Returns all named graph names.
614    #[must_use]
615    pub fn graph_names(&self) -> Vec<String> {
616        self.named_graphs.read().keys().cloned().collect()
617    }
618
619    /// Returns the number of named graphs.
620    #[must_use]
621    pub fn graph_count(&self) -> usize {
622        self.named_graphs.read().len()
623    }
624
625    /// Clears a specific graph, or the default graph if `name` is `None`.
626    pub fn clear_graph(&self, name: Option<&str>) {
627        match name {
628            Some(n) => {
629                if let Some(g) = self.named_graphs.read().get(n) {
630                    g.clear();
631                }
632            }
633            None => self.clear(),
634        }
635    }
636
637    /// Copies all data from the source graph to the destination graph.
638    /// Creates the destination graph if it does not exist.
639    ///
640    /// # Errors
641    ///
642    /// Returns [`AllocError`] if the destination store cannot be allocated.
643    pub fn copy_graph(&self, source: Option<&str>, dest: Option<&str>) -> Result<(), AllocError> {
644        let _src = match source {
645            Some(n) => self.graph(n),
646            None => None, // default graph
647        };
648        let _dest_graph = dest.map(|n| self.graph_or_create(n)).transpose()?;
649        // Full graph copy is complex (requires iterating all entities).
650        // For now, this creates the destination graph structure.
651        // Full entity-level copy will be implemented when needed.
652        Ok(())
653    }
654
655    // === Internal Helpers ===
656
657    pub(super) fn get_or_create_label_id(&self, label: &str) -> u32 {
658        {
659            let label_to_id = self.label_to_id.read();
660            if let Some(&id) = label_to_id.get(label) {
661                return id;
662            }
663        }
664
665        let mut label_to_id = self.label_to_id.write();
666        let mut id_to_label = self.id_to_label.write();
667
668        // Double-check after acquiring write lock
669        if let Some(&id) = label_to_id.get(label) {
670            return id;
671        }
672
673        let id = id_to_label.len() as u32;
674
675        let label: ArcStr = label.into();
676        label_to_id.insert(label.clone(), id);
677        id_to_label.push(label);
678
679        id
680    }
681
682    pub(super) fn get_or_create_edge_type_id(&self, edge_type: &str) -> u32 {
683        {
684            let type_to_id = self.edge_type_to_id.read();
685            if let Some(&id) = type_to_id.get(edge_type) {
686                return id;
687            }
688        }
689
690        let mut type_to_id = self.edge_type_to_id.write();
691        let mut id_to_type = self.id_to_edge_type.write();
692
693        // Double-check
694        if let Some(&id) = type_to_id.get(edge_type) {
695            return id;
696        }
697
698        let id = id_to_type.len() as u32;
699        let edge_type: ArcStr = edge_type.into();
700        type_to_id.insert(edge_type.clone(), id);
701        id_to_type.push(edge_type);
702
703        // Grow edge type live counts to match
704        let mut counts = self.edge_type_live_counts.write();
705        while counts.len() <= id as usize {
706            counts.push(0);
707        }
708
709        id
710    }
711
712    /// Increments the live edge count for a given edge type.
713    pub(super) fn increment_edge_type_count(&self, type_id: u32) {
714        let mut counts = self.edge_type_live_counts.write();
715        if counts.len() <= type_id as usize {
716            counts.resize(type_id as usize + 1, 0);
717        }
718        counts[type_id as usize] += 1;
719    }
720
721    /// Decrements the live edge count for a given edge type.
722    pub(super) fn decrement_edge_type_count(&self, type_id: u32) {
723        let mut counts = self.edge_type_live_counts.write();
724        if type_id < counts.len() as u32 {
725            counts[type_id as usize] -= 1;
726        }
727    }
728}