// grafeo_core/graph/lpg/store/mod.rs
//! The in-memory LPG graph store.
//!
//! This is where your nodes and edges actually live. Most users interact
//! through [`GrafeoDB`](grafeo_engine::GrafeoDB), but algorithm implementers
//! sometimes need the raw [`LpgStore`] for direct adjacency traversal.
//!
//! Key features:
//! - MVCC versioning - concurrent readers don't block each other
//! - Columnar properties with zone maps for fast filtering
//! - Forward and backward adjacency indexes

// Submodules implementing the store's functionality, split by concern.
mod edge_ops;
mod graph_store_impl;
mod index;
mod memory;
mod node_ops;
mod property_ops;
mod schema;
mod search;
mod statistics;
mod traversal;
mod versioning;

#[cfg(test)]
mod tests;

use super::PropertyStorage;
// In-memory record types are only used when tiered storage is off;
// the tiered path stores records in arenas instead (see below).
#[cfg(not(feature = "tiered-storage"))]
use super::{EdgeRecord, NodeRecord};
use crate::index::adjacency::ChunkedAdjacency;
use crate::statistics::Statistics;
use arcstr::ArcStr;
use dashmap::DashMap;
#[cfg(not(feature = "tiered-storage"))]
use grafeo_common::mvcc::VersionChain;
use grafeo_common::types::{
    EdgeId, EpochId, HashableValue, NodeId, PropertyKey, TransactionId, Value,
};
use grafeo_common::utils::hash::{FxHashMap, FxHashSet};
use parking_lot::RwLock;
// Aliased to avoid clashing with `std::sync::atomic::Ordering` below.
use std::cmp::Ordering as CmpOrdering;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};

#[cfg(feature = "vector-index")]
use crate::index::vector::HnswIndex;

#[cfg(feature = "tiered-storage")]
use crate::storage::EpochStore;
use grafeo_common::memory::arena::AllocError;
#[cfg(feature = "tiered-storage")]
use grafeo_common::memory::arena::ArenaAllocator;
#[cfg(feature = "tiered-storage")]
use grafeo_common::mvcc::VersionIndex;
/// Undo entry for a property mutation within a transaction.
///
/// Captures the previous state of a property so it can be restored on rollback.
/// Entries are accumulated per transaction in `LpgStore::property_undo_log`
/// and replayed in reverse order on rollback; on commit they are discarded.
#[derive(Debug, Clone)]
pub enum PropertyUndoEntry {
    /// A node property was changed or added.
    NodeProperty {
        /// The node that was modified.
        node_id: NodeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// An edge property was changed or added.
    EdgeProperty {
        /// The edge that was modified.
        edge_id: EdgeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// A label was added to a node.
    LabelAdded {
        /// The node that had a label added.
        node_id: NodeId,
        /// The label string that was added.
        label: String,
    },
    /// A label was removed from a node.
    LabelRemoved {
        /// The node that had a label removed.
        node_id: NodeId,
        /// The label string that was removed.
        label: String,
    },
    /// A node was deleted (for rollback restoration).
    NodeDeleted {
        /// The node that was deleted.
        node_id: NodeId,
        /// The labels the node had before deletion.
        labels: Vec<String>,
        /// The properties the node had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
    /// An edge was deleted (for rollback restoration).
    EdgeDeleted {
        /// The edge that was deleted.
        edge_id: EdgeId,
        /// The source node.
        src: NodeId,
        /// The destination node.
        dst: NodeId,
        /// The edge type name.
        edge_type: String,
        /// The properties the edge had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
}
116
117/// Compares two values for ordering (used for range checks).
118pub(super) fn compare_values_for_range(a: &Value, b: &Value) -> Option<CmpOrdering> {
119    match (a, b) {
120        (Value::Int64(a), Value::Int64(b)) => Some(a.cmp(b)),
121        (Value::Float64(a), Value::Float64(b)) => a.partial_cmp(b),
122        (Value::Int64(a), Value::Float64(b)) => (*a as f64).partial_cmp(b),
123        (Value::Float64(a), Value::Int64(b)) => a.partial_cmp(&(*b as f64)),
124        (Value::String(a), Value::String(b)) => Some(a.cmp(b)),
125        (Value::Bool(a), Value::Bool(b)) => Some(a.cmp(b)),
126        (Value::Timestamp(a), Value::Timestamp(b)) => Some(a.cmp(b)),
127        (Value::Date(a), Value::Date(b)) => Some(a.cmp(b)),
128        (Value::Time(a), Value::Time(b)) => Some(a.cmp(b)),
129        _ => None,
130    }
131}
132
133/// Checks if a value is within a range.
134pub(super) fn value_in_range(
135    value: &Value,
136    min: Option<&Value>,
137    max: Option<&Value>,
138    min_inclusive: bool,
139    max_inclusive: bool,
140) -> bool {
141    // Check lower bound
142    if let Some(min_val) = min {
143        match compare_values_for_range(value, min_val) {
144            Some(CmpOrdering::Less) => return false,
145            Some(CmpOrdering::Equal) if !min_inclusive => return false,
146            None => return false, // Can't compare
147            _ => {}
148        }
149    }
150
151    // Check upper bound
152    if let Some(max_val) = max {
153        match compare_values_for_range(value, max_val) {
154            Some(CmpOrdering::Greater) => return false,
155            Some(CmpOrdering::Equal) if !max_inclusive => return false,
156            None => return false,
157            _ => {}
158        }
159    }
160
161    true
162}
163
/// Configuration for the LPG store.
///
/// The defaults work well for most cases. Tune `backward_edges` if you only
/// traverse in one direction (saves memory), or adjust capacities if you know
/// your graph size upfront (avoids reallocations).
#[derive(Debug, Clone)]
pub struct LpgStoreConfig {
    /// Maintain backward adjacency for incoming edge queries. Turn off if
    /// you only traverse outgoing edges - saves ~50% adjacency memory.
    /// When `false`, `LpgStore::backward_adj` is `None`.
    pub backward_edges: bool,
    /// Initial capacity for nodes (avoids early reallocations).
    pub initial_node_capacity: usize,
    /// Initial capacity for edges (avoids early reallocations).
    pub initial_edge_capacity: usize,
}
179
180impl Default for LpgStoreConfig {
181    fn default() -> Self {
182        Self {
183            backward_edges: true,
184            initial_node_capacity: 1024,
185            initial_edge_capacity: 4096,
186        }
187    }
188}
189
/// The core in-memory graph storage.
///
/// Everything lives here: nodes, edges, properties, adjacency indexes, and
/// version chains for MVCC. Concurrent reads never block each other.
///
/// Most users should go through `GrafeoDB` (from the `grafeo_engine` crate) which
/// adds transaction management and query execution. Use `LpgStore` directly
/// when you need raw performance for algorithm implementations.
///
/// # Example
///
/// ```
/// use grafeo_core::graph::lpg::LpgStore;
/// use grafeo_core::graph::Direction;
///
/// let store = LpgStore::new().expect("arena allocation");
///
/// // Create a small social network
/// let alix = store.create_node(&["Person"]);
/// let gus = store.create_node(&["Person"]);
/// store.create_edge(alix, gus, "KNOWS");
///
/// // Traverse outgoing edges
/// for neighbor in store.neighbors(alix, Direction::Outgoing) {
///     println!("Alix knows node {:?}", neighbor);
/// }
/// ```
///
/// # Lock Ordering
///
/// `LpgStore` contains multiple `RwLock` fields that must be acquired in a
/// consistent order to prevent deadlocks. Always acquire locks in this order:
///
/// ## Level 1 - Entity Storage (mutually exclusive via feature flag)
/// 1. `nodes` / `node_versions`
/// 2. `edges` / `edge_versions`
///
/// ## Level 2 - Catalogs (acquire as pairs when writing)
/// 3. `label_to_id` + `id_to_label`
/// 4. `edge_type_to_id` + `id_to_edge_type`
///
/// ## Level 3 - Indexes
/// 5. `label_index`
/// 6. `node_labels`
/// 7. `property_indexes`
///
/// ## Level 4 - Statistics
/// 8. `statistics`
///
/// ## Level 5 - Nested Locks (internal to other structs)
/// 9. `PropertyStorage::columns` (via `node_properties`/`edge_properties`)
/// 10. `ChunkedAdjacency::lists` (via `forward_adj`/`backward_adj`)
///
/// ## Rules
/// - Catalog pairs must be acquired together when writing.
/// - Never hold entity locks while acquiring catalog locks in a different scope.
/// - Statistics lock is always last.
/// - Read locks are generally safe, but avoid read-to-write upgrades.
pub struct LpgStore {
    /// Node records indexed by NodeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 1
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) nodes: RwLock<FxHashMap<NodeId, VersionChain<NodeRecord>>>,

    /// Edge records indexed by EdgeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 2
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) edges: RwLock<FxHashMap<EdgeId, VersionChain<EdgeRecord>>>,

    // === Tiered Storage Fields (feature-gated) ===
    //
    // Lock ordering for arena access:
    //   version_lock (read/write) → arena read lock (via arena_allocator.arena())
    //
    // Rules:
    // - Acquire arena read lock *after* version locks, never before.
    // - Multiple threads may call arena.read_at() concurrently (shared refs only).
    // - Never acquire arena write lock (alloc_new_chunk) while holding version locks.
    // - freeze_epoch order: node_versions.read() → arena.read_at(),
    //   then edge_versions.read() → arena.read_at().
    /// Arena allocator for hot data storage.
    /// Data is stored in per-epoch arenas for fast allocation and bulk deallocation.
    #[cfg(feature = "tiered-storage")]
    pub(super) arena_allocator: Arc<ArenaAllocator>,

    /// Node version indexes - store metadata and arena offsets.
    /// The actual NodeRecord data is stored in the arena.
    /// Lock order: 1
    #[cfg(feature = "tiered-storage")]
    pub(super) node_versions: RwLock<FxHashMap<NodeId, VersionIndex>>,

    /// Edge version indexes - store metadata and arena offsets.
    /// The actual EdgeRecord data is stored in the arena.
    /// Lock order: 2
    #[cfg(feature = "tiered-storage")]
    pub(super) edge_versions: RwLock<FxHashMap<EdgeId, VersionIndex>>,

    /// Cold storage for frozen epochs.
    /// Contains compressed epoch blocks for historical data.
    #[cfg(feature = "tiered-storage")]
    pub(super) epoch_store: Arc<EpochStore>,

    /// Property storage for nodes.
    /// Internally locked; see "Level 5 - Nested Locks" above (order 9).
    pub(super) node_properties: PropertyStorage<NodeId>,

    /// Property storage for edges.
    /// Internally locked; see "Level 5 - Nested Locks" above (order 9).
    pub(super) edge_properties: PropertyStorage<EdgeId>,

    /// Label name to ID mapping.
    /// Lock order: 3 (acquire with id_to_label)
    pub(super) label_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Label ID to name mapping. Index into the Vec is the label ID.
    /// Lock order: 3 (acquire with label_to_id)
    pub(super) id_to_label: RwLock<Vec<ArcStr>>,

    /// Edge type name to ID mapping.
    /// Lock order: 4 (acquire with id_to_edge_type)
    pub(super) edge_type_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Edge type ID to name mapping. Index into the Vec is the type ID.
    /// Lock order: 4 (acquire with edge_type_to_id)
    pub(super) id_to_edge_type: RwLock<Vec<ArcStr>>,

    /// Forward adjacency lists (outgoing edges).
    /// Internally locked; see "Level 5 - Nested Locks" above (order 10).
    pub(super) forward_adj: ChunkedAdjacency,

    /// Backward adjacency lists (incoming edges).
    /// Only populated if config.backward_edges is true.
    pub(super) backward_adj: Option<ChunkedAdjacency>,

    /// Label index: label_id -> set of node IDs.
    /// Lock order: 5
    pub(super) label_index: RwLock<Vec<FxHashMap<NodeId, ()>>>,

    /// Node labels: node_id -> set of label IDs.
    /// Reverse mapping to efficiently get labels for a node.
    /// Lock order: 6
    pub(super) node_labels: RwLock<FxHashMap<NodeId, FxHashSet<u32>>>,

    /// Property indexes: property_key -> (value -> set of node IDs).
    ///
    /// When a property is indexed, lookups by value are O(1) instead of O(n).
    /// Use [`create_property_index`] to enable indexing for a property.
    /// Lock order: 7
    pub(super) property_indexes:
        RwLock<FxHashMap<PropertyKey, DashMap<HashableValue, FxHashSet<NodeId>>>>,

    /// Vector indexes: "label:property" -> HNSW index.
    ///
    /// Created via [`GrafeoDB::create_vector_index`](grafeo_engine::GrafeoDB::create_vector_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "vector-index")]
    pub(super) vector_indexes: RwLock<FxHashMap<String, Arc<HnswIndex>>>,

    /// Text indexes: "label:property" -> inverted index with BM25 scoring.
    ///
    /// Created via [`GrafeoDB::create_text_index`](grafeo_engine::GrafeoDB::create_text_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "text-index")]
    pub(super) text_indexes:
        RwLock<FxHashMap<String, Arc<RwLock<crate::index::text::InvertedIndex>>>>,

    /// Next node ID (monotonically increasing, never reused except after clear()).
    pub(super) next_node_id: AtomicU64,

    /// Next edge ID (monotonically increasing, never reused except after clear()).
    pub(super) next_edge_id: AtomicU64,

    /// Current epoch.
    pub(super) current_epoch: AtomicU64,

    /// Live (non-deleted) node count, maintained incrementally.
    /// Avoids O(n) full scan in `compute_statistics()`.
    /// Signed so that transient decrement-before-increment interleavings
    /// cannot wrap around. NOTE(review): assumption from the i64 choice —
    /// confirm against the maintaining call sites.
    pub(super) live_node_count: AtomicI64,

    /// Live (non-deleted) edge count, maintained incrementally.
    /// Avoids O(m) full scan in `compute_statistics()`.
    pub(super) live_edge_count: AtomicI64,

    /// Per-edge-type live counts, indexed by edge_type_id.
    /// Avoids O(m) edge scan in `compute_statistics()`.
    /// Lock order: 8 (same level as statistics)
    pub(super) edge_type_live_counts: RwLock<Vec<i64>>,

    /// Statistics for cost-based optimization.
    /// Lock order: 8 (always last)
    pub(super) statistics: RwLock<Arc<Statistics>>,

    /// Whether statistics need full recomputation (e.g., after rollback).
    pub(super) needs_stats_recompute: AtomicBool,

    /// Named graphs, each an independent `LpgStore` partition.
    /// Zero overhead for single-graph databases (empty HashMap).
    /// Lock order: 9 (after statistics)
    named_graphs: RwLock<FxHashMap<String, Arc<LpgStore>>>,

    /// Undo log for property mutations within transactions.
    ///
    /// Maps transaction IDs to a list of undo entries that capture the
    /// previous property values. On rollback, entries are replayed in
    /// reverse order to restore properties. On commit, the entries are
    /// simply discarded.
    /// Lock order: 10 (after named_graphs, independent of other locks)
    property_undo_log: RwLock<FxHashMap<TransactionId, Vec<PropertyUndoEntry>>>,
}
398
impl LpgStore {
    /// Creates a new LPG store with default configuration.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the arena allocator cannot be initialized
    /// (only possible with the `tiered-storage` feature).
    pub fn new() -> Result<Self, AllocError> {
        Self::with_config(LpgStoreConfig::default())
    }

    /// Creates a new LPG store with custom configuration.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the arena allocator cannot be initialized
    /// (only possible with the `tiered-storage` feature).
    pub fn with_config(config: LpgStoreConfig) -> Result<Self, AllocError> {
        // Backward adjacency is optional; skipping it halves adjacency memory
        // at the cost of incoming-edge queries (see LpgStoreConfig docs).
        let backward_adj = if config.backward_edges {
            Some(ChunkedAdjacency::new())
        } else {
            None
        };

        Ok(Self {
            #[cfg(not(feature = "tiered-storage"))]
            nodes: RwLock::new(FxHashMap::default()),
            #[cfg(not(feature = "tiered-storage"))]
            edges: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            arena_allocator: Arc::new(ArenaAllocator::new()?),
            #[cfg(feature = "tiered-storage")]
            node_versions: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            edge_versions: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "tiered-storage")]
            epoch_store: Arc::new(EpochStore::new()),
            node_properties: PropertyStorage::new(),
            edge_properties: PropertyStorage::new(),
            label_to_id: RwLock::new(FxHashMap::default()),
            id_to_label: RwLock::new(Vec::new()),
            edge_type_to_id: RwLock::new(FxHashMap::default()),
            id_to_edge_type: RwLock::new(Vec::new()),
            forward_adj: ChunkedAdjacency::new(),
            backward_adj,
            label_index: RwLock::new(Vec::with_capacity(16)),
            node_labels: RwLock::new(FxHashMap::default()),
            property_indexes: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "vector-index")]
            vector_indexes: RwLock::new(FxHashMap::default()),
            #[cfg(feature = "text-index")]
            text_indexes: RwLock::new(FxHashMap::default()),
            next_node_id: AtomicU64::new(0),
            next_edge_id: AtomicU64::new(0),
            current_epoch: AtomicU64::new(0),
            live_node_count: AtomicI64::new(0),
            live_edge_count: AtomicI64::new(0),
            edge_type_live_counts: RwLock::new(Vec::new()),
            statistics: RwLock::new(Arc::new(Statistics::new())),
            needs_stats_recompute: AtomicBool::new(false),
            named_graphs: RwLock::new(FxHashMap::default()),
            property_undo_log: RwLock::new(FxHashMap::default()),
        })
        // NOTE(review): config.initial_node_capacity / initial_edge_capacity
        // are not used to pre-size any container here — confirm whether the
        // capacity hints should be forwarded to the maps / PropertyStorage.
    }

    /// Returns the current epoch.
    #[must_use]
    pub fn current_epoch(&self) -> EpochId {
        EpochId::new(self.current_epoch.load(Ordering::Acquire))
    }

    /// Creates a new epoch and returns its ID.
    #[doc(hidden)]
    pub fn new_epoch(&self) -> EpochId {
        // fetch_add returns the *previous* value, so +1 yields the new epoch.
        let id = self.current_epoch.fetch_add(1, Ordering::AcqRel) + 1;
        EpochId::new(id)
    }

    /// Syncs the store epoch to match an external epoch counter.
    ///
    /// Used by the transaction manager to keep the store's epoch in step
    /// after a transaction commit advances the global epoch.
    #[doc(hidden)]
    pub fn sync_epoch(&self, epoch: EpochId) {
        // fetch_max makes this a no-op if the store is already ahead,
        // so out-of-order sync calls can never move the epoch backward.
        self.current_epoch
            .fetch_max(epoch.as_u64(), Ordering::AcqRel);
    }

    /// Removes all data from the store, resetting it to an empty state.
    ///
    /// Acquires locks in the documented ordering to prevent deadlocks.
    /// After clearing, the store behaves as if freshly constructed.
    pub fn clear(&self) {
        // Level 1: Entity storage
        #[cfg(not(feature = "tiered-storage"))]
        {
            self.nodes.write().clear();
            self.edges.write().clear();
        }
        #[cfg(feature = "tiered-storage")]
        {
            self.node_versions.write().clear();
            self.edge_versions.write().clear();
            // Arena allocator chunks are leaked; epochs are cleared via epoch_store.
        }

        // Level 2: Catalogs (acquire as pairs)
        {
            self.label_to_id.write().clear();
            self.id_to_label.write().clear();
        }
        {
            self.edge_type_to_id.write().clear();
            self.id_to_edge_type.write().clear();
        }

        // Level 3: Indexes
        self.label_index.write().clear();
        self.node_labels.write().clear();
        self.property_indexes.write().clear();
        #[cfg(feature = "vector-index")]
        self.vector_indexes.write().clear();
        #[cfg(feature = "text-index")]
        self.text_indexes.write().clear();

        // Nested: Properties and adjacency
        self.node_properties.clear();
        self.edge_properties.clear();
        self.forward_adj.clear();
        if let Some(ref backward) = self.backward_adj {
            backward.clear();
        }

        // Atomics: ID counters restart from zero, so IDs are reused after clear.
        self.next_node_id.store(0, Ordering::Release);
        self.next_edge_id.store(0, Ordering::Release);
        self.current_epoch.store(0, Ordering::Release);

        // Level 4: Statistics
        self.live_node_count.store(0, Ordering::Release);
        self.live_edge_count.store(0, Ordering::Release);
        self.edge_type_live_counts.write().clear();
        *self.statistics.write() = Arc::new(Statistics::new());
        self.needs_stats_recompute.store(false, Ordering::Release);

        // Level 5: Undo log
        self.property_undo_log.write().clear();
        // NOTE(review): named_graphs is intentionally left untouched here —
        // clear() resets only this store's own data. Confirm that named
        // graphs surviving clear() is the intended semantics, given the
        // "as if freshly constructed" claim above.
    }

    /// Returns whether backward adjacency (incoming edge index) is available.
    ///
    /// When backward adjacency is enabled (the default), bidirectional search
    /// algorithms can traverse from the target toward the source.
    #[must_use]
    pub fn has_backward_adjacency(&self) -> bool {
        self.backward_adj.is_some()
    }

    // === Named Graph Management ===

    /// Returns a named graph by name, or `None` if it does not exist.
    #[must_use]
    pub fn graph(&self, name: &str) -> Option<Arc<LpgStore>> {
        self.named_graphs.read().get(name).cloned()
    }

    /// Returns a named graph, creating it if it does not exist.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if a new store cannot be allocated.
    pub fn graph_or_create(&self, name: &str) -> Result<Arc<LpgStore>, AllocError> {
        // Fast path: read lock only, for the common case where it exists.
        {
            let graphs = self.named_graphs.read();
            if let Some(g) = graphs.get(name) {
                return Ok(Arc::clone(g));
            }
        }
        let mut graphs = self.named_graphs.write();
        // Double-check after acquiring write lock: another thread may have
        // created the graph between dropping the read lock and here.
        if let Some(g) = graphs.get(name) {
            return Ok(Arc::clone(g));
        }
        let store = Arc::new(LpgStore::new()?);
        graphs.insert(name.to_string(), Arc::clone(&store));
        Ok(store)
    }

    /// Creates a named graph. Returns `true` on success, `false` if it already exists.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the new store cannot be allocated.
    pub fn create_graph(&self, name: &str) -> Result<bool, AllocError> {
        let mut graphs = self.named_graphs.write();
        if graphs.contains_key(name) {
            return Ok(false);
        }
        graphs.insert(name.to_string(), Arc::new(LpgStore::new()?));
        Ok(true)
    }

    /// Drops a named graph. Returns `false` if it did not exist.
    ///
    /// Existing `Arc` handles to the dropped graph remain valid; the store
    /// is deallocated only when the last handle goes away.
    pub fn drop_graph(&self, name: &str) -> bool {
        self.named_graphs.write().remove(name).is_some()
    }

    /// Returns all named graph names (in no particular order).
    #[must_use]
    pub fn graph_names(&self) -> Vec<String> {
        self.named_graphs.read().keys().cloned().collect()
    }

    /// Returns the number of named graphs.
    #[must_use]
    pub fn graph_count(&self) -> usize {
        self.named_graphs.read().len()
    }

    /// Clears a specific graph, or the default graph if `name` is `None`.
    ///
    /// Clearing a non-existent named graph is a silent no-op.
    pub fn clear_graph(&self, name: Option<&str>) {
        match name {
            Some(n) => {
                if let Some(g) = self.named_graphs.read().get(n) {
                    g.clear();
                }
            }
            None => self.clear(),
        }
    }

    /// Copies all data from the source graph to the destination graph.
    /// Creates the destination graph if it does not exist.
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the destination store cannot be allocated.
    pub fn copy_graph(&self, source: Option<&str>, dest: Option<&str>) -> Result<(), AllocError> {
        let _src = match source {
            Some(n) => self.graph(n),
            None => None, // default graph
        };
        let _dest_graph = dest.map(|n| self.graph_or_create(n)).transpose()?;
        // Full graph copy is complex (requires iterating all entities).
        // For now, this creates the destination graph structure.
        // Full entity-level copy will be implemented when needed.
        Ok(())
    }

    // === Internal Helpers ===

    /// Returns the ID for `label`, allocating a new one if unseen.
    /// Lock order: 3 (catalog pair, acquired together for writes).
    pub(super) fn get_or_create_label_id(&self, label: &str) -> u32 {
        // Fast path: read lock only, for already-known labels.
        {
            let label_to_id = self.label_to_id.read();
            if let Some(&id) = label_to_id.get(label) {
                return id;
            }
        }

        let mut label_to_id = self.label_to_id.write();
        let mut id_to_label = self.id_to_label.write();

        // Double-check after acquiring write lock
        if let Some(&id) = label_to_id.get(label) {
            return id;
        }

        // New ID is the next slot in the id -> name table.
        let id = id_to_label.len() as u32;

        let label: ArcStr = label.into();
        label_to_id.insert(label.clone(), id);
        id_to_label.push(label);

        id
    }

    /// Returns the ID for `edge_type`, allocating a new one if unseen.
    /// Lock order: 4 (catalog pair), then 8 (edge_type_live_counts).
    pub(super) fn get_or_create_edge_type_id(&self, edge_type: &str) -> u32 {
        // Fast path: read lock only, for already-known edge types.
        {
            let type_to_id = self.edge_type_to_id.read();
            if let Some(&id) = type_to_id.get(edge_type) {
                return id;
            }
        }

        let mut type_to_id = self.edge_type_to_id.write();
        let mut id_to_type = self.id_to_edge_type.write();

        // Double-check
        if let Some(&id) = type_to_id.get(edge_type) {
            return id;
        }

        let id = id_to_type.len() as u32;
        let edge_type: ArcStr = edge_type.into();
        type_to_id.insert(edge_type.clone(), id);
        id_to_type.push(edge_type);

        // Grow edge type live counts to match
        let mut counts = self.edge_type_live_counts.write();
        while counts.len() <= id as usize {
            counts.push(0);
        }

        id
    }

    /// Increments the live edge count for a given edge type.
    /// Resizes the counts table if `type_id` has not been seen yet.
    pub(super) fn increment_edge_type_count(&self, type_id: u32) {
        let mut counts = self.edge_type_live_counts.write();
        if counts.len() <= type_id as usize {
            counts.resize(type_id as usize + 1, 0);
        }
        counts[type_id as usize] += 1;
    }

    /// Decrements the live edge count for a given edge type.
    /// An out-of-range `type_id` is silently ignored (nothing to decrement).
    pub(super) fn decrement_edge_type_count(&self, type_id: u32) {
        let mut counts = self.edge_type_live_counts.write();
        if type_id < counts.len() as u32 {
            counts[type_id as usize] -= 1;
        }
    }
}
721}