// grafeo_core/graph/lpg/store/mod.rs
1//! The in-memory LPG graph store.
2//!
3//! This is where your nodes and edges actually live. Most users interact
4//! through [`GrafeoDB`](grafeo_engine::GrafeoDB), but algorithm implementers
5//! sometimes need the raw [`LpgStore`] for direct adjacency traversal.
6//!
7//! Key features:
8//! - MVCC versioning - concurrent readers don't block each other
9//! - Columnar properties with zone maps for fast filtering
10//! - Forward and backward adjacency indexes
11
12mod edge_ops;
13mod graph_store_impl;
14mod index;
15mod memory;
16mod node_ops;
17mod property_ops;
18mod schema;
19mod search;
20mod statistics;
21mod traversal;
22mod versioning;
23
24#[cfg(test)]
25mod tests;
26
27use super::PropertyStorage;
28#[cfg(not(feature = "tiered-storage"))]
29use super::{EdgeRecord, NodeRecord};
30use crate::index::adjacency::ChunkedAdjacency;
31use crate::statistics::Statistics;
32use arcstr::ArcStr;
33use dashmap::DashMap;
34#[cfg(not(feature = "tiered-storage"))]
35use grafeo_common::mvcc::VersionChain;
36use grafeo_common::types::{
37 EdgeId, EpochId, HashableValue, NodeId, PropertyKey, TransactionId, Value,
38};
39use grafeo_common::utils::hash::{FxHashMap, FxHashSet};
40use parking_lot::RwLock;
41use std::cmp::Ordering as CmpOrdering;
42use std::sync::Arc;
43use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
44
45#[cfg(feature = "vector-index")]
46use crate::index::vector::VectorIndexKind;
47
48#[cfg(feature = "tiered-storage")]
49use crate::codec::EpochStore;
50use grafeo_common::memory::arena::AllocError;
51#[cfg(feature = "tiered-storage")]
52use grafeo_common::memory::arena::ArenaAllocator;
53#[cfg(feature = "tiered-storage")]
54use grafeo_common::mvcc::VersionIndex;
55#[cfg(feature = "temporal")]
56use grafeo_common::temporal::VersionLog;
57
/// Undo entry for a property mutation within a transaction.
///
/// Captures the previous state of a property, label, or entity so it can be
/// restored on rollback. Entries are recorded in mutation order and replayed
/// in reverse by the rollback path (see `property_undo_log` on `LpgStore`);
/// on commit they are simply discarded.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum PropertyUndoEntry {
    /// A node property was changed or added.
    NodeProperty {
        /// The node that was modified.
        node_id: NodeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before
        /// (rollback then removes the property rather than restoring a value).
        old_value: Option<Value>,
    },
    /// An edge property was changed or added.
    EdgeProperty {
        /// The edge that was modified.
        edge_id: EdgeId,
        /// The property key that was set or removed.
        key: PropertyKey,
        /// The previous value, or `None` if the property did not exist before.
        old_value: Option<Value>,
    },
    /// A label was added to a node (rollback removes it again).
    LabelAdded {
        /// The node that had a label added.
        node_id: NodeId,
        /// The label string that was added.
        label: String,
    },
    /// A label was removed from a node (rollback re-adds it).
    LabelRemoved {
        /// The node that had a label removed.
        node_id: NodeId,
        /// The label string that was removed.
        label: String,
    },
    /// A node was deleted; carries everything needed to recreate it on rollback.
    NodeDeleted {
        /// The node that was deleted.
        node_id: NodeId,
        /// The labels the node had before deletion.
        labels: Vec<String>,
        /// The properties the node had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
    /// An edge was deleted; carries everything needed to recreate it on rollback.
    EdgeDeleted {
        /// The edge that was deleted.
        edge_id: EdgeId,
        /// The source node.
        src: NodeId,
        /// The destination node.
        dst: NodeId,
        /// The edge type name.
        edge_type: String,
        /// The properties the edge had before deletion.
        properties: Vec<(PropertyKey, Value)>,
    },
}
119
120/// Compares two values for ordering (used for range checks).
121pub(super) fn compare_values_for_range(a: &Value, b: &Value) -> Option<CmpOrdering> {
122 match (a, b) {
123 (Value::Int64(a), Value::Int64(b)) => Some(a.cmp(b)),
124 (Value::Float64(a), Value::Float64(b)) => a.partial_cmp(b),
125 (Value::Int64(a), Value::Float64(b)) => (*a as f64).partial_cmp(b),
126 (Value::Float64(a), Value::Int64(b)) => a.partial_cmp(&(*b as f64)),
127 (Value::String(a), Value::String(b)) => Some(a.cmp(b)),
128 (Value::Bool(a), Value::Bool(b)) => Some(a.cmp(b)),
129 (Value::Timestamp(a), Value::Timestamp(b)) => Some(a.cmp(b)),
130 (Value::Date(a), Value::Date(b)) => Some(a.cmp(b)),
131 (Value::Time(a), Value::Time(b)) => Some(a.cmp(b)),
132 _ => None,
133 }
134}
135
136/// Checks if a value is within a range.
137pub(super) fn value_in_range(
138 value: &Value,
139 min: Option<&Value>,
140 max: Option<&Value>,
141 min_inclusive: bool,
142 max_inclusive: bool,
143) -> bool {
144 // Check lower bound
145 if let Some(min_val) = min {
146 match compare_values_for_range(value, min_val) {
147 Some(CmpOrdering::Less) => return false,
148 Some(CmpOrdering::Equal) if !min_inclusive => return false,
149 None => return false, // Can't compare
150 _ => {}
151 }
152 }
153
154 // Check upper bound
155 if let Some(max_val) = max {
156 match compare_values_for_range(value, max_val) {
157 Some(CmpOrdering::Greater) => return false,
158 Some(CmpOrdering::Equal) if !max_inclusive => return false,
159 None => return false,
160 _ => {}
161 }
162 }
163
164 true
165}
166
/// Configuration for the LPG store.
///
/// The defaults work well for most cases. Tune `backward_edges` if you only
/// traverse in one direction (saves memory), or adjust capacities if you know
/// your graph size upfront (avoids reallocations).
#[derive(Debug, Clone)]
pub struct LpgStoreConfig {
    /// Maintain backward adjacency for incoming edge queries. Turn off if
    /// you only traverse outgoing edges - saves ~50% adjacency memory.
    /// Defaults to `true`.
    pub backward_edges: bool,
    /// Initial capacity for nodes (avoids early reallocations).
    /// Defaults to 1024.
    pub initial_node_capacity: usize,
    /// Initial capacity for edges (avoids early reallocations).
    /// Defaults to 4096.
    pub initial_edge_capacity: usize,
}
182
183impl Default for LpgStoreConfig {
184 fn default() -> Self {
185 Self {
186 backward_edges: true,
187 initial_node_capacity: 1024,
188 initial_edge_capacity: 4096,
189 }
190 }
191}
192
/// Bidirectional label name/ID registry.
///
/// Combines the name-to-ID and ID-to-name mappings behind a single lock,
/// reducing lock acquisitions on both the read path (`build_node`) and the
/// write path (`get_or_create_label_id`).
///
/// Invariant: IDs are dense and assigned in creation order, so
/// `id_to_name[id as usize]` is always the name that maps to `id`.
pub(super) struct LabelRegistry {
    /// Label name to numeric ID.
    name_to_id: FxHashMap<ArcStr, u32>,
    /// Numeric ID to label name (index = ID).
    id_to_name: Vec<ArcStr>,
}
204
205impl LabelRegistry {
206 fn new() -> Self {
207 Self {
208 name_to_id: FxHashMap::default(),
209 id_to_name: Vec::new(),
210 }
211 }
212
213 /// Looks up an existing label ID by name.
214 pub(super) fn get_id(&self, name: &str) -> Option<u32> {
215 self.name_to_id.get(name).copied()
216 }
217
218 /// Returns the label name for a given ID.
219 pub(super) fn get_name(&self, id: u32) -> Option<&ArcStr> {
220 self.id_to_name.get(id as usize)
221 }
222
223 /// Returns or creates a label ID for the given name.
224 pub(super) fn get_or_create(&mut self, name: &str) -> u32 {
225 if let Some(&id) = self.name_to_id.get(name) {
226 return id;
227 }
228 let id = self.id_to_name.len() as u32;
229 let label: ArcStr = name.into();
230 self.name_to_id.insert(label.clone(), id);
231 self.id_to_name.push(label);
232 id
233 }
234
235 /// Returns the total number of distinct labels.
236 pub(super) fn len(&self) -> usize {
237 self.id_to_name.len()
238 }
239
240 /// Returns the ID-to-name slice for iteration.
241 pub(super) fn names(&self) -> &[ArcStr] {
242 &self.id_to_name
243 }
244
245 /// Clears all label mappings.
246 pub(super) fn clear(&mut self) {
247 self.name_to_id.clear();
248 self.id_to_name.clear();
249 }
250
251 /// Estimates heap memory usage in bytes.
252 pub(super) fn heap_bytes(&self) -> usize {
253 let map_bytes = self.name_to_id.capacity()
254 * (std::mem::size_of::<ArcStr>() + std::mem::size_of::<u32>());
255 let vec_bytes = self.id_to_name.capacity() * std::mem::size_of::<ArcStr>();
256 let string_bytes: usize = self.id_to_name.iter().map(|s| s.len()).sum();
257 map_bytes + vec_bytes + string_bytes
258 }
259}
260
/// The core in-memory graph storage.
///
/// Everything lives here: nodes, edges, properties, adjacency indexes, and
/// version chains for MVCC. Concurrent reads never block each other.
///
/// Most users should go through `GrafeoDB` (from the `grafeo_engine` crate) which
/// adds transaction management and query execution. Use `LpgStore` directly
/// when you need raw performance for algorithm implementations.
///
/// # Example
///
/// ```
/// use grafeo_core::graph::lpg::LpgStore;
/// use grafeo_core::graph::Direction;
///
/// let store = LpgStore::new().expect("arena allocation");
///
/// // Create a small social network
/// let alix = store.create_node(&["Person"]);
/// let gus = store.create_node(&["Person"]);
/// store.create_edge(alix, gus, "KNOWS");
///
/// // Traverse outgoing edges
/// for neighbor in store.neighbors(alix, Direction::Outgoing) {
///     println!("Alix knows node {:?}", neighbor);
/// }
/// ```
///
/// # Lock Ordering
///
/// `LpgStore` contains multiple `RwLock` fields that must be acquired in a
/// consistent order to prevent deadlocks. Always acquire locks in this order:
///
/// ## Level 1: Entity Storage (mutually exclusive via feature flag)
/// 1. `nodes` / `node_versions`
/// 2. `edges` / `edge_versions`
///
/// ## Level 2: Catalogs
/// 3. `label_registry`
/// 4. `edge_type_to_id` + `id_to_edge_type`
///
/// ## Level 3: Indexes
/// 5. `label_index`
/// 6. `node_labels`
/// 7. `property_indexes`
///
/// ## Level 4: Statistics
/// 8. `statistics`
///
/// ## Level 5: Nested Locks (internal to other structs)
/// 9. `PropertyStorage::columns` (via `node_properties`/`edge_properties`)
/// 10. `ChunkedAdjacency::lists` (via `forward_adj`/`backward_adj`)
///
/// ## Rules
/// - Never hold entity locks while acquiring catalog locks in a different scope.
/// - Statistics lock is always last.
/// - Read locks are generally safe, but avoid read-to-write upgrades.
pub struct LpgStore {
    /// Node records indexed by NodeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 1
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) nodes: RwLock<FxHashMap<NodeId, VersionChain<NodeRecord>>>,

    /// Edge records indexed by EdgeId, with version chains for MVCC.
    /// Used when `tiered-storage` feature is disabled.
    /// Lock order: 2
    #[cfg(not(feature = "tiered-storage"))]
    pub(super) edges: RwLock<FxHashMap<EdgeId, VersionChain<EdgeRecord>>>,

    // === Tiered Storage Fields (feature-gated) ===
    //
    // Lock ordering for arena access:
    // version_lock (read/write) → arena read lock (via arena_allocator.arena())
    //
    // Rules:
    // - Acquire arena read lock *after* version locks, never before.
    // - Multiple threads may call arena.read_at() concurrently (shared refs only).
    // - Never acquire arena write lock (alloc_new_chunk) while holding version locks.
    // - freeze_epoch order: node_versions.read() → arena.read_at(),
    //   then edge_versions.read() → arena.read_at().
    /// Arena allocator for hot data storage.
    /// Data is stored in per-epoch arenas for fast allocation and bulk deallocation.
    #[cfg(feature = "tiered-storage")]
    pub(super) arena_allocator: Arc<ArenaAllocator>,

    /// Node version indexes - store metadata and arena offsets.
    /// The actual NodeRecord data is stored in the arena.
    /// Lock order: 1
    #[cfg(feature = "tiered-storage")]
    pub(super) node_versions: RwLock<FxHashMap<NodeId, VersionIndex>>,

    /// Edge version indexes - store metadata and arena offsets.
    /// The actual EdgeRecord data is stored in the arena.
    /// Lock order: 2
    #[cfg(feature = "tiered-storage")]
    pub(super) edge_versions: RwLock<FxHashMap<EdgeId, VersionIndex>>,

    /// Cold storage for frozen epochs.
    /// Contains compressed epoch blocks for historical data.
    #[cfg(feature = "tiered-storage")]
    pub(super) epoch_store: Arc<EpochStore>,

    /// Property storage for nodes (nested lock — see Lock Ordering level 5).
    pub(super) node_properties: PropertyStorage<NodeId>,

    /// Property storage for edges (nested lock — see Lock Ordering level 5).
    pub(super) edge_properties: PropertyStorage<EdgeId>,

    /// Bidirectional label name/ID registry.
    /// Lock order: 3
    pub(super) label_registry: RwLock<LabelRegistry>,

    /// Edge type name to ID mapping.
    /// Lock order: 4 (acquire with id_to_edge_type)
    pub(super) edge_type_to_id: RwLock<FxHashMap<ArcStr, u32>>,

    /// Edge type ID to name mapping (index = ID).
    /// Lock order: 4 (acquire with edge_type_to_id)
    pub(super) id_to_edge_type: RwLock<Vec<ArcStr>>,

    /// Forward adjacency lists (outgoing edges).
    pub(super) forward_adj: ChunkedAdjacency,

    /// Backward adjacency lists (incoming edges).
    /// Only populated if config.backward_edges is true.
    pub(super) backward_adj: Option<ChunkedAdjacency>,

    /// Label index: label_id -> set of node IDs
    /// (a map to `()` is used as the set representation).
    /// Lock order: 5
    pub(super) label_index: RwLock<Vec<FxHashMap<NodeId, ()>>>,

    /// Node labels: node_id -> set of label IDs.
    /// Reverse mapping to efficiently get labels for a node.
    /// Lock order: 6
    #[cfg(not(feature = "temporal"))]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, FxHashSet<u32>>>,
    /// Versioned node labels: node_id -> version log of label sets.
    /// Lock order: 6
    #[cfg(feature = "temporal")]
    pub(super) node_labels: RwLock<FxHashMap<NodeId, VersionLog<FxHashSet<u32>>>>,

    /// Property indexes: property_key -> (value -> set of node IDs).
    ///
    /// When a property is indexed, lookups by value are O(1) instead of O(n).
    /// Use [`create_property_index`] to enable indexing for a property.
    /// Lock order: 7
    pub(super) property_indexes:
        RwLock<FxHashMap<PropertyKey, DashMap<HashableValue, FxHashSet<NodeId>>>>,

    /// Vector indexes: "label:property" -> HNSW index.
    ///
    /// Created via [`GrafeoDB::create_vector_index`](grafeo_engine::GrafeoDB::create_vector_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "vector-index")]
    pub(super) vector_indexes: RwLock<FxHashMap<String, Arc<VectorIndexKind>>>,

    /// Text indexes: "label:property" -> inverted index with BM25 scoring.
    ///
    /// Created via [`GrafeoDB::create_text_index`](grafeo_engine::GrafeoDB::create_text_index).
    /// Lock order: 7 (same level as property_indexes, disjoint keys)
    #[cfg(feature = "text-index")]
    pub(super) text_indexes:
        RwLock<FxHashMap<String, Arc<RwLock<crate::index::text::InvertedIndex>>>>,

    /// Next node ID to hand out.
    pub(super) next_node_id: AtomicU64,

    /// Next edge ID to hand out.
    pub(super) next_edge_id: AtomicU64,

    /// Current epoch (monotonically advanced; see `new_epoch`/`sync_epoch`).
    pub(super) current_epoch: AtomicU64,

    /// Live (non-deleted) node count, maintained incrementally.
    /// Avoids O(n) full scan in `compute_statistics()`.
    pub(super) live_node_count: AtomicI64,

    /// Live (non-deleted) edge count, maintained incrementally.
    /// Avoids O(m) full scan in `compute_statistics()`.
    pub(super) live_edge_count: AtomicI64,

    /// Per-edge-type live counts, indexed by edge_type_id.
    /// Avoids O(m) edge scan in `compute_statistics()`.
    /// Lock order: 8 (same level as statistics)
    pub(super) edge_type_live_counts: RwLock<Vec<i64>>,

    /// Statistics for cost-based optimization.
    /// Lock order: 8 (always last)
    pub(super) statistics: RwLock<Arc<Statistics>>,

    /// Whether statistics need full recomputation (e.g., after rollback).
    pub(super) needs_stats_recompute: AtomicBool,

    /// Named graphs, each an independent `LpgStore` partition.
    /// Zero overhead for single-graph databases (empty HashMap).
    /// Lock order: 9 (after statistics)
    named_graphs: RwLock<FxHashMap<String, Arc<LpgStore>>>,

    /// Undo log for property mutations within transactions.
    ///
    /// Maps transaction IDs to a list of undo entries that capture the
    /// previous property values. On rollback, entries are replayed in
    /// reverse order to restore properties. On commit, the entries are
    /// simply discarded.
    /// Lock order: 10 (after named_graphs, independent of other locks)
    property_undo_log: RwLock<FxHashMap<TransactionId, Vec<PropertyUndoEntry>>>,
}
469
470impl LpgStore {
471 /// Creates a new LPG store with default configuration.
472 ///
473 /// # Errors
474 ///
475 /// Returns [`AllocError`] if the arena allocator cannot be initialized
476 /// (only possible with the `tiered-storage` feature).
477 pub fn new() -> Result<Self, AllocError> {
478 Self::with_config(LpgStoreConfig::default())
479 }
480
481 /// Creates a new LPG store with custom configuration.
482 ///
483 /// # Errors
484 ///
485 /// Returns [`AllocError`] if the arena allocator cannot be initialized
486 /// (only possible with the `tiered-storage` feature).
487 pub fn with_config(config: LpgStoreConfig) -> Result<Self, AllocError> {
488 let backward_adj = if config.backward_edges {
489 Some(ChunkedAdjacency::new())
490 } else {
491 None
492 };
493
494 Ok(Self {
495 #[cfg(not(feature = "tiered-storage"))]
496 nodes: RwLock::new(FxHashMap::default()),
497 #[cfg(not(feature = "tiered-storage"))]
498 edges: RwLock::new(FxHashMap::default()),
499 #[cfg(feature = "tiered-storage")]
500 arena_allocator: Arc::new(ArenaAllocator::new()?),
501 #[cfg(feature = "tiered-storage")]
502 node_versions: RwLock::new(FxHashMap::default()),
503 #[cfg(feature = "tiered-storage")]
504 edge_versions: RwLock::new(FxHashMap::default()),
505 #[cfg(feature = "tiered-storage")]
506 epoch_store: Arc::new(EpochStore::new()),
507 node_properties: PropertyStorage::new(),
508 edge_properties: PropertyStorage::new(),
509 label_registry: RwLock::new(LabelRegistry::new()),
510 edge_type_to_id: RwLock::new(FxHashMap::default()),
511 id_to_edge_type: RwLock::new(Vec::new()),
512 forward_adj: ChunkedAdjacency::new(),
513 backward_adj,
514 label_index: RwLock::new(Vec::with_capacity(16)),
515 node_labels: RwLock::new(FxHashMap::default()),
516 property_indexes: RwLock::new(FxHashMap::default()),
517 #[cfg(feature = "vector-index")]
518 vector_indexes: RwLock::new(FxHashMap::default()),
519 #[cfg(feature = "text-index")]
520 text_indexes: RwLock::new(FxHashMap::default()),
521 next_node_id: AtomicU64::new(0),
522 next_edge_id: AtomicU64::new(0),
523 current_epoch: AtomicU64::new(0),
524 live_node_count: AtomicI64::new(0),
525 live_edge_count: AtomicI64::new(0),
526 edge_type_live_counts: RwLock::new(Vec::new()),
527 statistics: RwLock::new(Arc::new(Statistics::new())),
528 needs_stats_recompute: AtomicBool::new(false),
529 named_graphs: RwLock::new(FxHashMap::default()),
530 property_undo_log: RwLock::new(FxHashMap::default()),
531 })
532 }
533
534 /// Returns the current epoch.
535 #[must_use]
536 pub fn current_epoch(&self) -> EpochId {
537 EpochId::new(self.current_epoch.load(Ordering::Acquire))
538 }
539
540 /// Creates a new epoch.
541 #[doc(hidden)]
542 pub fn new_epoch(&self) -> EpochId {
543 let id = self.current_epoch.fetch_add(1, Ordering::AcqRel) + 1;
544 EpochId::new(id)
545 }
546
547 /// Syncs the store epoch to match an external epoch counter.
548 ///
549 /// Used by the transaction manager to keep the store's epoch in step
550 /// after a transaction commit advances the global epoch.
551 #[doc(hidden)]
552 pub fn sync_epoch(&self, epoch: EpochId) {
553 self.current_epoch
554 .fetch_max(epoch.as_u64(), Ordering::AcqRel);
555 }
556
557 /// Removes all data from the store, resetting it to an empty state.
558 ///
559 /// Acquires locks in the documented ordering to prevent deadlocks.
560 /// After clearing, the store behaves as if freshly constructed.
561 pub fn clear(&self) {
562 // Level 1: Entity storage
563 #[cfg(not(feature = "tiered-storage"))]
564 {
565 self.nodes.write().clear();
566 self.edges.write().clear();
567 }
568 #[cfg(feature = "tiered-storage")]
569 {
570 self.node_versions.write().clear();
571 self.edge_versions.write().clear();
572 // Arena allocator chunks are leaked; epochs are cleared via epoch_store.
573 }
574
575 // Level 2: Catalogs
576 self.label_registry.write().clear();
577 {
578 self.edge_type_to_id.write().clear();
579 self.id_to_edge_type.write().clear();
580 }
581
582 // Level 3: Indexes
583 self.label_index.write().clear();
584 self.node_labels.write().clear();
585 self.property_indexes.write().clear();
586 #[cfg(feature = "vector-index")]
587 self.vector_indexes.write().clear();
588 #[cfg(feature = "text-index")]
589 self.text_indexes.write().clear();
590
591 // Nested: Properties and adjacency
592 self.node_properties.clear();
593 self.edge_properties.clear();
594 self.forward_adj.clear();
595 if let Some(ref backward) = self.backward_adj {
596 backward.clear();
597 }
598
599 // Atomics: ID counters
600 self.next_node_id.store(0, Ordering::Release);
601 self.next_edge_id.store(0, Ordering::Release);
602 self.current_epoch.store(0, Ordering::Release);
603
604 // Level 4: Statistics
605 self.live_node_count.store(0, Ordering::Release);
606 self.live_edge_count.store(0, Ordering::Release);
607 self.edge_type_live_counts.write().clear();
608 *self.statistics.write() = Arc::new(Statistics::new());
609 self.needs_stats_recompute.store(false, Ordering::Release);
610
611 // Level 5: Undo log
612 self.property_undo_log.write().clear();
613 }
614
615 /// Returns whether backward adjacency (incoming edge index) is available.
616 ///
617 /// When backward adjacency is enabled (the default), bidirectional search
618 /// algorithms can traverse from the target toward the source.
619 #[must_use]
620 pub fn has_backward_adjacency(&self) -> bool {
621 self.backward_adj.is_some()
622 }
623
624 // === Named Graph Management ===
625
626 /// Returns a named graph by name, or `None` if it does not exist.
627 #[must_use]
628 pub fn graph(&self, name: &str) -> Option<Arc<LpgStore>> {
629 self.named_graphs.read().get(name).cloned()
630 }
631
632 /// Returns a named graph, creating it if it does not exist.
633 ///
634 /// # Errors
635 ///
636 /// Returns [`AllocError`] if a new store cannot be allocated.
637 pub fn graph_or_create(&self, name: &str) -> Result<Arc<LpgStore>, AllocError> {
638 {
639 let graphs = self.named_graphs.read();
640 if let Some(g) = graphs.get(name) {
641 return Ok(Arc::clone(g));
642 }
643 }
644 let mut graphs = self.named_graphs.write();
645 // Double-check after acquiring write lock
646 if let Some(g) = graphs.get(name) {
647 return Ok(Arc::clone(g));
648 }
649 let store = Arc::new(LpgStore::new()?);
650 graphs.insert(name.to_string(), Arc::clone(&store));
651 Ok(store)
652 }
653
654 /// Creates a named graph. Returns `true` on success, `false` if it already exists.
655 ///
656 /// # Errors
657 ///
658 /// Returns [`AllocError`] if the new store cannot be allocated.
659 pub fn create_graph(&self, name: &str) -> Result<bool, AllocError> {
660 let mut graphs = self.named_graphs.write();
661 if graphs.contains_key(name) {
662 return Ok(false);
663 }
664 graphs.insert(name.to_string(), Arc::new(LpgStore::new()?));
665 Ok(true)
666 }
667
668 /// Drops a named graph. Returns `false` if it did not exist.
669 pub fn drop_graph(&self, name: &str) -> bool {
670 self.named_graphs.write().remove(name).is_some()
671 }
672
673 /// Returns all named graph names.
674 #[must_use]
675 pub fn graph_names(&self) -> Vec<String> {
676 self.named_graphs.read().keys().cloned().collect()
677 }
678
679 /// Returns the number of named graphs.
680 #[must_use]
681 pub fn graph_count(&self) -> usize {
682 self.named_graphs.read().len()
683 }
684
685 /// Clears a specific graph, or the default graph if `name` is `None`.
686 pub fn clear_graph(&self, name: Option<&str>) {
687 match name {
688 Some(n) => {
689 if let Some(g) = self.named_graphs.read().get(n) {
690 g.clear();
691 }
692 }
693 None => self.clear(),
694 }
695 }
696
697 /// Copies all data from the source graph to the destination graph.
698 /// Creates the destination graph if it does not exist.
699 ///
700 /// # Errors
701 ///
702 /// Returns [`AllocError`] if the destination store cannot be allocated.
703 pub fn copy_graph(&self, source: Option<&str>, dest: Option<&str>) -> Result<(), AllocError> {
704 let _src = match source {
705 Some(n) => self.graph(n),
706 None => None, // default graph
707 };
708 let _dest_graph = dest.map(|n| self.graph_or_create(n)).transpose()?;
709 // Full graph copy is complex (requires iterating all entities).
710 // For now, this creates the destination graph structure.
711 // Full entity-level copy will be implemented when needed.
712 Ok(())
713 }
714
715 // === Internal Helpers ===
716
717 pub(super) fn get_or_create_label_id(&self, label: &str) -> u32 {
718 if let Some(id) = self.label_registry.read().get_id(label) {
719 return id;
720 }
721 self.label_registry.write().get_or_create(label)
722 }
723
724 pub(super) fn get_or_create_edge_type_id(&self, edge_type: &str) -> u32 {
725 {
726 let type_to_id = self.edge_type_to_id.read();
727 if let Some(&id) = type_to_id.get(edge_type) {
728 return id;
729 }
730 }
731
732 let mut type_to_id = self.edge_type_to_id.write();
733 let mut id_to_type = self.id_to_edge_type.write();
734
735 // Double-check
736 if let Some(&id) = type_to_id.get(edge_type) {
737 return id;
738 }
739
740 let id = id_to_type.len() as u32;
741 let edge_type: ArcStr = edge_type.into();
742 type_to_id.insert(edge_type.clone(), id);
743 id_to_type.push(edge_type);
744
745 // Grow edge type live counts to match
746 let mut counts = self.edge_type_live_counts.write();
747 while counts.len() <= id as usize {
748 counts.push(0);
749 }
750
751 id
752 }
753
754 /// Increments the live edge count for a given edge type.
755 pub(super) fn increment_edge_type_count(&self, type_id: u32) {
756 let mut counts = self.edge_type_live_counts.write();
757 if counts.len() <= type_id as usize {
758 counts.resize(type_id as usize + 1, 0);
759 }
760 counts[type_id as usize] += 1;
761 }
762
763 /// Decrements the live edge count for a given edge type.
764 pub(super) fn decrement_edge_type_count(&self, type_id: u32) {
765 let mut counts = self.edge_type_live_counts.write();
766 if type_id < counts.len() as u32 {
767 counts[type_id as usize] -= 1;
768 }
769 }
770}