use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use crate::confidence::ConfidenceMetadata;
use crate::graph::unified::bind::alias::AliasTable;
use crate::graph::unified::bind::scope::provenance::{
ScopeProvenance, ScopeProvenanceStore, ScopeStableId,
};
use crate::graph::unified::bind::scope::{ScopeArena, ScopeId};
use crate::graph::unified::bind::shadow::ShadowTable;
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::edge::bidirectional::BidirectionalEdgeStore;
use crate::graph::unified::file::FileId;
use crate::graph::unified::memory::{GraphMemorySize, HASHMAP_ENTRY_OVERHEAD};
use crate::graph::unified::storage::arena::NodeArena;
use crate::graph::unified::storage::edge_provenance::{EdgeProvenance, EdgeProvenanceStore};
use crate::graph::unified::storage::indices::AuxiliaryIndices;
use crate::graph::unified::storage::interner::StringInterner;
use crate::graph::unified::storage::metadata::NodeMetadataStore;
use crate::graph::unified::storage::node_provenance::{NodeProvenance, NodeProvenanceStore};
use crate::graph::unified::storage::registry::{FileProvenanceView, FileRegistry};
use crate::graph::unified::storage::segment::FileSegmentTable;
use crate::graph::unified::string::id::StringId;
/// The unified code graph: node/edge storage plus every auxiliary side table.
///
/// All heavyweight components are behind `Arc`, so `Clone` is cheap (reference
/// bumps only). Mutation goes through the `*_mut` accessors, which
/// copy-on-write via `Arc::make_mut`, giving snapshot isolation to any
/// previously taken [`GraphSnapshot`].
#[derive(Clone)]
pub struct CodeGraph {
    /// Primary node storage.
    pub(crate) nodes: Arc<NodeArena>,
    /// Forward + reverse edge storage.
    pub(crate) edges: Arc<BidirectionalEdgeStore>,
    /// Interned strings (names, qualified names, URIs).
    pub(crate) strings: Arc<StringInterner>,
    /// File registry: paths, per-file node buckets, file provenance.
    pub(crate) files: Arc<FileRegistry>,
    /// Secondary lookup indices (by name, qualified name, file, kind).
    pub(crate) indices: Arc<AuxiliaryIndices>,
    /// Per-node macro/synthetic metadata.
    pub(crate) macro_metadata: Arc<NodeMetadataStore>,
    /// Provenance records keyed by node id.
    pub(crate) node_provenance: Arc<NodeProvenanceStore>,
    /// Provenance records keyed by edge id.
    pub(crate) edge_provenance: Arc<EdgeProvenanceStore>,
    // Epoch of the fact/provenance stores; set by `set_provenance`.
    pub(crate) fact_epoch: u64,
    // Structural epoch; bumped on mutation (see `bump_epoch`/`set_epoch`).
    pub(crate) epoch: u64,
    /// Per-language confidence metadata, keyed by language name.
    pub(crate) confidence: HashMap<String, ConfidenceMetadata>,
    /// Lexical scope tree storage.
    pub(crate) scope_arena: Arc<ScopeArena>,
    /// Import/alias resolution table.
    pub(crate) alias_table: Arc<AliasTable>,
    /// Shadowing (name-occlusion) table.
    pub(crate) shadow_table: Arc<ShadowTable>,
    /// Scope provenance plus stable-id → scope-id mapping.
    pub(crate) scope_provenance_store: Arc<ScopeProvenanceStore>,
    /// Per-file slot-range table for segmented storage.
    pub(crate) file_segments: Arc<FileSegmentTable>,
}
impl CodeGraph {
    /// Creates an empty graph: every component freshly allocated, both epochs 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            nodes: Arc::new(NodeArena::new()),
            edges: Arc::new(BidirectionalEdgeStore::new()),
            strings: Arc::new(StringInterner::new()),
            files: Arc::new(FileRegistry::new()),
            indices: Arc::new(AuxiliaryIndices::new()),
            macro_metadata: Arc::new(NodeMetadataStore::new()),
            node_provenance: Arc::new(NodeProvenanceStore::new()),
            edge_provenance: Arc::new(EdgeProvenanceStore::new()),
            fact_epoch: 0,
            epoch: 0,
            confidence: HashMap::new(),
            scope_arena: Arc::new(ScopeArena::new()),
            alias_table: Arc::new(AliasTable::new()),
            shadow_table: Arc::new(ShadowTable::new()),
            scope_provenance_store: Arc::new(ScopeProvenanceStore::new()),
            file_segments: Arc::new(FileSegmentTable::new()),
        }
    }
    /// Assembles a graph from pre-built core components.
    ///
    /// Everything not passed in (provenance stores, binding tables, segments,
    /// confidence, both epochs) starts empty/zero.
    #[must_use]
    pub fn from_components(
        nodes: NodeArena,
        edges: BidirectionalEdgeStore,
        strings: StringInterner,
        files: FileRegistry,
        indices: AuxiliaryIndices,
        macro_metadata: NodeMetadataStore,
    ) -> Self {
        Self {
            nodes: Arc::new(nodes),
            edges: Arc::new(edges),
            strings: Arc::new(strings),
            files: Arc::new(files),
            indices: Arc::new(indices),
            macro_metadata: Arc::new(macro_metadata),
            node_provenance: Arc::new(NodeProvenanceStore::new()),
            edge_provenance: Arc::new(EdgeProvenanceStore::new()),
            fact_epoch: 0,
            epoch: 0,
            confidence: HashMap::new(),
            scope_arena: Arc::new(ScopeArena::new()),
            alias_table: Arc::new(AliasTable::new()),
            shadow_table: Arc::new(ShadowTable::new()),
            scope_provenance_store: Arc::new(ScopeProvenanceStore::new()),
            file_segments: Arc::new(FileSegmentTable::new()),
        }
    }
    /// Takes an O(1) point-in-time snapshot (Arc clones only).
    ///
    /// Later mutation of `self` copies-on-write via `Arc::make_mut`, so the
    /// snapshot stays consistent. The `confidence` map is intentionally not
    /// carried into the snapshot.
    #[must_use]
    pub fn snapshot(&self) -> GraphSnapshot {
        GraphSnapshot {
            nodes: Arc::clone(&self.nodes),
            edges: Arc::clone(&self.edges),
            strings: Arc::clone(&self.strings),
            files: Arc::clone(&self.files),
            indices: Arc::clone(&self.indices),
            macro_metadata: Arc::clone(&self.macro_metadata),
            node_provenance: Arc::clone(&self.node_provenance),
            edge_provenance: Arc::clone(&self.edge_provenance),
            fact_epoch: self.fact_epoch,
            epoch: self.epoch,
            scope_arena: Arc::clone(&self.scope_arena),
            alias_table: Arc::clone(&self.alias_table),
            shadow_table: Arc::clone(&self.shadow_table),
            scope_provenance_store: Arc::clone(&self.scope_provenance_store),
            file_segments: Arc::clone(&self.file_segments),
        }
    }
    /// Shared view of node storage.
    #[inline]
    #[must_use]
    pub fn nodes(&self) -> &NodeArena {
        &self.nodes
    }
    /// Shared view of edge storage.
    #[inline]
    #[must_use]
    pub fn edges(&self) -> &BidirectionalEdgeStore {
        &self.edges
    }
    /// Shared view of the string interner.
    #[inline]
    #[must_use]
    pub fn strings(&self) -> &StringInterner {
        &self.strings
    }
    /// Shared view of the file registry.
    #[inline]
    #[must_use]
    pub fn files(&self) -> &FileRegistry {
        &self.files
    }
    /// Shared view of the auxiliary lookup indices.
    #[inline]
    #[must_use]
    pub fn indices(&self) -> &AuxiliaryIndices {
        &self.indices
    }
    /// Shared view of macro/synthetic node metadata.
    #[inline]
    #[must_use]
    pub fn macro_metadata(&self) -> &NodeMetadataStore {
        &self.macro_metadata
    }
    /// Epoch of the provenance/fact stores (see [`Self::set_provenance`]).
    #[inline]
    #[must_use]
    pub fn fact_epoch(&self) -> u64 {
        self.fact_epoch
    }
    /// Provenance record for `id`, if one was stored.
    #[inline]
    #[must_use]
    pub fn node_provenance(
        &self,
        id: crate::graph::unified::node::id::NodeId,
    ) -> Option<&NodeProvenance> {
        self.node_provenance.lookup(id)
    }
    /// Provenance record for `id`, if one was stored.
    #[inline]
    #[must_use]
    pub fn edge_provenance(
        &self,
        id: crate::graph::unified::edge::id::EdgeId,
    ) -> Option<&EdgeProvenance> {
        self.edge_provenance.lookup(id)
    }
    /// Borrowed provenance view for `id`, if the file is registered.
    #[inline]
    #[must_use]
    pub fn file_provenance(
        &self,
        id: crate::graph::unified::file::id::FileId,
    ) -> Option<FileProvenanceView<'_>> {
        self.files.file_provenance(id)
    }
    /// Shared view of the scope arena.
    #[inline]
    #[must_use]
    pub fn scope_arena(&self) -> &ScopeArena {
        &self.scope_arena
    }
    /// Replaces the scope arena wholesale (rebuild path).
    pub(crate) fn set_scope_arena(&mut self, arena: ScopeArena) {
        self.scope_arena = Arc::new(arena);
    }
    /// Shared view of the alias table.
    #[inline]
    #[must_use]
    pub fn alias_table(&self) -> &AliasTable {
        &self.alias_table
    }
    /// Replaces the alias table wholesale (rebuild path).
    pub(crate) fn set_alias_table(&mut self, table: AliasTable) {
        self.alias_table = Arc::new(table);
    }
    /// Shared view of the shadow table.
    #[inline]
    #[must_use]
    pub fn shadow_table(&self) -> &ShadowTable {
        &self.shadow_table
    }
    /// Replaces the shadow table wholesale (rebuild path).
    pub(crate) fn set_shadow_table(&mut self, table: ShadowTable) {
        self.shadow_table = Arc::new(table);
    }
    /// Shared view of the scope provenance store.
    #[inline]
    #[must_use]
    pub fn scope_provenance_store(&self) -> &ScopeProvenanceStore {
        &self.scope_provenance_store
    }
    /// Provenance record for a scope, if one was stored.
    #[inline]
    #[must_use]
    pub fn scope_provenance(&self, id: ScopeId) -> Option<&ScopeProvenance> {
        self.scope_provenance_store.lookup(id)
    }
    /// Resolves a stable scope id to the current arena-local `ScopeId`.
    #[inline]
    #[must_use]
    pub fn scope_by_stable_id(&self, stable: ScopeStableId) -> Option<ScopeId> {
        self.scope_provenance_store.scope_by_stable_id(stable)
    }
    /// Replaces the scope provenance store wholesale (rebuild path).
    pub(crate) fn set_scope_provenance_store(&mut self, store: ScopeProvenanceStore) {
        self.scope_provenance_store = Arc::new(store);
    }
    /// Shared view of the per-file segment table.
    #[inline]
    #[must_use]
    pub fn file_segments(&self) -> &FileSegmentTable {
        &self.file_segments
    }
    /// Replaces the segment table wholesale (rebuild path).
    pub(crate) fn set_file_segments(&mut self, table: FileSegmentTable) {
        self.file_segments = Arc::new(table);
    }
    /// Copy-on-write mutable access to the segment table.
    pub(crate) fn file_segments_mut(&mut self) -> &mut FileSegmentTable {
        Arc::make_mut(&mut self.file_segments)
    }
    /// Test hook: records a segment range directly. Only compiled with the
    /// `rebuild-internals` feature.
    #[cfg(feature = "rebuild-internals")]
    #[doc(hidden)]
    pub fn test_only_record_file_segment(
        &mut self,
        file_id: FileId,
        start_slot: u32,
        slot_count: u32,
    ) {
        Arc::make_mut(&mut self.file_segments).record_range(file_id, start_slot, slot_count);
    }
    /// Installs freshly built provenance stores and their epoch in one step,
    /// keeping the three fields consistent with each other.
    pub(crate) fn set_provenance(
        &mut self,
        node_provenance: NodeProvenanceStore,
        edge_provenance: EdgeProvenanceStore,
        fact_epoch: u64,
    ) {
        self.node_provenance = Arc::new(node_provenance);
        self.edge_provenance = Arc::new(edge_provenance);
        self.fact_epoch = fact_epoch;
    }
    /// Current structural epoch.
    #[inline]
    #[must_use]
    pub fn epoch(&self) -> u64 {
        self.epoch
    }
    /// Copy-on-write mutable access to node storage.
    #[inline]
    pub fn nodes_mut(&mut self) -> &mut NodeArena {
        Arc::make_mut(&mut self.nodes)
    }
    /// Copy-on-write mutable access to edge storage.
    #[inline]
    pub fn edges_mut(&mut self) -> &mut BidirectionalEdgeStore {
        Arc::make_mut(&mut self.edges)
    }
    /// Copy-on-write mutable access to the string interner.
    #[inline]
    pub fn strings_mut(&mut self) -> &mut StringInterner {
        Arc::make_mut(&mut self.strings)
    }
    /// Copy-on-write mutable access to the file registry.
    #[inline]
    pub fn files_mut(&mut self) -> &mut FileRegistry {
        Arc::make_mut(&mut self.files)
    }
    /// Copy-on-write mutable access to the auxiliary indices.
    #[inline]
    pub fn indices_mut(&mut self) -> &mut AuxiliaryIndices {
        Arc::make_mut(&mut self.indices)
    }
    /// Copy-on-write mutable access to macro metadata.
    #[inline]
    pub fn macro_metadata_mut(&mut self) -> &mut NodeMetadataStore {
        Arc::make_mut(&mut self.macro_metadata)
    }
    /// Simultaneous mutable access to nodes and strings (the two components
    /// a node-insertion path needs at once; separate fields, so the split
    /// borrow is safe).
    #[inline]
    pub fn nodes_and_strings_mut(&mut self) -> (&mut NodeArena, &mut StringInterner) {
        (
            Arc::make_mut(&mut self.nodes),
            Arc::make_mut(&mut self.strings),
        )
    }
    /// Rebuilds all auxiliary indices from current node/edge state.
    pub fn rebuild_indices(&mut self) {
        crate::graph::unified::build::parallel_commit::rebuild_indices(self);
    }
    /// Advances the structural epoch (wrapping) and returns the new value.
    #[inline]
    pub fn bump_epoch(&mut self) -> u64 {
        self.epoch = self.epoch.wrapping_add(1);
        self.epoch
    }
    /// Sets the structural epoch directly (used by `ConcurrentCodeGraph`).
    #[inline]
    pub fn set_epoch(&mut self, epoch: u64) {
        self.epoch = epoch;
    }
    /// Number of live nodes.
    #[inline]
    #[must_use]
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }
    /// Number of forward edges (CSR-resident plus delta-buffer edges).
    #[inline]
    #[must_use]
    pub fn edge_count(&self) -> usize {
        let stats = self.edges.stats();
        stats.forward.csr_edge_count + stats.forward.delta_edge_count
    }
    /// True when the graph has no nodes.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.nodes.is_empty()
    }
    /// Iterates all registered files as `(id, path)` pairs.
    #[inline]
    pub fn indexed_files(
        &self,
    ) -> impl Iterator<Item = (crate::graph::unified::file::FileId, &std::path::Path)> {
        self.files
            .iter()
            .map(|(id, arc_path)| (id, arc_path.as_ref()))
    }
    /// Files that import `file_id`: every distinct source file of an
    /// `Imports` edge targeting one of `file_id`'s nodes. Sorted, excludes
    /// `file_id` itself.
    #[must_use]
    pub fn reverse_import_index(&self, file_id: FileId) -> Vec<FileId> {
        let mut importers: HashSet<FileId> = HashSet::new();
        for &target_node in self.indices.by_file(file_id) {
            for edge_ref in self.edges.edges_to(target_node) {
                if !matches!(edge_ref.kind, EdgeKind::Imports { .. }) {
                    continue;
                }
                // Skip edges whose source node has been removed.
                let Some(source_entry) = self.nodes.get(edge_ref.source) else {
                    continue;
                };
                let source_file = source_entry.file;
                if source_file != file_id {
                    importers.insert(source_file);
                }
            }
        }
        let mut result: Vec<FileId> = importers.into_iter().collect();
        result.sort();
        result
    }
    /// Files that depend on `file_id` via ANY incoming edge kind (superset of
    /// [`Self::reverse_import_index`]). Sorted, excludes `file_id` itself.
    #[must_use]
    pub fn reverse_dependency_index(&self, file_id: FileId) -> Vec<FileId> {
        let mut dependents: HashSet<FileId> = HashSet::new();
        for &target_node in self.indices.by_file(file_id) {
            for edge_ref in self.edges.edges_to(target_node) {
                let Some(source_entry) = self.nodes.get(edge_ref.source) else {
                    continue;
                };
                let source_file = source_entry.file;
                if source_file != file_id {
                    dependents.insert(source_file);
                }
            }
        }
        let mut result: Vec<FileId> = dependents.into_iter().collect();
        result.sort();
        result
    }
    /// Per-language confidence metadata.
    #[inline]
    #[must_use]
    pub fn confidence(&self) -> &HashMap<String, ConfidenceMetadata> {
        &self.confidence
    }
    /// Merges `metadata` into the entry for `language`.
    ///
    /// Level merge is effectively "take the weaker claim": `Verified` paired
    /// with anything yields the other side, `Partial`/`AstOnly` collapse to
    /// `AstOnly`, otherwise the existing level is kept. Limitation and
    /// unavailable-feature lists are unioned (duplicates skipped).
    pub fn merge_confidence(&mut self, language: &str, metadata: ConfidenceMetadata) {
        use crate::confidence::ConfidenceLevel;
        self.confidence
            .entry(language.to_string())
            .and_modify(|existing| {
                let new_level = match (&existing.level, &metadata.level) {
                    (ConfidenceLevel::Verified, other) | (other, ConfidenceLevel::Verified) => {
                        *other
                    }
                    (ConfidenceLevel::Partial, ConfidenceLevel::AstOnly)
                    | (ConfidenceLevel::AstOnly, ConfidenceLevel::Partial) => {
                        ConfidenceLevel::AstOnly
                    }
                    (level, _) => *level,
                };
                existing.level = new_level;
                for limitation in &metadata.limitations {
                    if !existing.limitations.contains(limitation) {
                        existing.limitations.push(limitation.clone());
                    }
                }
                for feature in &metadata.unavailable_features {
                    if !existing.unavailable_features.contains(feature) {
                        existing.unavailable_features.push(feature.clone());
                    }
                }
            })
            .or_insert(metadata);
    }
    /// Replaces the entire confidence map.
    pub fn set_confidence(&mut self, confidence: HashMap<String, ConfidenceMetadata>) {
        self.confidence = confidence;
    }
    /// Removes every node belonging to `file_id` and scrubs the dead ids from
    /// edges, indices, metadata, provenance, and binding tables.
    ///
    /// Returns the tombstoned node ids (empty if the file had no nodes).
    #[allow(dead_code)]
    pub(crate) fn remove_file(
        &mut self,
        file_id: FileId,
    ) -> Vec<crate::graph::unified::node::NodeId> {
        use crate::graph::unified::node::NodeId;
        use crate::graph::unified::rebuild::coverage::NodeIdBearing;
        // Detach the file's node bucket first, then drop the registry entry
        // and its segment range.
        let tombstoned: Vec<NodeId> = self.files_mut().take_nodes(file_id);
        self.files_mut().unregister(file_id);
        self.file_segments_mut().remove(file_id);
        if tombstoned.is_empty() {
            return tombstoned;
        }
        let dead: HashSet<NodeId> = tombstoned.iter().copied().collect();
        {
            let arena = self.nodes_mut();
            for &nid in &tombstoned {
                let _ = arena.remove(nid);
            }
        }
        self.edges_mut().tombstone_edges_for_nodes(&dead);
        {
            // One keep-predicate shared by every node-id-bearing side table.
            let predicate: Box<dyn Fn(NodeId) -> bool + '_> = Box::new(|nid| !dead.contains(&nid));
            self.indices_mut().retain_nodes(&*predicate);
            self.macro_metadata_mut().retain_nodes(&*predicate);
            Arc::make_mut(&mut self.node_provenance).retain_nodes(&*predicate);
            Arc::make_mut(&mut self.scope_arena).retain_nodes(&*predicate);
            Arc::make_mut(&mut self.alias_table).retain_nodes(&*predicate);
            Arc::make_mut(&mut self.shadow_table).retain_nodes(&*predicate);
        }
        tombstoned
    }
    /// Rebuild-pipeline assembly constructor: installs every component as-is,
    /// with no normalization.
    #[doc(hidden)]
    #[allow(clippy::too_many_arguments)]
    #[must_use]
    pub(crate) fn __assemble_from_rebuild_parts_internal(
        nodes: NodeArena,
        edges: BidirectionalEdgeStore,
        strings: StringInterner,
        files: FileRegistry,
        indices: AuxiliaryIndices,
        macro_metadata: NodeMetadataStore,
        node_provenance: NodeProvenanceStore,
        edge_provenance: EdgeProvenanceStore,
        fact_epoch: u64,
        epoch: u64,
        confidence: HashMap<String, ConfidenceMetadata>,
        scope_arena: ScopeArena,
        alias_table: AliasTable,
        shadow_table: ShadowTable,
        scope_provenance_store: ScopeProvenanceStore,
        file_segments: FileSegmentTable,
    ) -> Self {
        Self {
            nodes: Arc::new(nodes),
            edges: Arc::new(edges),
            strings: Arc::new(strings),
            files: Arc::new(files),
            indices: Arc::new(indices),
            macro_metadata: Arc::new(macro_metadata),
            node_provenance: Arc::new(node_provenance),
            edge_provenance: Arc::new(edge_provenance),
            fact_epoch,
            epoch,
            confidence,
            scope_arena: Arc::new(scope_arena),
            alias_table: Arc::new(alias_table),
            shadow_table: Arc::new(shadow_table),
            scope_provenance_store: Arc::new(scope_provenance_store),
            file_segments: Arc::new(file_segments),
        }
    }
    /// Debug/test invariant: per-file buckets partition the live node set.
    ///
    /// Panics if a bucket holds duplicates or dead nodes, a node appears in
    /// two buckets, a node sits in the wrong file's bucket, or (when any
    /// bucket is populated at all) a live node is in no bucket.
    #[cfg(any(debug_assertions, test))]
    pub fn assert_bucket_bijection(&self) {
        use std::collections::HashMap as StdHashMap;
        let mut seen: StdHashMap<
            crate::graph::unified::node::NodeId,
            crate::graph::unified::file::FileId,
        > = StdHashMap::new();
        let mut any_bucket_populated = false;
        for (file_id, bucket) in self.files.per_file_nodes_for_gate0d() {
            if !bucket.is_empty() {
                any_bucket_populated = true;
            }
            let mut within_bucket: std::collections::HashSet<crate::graph::unified::node::NodeId> =
                std::collections::HashSet::new();
            for node_id in bucket {
                assert!(
                    within_bucket.insert(node_id),
                    "assert_bucket_bijection: duplicate node {node_id:?} inside bucket {file_id:?}"
                );
                assert!(
                    self.nodes.get(node_id).is_some(),
                    "assert_bucket_bijection: dead node {node_id:?} in bucket {file_id:?}"
                );
                let prior = seen.insert(node_id, file_id);
                assert!(
                    prior.is_none(),
                    "assert_bucket_bijection: node {node_id:?} in multiple buckets: \
                     prior={prior:?}, current={file_id:?}"
                );
                if let Some(entry) = self.nodes.get(node_id) {
                    assert_eq!(
                        entry.file, file_id,
                        "assert_bucket_bijection: node {node_id:?} misfiled: in bucket \
                         {file_id:?}, actual {:?}",
                        entry.file
                    );
                }
            }
        }
        // Completeness is only checked when buckets are in use at all, so an
        // index-free construction path doesn't trip the assertion.
        if any_bucket_populated {
            for (node_id, _entry) in self.nodes.iter() {
                assert!(
                    seen.contains_key(&node_id),
                    "assert_bucket_bijection: live node {node_id:?} absent from all buckets"
                );
            }
        }
    }
    /// Debug/test invariant: after a removal, none of the `dead` node ids may
    /// survive in any component (arena, indices, edges, metadata, provenance,
    /// scope/alias/shadow tables, per-file buckets).
    #[cfg(any(debug_assertions, test))]
    pub fn assert_no_tombstone_residue_for(
        &self,
        dead: &std::collections::HashSet<crate::graph::unified::node::NodeId>,
    ) {
        use super::super::rebuild::coverage::NodeIdBearing;
        if dead.is_empty() {
            return;
        }
        for nid in self.nodes.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in NodeArena"
            );
        }
        for nid in self.indices.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in auxiliary indices"
            );
        }
        for nid in self.edges.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in edge store"
            );
        }
        for nid in self.macro_metadata.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in macro metadata"
            );
        }
        for nid in self.node_provenance.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in node provenance"
            );
        }
        for nid in self.scope_arena.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in scope arena"
            );
        }
        for nid in self.alias_table.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in alias table"
            );
        }
        for nid in self.shadow_table.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in shadow table"
            );
        }
        for nid in self.files.all_node_ids() {
            assert!(
                !dead.contains(&nid),
                "assert_no_tombstone_residue: tombstone {nid:?} still in per-file bucket"
            );
        }
    }
}
impl Default for CodeGraph {
fn default() -> Self {
Self::new()
}
}
impl fmt::Debug for CodeGraph {
    /// Compact debug view: node count and epoch only (the full graph is far
    /// too large to print field-by-field).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = f.debug_struct("CodeGraph");
        out.field("nodes", &self.nodes.len());
        out.field("epoch", &self.epoch);
        out.finish_non_exhaustive()
    }
}
impl GraphMemorySize for CodeGraph {
    /// Sums the heap usage of every component.
    ///
    /// The `confidence` map is accounted by hand: bucket capacity times the
    /// per-entry footprint, plus each key's buffer and the capacity of the
    /// inner `Vec<String>`s and their string buffers. The segment table is
    /// counted as capacity times slot size.
    fn heap_bytes(&self) -> usize {
        let per_entry = std::mem::size_of::<String>()
            + std::mem::size_of::<ConfidenceMetadata>()
            + HASHMAP_ENTRY_OVERHEAD;
        // Bytes owned by a Vec<String>: its own capacity plus every buffer.
        let string_vec_bytes = |v: &Vec<String>| {
            v.capacity() * std::mem::size_of::<String>()
                + v.iter().map(String::capacity).sum::<usize>()
        };
        let confidence_bytes = self.confidence.capacity() * per_entry
            + self
                .confidence
                .iter()
                .map(|(key, meta)| {
                    key.capacity()
                        + string_vec_bytes(&meta.limitations)
                        + string_vec_bytes(&meta.unavailable_features)
                })
                .sum::<usize>();
        let segment_bytes = self.file_segments.capacity()
            * std::mem::size_of::<Option<crate::graph::unified::storage::segment::FileSegment>>();
        self.nodes.heap_bytes()
            + self.edges.heap_bytes()
            + self.strings.heap_bytes()
            + self.files.heap_bytes()
            + self.indices.heap_bytes()
            + self.macro_metadata.heap_bytes()
            + self.node_provenance.heap_bytes()
            + self.edge_provenance.heap_bytes()
            + confidence_bytes
            + segment_bytes
    }
}
/// Thread-safe wrapper around [`CodeGraph`].
///
/// Readers take the `RwLock` read side; writers take the write side, which
/// also advances the shared `epoch` counter so cheap `epoch()` polling can
/// detect changes without locking.
pub struct ConcurrentCodeGraph {
    // The guarded graph.
    inner: RwLock<CodeGraph>,
    // Lock-free mirror of the graph's structural epoch.
    epoch: AtomicU64,
}
impl ConcurrentCodeGraph {
    /// Creates an empty concurrent graph at epoch 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            inner: RwLock::new(CodeGraph::new()),
            epoch: AtomicU64::new(0),
        }
    }
    /// Wraps an existing graph, seeding the atomic epoch mirror from it.
    #[must_use]
    pub fn from_graph(graph: CodeGraph) -> Self {
        let epoch = graph.epoch();
        Self {
            inner: RwLock::new(graph),
            epoch: AtomicU64::new(epoch),
        }
    }
    /// Shared read access.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, CodeGraph> {
        self.inner.read()
    }
    /// Exclusive write access; advances the epoch by one.
    ///
    /// The counter is bumped only AFTER the write lock is held (previously it
    /// was bumped before blocking, so `epoch()` could report a value no
    /// observable graph state corresponded to), and the value stored into the
    /// graph is exactly this writer's increment — `fetch_add` returns the
    /// previous value, so `+ 1` is ours — rather than a separate `load` that
    /// could observe another writer's later increment.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, CodeGraph> {
        let mut guard = self.inner.write();
        let next = self.epoch.fetch_add(1, Ordering::SeqCst) + 1;
        guard.set_epoch(next);
        guard
    }
    /// Current epoch without taking the lock.
    #[inline]
    #[must_use]
    pub fn epoch(&self) -> u64 {
        self.epoch.load(Ordering::SeqCst)
    }
    /// Point-in-time snapshot (Arc clones under a brief read lock).
    #[must_use]
    pub fn snapshot(&self) -> GraphSnapshot {
        self.inner.read().snapshot()
    }
    /// Fact/provenance epoch of the inner graph.
    #[must_use]
    pub fn fact_epoch(&self) -> u64 {
        self.inner.read().fact_epoch()
    }
    /// Owned copy of the provenance record for `id`, if any.
    #[must_use]
    pub fn node_provenance(
        &self,
        id: crate::graph::unified::node::id::NodeId,
    ) -> Option<NodeProvenance> {
        self.inner.read().node_provenance(id).copied()
    }
    /// Owned copy of the provenance record for `id`, if any.
    #[must_use]
    pub fn edge_provenance(
        &self,
        id: crate::graph::unified::edge::id::EdgeId,
    ) -> Option<EdgeProvenance> {
        self.inner.read().edge_provenance(id).copied()
    }
    /// Owned file-provenance view for `id`, if the file is registered.
    /// Copies the fields out so no lock is held by the caller.
    #[must_use]
    pub fn file_provenance(
        &self,
        id: crate::graph::unified::file::id::FileId,
    ) -> Option<OwnedFileProvenanceView> {
        let guard = self.inner.read();
        guard.file_provenance(id).map(|v| OwnedFileProvenanceView {
            content_hash: *v.content_hash,
            indexed_at: v.indexed_at,
            source_uri: v.source_uri,
            is_external: v.is_external,
        })
    }
    /// Shared handle to the scope arena (Arc clone, no lock retained).
    #[must_use]
    pub fn scope_arena(&self) -> Arc<ScopeArena> {
        Arc::clone(&self.inner.read().scope_arena)
    }
    /// Shared handle to the alias table (Arc clone, no lock retained).
    #[must_use]
    pub fn alias_table(&self) -> Arc<AliasTable> {
        Arc::clone(&self.inner.read().alias_table)
    }
    /// Shared handle to the shadow table (Arc clone, no lock retained).
    #[must_use]
    pub fn shadow_table(&self) -> Arc<ShadowTable> {
        Arc::clone(&self.inner.read().shadow_table)
    }
    /// Shared handle to the scope provenance store (Arc clone).
    #[must_use]
    pub fn scope_provenance_store(&self) -> Arc<ScopeProvenanceStore> {
        Arc::clone(&self.inner.read().scope_provenance_store)
    }
    /// Owned copy of the scope provenance record for `id`, if any.
    #[must_use]
    pub fn scope_provenance(&self, id: ScopeId) -> Option<ScopeProvenance> {
        self.inner.read().scope_provenance(id).cloned()
    }
    /// Resolves a stable scope id to the current arena-local `ScopeId`.
    #[must_use]
    pub fn scope_by_stable_id(&self, stable: ScopeStableId) -> Option<ScopeId> {
        self.inner.read().scope_by_stable_id(stable)
    }
    /// Shared handle to the segment table (Arc clone, no lock retained).
    #[must_use]
    pub fn file_segments(&self) -> Arc<FileSegmentTable> {
        Arc::clone(&self.inner.read().file_segments)
    }
    /// Non-blocking read access; `None` if a writer holds the lock.
    #[inline]
    #[must_use]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, CodeGraph>> {
        self.inner.try_read()
    }
    /// Non-blocking write access; `None` if the lock is contended.
    /// On success the epoch is advanced exactly as in [`Self::write`].
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, CodeGraph>> {
        self.inner.try_write().map(|mut guard| {
            let next = self.epoch.fetch_add(1, Ordering::SeqCst) + 1;
            guard.set_epoch(next);
            guard
        })
    }
}
impl Default for ConcurrentCodeGraph {
fn default() -> Self {
Self::new()
}
}
impl fmt::Debug for ConcurrentCodeGraph {
    /// Prints only the atomic epoch; never takes the inner lock, so this is
    /// safe to call from any context.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let current = self.epoch.load(Ordering::SeqCst);
        f.debug_struct("ConcurrentCodeGraph")
            .field("epoch", &current)
            .finish_non_exhaustive()
    }
}
/// Lock-free, owned copy of a [`FileProvenanceView`], returned by
/// [`ConcurrentCodeGraph::file_provenance`] so callers hold no guard.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct OwnedFileProvenanceView {
    /// 32-byte content hash of the file as indexed.
    pub content_hash: [u8; 32],
    /// Timestamp recorded at indexing time.
    pub indexed_at: u64,
    /// Interned source URI, if one was recorded.
    pub source_uri: Option<StringId>,
    /// Whether the file lies outside the indexed project.
    pub is_external: bool,
}
/// Immutable point-in-time view of a [`CodeGraph`].
///
/// Holds Arc handles to the graph's components captured by
/// [`CodeGraph::snapshot`]; later copy-on-write mutation of the source graph
/// cannot affect it. Cloning a snapshot is cheap. Note the `confidence` map
/// is not part of a snapshot.
#[derive(Clone)]
pub struct GraphSnapshot {
    nodes: Arc<NodeArena>,
    edges: Arc<BidirectionalEdgeStore>,
    strings: Arc<StringInterner>,
    files: Arc<FileRegistry>,
    indices: Arc<AuxiliaryIndices>,
    macro_metadata: Arc<NodeMetadataStore>,
    node_provenance: Arc<NodeProvenanceStore>,
    edge_provenance: Arc<EdgeProvenanceStore>,
    // Epochs captured at snapshot time.
    fact_epoch: u64,
    epoch: u64,
    scope_arena: Arc<ScopeArena>,
    alias_table: Arc<AliasTable>,
    shadow_table: Arc<ShadowTable>,
    scope_provenance_store: Arc<ScopeProvenanceStore>,
    file_segments: Arc<FileSegmentTable>,
}
impl GraphSnapshot {
    /// Node storage captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn nodes(&self) -> &NodeArena {
        &self.nodes
    }
    /// Edge storage captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn edges(&self) -> &BidirectionalEdgeStore {
        &self.edges
    }
    /// String interner captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn strings(&self) -> &StringInterner {
        &self.strings
    }
    /// File registry captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn files(&self) -> &FileRegistry {
        &self.files
    }
    /// Auxiliary indices captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn indices(&self) -> &AuxiliaryIndices {
        &self.indices
    }
    /// Macro/synthetic metadata captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn macro_metadata(&self) -> &NodeMetadataStore {
        &self.macro_metadata
    }
    /// Fact/provenance epoch captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn fact_epoch(&self) -> u64 {
        self.fact_epoch
    }
    /// Provenance record for `id`, if one was stored.
    #[inline]
    #[must_use]
    pub fn node_provenance(
        &self,
        id: crate::graph::unified::node::id::NodeId,
    ) -> Option<&NodeProvenance> {
        self.node_provenance.lookup(id)
    }
    /// Provenance record for `id`, if one was stored.
    #[inline]
    #[must_use]
    pub fn edge_provenance(
        &self,
        id: crate::graph::unified::edge::id::EdgeId,
    ) -> Option<&EdgeProvenance> {
        self.edge_provenance.lookup(id)
    }
    /// Borrowed provenance view for `id`, if the file is registered.
    #[inline]
    #[must_use]
    pub fn file_provenance(
        &self,
        id: crate::graph::unified::file::id::FileId,
    ) -> Option<FileProvenanceView<'_>> {
        self.files.file_provenance(id)
    }
    /// Scope arena captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn scope_arena(&self) -> &ScopeArena {
        &self.scope_arena
    }
    /// Alias table captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn alias_table(&self) -> &AliasTable {
        &self.alias_table
    }
    /// Shadow table captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn shadow_table(&self) -> &ShadowTable {
        &self.shadow_table
    }
    /// Scope provenance store captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn scope_provenance_store(&self) -> &ScopeProvenanceStore {
        &self.scope_provenance_store
    }
    /// Provenance record for a scope, if one was stored.
    #[inline]
    #[must_use]
    pub fn scope_provenance(&self, id: ScopeId) -> Option<&ScopeProvenance> {
        self.scope_provenance_store.lookup(id)
    }
    /// Resolves a stable scope id to the snapshot's arena-local `ScopeId`.
    #[inline]
    #[must_use]
    pub fn scope_by_stable_id(&self, stable: ScopeStableId) -> Option<ScopeId> {
        self.scope_provenance_store.scope_by_stable_id(stable)
    }
    /// Segment table captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn file_segments(&self) -> &FileSegmentTable {
        &self.file_segments
    }
    /// Structural epoch captured at snapshot time.
    #[inline]
    #[must_use]
    pub fn epoch(&self) -> u64 {
        self.epoch
    }
    /// True if this snapshot was taken at `other_epoch` (staleness check).
    #[inline]
    #[must_use]
    pub fn epoch_matches(&self, other_epoch: u64) -> bool {
        self.epoch == other_epoch
    }
    /// Borrowed binding-plane facade over this snapshot.
    #[inline]
    #[must_use]
    pub fn binding_plane(&self) -> crate::graph::unified::bind::plane::BindingPlane<'_> {
        crate::graph::unified::bind::plane::BindingPlane::new(self)
    }
    /// Substring search over names and qualified names, with synthetic
    /// nodes filtered out. See [`Self::find_by_pattern_with_options`].
    #[must_use]
    pub fn find_by_pattern(&self, pattern: &str) -> Vec<crate::graph::unified::node::NodeId> {
        self.find_by_pattern_with_options(pattern, false)
    }
    /// Returns sorted, deduplicated node ids whose interned name OR
    /// qualified name contains `pattern` as a substring.
    ///
    /// Scans the whole interner (O(total interned strings)). Synthetic
    /// nodes are dropped unless `include_synthetic` is set.
    #[must_use]
    pub fn find_by_pattern_with_options(
        &self,
        pattern: &str,
        include_synthetic: bool,
    ) -> Vec<crate::graph::unified::node::NodeId> {
        let mut matches = Vec::new();
        for (str_id, s) in self.strings.iter() {
            if s.contains(pattern) {
                matches.extend_from_slice(self.indices.by_qualified_name(str_id));
                matches.extend_from_slice(self.indices.by_name(str_id));
            }
        }
        matches.sort_unstable();
        matches.dedup();
        if !include_synthetic {
            matches.retain(|&node_id| !self.is_node_synthetic(node_id));
        }
        matches
    }
    /// Returns sorted, deduplicated node ids whose name or qualified name is
    /// exactly `name`. Synthetic nodes are always filtered out. O(1) interner
    /// lookup; empty result if `name` was never interned.
    #[must_use]
    pub fn find_by_exact_name(&self, name: &str) -> Vec<crate::graph::unified::node::NodeId> {
        let Some(str_id) = self.strings.get(name) else {
            return Vec::new();
        };
        let mut matches: Vec<crate::graph::unified::node::NodeId> = Vec::new();
        matches.extend_from_slice(self.indices.by_name(str_id));
        matches.extend_from_slice(self.indices.by_qualified_name(str_id));
        matches.sort_unstable();
        matches.dedup();
        matches.retain(|&node_id| !self.is_node_synthetic(node_id));
        matches
    }
    /// True if the node is synthetic: either flagged in macro metadata, or a
    /// live, non-unified-loser node whose name matches the synthetic
    /// placeholder convention. Dead/unresolvable nodes report `false`.
    #[must_use]
    pub fn is_node_synthetic(&self, node_id: crate::graph::unified::node::NodeId) -> bool {
        if self.macro_metadata.is_synthetic(node_id) {
            return true;
        }
        let Some(entry) = self.nodes.get(node_id) else {
            return false;
        };
        // Unification losers keep their original (real) names.
        if entry.is_unified_loser() {
            return false;
        }
        let Some(name) = self.strings.resolve(entry.name) else {
            return false;
        };
        crate::graph::unified::storage::arena::NodeEntry::is_synthetic_placeholder_name(
            name.as_ref(),
        )
    }
    /// Targets of outgoing `Calls` edges from `node`.
    #[must_use]
    pub fn get_callees(
        &self,
        node: crate::graph::unified::node::NodeId,
    ) -> Vec<crate::graph::unified::node::NodeId> {
        use crate::graph::unified::edge::EdgeKind;
        self.edges
            .edges_from(node)
            .into_iter()
            .filter(|edge| matches!(edge.kind, EdgeKind::Calls { .. }))
            .map(|edge| edge.target)
            .collect()
    }
    /// Sources of incoming `Calls` edges to `node`.
    #[must_use]
    pub fn get_callers(
        &self,
        node: crate::graph::unified::node::NodeId,
    ) -> Vec<crate::graph::unified::node::NodeId> {
        use crate::graph::unified::edge::EdgeKind;
        self.edges
            .edges_to(node)
            .into_iter()
            .filter(|edge| matches!(edge.kind, EdgeKind::Calls { .. }))
            .map(|edge| edge.source)
            .collect()
    }
    /// Iterates all live nodes as `(id, entry)` pairs.
    pub fn iter_nodes(
        &self,
    ) -> impl Iterator<
        Item = (
            crate::graph::unified::node::NodeId,
            &crate::graph::unified::storage::arena::NodeEntry,
        ),
    > {
        self.nodes.iter()
    }
    /// Iterates all forward edges as `(source, target, kind)` triples,
    /// walking each live node's outgoing edge list.
    pub fn iter_edges(
        &self,
    ) -> impl Iterator<
        Item = (
            crate::graph::unified::node::NodeId,
            crate::graph::unified::node::NodeId,
            crate::graph::unified::edge::EdgeKind,
        ),
    > + '_ {
        self.nodes.iter().flat_map(move |(node_id, _entry)| {
            self.edges
                .edges_from(node_id)
                .into_iter()
                .map(move |edge| (node_id, edge.target, edge.kind))
        })
    }
    /// Entry for `id`, or `None` if dead/unknown.
    #[must_use]
    pub fn get_node(
        &self,
        id: crate::graph::unified::node::NodeId,
    ) -> Option<&crate::graph::unified::storage::arena::NodeEntry> {
        self.nodes.get(id)
    }
}
impl fmt::Debug for GraphSnapshot {
    /// Compact debug view mirroring `CodeGraph`'s: node count and epoch.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = f.debug_struct("GraphSnapshot");
        out.field("nodes", &self.nodes.len());
        out.field("epoch", &self.epoch);
        out.finish_non_exhaustive()
    }
}
/// Read-only component access shared by [`CodeGraph`] and [`GraphSnapshot`],
/// letting query code run against either a live graph or a snapshot.
pub trait GraphAccess {
    /// Node storage.
    fn nodes(&self) -> &NodeArena;
    /// Edge storage.
    fn edges(&self) -> &BidirectionalEdgeStore;
    /// String interner.
    fn strings(&self) -> &StringInterner;
    /// File registry.
    fn files(&self) -> &FileRegistry;
    /// Auxiliary lookup indices.
    fn indices(&self) -> &AuxiliaryIndices;
}
/// Trivial field projections; each borrow auto-derefs through the `Arc`.
impl GraphAccess for CodeGraph {
    #[inline]
    fn nodes(&self) -> &NodeArena {
        &self.nodes
    }
    #[inline]
    fn edges(&self) -> &BidirectionalEdgeStore {
        &self.edges
    }
    #[inline]
    fn strings(&self) -> &StringInterner {
        &self.strings
    }
    #[inline]
    fn files(&self) -> &FileRegistry {
        &self.files
    }
    #[inline]
    fn indices(&self) -> &AuxiliaryIndices {
        &self.indices
    }
}
/// Trivial field projections; each borrow auto-derefs through the `Arc`.
impl GraphAccess for GraphSnapshot {
    #[inline]
    fn nodes(&self) -> &NodeArena {
        &self.nodes
    }
    #[inline]
    fn edges(&self) -> &BidirectionalEdgeStore {
        &self.edges
    }
    #[inline]
    fn strings(&self) -> &StringInterner {
        &self.strings
    }
    #[inline]
    fn files(&self) -> &FileRegistry {
        &self.files
    }
    #[inline]
    fn indices(&self) -> &AuxiliaryIndices {
        &self.indices
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::graph::unified::{
FileScope, NodeId, ResolutionMode, SymbolCandidateOutcome, SymbolQuery,
SymbolResolutionOutcome,
};
    /// Test helper: strict symbol resolution, collapsing every non-resolved
    /// outcome (not found / file not indexed / ambiguous) to `None`.
    fn resolve_symbol_strict(snapshot: &GraphSnapshot, symbol: &str) -> Option<NodeId> {
        match snapshot.resolve_symbol(&SymbolQuery {
            symbol,
            file_scope: FileScope::Any,
            mode: ResolutionMode::Strict,
        }) {
            SymbolResolutionOutcome::Resolved(node_id) => Some(node_id),
            SymbolResolutionOutcome::NotFound
            | SymbolResolutionOutcome::FileNotIndexed
            | SymbolResolutionOutcome::Ambiguous(_) => None,
        }
    }
    /// Test helper: suffix-candidate lookup, collapsing non-candidate
    /// outcomes to an empty list.
    fn candidate_nodes(snapshot: &GraphSnapshot, symbol: &str) -> Vec<NodeId> {
        match snapshot.find_symbol_candidates(&SymbolQuery {
            symbol,
            file_scope: FileScope::Any,
            mode: ResolutionMode::AllowSuffixCandidates,
        }) {
            SymbolCandidateOutcome::Candidates(candidates) => candidates,
            SymbolCandidateOutcome::NotFound | SymbolCandidateOutcome::FileNotIndexed => Vec::new(),
        }
    }
#[test]
fn test_code_graph_new() {
let graph = CodeGraph::new();
assert_eq!(graph.epoch(), 0);
assert_eq!(graph.nodes().len(), 0);
}
#[test]
fn test_code_graph_default() {
let graph = CodeGraph::default();
assert_eq!(graph.epoch(), 0);
}
#[test]
fn test_code_graph_snapshot() {
let graph = CodeGraph::new();
let snapshot = graph.snapshot();
assert_eq!(snapshot.epoch(), 0);
assert_eq!(snapshot.nodes().len(), 0);
}
#[test]
fn test_code_graph_bump_epoch() {
let mut graph = CodeGraph::new();
assert_eq!(graph.epoch(), 0);
assert_eq!(graph.bump_epoch(), 1);
assert_eq!(graph.epoch(), 1);
assert_eq!(graph.bump_epoch(), 2);
assert_eq!(graph.epoch(), 2);
}
#[test]
fn test_code_graph_set_epoch() {
let mut graph = CodeGraph::new();
graph.set_epoch(42);
assert_eq!(graph.epoch(), 42);
}
#[test]
fn test_code_graph_from_components() {
let nodes = NodeArena::new();
let edges = BidirectionalEdgeStore::new();
let strings = StringInterner::new();
let files = FileRegistry::new();
let indices = AuxiliaryIndices::new();
let macro_metadata = NodeMetadataStore::new();
let graph =
CodeGraph::from_components(nodes, edges, strings, files, indices, macro_metadata);
assert_eq!(graph.epoch(), 0);
}
#[test]
fn test_code_graph_mut_accessors() {
let mut graph = CodeGraph::new();
let _nodes = graph.nodes_mut();
let _edges = graph.edges_mut();
let _strings = graph.strings_mut();
let _files = graph.files_mut();
let _indices = graph.indices_mut();
}
#[test]
fn test_code_graph_snapshot_isolation() {
let mut graph = CodeGraph::new();
let snapshot1 = graph.snapshot();
graph.bump_epoch();
let snapshot2 = graph.snapshot();
assert_eq!(snapshot1.epoch(), 0);
assert_eq!(snapshot2.epoch(), 1);
}
#[test]
fn test_code_graph_debug() {
let graph = CodeGraph::new();
let debug_str = format!("{graph:?}");
assert!(debug_str.contains("CodeGraph"));
assert!(debug_str.contains("epoch"));
}
#[test]
fn test_codegraph_heap_bytes_counts_confidence_inner_strings() {
let mut graph = CodeGraph::new();
graph.set_confidence({
let mut m = HashMap::with_capacity(8);
m.insert("seed".to_string(), ConfidenceMetadata::default());
m
});
let before = graph.heap_bytes();
let before_cap = graph.confidence.capacity();
let lim1 = String::from("no type inference");
let lim2 = String::from("no generic specialization");
let feat1 = String::from("rust-analyzer");
let l1 = lim1.capacity();
let l2 = lim2.capacity();
let f1 = feat1.capacity();
let limitations = vec![lim1, lim2];
let lim_vec_cap = limitations.capacity();
let unavailable_features = vec![feat1];
let feat_vec_cap = unavailable_features.capacity();
let key = String::from("rust");
let key_cap = key.capacity();
graph.confidence.insert(
key,
ConfidenceMetadata {
limitations,
unavailable_features,
..Default::default()
},
);
assert_eq!(
graph.confidence.capacity(),
before_cap,
"prerequisite: confidence HashMap must not rehash during the test insert",
);
let after = graph.heap_bytes();
let expected_inner = key_cap
+ lim_vec_cap * std::mem::size_of::<String>()
+ l1
+ l2
+ feat_vec_cap * std::mem::size_of::<String>()
+ f1;
assert_eq!(
after - before,
expected_inner,
"CodeGraph::heap_bytes must count ConfidenceMetadata inner Vec<String> capacity exactly",
);
}
// Sanity bounds for heap_bytes(): an empty graph reports a sub-100-MiB figure,
// and populating the interner, file registry, arena, and indices strictly
// increases the reported size without blowing past the same upper bound.
#[test]
fn test_codegraph_heap_bytes_grows_with_content() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let empty = CodeGraph::new();
let empty_bytes = empty.heap_bytes();
assert!(
empty_bytes < 100 * 1024 * 1024,
"empty graph heap_bytes should be <100 MiB, got {empty_bytes}"
);
let mut graph = CodeGraph::new();
// Touch every major store per iteration: strings, files, nodes, indices.
for i in 0..32u32 {
let name = format!("sym_{i}");
let qual = format!("module::sym_{i}");
let file = format!("file_{i}.rs");
let name_id = graph.strings_mut().intern(&name).unwrap();
let qual_id = graph.strings_mut().intern(&qual).unwrap();
let file_id = graph.files_mut().register(Path::new(&file)).unwrap();
let entry =
NodeEntry::new(NodeKind::Function, name_id, file_id).with_qualified_name(qual_id);
let node_id = graph.nodes_mut().alloc(entry).unwrap();
graph
.indices_mut()
.add(node_id, NodeKind::Function, name_id, Some(qual_id), file_id);
}
let populated_bytes = graph.heap_bytes();
assert!(
populated_bytes > 0,
"populated graph should report nonzero heap bytes"
);
assert!(
populated_bytes > empty_bytes,
"populated graph ({populated_bytes}) should exceed empty graph ({empty_bytes})"
);
// The upper bound guards against accidental double-counting in heap_bytes.
assert!(
populated_bytes < 100 * 1024 * 1024,
"test graph heap_bytes should be <100 MiB, got {populated_bytes}"
);
}
#[test]
fn test_concurrent_code_graph_new() {
    // A freshly constructed concurrent graph starts at epoch zero.
    assert_eq!(ConcurrentCodeGraph::new().epoch(), 0);
}
#[test]
fn test_concurrent_code_graph_default() {
    // Default must agree with new(): both start at epoch zero.
    assert_eq!(ConcurrentCodeGraph::default().epoch(), 0);
}
#[test]
fn test_concurrent_code_graph_from_graph() {
    // Wrapping an existing CodeGraph must carry its epoch over unchanged.
    let mut inner = CodeGraph::new();
    inner.set_epoch(10);
    let wrapped = ConcurrentCodeGraph::from_graph(inner);
    assert_eq!(wrapped.epoch(), 10);
}
#[test]
fn test_concurrent_code_graph_read() {
    // A read guard exposes the wrapped graph's state directly.
    let graph = ConcurrentCodeGraph::new();
    let view = graph.read();
    assert_eq!(view.epoch(), 0);
    assert_eq!(view.nodes().len(), 0);
}
#[test]
fn test_concurrent_code_graph_write_increments_epoch() {
    let graph = ConcurrentCodeGraph::new();
    assert_eq!(graph.epoch(), 0);
    // The first write acquisition bumps the epoch at lock time...
    let first = graph.write();
    assert_eq!(first.epoch(), 1);
    drop(first);
    assert_eq!(graph.epoch(), 1);
    // ...and every further write acquisition bumps it again, even if unused.
    drop(graph.write());
    assert_eq!(graph.epoch(), 2);
}
#[test]
fn test_concurrent_code_graph_snapshot() {
    let graph = ConcurrentCodeGraph::new();
    // One write cycle bumps the epoch; a snapshot taken after must see it.
    drop(graph.write());
    assert_eq!(graph.snapshot().epoch(), 1);
}
#[test]
fn test_concurrent_code_graph_try_read() {
    // With no writer active, the non-blocking read acquisition must succeed.
    let graph = ConcurrentCodeGraph::new();
    assert!(graph.try_read().is_some());
}
#[test]
fn test_concurrent_code_graph_try_write() {
    let graph = ConcurrentCodeGraph::new();
    // Uncontended try_write must succeed and bump the epoch as a side effect.
    let maybe_guard = graph.try_write();
    assert!(maybe_guard.is_some());
    assert_eq!(graph.epoch(), 1);
}
#[test]
fn test_concurrent_code_graph_debug() {
    // Debug output must name the wrapper type and expose the epoch.
    let graph = ConcurrentCodeGraph::new();
    let rendered = format!("{graph:?}");
    for needle in ["ConcurrentCodeGraph", "epoch"] {
        assert!(rendered.contains(needle));
    }
}
#[test]
fn test_graph_snapshot_accessors() {
    // Smoke test: every accessor on a snapshot of an empty graph is callable.
    let graph = CodeGraph::new();
    let snapshot = graph.snapshot();
    let _ = snapshot.nodes();
    let _ = snapshot.edges();
    let _ = snapshot.strings();
    let _ = snapshot.files();
    let _ = snapshot.indices();
    let _ = snapshot.epoch();
}
#[test]
fn test_graph_snapshot_epoch_matches() {
    // epoch_matches is an exact equality test against the captured epoch.
    let graph = CodeGraph::new();
    let snapshot = graph.snapshot();
    assert!(snapshot.epoch_matches(0));
    assert!(!snapshot.epoch_matches(1));
}
#[test]
fn test_graph_snapshot_clone() {
    // Cloning a snapshot preserves the captured epoch.
    let graph = CodeGraph::new();
    let original = graph.snapshot();
    let copy = original.clone();
    assert_eq!(original.epoch(), copy.epoch());
}
#[test]
fn test_graph_snapshot_debug() {
    // Snapshot Debug output must name the type and expose the epoch.
    let graph = CodeGraph::new();
    let snapshot = graph.snapshot();
    let rendered = format!("{snapshot:?}");
    for needle in ["GraphSnapshot", "epoch"] {
        assert!(rendered.contains(needle));
    }
}
#[test]
fn test_multiple_readers() {
    // Several read guards may coexist; each observes the same state.
    let graph = ConcurrentCodeGraph::new();
    let guards = [graph.read(), graph.read(), graph.read()];
    for guard in &guards {
        assert_eq!(guard.epoch(), 0);
    }
}
#[test]
fn test_code_graph_clone() {
    // A clone carries the bumped epoch with it.
    let mut original = CodeGraph::new();
    original.bump_epoch();
    let duplicate = original.clone();
    assert_eq!(duplicate.epoch(), 1);
}
#[test]
fn test_epoch_wrapping() {
    // bump_epoch must wrap around at u64::MAX rather than panic on overflow.
    let mut graph = CodeGraph::new();
    graph.set_epoch(u64::MAX);
    assert_eq!(graph.bump_epoch(), 0);
}
// resolve_symbol_strict must resolve an indexed node by both its qualified
// name and its bare name, and miss on names that were never indexed.
#[test]
fn test_snapshot_resolve_symbol() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let name_id = graph.strings_mut().intern("test_func").unwrap();
let qual_name_id = graph.strings_mut().intern("module::test_func").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let entry =
NodeEntry::new(NodeKind::Function, name_id, file_id).with_qualified_name(qual_name_id);
let node_id = graph.nodes_mut().alloc(entry).unwrap();
// Register both the bare and qualified names in the auxiliary index,
// which is what symbol resolution consults.
graph.indices_mut().add(
node_id,
NodeKind::Function,
name_id,
Some(qual_name_id),
file_id,
);
let snapshot = graph.snapshot();
let found = resolve_symbol_strict(&snapshot, "module::test_func");
assert_eq!(found, Some(node_id));
// Bare-name lookup resolves to the same node.
let found2 = resolve_symbol_strict(&snapshot, "test_func");
assert_eq!(found2, Some(node_id));
assert!(resolve_symbol_strict(&snapshot, "nonexistent").is_none());
}
// find_by_pattern does substring matching over indexed names: it returns all
// hits, a single hit, or an empty Vec when nothing matches.
#[test]
fn test_snapshot_find_by_pattern() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
// Two names share the "bar" substring; the third matches only "qux".
let name1 = graph.strings_mut().intern("foo_bar").unwrap();
let name2 = graph.strings_mut().intern("baz_bar").unwrap();
let name3 = graph.strings_mut().intern("qux_test").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let node1 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name1, file_id))
.unwrap();
let node2 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name2, file_id))
.unwrap();
let node3 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name3, file_id))
.unwrap();
graph
.indices_mut()
.add(node1, NodeKind::Function, name1, None, file_id);
graph
.indices_mut()
.add(node2, NodeKind::Function, name2, None, file_id);
graph
.indices_mut()
.add(node3, NodeKind::Function, name3, None, file_id);
let snapshot = graph.snapshot();
let matches = snapshot.find_by_pattern("bar");
assert_eq!(matches.len(), 2);
assert!(matches.contains(&node1));
assert!(matches.contains(&node2));
let matches = snapshot.find_by_pattern("qux");
assert_eq!(matches.len(), 1);
assert_eq!(matches[0], node3);
let matches = snapshot.find_by_pattern("nonexistent");
assert!(matches.is_empty());
}
// find_by_pattern must suppress synthetic nodes by default and only surface
// them via find_by_pattern_with_options(_, true). Two suppression mechanisms
// are exercised: the explicit metadata bit (NeedTags@NNN nodes) and the
// name-shape fallback for "<field:...>" names.
#[test]
fn synthetic_nodes_are_filtered_from_find_by_pattern_default() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
// Two real symbols and three synthetic ones, all containing "NeedTags".
let real_property = graph
.strings_mut()
.intern("main.SelectorSource.NeedTags")
.unwrap();
let real_local_var = graph.strings_mut().intern("NeedTags").unwrap();
let synthetic_field = graph
.strings_mut()
.intern("<field:selector.NeedTags>")
.unwrap();
let synthetic_offset_a = graph.strings_mut().intern("NeedTags@469").unwrap();
let synthetic_offset_b = graph.strings_mut().intern("NeedTags@508").unwrap();
let file_id = graph.files_mut().register(Path::new("main.go")).unwrap();
let prop_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Property, real_property, file_id))
.unwrap();
let local_var_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Variable, real_local_var, file_id))
.unwrap();
let syn_field_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Variable, synthetic_field, file_id))
.unwrap();
let syn_a_id = graph
.nodes_mut()
.alloc(NodeEntry::new(
NodeKind::Variable,
synthetic_offset_a,
file_id,
))
.unwrap();
let syn_b_id = graph
.nodes_mut()
.alloc(NodeEntry::new(
NodeKind::Variable,
synthetic_offset_b,
file_id,
))
.unwrap();
// All five are indexed, so filtering cannot be explained by index misses.
graph
.indices_mut()
.add(prop_id, NodeKind::Property, real_property, None, file_id);
graph.indices_mut().add(
local_var_id,
NodeKind::Variable,
real_local_var,
None,
file_id,
);
graph.indices_mut().add(
syn_field_id,
NodeKind::Variable,
synthetic_field,
None,
file_id,
);
graph.indices_mut().add(
syn_a_id,
NodeKind::Variable,
synthetic_offset_a,
None,
file_id,
);
graph.indices_mut().add(
syn_b_id,
NodeKind::Variable,
synthetic_offset_b,
None,
file_id,
);
// Only the @offset nodes carry the explicit synthetic metadata bit; the
// "<field:...>" node is expected to be caught by its name shape alone.
graph.macro_metadata_mut().mark_synthetic(syn_a_id);
graph.macro_metadata_mut().mark_synthetic(syn_b_id);
let snapshot = graph.snapshot();
let matches = snapshot.find_by_pattern("NeedTags");
assert!(matches.contains(&prop_id), "Property must be surfaced");
assert!(
matches.contains(&local_var_id),
"real local var must be surfaced"
);
assert!(
!matches.contains(&syn_field_id),
"<field:...> synthetic must be suppressed (name-shape fallback)"
);
assert!(
!matches.contains(&syn_a_id),
"NeedTags@469 must be suppressed (metadata bit)"
);
assert!(
!matches.contains(&syn_b_id),
"NeedTags@508 must be suppressed (metadata bit)"
);
assert_eq!(matches.len(), 2, "exactly Property + local var, no leakage");
// Opting in with include_synthetic = true surfaces all five nodes.
let all_matches = snapshot.find_by_pattern_with_options("NeedTags", true);
assert_eq!(
all_matches.len(),
5,
"include_synthetic surfaces everything"
);
assert!(all_matches.contains(&prop_id));
assert!(all_matches.contains(&local_var_id));
assert!(all_matches.contains(&syn_field_id));
assert!(all_matches.contains(&syn_a_id));
assert!(all_matches.contains(&syn_b_id));
// is_node_synthetic must agree with the filtering decisions above.
assert!(snapshot.is_node_synthetic(syn_field_id));
assert!(snapshot.is_node_synthetic(syn_a_id));
assert!(snapshot.is_node_synthetic(syn_b_id));
assert!(!snapshot.is_node_synthetic(prop_id));
assert!(!snapshot.is_node_synthetic(local_var_id));
}
// find_by_exact_name must match byte-for-byte against a node's name or
// qualified name: no substring/prefix matches, and synthetic nodes excluded.
#[test]
fn find_by_exact_name_aligns_with_planner_name_predicate() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
// A qualified property, a bare local var, two synthetic variants, and a
// superstring name ("NeedTagsHelper") that exact matching must not hit.
let property_qname = graph
.strings_mut()
.intern("main.SelectorSource.NeedTags")
.unwrap();
let local_var_name = graph.strings_mut().intern("NeedTags").unwrap();
let synthetic_field_name = graph
.strings_mut()
.intern("<field:selector.NeedTags>")
.unwrap();
let synthetic_offset_name = graph.strings_mut().intern("NeedTags@469").unwrap();
let unrelated_name = graph.strings_mut().intern("NeedTagsHelper").unwrap();
let file_id = graph.files_mut().register(Path::new("main.go")).unwrap();
let prop_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Property, property_qname, file_id))
.unwrap();
let local_var_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Variable, local_var_name, file_id))
.unwrap();
let syn_field_id = graph
.nodes_mut()
.alloc(NodeEntry::new(
NodeKind::Variable,
synthetic_field_name,
file_id,
))
.unwrap();
let syn_offset_id = graph
.nodes_mut()
.alloc(NodeEntry::new(
NodeKind::Variable,
synthetic_offset_name,
file_id,
))
.unwrap();
let unrelated_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, unrelated_name, file_id))
.unwrap();
// Index all five so misses below are matching decisions, not index gaps.
graph
.indices_mut()
.add(prop_id, NodeKind::Property, property_qname, None, file_id);
graph.indices_mut().add(
local_var_id,
NodeKind::Variable,
local_var_name,
None,
file_id,
);
graph.indices_mut().add(
syn_field_id,
NodeKind::Variable,
synthetic_field_name,
None,
file_id,
);
graph.indices_mut().add(
syn_offset_id,
NodeKind::Variable,
synthetic_offset_name,
None,
file_id,
);
graph.indices_mut().add(
unrelated_id,
NodeKind::Function,
unrelated_name,
None,
file_id,
);
// Only the @offset node gets the explicit synthetic metadata bit.
graph.macro_metadata_mut().mark_synthetic(syn_offset_id);
let snapshot = graph.snapshot();
let exact = snapshot.find_by_exact_name("NeedTags");
assert_eq!(
exact,
vec![local_var_id],
"exact match must be byte-for-byte against entry.name / qualified_name and exclude synthetics"
);
// Qualified names participate in exact matching too.
let qualified = snapshot.find_by_exact_name("main.SelectorSource.NeedTags");
assert_eq!(qualified, vec![prop_id]);
assert!(
snapshot
.find_by_exact_name("NeedTagsHelper")
.contains(&unrelated_id)
);
assert!(
!snapshot
.find_by_exact_name("NeedTags")
.contains(&unrelated_id),
"exact 'NeedTags' must not match 'NeedTagsHelper'"
);
// A never-interned string must yield an empty result, not a panic.
assert!(
snapshot
.find_by_exact_name("ThisStringIsNotInterned")
.is_empty()
);
}
// get_callees follows outbound Calls edges: a caller with two call edges
// reports both targets, and a leaf node reports none.
#[test]
fn test_snapshot_get_callees() {
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let caller_name = graph.strings_mut().intern("caller").unwrap();
let callee1_name = graph.strings_mut().intern("callee1").unwrap();
let callee2_name = graph.strings_mut().intern("callee2").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let caller_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, caller_name, file_id))
.unwrap();
let callee1_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, callee1_name, file_id))
.unwrap();
let callee2_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, callee2_name, file_id))
.unwrap();
// caller -> callee1 and caller -> callee2.
graph.edges_mut().add_edge(
caller_id,
callee1_id,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
graph.edges_mut().add_edge(
caller_id,
callee2_id,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
let snapshot = graph.snapshot();
let callees = snapshot.get_callees(caller_id);
assert_eq!(callees.len(), 2);
assert!(callees.contains(&callee1_id));
assert!(callees.contains(&callee2_id));
// A node with no outbound Calls edges has no callees.
let callees = snapshot.get_callees(callee1_id);
assert!(callees.is_empty());
}
// get_callers is the reverse of get_callees: a callee with two inbound Calls
// edges reports both sources, and a node nobody calls reports none.
#[test]
fn test_snapshot_get_callers() {
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let caller1_name = graph.strings_mut().intern("caller1").unwrap();
let caller2_name = graph.strings_mut().intern("caller2").unwrap();
let callee_name = graph.strings_mut().intern("callee").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let caller1_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, caller1_name, file_id))
.unwrap();
let caller2_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, caller2_name, file_id))
.unwrap();
let callee_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, callee_name, file_id))
.unwrap();
// caller1 -> callee and caller2 -> callee.
graph.edges_mut().add_edge(
caller1_id,
callee_id,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
graph.edges_mut().add_edge(
caller2_id,
callee_id,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
let snapshot = graph.snapshot();
let callers = snapshot.get_callers(callee_id);
assert_eq!(callers.len(), 2);
assert!(callers.contains(&caller1_id));
assert!(callers.contains(&caller2_id));
// A node with no inbound Calls edges has no callers.
let callers = snapshot.get_callers(caller1_id);
assert!(callers.is_empty());
}
// candidate_nodes collects every indexed node sharing a name, across node
// kinds (Function and Method here share the interned name "test").
#[test]
fn test_snapshot_find_symbol_candidates() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let symbol_name = graph.strings_mut().intern("test").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
// Two nodes of different kinds reuse the same interned name id.
let node1 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, symbol_name, file_id))
.unwrap();
let node2 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Method, symbol_name, file_id))
.unwrap();
let other_name = graph.strings_mut().intern("other").unwrap();
let node3 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, other_name, file_id))
.unwrap();
graph
.indices_mut()
.add(node1, NodeKind::Function, symbol_name, None, file_id);
graph
.indices_mut()
.add(node2, NodeKind::Method, symbol_name, None, file_id);
graph
.indices_mut()
.add(node3, NodeKind::Function, other_name, None, file_id);
let snapshot = graph.snapshot();
let matches = candidate_nodes(&snapshot, "test");
assert_eq!(matches.len(), 2);
assert!(matches.contains(&node1));
assert!(matches.contains(&node2));
let matches = candidate_nodes(&snapshot, "other");
assert_eq!(matches.len(), 1);
assert_eq!(matches[0], node3);
let matches = candidate_nodes(&snapshot, "nonexistent");
assert!(matches.is_empty());
}
// iter_nodes yields an (id, entry) pair for every allocated node. Note the
// nodes are never registered in the auxiliary indices: iteration reads the
// arena directly.
#[test]
fn test_snapshot_iter_nodes() {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let name1 = graph.strings_mut().intern("func1").unwrap();
let name2 = graph.strings_mut().intern("func2").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let node1 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name1, file_id))
.unwrap();
let node2 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name2, file_id))
.unwrap();
let snapshot = graph.snapshot();
let snapshot_nodes: Vec<_> = snapshot.iter_nodes().collect();
assert_eq!(snapshot_nodes.len(), 2);
// Both allocated ids appear; ordering is not asserted.
let node_ids: Vec<_> = snapshot_nodes.iter().map(|(id, _)| *id).collect();
assert!(node_ids.contains(&node1));
assert!(node_ids.contains(&node2));
}
// iter_edges yields (source, target, kind) triples and preserves the payload
// of struct-variant edge kinds (Calls' argument_count / is_async fields).
#[test]
fn test_snapshot_iter_edges() {
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let name1 = graph.strings_mut().intern("func1").unwrap();
let name2 = graph.strings_mut().intern("func2").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let node1 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name1, file_id))
.unwrap();
let node2 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name2, file_id))
.unwrap();
graph.edges_mut().add_edge(
node1,
node2,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
let snapshot = graph.snapshot();
let edges: Vec<_> = snapshot.iter_edges().collect();
assert_eq!(edges.len(), 1);
// The single edge round-trips source, target, and kind payload intact.
let (src, tgt, kind) = &edges[0];
assert_eq!(*src, node1);
assert_eq!(*tgt, node2);
assert!(matches!(
kind,
EdgeKind::Calls {
argument_count: 0,
is_async: false
}
));
}
// get_node returns Some(entry) for a live id and None for NodeId::INVALID.
#[test]
fn test_snapshot_get_node() {
use crate::graph::unified::node::NodeId;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let name = graph.strings_mut().intern("test_func").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let node_id = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name, file_id))
.unwrap();
let snapshot = graph.snapshot();
let entry = snapshot.get_node(node_id);
assert!(entry.is_some());
assert_eq!(entry.unwrap().kind, NodeKind::Function);
// The sentinel id must miss rather than panic.
let invalid_id = NodeId::INVALID;
assert!(snapshot.get_node(invalid_id).is_none());
}
#[test]
fn test_snapshot_query_empty_graph() {
    use crate::graph::unified::node::NodeId;
    // Every query entry point must degrade gracefully on an empty graph.
    let graph = CodeGraph::new();
    let snapshot = graph.snapshot();
    assert!(resolve_symbol_strict(&snapshot, "test").is_none());
    assert!(snapshot.find_by_pattern("test").is_empty());
    assert!(candidate_nodes(&snapshot, "test").is_empty());
    // A structurally valid id that was never allocated must also be safe.
    let unallocated = NodeId::new(0, 1);
    assert!(snapshot.get_callees(unallocated).is_empty());
    assert!(snapshot.get_callers(unallocated).is_empty());
    assert_eq!(snapshot.iter_nodes().count(), 0);
    assert_eq!(snapshot.iter_edges().count(), 0);
}
// get_callees filters by edge kind (only Calls count), while iter_edges
// reports every edge regardless of kind.
#[test]
fn test_snapshot_edge_filtering_by_kind() {
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let name1 = graph.strings_mut().intern("func1").unwrap();
let name2 = graph.strings_mut().intern("func2").unwrap();
let file_id = graph.files_mut().register(Path::new("test.rs")).unwrap();
let node1 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name1, file_id))
.unwrap();
let node2 = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, name2, file_id))
.unwrap();
// Same node pair, two edge kinds: one Calls and one References.
graph.edges_mut().add_edge(
node1,
node2,
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_id,
);
graph
.edges_mut()
.add_edge(node1, node2, EdgeKind::References, file_id);
let snapshot = graph.snapshot();
// Only the Calls edge qualifies as a callee relationship.
let callees = snapshot.get_callees(node1);
assert_eq!(callees.len(), 1);
assert_eq!(callees[0], node2);
// The raw edge iterator still reports both edges.
let edges: Vec<_> = snapshot.iter_edges().collect();
assert_eq!(edges.len(), 2);
}
// Test fixture: registers one file per input path and allocates one Function
// node per file (all sharing the interned placeholder name "sym"). Returns
// the graph plus parallel FileId/NodeId vectors so callers can wire Imports
// edges between specific files by index.
#[cfg(test)]
fn build_import_test_graph(files: &[&str]) -> (CodeGraph, Vec<FileId>, Vec<NodeId>) {
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let placeholder_name = graph.strings_mut().intern("sym").unwrap();
let mut file_ids = Vec::with_capacity(files.len());
let mut node_ids = Vec::with_capacity(files.len());
for path in files {
let file_id = graph.files_mut().register(Path::new(path)).unwrap();
let node_id = graph
.nodes_mut()
.alloc(NodeEntry::new(
NodeKind::Function,
placeholder_name,
file_id,
))
.unwrap();
file_ids.push(file_id);
node_ids.push(node_id);
}
// Bring derived indices in sync with the freshly allocated nodes.
graph.rebuild_indices();
(graph, file_ids, node_ids)
}
#[cfg(test)]
fn add_import_edge(
    graph: &mut CodeGraph,
    source_node: NodeId,
    target_node: NodeId,
    importer_file: FileId,
) {
    // Every import in these tests is a plain (non-aliased, non-wildcard) one.
    let kind = EdgeKind::Imports {
        alias: None,
        is_wildcard: false,
    };
    graph
        .edges_mut()
        .add_edge(source_node, target_node, kind, importer_file);
}
#[test]
fn reverse_import_index_empty_graph_returns_empty() {
    // A file with zero inbound Imports edges has no importers.
    let (graph, files, _nodes) = build_import_test_graph(&["only.rs"]);
    let importers = graph.reverse_import_index(files[0]);
    assert!(importers.is_empty());
}
#[test]
fn reverse_import_index_single_importer() {
    let (mut graph, files, nodes) = build_import_test_graph(&["a.rs", "b.rs"]);
    let a = files[0];
    let b = files[1];
    // a.rs imports b.rs via a node-level Imports edge recorded against a.
    add_import_edge(&mut graph, nodes[0], nodes[1], a);
    assert_eq!(graph.reverse_import_index(b), vec![a]);
    assert!(
        graph.reverse_import_index(a).is_empty(),
        "A has no inbound Imports edges; reverse index must be empty"
    );
}
#[test]
fn reverse_import_index_multiple_importers_deduped_and_sorted() {
    let (mut graph, files, nodes) = build_import_test_graph(&["a.rs", "b.rs", "c.rs", "d.rs"]);
    let (a, b, c, d) = (files[0], files[1], files[2], files[3]);
    // Three distinct importers of d, with the a -> d edge deliberately duplicated.
    for &(src, file) in &[(0usize, a), (1, b), (2, c), (0, a)] {
        add_import_edge(&mut graph, nodes[src], nodes[3], file);
    }
    let importers_of_d = graph.reverse_import_index(d);
    // Duplicates must collapse...
    assert_eq!(importers_of_d, vec![a, b, c]);
    // ...and the result must already be in sorted order.
    let mut resorted = importers_of_d.clone();
    resorted.sort();
    assert_eq!(importers_of_d, resorted);
}
#[test]
fn reverse_import_index_filters_non_import_edges() {
    let (mut graph, files, nodes) = build_import_test_graph(&["a.rs", "b.rs"]);
    let (a, b) = (files[0], files[1]);
    // Only non-Imports edges connect a's node to b's node.
    let call_kind = EdgeKind::Calls {
        argument_count: 0,
        is_async: false,
    };
    graph.edges_mut().add_edge(nodes[0], nodes[1], call_kind, a);
    graph
        .edges_mut()
        .add_edge(nodes[0], nodes[1], EdgeKind::References, a);
    assert!(
        graph.reverse_import_index(b).is_empty(),
        "non-Imports edges must not register as importers"
    );
}
#[test]
fn reverse_import_index_elides_self_imports() {
    use crate::graph::unified::node::NodeKind;
    use crate::graph::unified::storage::arena::NodeEntry;
    let (mut graph, files, nodes) = build_import_test_graph(&["a.rs"]);
    let a = files[0];
    // Allocate a second node living in the same file a.
    let name2 = graph.strings_mut().intern("sym2").unwrap();
    let second_in_a = graph
        .nodes_mut()
        .alloc(NodeEntry::new(NodeKind::Function, name2, a))
        .unwrap();
    graph.rebuild_indices();
    // An Imports edge whose source and target both live in file a is a
    // self-import and must not appear in the reverse index.
    add_import_edge(&mut graph, second_in_a, nodes[0], a);
    assert!(
        graph.reverse_import_index(a).is_empty(),
        "self-imports must be elided from reverse index"
    );
}
#[test]
fn reverse_import_index_mixed_edge_kinds_counts_only_imports() {
    let (mut graph, files, nodes) = build_import_test_graph(&["a.rs", "b.rs"]);
    let (a, b) = (files[0], files[1]);
    // One genuine import plus an unrelated Calls edge between the same nodes.
    add_import_edge(&mut graph, nodes[0], nodes[1], a);
    let noise = EdgeKind::Calls {
        argument_count: 0,
        is_async: false,
    };
    graph.edges_mut().add_edge(nodes[0], nodes[1], noise, a);
    // Only the Imports edge contributes to the reverse index.
    assert_eq!(graph.reverse_import_index(b), vec![a]);
}
#[test]
fn reverse_import_index_uninitialized_file_returns_empty() {
    let (graph, _, _) = build_import_test_graph(&["a.rs"]);
    // A FileId the registry has never seen must be handled gracefully.
    let bogus = FileId::new(9999);
    assert!(
        graph.reverse_import_index(bogus).is_empty(),
        "unknown FileId must return empty Vec without panicking"
    );
}
// Once the source node of an Imports edge is tombstoned in the arena, the
// reverse import index must silently drop that edge rather than report a
// dangling importer (or panic).
#[test]
fn reverse_import_index_skips_tombstoned_source_nodes() {
let (mut graph, files, nodes) = build_import_test_graph(&["a.rs", "b.rs"]);
let (a, b) = (files[0], files[1]);
add_import_edge(&mut graph, nodes[0], nodes[1], a);
// Sanity: the import registers while the source node is still live.
assert_eq!(graph.reverse_import_index(b), vec![a]);
// Tombstone the importing node.
let removed = graph.nodes_mut().remove(nodes[0]);
assert!(
removed.is_some(),
"arena.remove must succeed for a live node"
);
assert!(
graph.nodes().get(nodes[0]).is_none(),
"tombstoned lookup must return None"
);
assert!(
graph.reverse_import_index(b).is_empty(),
"Imports edges whose source is tombstoned must be silently skipped"
);
}
// Test fixture: builds a graph with two files, `per_file` Function nodes in
// each (all named "sym", recorded in the registry bucket and the indices),
// a Calls chain n[i] -> n[i+1] inside each file, plus one cross-file Calls
// edge in each direction (a0 -> b0 attributed to file_a, b0 -> a0 to file_b).
// Total edges: 2 * (per_file - 1) intra-file + 2 cross-file.
// NOTE(review): indexes *_nodes[0] unconditionally, so this assumes
// per_file >= 1 — callers in this module pass 2 or 3.
fn seed_two_file_graph(
per_file: usize,
) -> (
CodeGraph,
crate::graph::unified::file::FileId,
crate::graph::unified::file::FileId,
Vec<NodeId>,
Vec<NodeId>,
) {
use crate::graph::unified::edge::EdgeKind;
use crate::graph::unified::node::NodeKind;
use crate::graph::unified::storage::arena::NodeEntry;
use std::path::Path;
let mut graph = CodeGraph::new();
let sym = graph.strings_mut().intern("sym").expect("intern");
let file_a = graph
.files_mut()
.register(Path::new("/tmp/remove_file_test/a.rs"))
.expect("register a");
let file_b = graph
.files_mut()
.register(Path::new("/tmp/remove_file_test/b.rs"))
.expect("register b");
let mut file_a_nodes = Vec::with_capacity(per_file);
let mut file_b_nodes = Vec::with_capacity(per_file);
// Each node is allocated, recorded in its file's bucket, and indexed.
for _ in 0..per_file {
let n = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, sym, file_a))
.expect("alloc a-node");
file_a_nodes.push(n);
graph.files_mut().record_node(file_a, n);
graph
.indices_mut()
.add(n, NodeKind::Function, sym, None, file_a);
}
for _ in 0..per_file {
let n = graph
.nodes_mut()
.alloc(NodeEntry::new(NodeKind::Function, sym, file_b))
.expect("alloc b-node");
file_b_nodes.push(n);
graph.files_mut().record_node(file_b, n);
graph
.indices_mut()
.add(n, NodeKind::Function, sym, None, file_b);
}
// Intra-file call chains: n[i] -> n[i+1] within each file.
for i in 0..per_file.saturating_sub(1) {
graph.edges_mut().add_edge(
file_a_nodes[i],
file_a_nodes[i + 1],
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_a,
);
graph.edges_mut().add_edge(
file_b_nodes[i],
file_b_nodes[i + 1],
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_b,
);
}
// Cross-file edges, one in each direction between the first nodes.
graph.edges_mut().add_edge(
file_a_nodes[0],
file_b_nodes[0],
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_a,
);
graph.edges_mut().add_edge(
file_b_nodes[0],
file_a_nodes[0],
EdgeKind::Calls {
argument_count: 0,
is_async: false,
},
file_b,
);
(graph, file_a, file_b, file_a_nodes, file_b_nodes)
}
#[test]
fn code_graph_remove_file_tombstones_all_per_file_nodes() {
    let (mut graph, file_a, _file_b, file_a_nodes, _file_b_nodes) = seed_two_file_graph(3);
    // remove_file drains file_a's bucket and reports exactly those nodes.
    let drained: std::collections::HashSet<NodeId> =
        graph.remove_file(file_a).into_iter().collect();
    let expected: std::collections::HashSet<NodeId> = file_a_nodes.iter().copied().collect();
    assert_eq!(
        drained, expected,
        "remove_file must return exactly the file_a nodes drained from the bucket"
    );
    // And each drained node must now be a tombstone in the arena.
    for nid in &file_a_nodes {
        assert!(
            graph.nodes().get(*nid).is_none(),
            "node {nid:?} from removed file must be tombstoned in arena"
        );
    }
}
// Removing a file must invalidate every edge touching its nodes, in both the
// forward and reverse stores. Seed (per_file = 3): 2 intra-A + 2 intra-B
// chain edges + 2 cross edges = 6; after remove_file(file_a) only the 2
// intra-B edges may survive in each direction.
#[test]
fn code_graph_remove_file_invalidates_all_edges_sourced_or_targeted_at_removed_nodes() {
use crate::graph::unified::edge::EdgeKind;
let (mut graph, file_a, _file_b, file_a_nodes, file_b_nodes) = seed_two_file_graph(3);
let before_delta = graph.edges().stats().forward.delta_edge_count;
assert_eq!(
before_delta, 6,
"seed must produce 2 intra-A + 2 intra-B + 2 cross edges"
);
let _ = graph.remove_file(file_a);
let after_delta_forward = graph.edges().stats().forward.delta_edge_count;
assert_eq!(
after_delta_forward, 2,
"only intra-B forward edges must remain after removing file_a"
);
let after_delta_reverse = graph.edges().stats().reverse.delta_edge_count;
assert_eq!(
after_delta_reverse, 2,
"only intra-B reverse edges must remain after removing file_a"
);
// Spot-check the cross edges specifically: b0 -> a0 must be gone even
// though b0 itself survives.
let b0 = file_b_nodes[0];
let a0 = file_a_nodes[0];
let remaining_from_b0: Vec<_> = graph
.edges()
.edges_from(b0)
.into_iter()
.filter(|e| {
matches!(
e.kind,
EdgeKind::Calls {
argument_count: 0,
is_async: false
}
)
})
.collect();
assert!(
!remaining_from_b0.iter().any(|e| e.target == a0),
"edge b0 -> a0 must be gone after remove_file(file_a)"
);
// And nothing at all may still target the tombstoned a0.
let remaining_to_a0: Vec<_> = graph.edges().edges_to(a0).into_iter().collect();
assert!(
remaining_to_a0.is_empty(),
"every edge targeting the tombstoned a0 must be gone"
);
}
#[test]
fn code_graph_remove_file_drops_file_registry_entry() {
    let (mut graph, file_a, _file_b, _, _) = seed_two_file_graph(2);
    // Preconditions: the registry resolves file_a and its bucket is populated.
    assert!(
        graph.files().resolve(file_a).is_some(),
        "seed registered file_a"
    );
    assert!(
        !graph.files().nodes_for_file(file_a).is_empty(),
        "seed populated the file_a bucket"
    );
    let _ = graph.remove_file(file_a);
    // Afterwards the registry must have forgotten the file entirely.
    assert!(
        graph.files().resolve(file_a).is_none(),
        "FileRegistry::resolve must return None after remove_file"
    );
    assert!(
        graph.files().nodes_for_file(file_a).is_empty(),
        "per-file bucket for file_a must be drained"
    );
}
// remove_file on a FileId the registry never issued must be a no-op: empty
// return value and no disturbance to node, edge, or file counters.
#[test]
fn code_graph_remove_file_is_idempotent_on_unknown_file() {
use crate::graph::unified::file::FileId;
let (mut graph, _file_a, _file_b, _, _) = seed_two_file_graph(2);
// Capture a full before-picture of the graph's counters.
let nodes_before = graph.nodes().len();
let delta_fwd_before = graph.edges().stats().forward.delta_edge_count;
let delta_rev_before = graph.edges().stats().reverse.delta_edge_count;
let files_before = graph.files().len();
let bogus = FileId::new(9999);
let returned = graph.remove_file(bogus);
assert!(
returned.is_empty(),
"remove_file on unknown FileId must return an empty Vec"
);
// Nothing else may have changed.
assert_eq!(graph.nodes().len(), nodes_before, "arena count unchanged");
assert_eq!(
graph.edges().stats().forward.delta_edge_count,
delta_fwd_before,
"forward delta unchanged"
);
assert_eq!(
graph.edges().stats().reverse.delta_edge_count,
delta_rev_before,
"reverse delta unchanged"
);
assert_eq!(graph.files().len(), files_before, "file count unchanged");
}
// remove_file must also clear the FileSegmentTable entry for the removed
// file. The segment is installed manually here over the contiguous arena
// index span occupied by file_a's nodes.
#[test]
fn code_graph_remove_file_clears_file_segments_entry() {
use crate::graph::unified::storage::segment::FileSegmentTable;
let (mut graph, file_a, _file_b, file_a_nodes, _file_b_nodes) = seed_two_file_graph(3);
// Derive the [first_index, last_index] span covered by file_a's nodes.
let first_index = file_a_nodes
.iter()
.map(|n| n.index())
.min()
.expect("per_file = 3");
let last_index = file_a_nodes
.iter()
.map(|n| n.index())
.max()
.expect("per_file = 3");
let slot_count = last_index - first_index + 1;
let table: &mut FileSegmentTable = graph.file_segments_mut();
table.record_range(file_a, first_index, slot_count);
assert!(
graph.file_segments().get(file_a).is_some(),
"seed must install a segment for file_a before remove_file"
);
let _ = graph.remove_file(file_a);
assert!(
graph.file_segments().get(file_a).is_none(),
"remove_file must clear the FileSegmentTable entry for file_a"
);
}
// A second remove_file on the same (already removed) file must be a no-op:
// empty return value and unchanged node, edge, and file counters.
#[test]
fn code_graph_remove_file_repeated_calls_are_idempotent() {
let (mut graph, file_a, _file_b, file_a_nodes, _file_b_nodes) = seed_two_file_graph(3);
// First removal does real work and reports all of file_a's nodes.
let first = graph.remove_file(file_a);
assert_eq!(first.len(), file_a_nodes.len());
// Capture the post-removal state as the fixed point to compare against.
let nodes_after = graph.nodes().len();
let delta_fwd_after = graph.edges().stats().forward.delta_edge_count;
let delta_rev_after = graph.edges().stats().reverse.delta_edge_count;
let files_after = graph.files().len();
let second = graph.remove_file(file_a);
assert!(
second.is_empty(),
"second remove_file on the same file must return an empty Vec"
);
assert_eq!(graph.nodes().len(), nodes_after);
assert_eq!(
graph.edges().stats().forward.delta_edge_count,
delta_fwd_after
);
assert_eq!(
graph.edges().stats().reverse.delta_edge_count,
delta_rev_after
);
assert_eq!(graph.files().len(), files_after);
}
}