use super::super::bind::alias::AliasTable;
use super::super::bind::scope::ScopeArena;
use super::super::bind::shadow::ShadowTable;
use super::super::edge::BidirectionalEdgeStore;
use super::super::edge::delta::DeltaOp;
use super::super::node::NodeId;
use super::super::storage::arena::NodeArena;
use super::super::storage::indices::AuxiliaryIndices;
use super::super::storage::metadata::NodeMetadataStore;
use super::super::storage::node_provenance::NodeProvenanceStore;
use super::super::storage::registry::FileRegistry;
/// Uniform view over every store that holds [`NodeId`]s: enumerate all
/// referenced ids, or prune entries whose id fails a keep-predicate.
#[allow(dead_code)]
pub(crate) trait NodeIdBearing {
    /// Yields every `NodeId` currently referenced by this store.
    /// Duplicates may be yielded; callers dedupe as needed.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_>;
    /// Drops every entry whose `NodeId` is rejected by `keep`.
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool);
}
impl NodeIdBearing for NodeArena {
    /// Enumerates the id of every live arena slot.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.iter().map(|(node_id, _)| node_id))
    }

    /// Removes every slot whose id fails the `keep` predicate.
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        // Two phases: collect the rejected ids first, because removing
        // while iterating would invalidate the arena iterator.
        let doomed: Vec<NodeId> = self
            .iter()
            .map(|(node_id, _)| node_id)
            .filter(|&node_id| !keep(node_id))
            .collect();
        for node_id in doomed {
            let _ = self.remove(node_id);
        }
    }
}
impl NodeIdBearing for BidirectionalEdgeStore {
    /// Gathers every endpoint referenced by either edge direction.
    ///
    /// Delta-tier `Add` records contribute both endpoints. CSR-tier edges
    /// contribute their `target` ids only: the forward CSR's targets are
    /// edge destinations while the reverse CSR's targets are edge sources,
    /// so the two directions together cover both endpoints. Duplicates are
    /// not removed here; callers dedupe as needed.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        let mut endpoints: Vec<NodeId> = Vec::new();

        let forward = self.forward();
        for edge in forward.delta().iter() {
            if matches!(edge.op, DeltaOp::Add) {
                endpoints.extend([edge.source, edge.target]);
            }
        }
        if let Some(csr) = forward.csr() {
            for node_idx in 0..csr.node_count() {
                for edge_ref in csr.edges_of(node_idx as u32) {
                    endpoints.push(edge_ref.target);
                }
            }
        }
        // Release the forward view before touching the reverse one,
        // matching the original scoping of the two accesses.
        drop(forward);

        let reverse = self.reverse();
        for edge in reverse.delta().iter() {
            if matches!(edge.op, DeltaOp::Add) {
                endpoints.extend([edge.source, edge.target]);
            }
        }
        if let Some(csr) = reverse.csr() {
            for node_idx in 0..csr.node_count() {
                for edge_ref in csr.edges_of(node_idx as u32) {
                    endpoints.push(edge_ref.target);
                }
            }
        }
        drop(reverse);

        Box::new(endpoints.into_iter())
    }

    /// Drops every delta-tier edge with a rejected endpoint, in both
    /// directions. An edge survives only when both endpoints survive.
    /// (The immutable CSR tier is not rewritten here.)
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.forward_mut()
            .delta_mut()
            .retain_if(|edge| keep(edge.source) && keep(edge.target));
        self.reverse_mut()
            .delta_mut()
            .retain_if(|edge| keep(edge.source) && keep(edge.target));
    }
}
impl NodeIdBearing for AuxiliaryIndices {
    /// Surfaces every id held by any of the inner indices
    /// (by-kind / by-name / by-qualified-name / by-file).
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.iter_all_node_ids())
    }

    /// Prunes rejected ids from all inner indices at once.
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_node_ids(&keep);
    }
}
impl NodeIdBearing for NodeMetadataStore {
    /// Rebuilds each entry's `NodeId` from its stored (index, generation) key.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        let ids = self
            .iter_all()
            .map(|(key, _meta)| NodeId::new(key.0, key.1));
        Box::new(ids)
    }

    /// Keeps only the entries whose reconstructed `NodeId` passes `keep`.
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_entries(|slot, generation| keep(NodeId::new(slot, generation)));
    }
}
impl NodeIdBearing for NodeProvenanceStore {
    /// Enumerates ids via the store's own iterator, which reconstructs
    /// each `NodeId` from the slot's stored generation (see the
    /// `node_provenance_all_node_ids_uses_stored_generation_not_one` test).
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.iter_node_ids())
    }

    /// Clears rejected slots while preserving the store's dense slot
    /// layout (the tests assert `slot_count` is unchanged afterwards).
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_by_node(&keep);
    }
}
impl NodeIdBearing for ScopeArena {
    /// Yields the owning `node` of every live scope.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.iter_node_ids())
    }

    /// Frees every scope whose owning node is rejected; stale `ScopeId`
    /// handles to freed scopes then resolve to `None`.
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_by_node(&keep);
    }
}
impl NodeIdBearing for AliasTable {
    /// Each alias entry references exactly one node: its `import_node`.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.entries().iter().map(|entry| entry.import_node))
    }

    /// Drops entries whose `import_node` is rejected; the tests verify
    /// `resolve_alias` still works afterwards (scope index rebuilt).
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_by_node(&keep);
    }
}
impl NodeIdBearing for ShadowTable {
    /// Each shadow entry references exactly one defining `node`.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.entries().iter().map(|entry| entry.node))
    }

    /// Drops entries whose defining node is rejected; the tests verify
    /// `effective_binding` still resolves afterwards (chains rebuilt).
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_by_node(&keep);
    }
}
impl NodeIdBearing for FileRegistry {
    /// Yields every node recorded in any per-file bucket.
    fn all_node_ids(&self) -> Box<dyn Iterator<Item = NodeId> + '_> {
        Box::new(self.iter_all_bucket_node_ids())
    }

    /// Removes rejected nodes from the buckets only; file slots and
    /// registered paths are untouched (see the slot-preservation test).
    fn retain_nodes(&mut self, keep: &dyn Fn(NodeId) -> bool) {
        self.retain_nodes_in_buckets(&keep);
    }
}
// Compile-time proof that every sweepable store implements the trait —
// one assertion per store so a failure names the offending type clearly.
use static_assertions::assert_impl_all;

assert_impl_all!(NodeArena: NodeIdBearing);
assert_impl_all!(BidirectionalEdgeStore: NodeIdBearing);
assert_impl_all!(AuxiliaryIndices: NodeIdBearing);
assert_impl_all!(NodeMetadataStore: NodeIdBearing);
assert_impl_all!(NodeProvenanceStore: NodeIdBearing);
assert_impl_all!(ScopeArena: NodeIdBearing);
assert_impl_all!(AliasTable: NodeIdBearing);
assert_impl_all!(ShadowTable: NodeIdBearing);
assert_impl_all!(FileRegistry: NodeIdBearing);
#[cfg(test)]
mod tests {
    //! Exercises every `NodeIdBearing` impl: `all_node_ids` must surface
    //! every stored id, and `retain_nodes` must drop exactly the rejected
    //! entries while leaving survivors (and any slot accounting) intact.
    use super::super::super::edge::EdgeKind;
    use super::super::super::file::FileId;
    use super::super::super::node::NodeKind;
    use super::super::super::storage::arena::NodeEntry;
    use super::super::super::storage::metadata::{MacroNodeMetadata, NodeMetadata};
    use super::super::super::string::id::StringId;
    use super::*;
    use std::collections::HashSet;
    use std::path::Path;

    #[test]
    fn arena_all_node_ids_enumerates_every_live_slot() {
        let mut arena = NodeArena::new();
        let name = StringId::new(0);
        let file = FileId::new(1);
        let a = arena
            .alloc(NodeEntry::new(NodeKind::Function, name, file))
            .unwrap();
        let b = arena
            .alloc(NodeEntry::new(NodeKind::Method, name, file))
            .unwrap();
        let c = arena
            .alloc(NodeEntry::new(NodeKind::Struct, name, file))
            .unwrap();
        let ids: HashSet<NodeId> = arena.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([a, b, c]));
    }

    #[test]
    fn arena_retain_nodes_drops_rejected_live_slots() {
        let mut arena = NodeArena::new();
        let name = StringId::new(0);
        let file = FileId::new(1);
        let a = arena
            .alloc(NodeEntry::new(NodeKind::Function, name, file))
            .unwrap();
        let b = arena
            .alloc(NodeEntry::new(NodeKind::Method, name, file))
            .unwrap();
        let c = arena
            .alloc(NodeEntry::new(NodeKind::Struct, name, file))
            .unwrap();
        let keep_only_b = |id: NodeId| id == b;
        arena.retain_nodes(&keep_only_b);
        assert!(arena.get(a).is_none(), "A must be tombstoned");
        assert!(arena.get(b).is_some(), "B must remain");
        assert!(arena.get(c).is_none(), "C must be tombstoned");
        assert_eq!(arena.len(), 1);
        let survivors: HashSet<NodeId> = arena.all_node_ids().collect();
        assert_eq!(survivors, HashSet::from([b]));
    }

    /// Minimal edge payload; the sweep logic never inspects the kind.
    fn sample_edge_kind() -> EdgeKind {
        EdgeKind::Calls {
            argument_count: 0,
            is_async: false,
        }
    }

    #[test]
    fn edge_store_all_node_ids_includes_delta_sources_and_targets() {
        let store = BidirectionalEdgeStore::new();
        let src = NodeId::new(1, 1);
        let tgt = NodeId::new(2, 1);
        let file = FileId::new(1);
        store.add_edge(src, tgt, sample_edge_kind(), file);
        let ids: HashSet<NodeId> = store.all_node_ids().collect();
        assert!(ids.contains(&src), "forward source present");
        assert!(ids.contains(&tgt), "forward target present");
        assert_eq!(ids.len(), 2, "exactly the two endpoints, deduplicated");
    }

    #[test]
    fn edge_store_retain_nodes_drops_delta_edges_with_rejected_endpoints() {
        let mut store = BidirectionalEdgeStore::new();
        let keep_src = NodeId::new(10, 1);
        let keep_tgt = NodeId::new(11, 1);
        let drop_src = NodeId::new(12, 1);
        let drop_tgt = NodeId::new(13, 1);
        let file = FileId::new(1);
        store.add_edge(keep_src, keep_tgt, sample_edge_kind(), file);
        store.add_edge(drop_src, keep_tgt, sample_edge_kind(), file);
        store.add_edge(keep_src, drop_tgt, sample_edge_kind(), file);
        let live: HashSet<NodeId> = HashSet::from([keep_src, keep_tgt]);
        store.retain_nodes(&|id| live.contains(&id));
        let ids: HashSet<NodeId> = store.all_node_ids().collect();
        assert_eq!(ids, live, "only fully-live edges survive in either tier");
    }

    #[test]
    fn indices_all_node_ids_surfaces_every_inner_bucket_entry() {
        let mut indices = AuxiliaryIndices::new();
        let a = NodeId::new(1, 1);
        let b = NodeId::new(2, 1);
        let name = StringId::new(0);
        let qname = StringId::new(1);
        let file = FileId::new(1);
        indices.add(a, NodeKind::Function, name, Some(qname), file);
        indices.add(b, NodeKind::Method, name, Some(qname), file);
        let ids: HashSet<NodeId> = indices.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([a, b]));
    }

    #[test]
    fn indices_retain_nodes_drops_rejected_ids_from_all_four_indices() {
        let mut indices = AuxiliaryIndices::new();
        let keep = NodeId::new(1, 1);
        let drop = NodeId::new(2, 1);
        let name = StringId::new(0);
        let qname = StringId::new(1);
        let file = FileId::new(1);
        indices.add(keep, NodeKind::Function, name, Some(qname), file);
        indices.add(drop, NodeKind::Method, name, Some(qname), file);
        indices.retain_nodes(&|id| id == keep);
        let survivors: HashSet<NodeId> = indices.all_node_ids().collect();
        assert_eq!(survivors, HashSet::from([keep]));
        // Rejected id must be gone from every one of the four indices …
        assert!(!indices.by_kind(NodeKind::Method).contains(&drop));
        assert!(!indices.by_name(name).contains(&drop));
        assert!(!indices.by_qualified_name(qname).contains(&drop));
        assert!(!indices.by_file(file).contains(&drop));
        // … while the kept id survives in all of them.
        assert!(indices.by_kind(NodeKind::Function).contains(&keep));
        assert!(indices.by_name(name).contains(&keep));
        assert!(indices.by_qualified_name(qname).contains(&keep));
        assert!(indices.by_file(file).contains(&keep));
    }

    #[test]
    fn metadata_all_node_ids_covers_every_entry() {
        let mut store = NodeMetadataStore::new();
        let a = NodeId::new(1, 1);
        let b = NodeId::new(2, 1);
        // Exercise both insertion paths: typed and pre-wrapped metadata.
        store.insert(a, MacroNodeMetadata::default());
        store.insert_metadata(b, NodeMetadata::Macro(MacroNodeMetadata::default()));
        let ids: HashSet<NodeId> = store.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([a, b]));
    }

    #[test]
    fn metadata_retain_nodes_drops_rejected_entries() {
        let mut store = NodeMetadataStore::new();
        let keep = NodeId::new(1, 1);
        let drop = NodeId::new(2, 1);
        store.insert(keep, MacroNodeMetadata::default());
        store.insert(drop, MacroNodeMetadata::default());
        store.retain_nodes(&|id| id == keep);
        assert_eq!(store.len(), 1);
        assert!(store.get(keep).is_some());
        assert!(store.get(drop).is_none());
    }

    /// Fresh provenance with a distinctive epoch and content-hash byte.
    fn sample_provenance(
        epoch: u64,
        byte: u8,
    ) -> super::super::super::storage::node_provenance::NodeProvenance {
        super::super::super::storage::node_provenance::NodeProvenance::fresh(epoch, [byte; 32])
    }

    #[test]
    fn node_provenance_all_node_ids_uses_stored_generation_not_one() {
        let mut store = NodeProvenanceStore::new();
        // Deliberately non-1 generations to catch hardcoding.
        let a = NodeId::new(0, 7);
        let b = NodeId::new(3, 42);
        store.insert(a, sample_provenance(100, 0xAA));
        store.insert(b, sample_provenance(200, 0xBB));
        let ids: HashSet<NodeId> = store.all_node_ids().collect();
        assert_eq!(
            ids,
            HashSet::from([a, b]),
            "NodeIds must be reconstructed from each slot's stored generation, \
             not hardcoded to generation 1"
        );
    }

    #[test]
    fn node_provenance_retain_nodes_clears_rejected_slots_and_keeps_slot_count() {
        let mut store = NodeProvenanceStore::new();
        let keep = NodeId::new(0, 1);
        let drop = NodeId::new(2, 1);
        store.insert(keep, sample_provenance(10, 0x01));
        store.insert(drop, sample_provenance(20, 0x02));
        let slots_before = store.slot_count();
        store.retain_nodes(&|id| id == keep);
        let survivors: HashSet<NodeId> = store.all_node_ids().collect();
        assert_eq!(survivors, HashSet::from([keep]));
        assert!(store.lookup(keep).is_some(), "kept provenance must survive");
        assert!(
            store.lookup(drop).is_none(),
            "rejected slot must report None on lookup"
        );
        assert_eq!(
            store.slot_count(),
            slots_before,
            "retain_nodes must preserve dense slot alignment with NodeArena"
        );
    }

    /// Module-kind scope owned by `node`, parentless, spanning bytes 0..32.
    fn sample_scope(node: NodeId) -> super::super::super::bind::scope::Scope {
        use super::super::super::bind::scope::{Scope, ScopeId, ScopeKind};
        Scope {
            kind: ScopeKind::Module,
            parent: ScopeId::INVALID,
            node,
            byte_span: (0, 32),
            file: FileId::new(0),
        }
    }

    #[test]
    fn scope_arena_all_node_ids_surfaces_every_scope_node() {
        let mut arena = ScopeArena::new();
        let a = NodeId::new(1, 2);
        let b = NodeId::new(5, 9);
        arena.allocate(sample_scope(a));
        arena.allocate(sample_scope(b));
        let ids: HashSet<NodeId> = arena.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([a, b]));
    }

    #[test]
    fn scope_arena_retain_nodes_frees_rejected_scopes_and_advances_generation() {
        let mut arena = ScopeArena::new();
        let keep_node = NodeId::new(1, 1);
        let drop_node = NodeId::new(2, 1);
        let keep_id = arena.allocate(sample_scope(keep_node));
        let drop_id = arena.allocate(sample_scope(drop_node));
        arena.retain_nodes(&|id| id == keep_node);
        assert!(
            arena.get(keep_id).is_some(),
            "scope with kept node must survive"
        );
        assert!(
            arena.get(drop_id).is_none(),
            "scope with rejected node must be freed (stale handle)"
        );
        assert_eq!(arena.len(), 1);
        let survivors: HashSet<NodeId> = arena.all_node_ids().collect();
        assert_eq!(survivors, HashSet::from([keep_node]));
    }

    /// Non-wildcard alias in the given scope; the id is renumbered later
    /// by `alias_table_from_entries`.
    fn alias_entry(
        scope_ix: u32,
        from: u32,
        to: u32,
        import_node: NodeId,
    ) -> super::super::super::bind::alias::AliasEntry {
        use super::super::super::bind::alias::{AliasEntry, AliasEntryId};
        use super::super::super::bind::scope::ScopeId;
        AliasEntry {
            id: AliasEntryId(0),
            scope: ScopeId::new(scope_ix, 1),
            from_symbol: StringId::new(from),
            to_symbol: StringId::new(to),
            import_node,
            is_wildcard: false,
        }
    }

    /// Builds an `AliasTable` by sorting/renumbering the entries and
    /// round-tripping them through postcard serialization.
    fn alias_table_from_entries(
        mut entries: Vec<super::super::super::bind::alias::AliasEntry>,
    ) -> AliasTable {
        use super::super::super::bind::alias::AliasEntryId;
        entries.sort_by(|a, b| (a.scope, a.from_symbol).cmp(&(b.scope, b.from_symbol)));
        for (idx, entry) in entries.iter_mut().enumerate() {
            entry.id = AliasEntryId(u32::try_from(idx).unwrap());
        }
        let encoded = postcard::to_allocvec(&entries).expect("encode entries");
        postcard::from_bytes(&encoded).expect("decode AliasTable")
    }

    #[test]
    fn alias_table_all_node_ids_returns_every_import_node() {
        let import_a = NodeId::new(1, 1);
        let import_b = NodeId::new(2, 3);
        let entries = vec![
            alias_entry(0, 10, 100, import_a),
            alias_entry(0, 20, 200, import_b),
        ];
        let table = alias_table_from_entries(entries);
        let ids: HashSet<NodeId> = table.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([import_a, import_b]));
    }

    #[test]
    fn alias_table_retain_nodes_drops_rejected_entries_and_rebuilds_index() {
        use super::super::super::bind::alias::AliasEntryId;
        use super::super::super::bind::scope::ScopeId;
        let keep_node = NodeId::new(1, 1);
        let drop_node = NodeId::new(2, 1);
        let scope = ScopeId::new(0, 1);
        let entries = vec![
            alias_entry(0, 10, 100, keep_node),
            alias_entry(0, 20, 200, drop_node),
        ];
        let mut table = alias_table_from_entries(entries);
        table.retain_nodes(&|id| id == keep_node);
        assert_eq!(table.len(), 1);
        let entry = table.get(AliasEntryId(0)).expect("entry 0 must exist");
        assert_eq!(entry.import_node, keep_node);
        assert_eq!(entry.id, AliasEntryId(0));
        assert!(
            table.get(AliasEntryId(1)).is_none(),
            "survivor count is 1, id 1 must be vacant"
        );
        let resolved = table.resolve_alias(scope, StringId::new(10));
        assert_eq!(
            resolved,
            Some(StringId::new(100)),
            "resolve_alias must still work after retain_nodes (by_scope rebuilt)"
        );
        assert_eq!(
            table.resolve_alias(scope, StringId::new(20)),
            None,
            "dropped alias must not resolve"
        );
    }

    /// Shadow entry binding `symbol` to `node` at `byte_offset`; the id is
    /// renumbered later by `shadow_table_from_entries`.
    fn shadow_entry(
        scope_ix: u32,
        symbol: u32,
        node: NodeId,
        byte_offset: u32,
    ) -> super::super::super::bind::shadow::ShadowEntry {
        use super::super::super::bind::scope::ScopeId;
        use super::super::super::bind::shadow::{ShadowEntry, ShadowEntryId};
        ShadowEntry {
            id: ShadowEntryId(0),
            scope: ScopeId::new(scope_ix, 1),
            symbol: StringId::new(symbol),
            node,
            byte_offset,
        }
    }

    /// Builds a `ShadowTable` by sorting/renumbering the entries and
    /// round-tripping them through postcard serialization.
    fn shadow_table_from_entries(
        mut entries: Vec<super::super::super::bind::shadow::ShadowEntry>,
    ) -> ShadowTable {
        use super::super::super::bind::shadow::ShadowEntryId;
        entries.sort_by(|a, b| {
            (a.scope, a.symbol, a.byte_offset).cmp(&(b.scope, b.symbol, b.byte_offset))
        });
        for (idx, entry) in entries.iter_mut().enumerate() {
            entry.id = ShadowEntryId(u32::try_from(idx).unwrap());
        }
        let encoded = postcard::to_allocvec(&entries).expect("encode entries");
        postcard::from_bytes(&encoded).expect("decode ShadowTable")
    }

    #[test]
    fn shadow_table_all_node_ids_returns_every_defining_node() {
        let a = NodeId::new(10, 1);
        let b = NodeId::new(11, 1);
        let entries = vec![shadow_entry(0, 1, a, 10), shadow_entry(0, 1, b, 30)];
        let table = shadow_table_from_entries(entries);
        let ids: HashSet<NodeId> = table.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([a, b]));
    }

    #[test]
    fn shadow_table_retain_nodes_drops_rejected_entries_and_rebuilds_chains() {
        use super::super::super::bind::scope::ScopeId;
        use super::super::super::bind::shadow::ShadowEntryId;
        let keep = NodeId::new(10, 1);
        let drop = NodeId::new(11, 1);
        let scope = ScopeId::new(0, 1);
        let sym = StringId::new(1);
        let entries = vec![shadow_entry(0, 1, keep, 10), shadow_entry(0, 1, drop, 30)];
        let mut table = shadow_table_from_entries(entries);
        table.retain_nodes(&|id| id == keep);
        assert_eq!(table.len(), 1);
        let entry = table.get(ShadowEntryId(0)).expect("entry 0 must exist");
        assert_eq!(entry.node, keep);
        assert_eq!(entry.id, ShadowEntryId(0));
        assert_eq!(
            table.effective_binding(scope, sym, 40),
            Some(keep),
            "effective_binding must still resolve after retain_nodes (chains rebuilt)"
        );
        assert_eq!(
            table.effective_binding(scope, sym, 20),
            Some(keep),
            "dropped definition at offset 30 must not shadow the kept def at 10"
        );
    }

    #[test]
    fn file_registry_all_node_ids_empty_when_no_buckets_recorded() {
        let mut reg = FileRegistry::new();
        reg.register(Path::new("/tmp/unused_fixture.rs")).unwrap();
        let ids: Vec<NodeId> = reg.all_node_ids().collect();
        assert!(
            ids.is_empty(),
            "FileRegistry has no NodeIds when no buckets are recorded"
        );
    }

    #[test]
    fn file_registry_all_node_ids_yields_every_recorded_bucket_entry() {
        let mut reg = FileRegistry::new();
        let file_a = FileId::new(1);
        let file_b = FileId::new(2);
        let n1 = NodeId::new(10, 1);
        let n2 = NodeId::new(11, 1);
        let n3 = NodeId::new(12, 1);
        reg.record_node(file_a, n1);
        reg.record_node(file_a, n2);
        reg.record_node(file_b, n3);
        let ids: HashSet<NodeId> = reg.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([n1, n2, n3]));
    }

    #[test]
    fn file_registry_retain_nodes_drops_rejected_from_buckets() {
        let mut reg = FileRegistry::new();
        let file = FileId::new(1);
        let keep = NodeId::new(10, 1);
        let drop = NodeId::new(11, 1);
        reg.record_node(file, keep);
        reg.record_node(file, drop);
        reg.retain_nodes(&|id| id == keep);
        let ids: HashSet<NodeId> = reg.all_node_ids().collect();
        assert_eq!(ids, HashSet::from([keep]));
    }

    #[test]
    fn file_registry_retain_nodes_preserves_file_slots() {
        let mut reg = FileRegistry::new();
        let fid = reg.register(Path::new("/tmp/unused_fixture.rs")).unwrap();
        reg.record_node(fid, NodeId::new(10, 1));
        let before = reg.len();
        // Reject everything: buckets empty out, but file slots must stay.
        reg.retain_nodes(&|_id| false);
        assert_eq!(
            reg.len(),
            before,
            "retain_nodes must not touch file-slot accounting"
        );
        assert!(reg.resolve(fid).is_some(), "file path itself must survive");
    }
}