// sqry_core/graph/unified/storage/arena.rs

//! `NodeArena`: Contiguous node storage with generational indices.
//!
//! This module implements `NodeArena`, the primary node storage for the
//! unified graph architecture. It provides:
//!
//! - **O(1) access**: Direct indexing into contiguous storage
//! - **Generational safety**: Stale IDs return `None` instead of wrong data
//! - **Free list reuse**: Deleted slots are recycled efficiently
//!
//! # Design
//!
//! The arena uses a slot-based design:
//! - Each slot is either occupied (with data) or vacant (with next-free pointer)
//! - Occupied slots store `NodeEntry` with generation counter
//! - Vacant slots form a singly-linked free list
//! - Generation counter increments on each reuse to detect stale IDs
use std::fmt;

use serde::{Deserialize, Serialize};

use super::super::file::id::FileId;
use super::super::node::id::{GenerationOverflowError, NodeId};
use super::super::node::kind::NodeKind;
use super::super::string::id::StringId;
use crate::graph::body_hash::BodyHash128;
27
/// Error type for arena allocation operations.
///
/// This enum covers all possible failure modes during node allocation,
/// providing graceful error handling instead of panics.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ArenaError {
    /// Generation counter would overflow, risking stale ID collision.
    GenerationOverflow(GenerationOverflowError),

    /// Free list is corrupted: an occupied slot was found in the free list.
    ///
    /// This indicates a serious internal consistency error, likely caused by
    /// concurrent modification without proper synchronization, memory corruption,
    /// or a bug in the arena implementation.
    FreeListCorruption {
        /// Index of the corrupted slot.
        index: u32,
    },

    /// Arena capacity exceeded: cannot allocate more than `u32::MAX` nodes.
    CapacityExceeded,
}
50
51impl fmt::Display for ArenaError {
52    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
53        match self {
54            ArenaError::GenerationOverflow(e) => write!(f, "{e}"),
55            ArenaError::FreeListCorruption { index } => {
56                write!(
57                    f,
58                    "free list corruption: occupied slot found at index {index}"
59                )
60            }
61            ArenaError::CapacityExceeded => {
62                write!(
63                    f,
64                    "arena capacity exceeded: cannot allocate more than {} nodes",
65                    u32::MAX
66                )
67            }
68        }
69    }
70}
71
72impl std::error::Error for ArenaError {
73    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
74        match self {
75            ArenaError::GenerationOverflow(e) => Some(e),
76            _ => None,
77        }
78    }
79}
80
81impl From<GenerationOverflowError> for ArenaError {
82    fn from(e: GenerationOverflowError) -> Self {
83        ArenaError::GenerationOverflow(e)
84    }
85}
86
/// State of a slot in the arena.
///
/// A slot is always in exactly one of these two states; the vacant state
/// doubles as a free-list link.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SlotState<T> {
    /// Slot contains an occupied entry.
    Occupied(T),
    /// Slot is vacant, contains next free index (or None if end of list).
    Vacant {
        /// Index of the next free slot in the free list, or None if end.
        next_free: Option<u32>,
    },
}
98
/// A slot in the arena with generation tracking.
///
/// The generation is bumped each time the slot is reused so that stale
/// `NodeId`s (which carry the old generation) can be detected and rejected.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Slot<T> {
    /// Current generation for this slot.
    generation: u64,
    /// Slot state (occupied or vacant).
    state: SlotState<T>,
}
107
108impl<T> Slot<T> {
109    /// Creates a new vacant slot with the given generation and next free.
110    #[allow(dead_code)] // Used by Compaction (Step 15)
111    fn new_vacant(generation: u64, next_free: Option<u32>) -> Self {
112        Self {
113            generation,
114            state: SlotState::Vacant { next_free },
115        }
116    }
117
118    /// Creates a new occupied slot with the given generation and data.
119    pub(crate) fn new_occupied(generation: u64, data: T) -> Self {
120        Self {
121            generation,
122            state: SlotState::Occupied(data),
123        }
124    }
125
126    /// Returns true if this slot is occupied.
127    #[inline]
128    pub fn is_occupied(&self) -> bool {
129        matches!(self.state, SlotState::Occupied(_))
130    }
131
132    /// Returns true if this slot is vacant.
133    #[inline]
134    pub fn is_vacant(&self) -> bool {
135        matches!(self.state, SlotState::Vacant { .. })
136    }
137
138    /// Returns the generation of this slot.
139    #[inline]
140    pub fn generation(&self) -> u64 {
141        self.generation
142    }
143
144    /// Returns a reference to the occupied data, if any.
145    pub fn get(&self) -> Option<&T> {
146        match &self.state {
147            SlotState::Occupied(data) => Some(data),
148            SlotState::Vacant { .. } => None,
149        }
150    }
151
152    /// Returns a reference to the slot state (occupied or vacant).
153    #[inline]
154    pub fn state(&self) -> &SlotState<T> {
155        &self.state
156    }
157
158    /// Returns a mutable reference to the occupied data, if any.
159    pub fn get_mut(&mut self) -> Option<&mut T> {
160        match &mut self.state {
161            SlotState::Occupied(data) => Some(data),
162            SlotState::Vacant { .. } => None,
163        }
164    }
165}
166
/// A node entry stored in the arena.
///
/// This is the actual data stored for each node in the graph.
///
/// Field order is serialization-critical: see the notes on `body_hash` and
/// `is_unsafe` below before adding or moving fields.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NodeEntry {
    /// The kind of code entity this node represents.
    pub kind: NodeKind,
    /// Interned name of the symbol.
    pub name: StringId,
    /// File containing this node.
    pub file: FileId,
    /// Start byte offset in the file.
    pub start_byte: u32,
    /// End byte offset in the file.
    pub end_byte: u32,
    /// Start line (1-indexed).
    pub start_line: u32,
    /// Start column (0-indexed).
    pub start_column: u32,
    /// End line (1-indexed).
    pub end_line: u32,
    /// End column (0-indexed).
    pub end_column: u32,
    /// Optional signature/type information.
    pub signature: Option<StringId>,
    /// Optional docstring.
    pub doc: Option<StringId>,
    /// Optional qualified name (e.g., module.Class.method).
    pub qualified_name: Option<StringId>,
    /// Optional visibility modifier (public, private, protected, etc.).
    pub visibility: Option<StringId>,
    /// Whether this is an async function/method.
    pub is_async: bool,
    /// Whether this is a static member.
    pub is_static: bool,
    /// 128-bit body hash for duplicate detection.
    ///
    /// Computed from the raw body bytes of hashable symbol kinds:
    /// Function, Method, Class, Struct, Enum, Interface, Trait, Module.
    ///
    /// CRITICAL: This field MUST remain BEFORE `is_unsafe` for
    /// postcard serialization compatibility with older snapshots.
    /// Uses `#[serde(default)]` to read older V2 snapshots that lack this field.
    /// DO NOT use `skip_serializing_if` - postcard is positional and skipping corrupts the stream.
    #[serde(default)]
    pub body_hash: Option<BodyHash128>,
    /// Whether this is an unsafe function (Rust, C, etc.).
    ///
    /// For FFI functions, this indicates whether the function is declared with
    /// `unsafe` modifier (Haskell), `unsafe` keyword (Rust), or similar safety
    /// markers in other languages.
    ///
    /// CRITICAL: This field is placed AFTER `body_hash` to maintain postcard
    /// serialization compatibility. Adding fields before `body_hash` would
    /// corrupt existing snapshots due to positional deserialization.
    /// Uses `#[serde(default)]` to read older snapshots that lack this field.
    #[serde(default)]
    pub is_unsafe: bool,
}
226
227impl NodeEntry {
228    /// Creates a new node entry with minimal required fields.
229    #[must_use]
230    pub fn new(kind: NodeKind, name: StringId, file: FileId) -> Self {
231        Self {
232            kind,
233            name,
234            file,
235            start_byte: 0,
236            end_byte: 0,
237            start_line: 0,
238            start_column: 0,
239            end_line: 0,
240            end_column: 0,
241            signature: None,
242            doc: None,
243            qualified_name: None,
244            visibility: None,
245            is_async: false,
246            is_static: false,
247            is_unsafe: false,
248            body_hash: None,
249        }
250    }
251
252    /// Sets the byte range for this node.
253    #[must_use]
254    pub fn with_byte_range(mut self, start: u32, end: u32) -> Self {
255        self.start_byte = start;
256        self.end_byte = end;
257        self
258    }
259
260    /// Sets the line/column range for this node.
261    #[must_use]
262    pub fn with_location(
263        mut self,
264        start_line: u32,
265        start_column: u32,
266        end_line: u32,
267        end_column: u32,
268    ) -> Self {
269        self.start_line = start_line;
270        self.start_column = start_column;
271        self.end_line = end_line;
272        self.end_column = end_column;
273        self
274    }
275
276    /// Whether this arena entry is a Phase 4c-prime cross-file
277    /// unification loser (an "inert" merged duplicate).
278    ///
279    /// Phase 4c-prime merges cross-file stubs that share a qualified
280    /// name and a call-compatible kind into a single canonical winner,
281    /// folding metadata + flags into the winner and calling
282    /// [`crate::graph::unified::build::unification::merge_node_into`] on
283    /// the loser. The loser's arena slot stays `Occupied` (so
284    /// `NodeArena::slot_count()` stays stable for CSR row_ptr sizing),
285    /// but its `name` is cleared to [`StringId::INVALID`] and its
286    /// `qualified_name` to `None`. That sentinel is what this method
287    /// detects.
288    ///
289    /// Callers that iterate `NodeArena::iter()` for publish-visible
290    /// purposes (symbol listings, document-symbol enumeration,
291    /// find_by_pattern-style fanout) MUST skip entries where
292    /// `is_unified_loser()` returns `true`. Callers working on rebuild
293    /// internals (Gate 0d residue checks, CSR compaction) must continue
294    /// to include them.
295    #[inline]
296    #[must_use]
297    pub fn is_unified_loser(&self) -> bool {
298        self.name == StringId::INVALID
299    }
300
301    /// Sets the signature for this node.
302    #[must_use]
303    pub fn with_signature(mut self, signature: StringId) -> Self {
304        self.signature = Some(signature);
305        self
306    }
307
308    /// Sets the docstring for this node.
309    #[must_use]
310    pub fn with_doc(mut self, doc: StringId) -> Self {
311        self.doc = Some(doc);
312        self
313    }
314
315    /// Sets the qualified name for this node.
316    #[must_use]
317    pub fn with_qualified_name(mut self, qualified_name: StringId) -> Self {
318        self.qualified_name = Some(qualified_name);
319        self
320    }
321
322    /// Sets the visibility modifier for this node.
323    #[must_use]
324    pub fn with_visibility(mut self, visibility: StringId) -> Self {
325        self.visibility = Some(visibility);
326        self
327    }
328
329    /// Sets the async flag for this node.
330    #[must_use]
331    pub fn with_async(mut self, is_async: bool) -> Self {
332        self.is_async = is_async;
333        self
334    }
335
336    /// Sets the static flag for this node.
337    #[must_use]
338    pub fn with_static(mut self, is_static: bool) -> Self {
339        self.is_static = is_static;
340        self
341    }
342
343    /// Sets the unsafe flag for this node.
344    #[must_use]
345    pub fn with_unsafe(mut self, is_unsafe: bool) -> Self {
346        self.is_unsafe = is_unsafe;
347        self
348    }
349
350    /// Sets the body hash for this node.
351    ///
352    /// The body hash is a 128-bit hash computed from the raw body bytes
353    /// of the symbol. Used for duplicate code detection.
354    #[must_use]
355    pub fn with_body_hash(mut self, hash: BodyHash128) -> Self {
356        self.body_hash = Some(hash);
357        self
358    }
359}
360
/// Contiguous node storage with generational indices.
///
/// `NodeArena` provides O(1) access to nodes by `NodeId`, with stale
/// reference detection via generation counters.
///
/// # Free List Management
///
/// When a node is removed, its slot is added to the free list for reuse.
/// This avoids memory fragmentation and keeps indices stable.
///
/// # Example
///
/// ```rust,ignore
/// let mut arena = NodeArena::new();
/// let id = arena.alloc(NodeEntry::new(NodeKind::Function, name, file))?;
///
/// let node = arena.get(id).expect("node exists");
/// assert_eq!(node.kind, NodeKind::Function);
///
/// arena.remove(id)?;
/// assert!(arena.get(id).is_none()); // Stale ID returns None
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeArena {
    /// Slots for node storage.
    slots: Vec<Slot<NodeEntry>>,
    /// Head of the free list (index of first free slot).
    /// `None` means no vacant slots are available for reuse.
    free_head: Option<u32>,
    /// Number of currently occupied slots (cached; kept in sync by
    /// `alloc`/`remove`/`truncate_to`).
    len: usize,
}
392
393impl NodeArena {
394    /// Creates a new empty arena.
395    #[must_use]
396    pub fn new() -> Self {
397        Self {
398            slots: Vec::new(),
399            free_head: None,
400            len: 0,
401        }
402    }
403
404    /// Creates a new arena with the specified initial capacity.
405    #[must_use]
406    pub fn with_capacity(capacity: usize) -> Self {
407        Self {
408            slots: Vec::with_capacity(capacity),
409            free_head: None,
410            len: 0,
411        }
412    }
413
414    /// Returns the number of occupied slots.
415    #[inline]
416    #[must_use]
417    pub fn len(&self) -> usize {
418        self.len
419    }
420
421    /// Returns true if the arena is empty.
422    #[inline]
423    #[must_use]
424    pub fn is_empty(&self) -> bool {
425        self.len == 0
426    }
427
428    /// Returns the allocated capacity (slots that can be stored without reallocation).
429    #[inline]
430    #[must_use]
431    pub fn capacity(&self) -> usize {
432        self.slots.capacity()
433    }
434
435    /// Returns the total number of slots (occupied + vacant).
436    ///
437    /// This is different from `len()` which counts only occupied slots,
438    /// and `capacity()` which counts pre-allocated slots.
439    #[inline]
440    #[must_use]
441    pub fn slot_count(&self) -> usize {
442        self.slots.len()
443    }
444
    /// Allocates a new node and returns its `NodeId`.
    ///
    /// If there are free slots available, one is reused. Otherwise, a new
    /// slot is appended to the storage.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError`] if:
    /// - Generation counter would overflow (`GenerationOverflow`)
    /// - Free list is corrupted (`FreeListCorruption`)
    /// - Arena capacity exceeded (`CapacityExceeded`)
    pub fn alloc(&mut self, entry: NodeEntry) -> Result<NodeId, ArenaError> {
        // Optimistically count the new node; every error path below must
        // roll this back before returning.
        self.len += 1;

        if let Some(free_idx) = self.free_head {
            // Check bounds before accessing slot - corrupted free list could point out of bounds
            let slot_index = free_idx as usize;
            if slot_index >= self.slots.len() {
                self.len -= 1; // Rollback
                return Err(ArenaError::FreeListCorruption { index: free_idx });
            }

            // Reuse a free slot (bounds now guaranteed valid)
            let slot = &mut self.slots[slot_index];

            // Update free list head
            self.free_head = match &slot.state {
                SlotState::Vacant { next_free } => *next_free,
                SlotState::Occupied(_) => {
                    // Free list corruption detected - return error instead of panicking
                    self.len -= 1; // Rollback
                    return Err(ArenaError::FreeListCorruption { index: free_idx });
                }
            };

            // Increment generation for reuse detection (respect MAX_GENERATION limit)
            // Use NodeId::try_increment_generation which checks against MAX_GENERATION, not u64::MAX
            // NOTE(review): at this point the slot has already been unlinked from
            // the free list, so on overflow the vacant slot is permanently retired
            // (never reused). That looks intentional — it prevents further
            // generation bumps on an exhausted slot — but confirm.
            let temp_id = NodeId::new(free_idx, slot.generation);
            let new_generation = temp_id.try_increment_generation().map_err(|mut e| {
                self.len -= 1; // Rollback
                e.index = free_idx; // Ensure index is correct
                ArenaError::GenerationOverflow(e)
            })?;

            slot.generation = new_generation;
            slot.state = SlotState::Occupied(entry);

            Ok(NodeId::new(free_idx, new_generation))
        } else {
            // Append a new slot
            let index = self.slots.len();

            // Check capacity before allocation using checked conversion
            let Ok(index_u32) = u32::try_from(index) else {
                self.len -= 1; // Rollback
                return Err(ArenaError::CapacityExceeded);
            };

            // Fresh slots start life at generation 1.
            let slot = Slot::new_occupied(1, entry);
            self.slots.push(slot);

            Ok(NodeId::new(index_u32, 1))
        }
    }
509
510    /// Returns a reference to the node with the given ID.
511    ///
512    /// Returns `None` if the ID is invalid or stale (generation mismatch).
513    #[must_use]
514    pub fn get(&self, id: NodeId) -> Option<&NodeEntry> {
515        if id.is_invalid() {
516            return None;
517        }
518
519        let index = id.index() as usize;
520        let slot = self.slots.get(index)?;
521
522        // Check generation match
523        if slot.generation != id.generation() {
524            return None;
525        }
526
527        slot.get()
528    }
529
530    /// Returns a mutable reference to the node with the given ID.
531    ///
532    /// Returns `None` if the ID is invalid or stale.
533    #[must_use]
534    pub fn get_mut(&mut self, id: NodeId) -> Option<&mut NodeEntry> {
535        if id.is_invalid() {
536            return None;
537        }
538
539        let index = id.index() as usize;
540        let slot = self.slots.get_mut(index)?;
541
542        // Check generation match
543        if slot.generation != id.generation() {
544            return None;
545        }
546
547        slot.get_mut()
548    }
549
    /// Checks if the given ID is valid (not stale).
    ///
    /// Delegates to `get`, so it applies the same invalid-ID, bounds, and
    /// generation checks.
    #[must_use]
    pub fn contains(&self, id: NodeId) -> bool {
        self.get(id).is_some()
    }
555
    /// Removes the node with the given ID.
    ///
    /// Returns the removed entry, or `None` if the ID was invalid/stale.
    /// This method is idempotent: calling it twice with the same ID will
    /// return `None` on the second call without corrupting the arena.
    pub fn remove(&mut self, id: NodeId) -> Option<NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get_mut(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        // Only proceed if slot is occupied - this makes remove idempotent
        // and prevents double-decrement of len / double-enqueue to free list
        if let SlotState::Occupied(_) = &slot.state {
            // Swap a vacant state in; the vacant slot links to the current
            // free-list head. The generation is deliberately left unchanged
            // here — it is bumped on reallocation (see `alloc`).
            let old_state = std::mem::replace(
                &mut slot.state,
                SlotState::Vacant {
                    next_free: self.free_head,
                },
            );

            // Update free list and len only when actually removing
            self.free_head = Some(id.index());
            self.len -= 1;

            if let SlotState::Occupied(entry) = old_state {
                return Some(entry);
            }
        }

        None
    }
595
596    /// Iterates over all occupied entries with their IDs.
597    pub fn iter(&self) -> impl Iterator<Item = (NodeId, &NodeEntry)> {
598        self.slots.iter().enumerate().filter_map(|(index, slot)| {
599            if let SlotState::Occupied(entry) = &slot.state {
600                let index_u32 = u32::try_from(index).ok()?;
601                Some((NodeId::new(index_u32, slot.generation), entry))
602            } else {
603                None
604            }
605        })
606    }
607
608    /// Iterates over all occupied entries with mutable access.
609    pub fn iter_mut(&mut self) -> impl Iterator<Item = (NodeId, &mut NodeEntry)> {
610        self.slots
611            .iter_mut()
612            .enumerate()
613            .filter_map(|(index, slot)| {
614                let generation = slot.generation;
615                if let SlotState::Occupied(entry) = &mut slot.state {
616                    let index_u32 = u32::try_from(index).ok()?;
617                    Some((NodeId::new(index_u32, generation), entry))
618                } else {
619                    None
620                }
621            })
622    }
623
624    /// Clears all nodes from the arena.
625    ///
626    /// This resets the arena to an empty state, but retains allocated capacity.
627    pub fn clear(&mut self) {
628        self.slots.clear();
629        self.free_head = None;
630        self.len = 0;
631    }
632
633    /// Reserves capacity for at least `additional` more nodes.
634    pub fn reserve(&mut self, additional: usize) {
635        self.slots.reserve(additional);
636    }
637
638    /// Returns the slot at the given index, if any.
639    ///
640    /// This is a low-level method for internal use. Prefer `get()` for normal access.
641    #[must_use]
642    pub fn slot(&self, index: u32) -> Option<&Slot<NodeEntry>> {
643        self.slots.get(index as usize)
644    }
645
    /// Pre-allocates a contiguous range of occupied slots for parallel commit.
    ///
    /// Returns the start index of the allocated range. All slots are initialized
    /// as `Occupied` with clones of the `placeholder` entry at generation 1.
    /// The free list is **not** touched, preserving its invariants.
    ///
    /// A `count` of zero is a no-op and returns the current slot count as the
    /// start index.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError::CapacityExceeded`] if the resulting slot count
    /// would exceed `u32::MAX`.
    pub fn alloc_range(&mut self, count: u32, placeholder: &NodeEntry) -> Result<u32, ArenaError> {
        if count == 0 {
            // No-op: return current slot count as start index (safe because
            // slot_count <= u32::MAX is maintained as an invariant).
            return Ok(u32::try_from(self.slots.len())
                .unwrap_or_else(|_| unreachable!("slot_count invariant violated")));
        }

        let start = self.slots.len();
        let new_total = start
            .checked_add(count as usize)
            .ok_or(ArenaError::CapacityExceeded)?;

        // Verify new total fits in u32 (arena index space).
        // NOTE(review): this accepts up to u32::MAX + 1 slots (indices
        // 0..=u32::MAX), one more than the doc's "exceed u32::MAX" wording —
        // confirm whether index u32::MAX is reserved (e.g. for an INVALID id).
        if new_total > u32::MAX as usize + 1 {
            return Err(ArenaError::CapacityExceeded);
        }

        let start_u32 = u32::try_from(start).map_err(|_| ArenaError::CapacityExceeded)?;

        // Reserve and fill in one go.
        self.slots.reserve(count as usize);
        for _ in 0..count {
            self.slots.push(Slot::new_occupied(1, placeholder.clone()));
        }

        // Increment occupied count immediately — arena state is always consistent.
        self.len += count as usize;

        Ok(start_u32)
    }
690
691    /// Returns a mutable sub-slice of the internal slot storage.
692    ///
693    /// This is intended for parallel writes after [`alloc_range`](Self::alloc_range):
694    /// each thread can write to its own non-overlapping portion of the slice.
695    ///
696    /// # Panics
697    ///
698    /// Panics if `start + count` exceeds the slot count.
699    #[must_use]
700    pub fn bulk_slice_mut(&mut self, start: u32, count: u32) -> &mut [Slot<NodeEntry>] {
701        let begin = start as usize;
702        let end = begin + count as usize;
703        &mut self.slots[begin..end]
704    }
705
    /// Truncates the slot storage to `saved_slot_count` slots.
    ///
    /// Used to roll back a failed bulk allocation. Any slots beyond
    /// `saved_slot_count` are dropped, and `len` is adjusted to reflect
    /// only the occupied slots that remain.
    ///
    /// # Panics
    ///
    /// Panics if `saved_slot_count` is greater than the current slot count
    /// (cannot grow via truncation).
    pub fn truncate_to(&mut self, saved_slot_count: usize) {
        assert!(
            saved_slot_count <= self.slots.len(),
            "truncate_to({saved_slot_count}) exceeds current slot count ({})",
            self.slots.len(),
        );

        // Count occupied slots that will be dropped (so `len` stays accurate).
        let dropped_occupied = self.slots[saved_slot_count..]
            .iter()
            .filter(|s| s.is_occupied())
            .count();

        self.slots.truncate(saved_slot_count);
        self.len -= dropped_occupied;

        // Rebuild free list: remove any entries that pointed into the truncated
        // region. We walk the chain and keep only links to retained slots.
        self.rebuild_free_list_after_truncate(saved_slot_count);
    }
736
    /// Rebuilds the free list after a truncation, dropping any links to slots
    /// at or beyond `cutoff`.
    ///
    /// Scans retained slots for vacant entries and re-links them into a fresh
    /// free list. This is O(cutoff) but `truncate_to` is a rare rollback path.
    fn rebuild_free_list_after_truncate(&mut self, cutoff: usize) {
        // Simple approach: scan retained slots for vacant entries.
        // This avoids issues with interleaved chains (e.g., head -> 6 -> 0)
        // where truncated indices appear mid-chain.
        let mut new_head: Option<u32> = None;
        // Walk backwards so the resulting list is in ascending index order
        // (head = lowest free index), matching the pattern of fresh arenas.
        for i in (0..cutoff).rev() {
            if self.slots[i].is_vacant() {
                let i_u32 = u32::try_from(i)
                    .unwrap_or_else(|_| unreachable!("free-list index exceeds u32 invariant"));
                // Each vacant slot points at the previously-linked (higher) one.
                self.slots[i].state = SlotState::Vacant {
                    next_free: new_head,
                };
                new_head = Some(i_u32);
            }
        }
        self.free_head = new_head;
    }
761
762    /// Returns a read-only view of all slots (occupied and vacant).
763    ///
764    /// Useful for post-build passes that need to scan every slot without
765    /// the overhead of generation checks.
766    #[must_use]
767    pub fn slots(&self) -> &[Slot<NodeEntry>] {
768        &self.slots
769    }
770}
771
772impl Default for NodeArena {
773    fn default() -> Self {
774        Self::new()
775    }
776}
777
778impl fmt::Display for NodeArena {
779    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
780        write!(
781            f,
782            "NodeArena(len={}, capacity={})",
783            self.len,
784            self.capacity()
785        )
786    }
787}
788
impl crate::graph::unified::memory::GraphMemorySize for NodeArena {
    /// Estimates heap usage as the slot vector's capacity times the size of
    /// one slot.
    ///
    /// NOTE(review): this counts only the `slots` backing buffer; any heap
    /// owned by individual `NodeEntry` values would be excluded — confirm
    /// entries are heap-free before relying on this for exact accounting.
    fn heap_bytes(&self) -> usize {
        self.slots.capacity() * std::mem::size_of::<Slot<NodeEntry>>()
    }
}
794
795#[cfg(test)]
796mod tests {
797    use super::*;
798
799    fn test_file() -> FileId {
800        FileId::new(1)
801    }
802
803    fn test_name() -> StringId {
804        StringId::new(1)
805    }
806
807    fn test_entry(kind: NodeKind) -> NodeEntry {
808        NodeEntry::new(kind, test_name(), test_file())
809    }
810
811    #[test]
812    fn test_arena_new() {
813        let arena = NodeArena::new();
814        assert_eq!(arena.len(), 0);
815        assert!(arena.is_empty());
816        assert_eq!(arena.capacity(), 0);
817    }
818
819    #[test]
820    fn test_arena_with_capacity() {
821        let arena = NodeArena::with_capacity(100);
822        assert_eq!(arena.len(), 0);
823        assert!(arena.capacity() >= 100);
824    }
825
826    #[test]
827    fn test_alloc_and_get() {
828        let mut arena = NodeArena::new();
829        let entry = test_entry(NodeKind::Function);
830
831        let id = arena.alloc(entry).unwrap();
832        assert!(!id.is_invalid());
833        assert_eq!(arena.len(), 1);
834
835        let node = arena.get(id).unwrap();
836        assert_eq!(node.kind, NodeKind::Function);
837    }
838
839    #[test]
840    fn test_alloc_multiple() {
841        let mut arena = NodeArena::new();
842
843        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
844        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
845        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
846
847        assert_eq!(arena.len(), 3);
848        assert_ne!(id1, id2);
849        assert_ne!(id2, id3);
850
851        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Function);
852        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
853        assert_eq!(arena.get(id3).unwrap().kind, NodeKind::Method);
854    }
855
856    #[test]
857    fn test_get_mut() {
858        let mut arena = NodeArena::new();
859        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
860
861        {
862            let node = arena.get_mut(id).unwrap();
863            node.start_line = 42;
864        }
865
866        assert_eq!(arena.get(id).unwrap().start_line, 42);
867    }
868
869    #[test]
870    fn test_contains() {
871        let mut arena = NodeArena::new();
872        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
873
874        assert!(arena.contains(id));
875        assert!(!arena.contains(NodeId::INVALID));
876    }
877
878    #[test]
879    fn test_remove() {
880        let mut arena = NodeArena::new();
881        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
882
883        assert_eq!(arena.len(), 1);
884        assert!(arena.contains(id));
885
886        let removed = arena.remove(id).unwrap();
887        assert_eq!(removed.kind, NodeKind::Function);
888        assert_eq!(arena.len(), 0);
889        assert!(!arena.contains(id));
890    }
891
892    #[test]
893    fn test_stale_id_after_remove() {
894        let mut arena = NodeArena::new();
895        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
896
897        // Remove and reallocate
898        arena.remove(id1);
899        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
900
901        // Old ID should be stale
902        assert!(arena.get(id1).is_none());
903        // New ID should work
904        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
905        // They should have the same index but different generations
906        assert_eq!(id1.index(), id2.index());
907        assert_ne!(id1.generation(), id2.generation());
908    }
909
910    #[test]
911    fn test_remove_idempotent() {
912        let mut arena = NodeArena::new();
913        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
914        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
915
916        // First remove should succeed
917        assert!(arena.remove(id1).is_some());
918        assert_eq!(arena.len(), 1);
919
920        // Second remove of same ID should be idempotent (no side effects)
921        assert!(arena.remove(id1).is_none());
922        assert_eq!(arena.len(), 1); // len unchanged
923
924        // Third remove should still be safe
925        assert!(arena.remove(id1).is_none());
926        assert_eq!(arena.len(), 1); // len still unchanged
927
928        // Other entries should be unaffected
929        assert!(arena.contains(id2));
930
931        // New allocation should work correctly
932        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
933        assert_eq!(id3.index(), id1.index()); // Reuses the removed slot
934        assert_eq!(arena.len(), 2);
935    }
936
937    #[test]
938    fn test_free_list_reuse() {
939        let mut arena = NodeArena::new();
940
941        // Allocate 3 nodes
942        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
943        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
944        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
945
946        // Remove middle node
947        arena.remove(id2);
948        assert_eq!(arena.len(), 2);
949        assert_eq!(arena.slot_count(), 3);
950
951        // Allocate new node - should reuse id2's slot
952        let id4 = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
953        assert_eq!(id4.index(), id2.index());
954        assert_eq!(arena.len(), 3);
955        assert_eq!(arena.slot_count(), 3); // No growth - slot reused
956
957        // Old id2 is stale
958        assert!(arena.get(id2).is_none());
959        // New id4 works
960        assert_eq!(arena.get(id4).unwrap().kind, NodeKind::Variable);
961        // Other IDs still valid
962        assert!(arena.contains(id1));
963        assert!(arena.contains(id3));
964    }
965
966    #[test]
967    fn test_invalid_id() {
968        let arena = NodeArena::new();
969        assert!(arena.get(NodeId::INVALID).is_none());
970    }
971
972    #[test]
973    fn test_out_of_bounds_id() {
974        let arena = NodeArena::new();
975        let fake_id = NodeId::new(999, 1);
976        assert!(arena.get(fake_id).is_none());
977    }
978
979    #[test]
980    fn test_wrong_generation() {
981        let mut arena = NodeArena::new();
982        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
983
984        // Create ID with wrong generation
985        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
986        assert!(arena.get(wrong_gen_id).is_none());
987    }
988
989    #[test]
990    fn test_iter() {
991        let mut arena = NodeArena::new();
992        arena.alloc(test_entry(NodeKind::Function)).unwrap();
993        arena.alloc(test_entry(NodeKind::Class)).unwrap();
994        arena.alloc(test_entry(NodeKind::Method)).unwrap();
995
996        let entries: Vec<_> = arena.iter().collect();
997        assert_eq!(entries.len(), 3);
998
999        let kinds: Vec<_> = entries.iter().map(|(_, e)| e.kind).collect();
1000        assert!(kinds.contains(&NodeKind::Function));
1001        assert!(kinds.contains(&NodeKind::Class));
1002        assert!(kinds.contains(&NodeKind::Method));
1003    }
1004
1005    #[test]
1006    fn test_iter_with_removed() {
1007        let mut arena = NodeArena::new();
1008        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1009        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1010        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
1011
1012        arena.remove(id2);
1013
1014        let entries: Vec<_> = arena.iter().collect();
1015        assert_eq!(entries.len(), 2);
1016
1017        let collected_ids: Vec<_> = entries.iter().map(|(id, _)| *id).collect();
1018        assert!(collected_ids.contains(&id1));
1019        assert!(!collected_ids.contains(&id2));
1020        assert!(collected_ids.contains(&id3));
1021    }
1022
1023    #[test]
1024    fn test_iter_mut() {
1025        let mut arena = NodeArena::new();
1026        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1027        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1028
1029        for (_, entry) in arena.iter_mut() {
1030            entry.start_line = 100;
1031        }
1032
1033        for (_, entry) in arena.iter() {
1034            assert_eq!(entry.start_line, 100);
1035        }
1036    }
1037
1038    #[test]
1039    fn test_clear() {
1040        let mut arena = NodeArena::new();
1041        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1042        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1043
1044        assert_eq!(arena.len(), 2);
1045        arena.clear();
1046        assert_eq!(arena.len(), 0);
1047        assert!(arena.is_empty());
1048    }
1049
1050    #[test]
1051    fn test_reserve() {
1052        let mut arena = NodeArena::new();
1053        arena.reserve(1000);
1054        assert!(arena.capacity() >= 1000);
1055    }
1056
1057    #[test]
1058    fn test_display() {
1059        let mut arena = NodeArena::new();
1060        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1061
1062        let display = format!("{arena}");
1063        assert!(display.contains("NodeArena"));
1064        assert!(display.contains("len=1"));
1065    }
1066
1067    #[test]
1068    fn test_node_entry_builder() {
1069        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
1070            .with_byte_range(10, 100)
1071            .with_location(1, 0, 5, 10)
1072            .with_signature(StringId::new(2))
1073            .with_doc(StringId::new(3))
1074            .with_qualified_name(StringId::new(4));
1075
1076        assert_eq!(entry.kind, NodeKind::Function);
1077        assert_eq!(entry.start_byte, 10);
1078        assert_eq!(entry.end_byte, 100);
1079        assert_eq!(entry.start_line, 1);
1080        assert_eq!(entry.start_column, 0);
1081        assert_eq!(entry.end_line, 5);
1082        assert_eq!(entry.end_column, 10);
1083        assert_eq!(entry.signature, Some(StringId::new(2)));
1084        assert_eq!(entry.doc, Some(StringId::new(3)));
1085        assert_eq!(entry.qualified_name, Some(StringId::new(4)));
1086    }
1087
1088    #[test]
1089    fn test_slot_state() {
1090        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1091        assert!(occupied.is_occupied());
1092        assert!(!occupied.is_vacant());
1093        assert_eq!(occupied.get(), Some(&42));
1094
1095        let vacant: Slot<i32> = Slot::new_vacant(2, Some(5));
1096        assert!(!vacant.is_occupied());
1097        assert!(vacant.is_vacant());
1098        assert_eq!(vacant.get(), None);
1099    }
1100
1101    #[test]
1102    fn test_slot_generation() {
1103        let slot: Slot<i32> = Slot::new_occupied(42, 100);
1104        assert_eq!(slot.generation(), 42);
1105    }
1106
1107    #[test]
1108    fn test_slot_state_accessor() {
1109        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1110        assert!(matches!(occupied.state(), SlotState::Occupied(42)));
1111
1112        let vacant: Slot<i32> = Slot::new_vacant(1, Some(3));
1113        assert!(matches!(
1114            vacant.state(),
1115            SlotState::Vacant { next_free: Some(3) }
1116        ));
1117    }
1118
1119    // ---- Bulk API tests ----
1120
1121    #[test]
1122    fn test_alloc_range_basic() {
1123        let mut arena = NodeArena::new();
1124        let placeholder = test_entry(NodeKind::Function);
1125
1126        let start = arena.alloc_range(5, &placeholder).unwrap();
1127        assert_eq!(start, 0);
1128        assert_eq!(arena.len(), 5);
1129        assert_eq!(arena.slot_count(), 5);
1130
1131        // All slots should be occupied with generation 1.
1132        for i in 0..5u32 {
1133            let id = NodeId::new(i, 1);
1134            let entry = arena.get(id).expect("slot should be occupied");
1135            assert_eq!(entry.kind, NodeKind::Function);
1136        }
1137    }
1138
1139    #[test]
1140    fn test_alloc_range_after_existing() {
1141        let mut arena = NodeArena::new();
1142
1143        // Allocate one node via normal path.
1144        let id0 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1145        assert_eq!(id0.index(), 0);
1146        assert_eq!(arena.len(), 1);
1147
1148        // Bulk allocate 3 more.
1149        let placeholder = test_entry(NodeKind::Method);
1150        let start = arena.alloc_range(3, &placeholder).unwrap();
1151        assert_eq!(start, 1);
1152        assert_eq!(arena.len(), 4);
1153        assert_eq!(arena.slot_count(), 4);
1154
1155        // Original node intact.
1156        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Class);
1157
1158        // Bulk-allocated nodes accessible.
1159        for i in 1..4u32 {
1160            let id = NodeId::new(i, 1);
1161            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
1162        }
1163    }
1164
1165    #[test]
1166    fn test_alloc_range_zero_is_noop() {
1167        let mut arena = NodeArena::new();
1168
1169        // Allocate one normally first.
1170        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1171        let len_before = arena.len();
1172        let slot_count_before = arena.slot_count();
1173
1174        let start = arena.alloc_range(0, &test_entry(NodeKind::Class)).unwrap();
1175        #[allow(clippy::cast_possible_truncation)]
1176        // Arena slot count fits in u32 for graph storage indices
1177        let expected_start = slot_count_before as u32;
1178        assert_eq!(start, expected_start);
1179        assert_eq!(arena.len(), len_before);
1180        assert_eq!(arena.slot_count(), slot_count_before);
1181    }
1182
1183    #[test]
1184    fn test_bulk_slice_mut() {
1185        let mut arena = NodeArena::new();
1186        let placeholder = test_entry(NodeKind::Function);
1187        let start = arena.alloc_range(3, &placeholder).unwrap();
1188
1189        // Overwrite slot 1 via bulk_slice_mut.
1190        {
1191            let slice = arena.bulk_slice_mut(start, 3);
1192            assert_eq!(slice.len(), 3);
1193
1194            // Replace the middle slot with a Class entry.
1195            slice[1] = Slot::new_occupied(1, test_entry(NodeKind::Class));
1196        }
1197
1198        // Verify the overwrite via get().
1199        let id1 = NodeId::new(1, 1);
1200        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Class);
1201
1202        // Other slots unchanged.
1203        let id0 = NodeId::new(0, 1);
1204        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Function);
1205        let id2 = NodeId::new(2, 1);
1206        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Function);
1207    }
1208
1209    #[test]
1210    fn test_truncate_to() {
1211        let mut arena = NodeArena::new();
1212        let placeholder = test_entry(NodeKind::Function);
1213
1214        // Allocate initial range.
1215        arena.alloc_range(3, &placeholder).unwrap();
1216        assert_eq!(arena.len(), 3);
1217        assert_eq!(arena.slot_count(), 3);
1218
1219        // Save state.
1220        let saved = arena.slot_count();
1221
1222        // Allocate more.
1223        arena.alloc_range(4, &placeholder).unwrap();
1224        assert_eq!(arena.len(), 7);
1225        assert_eq!(arena.slot_count(), 7);
1226
1227        // Truncate back.
1228        arena.truncate_to(saved);
1229        assert_eq!(arena.len(), 3);
1230        assert_eq!(arena.slot_count(), 3);
1231
1232        // Original slots still accessible.
1233        for i in 0..3u32 {
1234            let id = NodeId::new(i, 1);
1235            assert!(arena.get(id).is_some());
1236        }
1237    }
1238
1239    #[test]
1240    fn test_alloc_range_with_free_list() {
1241        let mut arena = NodeArena::new();
1242
1243        // Allocate two nodes, then remove the first to create a free list entry.
1244        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1245        let _id1 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1246        arena.remove(id0);
1247
1248        // State: len=1, slot_count=2, free_head=Some(0)
1249        assert_eq!(arena.len(), 1);
1250        assert_eq!(arena.slot_count(), 2);
1251
1252        // alloc_range should append AFTER slot_count, not reuse free list.
1253        let start = arena.alloc_range(3, &test_entry(NodeKind::Method)).unwrap();
1254        assert_eq!(start, 2); // Appended after existing 2 slots
1255        assert_eq!(arena.len(), 4); // 1 existing + 3 new
1256        assert_eq!(arena.slot_count(), 5); // 2 existing + 3 new
1257
1258        // Free list should still be intact — slot 0 still vacant.
1259        let slot0 = arena.slot(0).unwrap();
1260        assert!(slot0.is_vacant());
1261
1262        // Bulk-allocated nodes accessible.
1263        for i in 2..5u32 {
1264            let id = NodeId::new(i, 1);
1265            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
1266        }
1267    }
1268
1269    #[test]
1270    fn test_truncate_to_clamps_free_head() {
1271        let mut arena = NodeArena::new();
1272
1273        // Allocate 5 nodes.
1274        let mut ids = Vec::new();
1275        for _ in 0..5 {
1276            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1277        }
1278
1279        // Save state at 5 slots.
1280        let saved = arena.slot_count();
1281
1282        // Allocate 3 more, then remove one of them to put it on the free list.
1283        let extra_ids: Vec<_> = (0..3)
1284            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
1285            .collect();
1286        arena.remove(extra_ids[1]); // free_head now points to index 6
1287
1288        // Truncate back — free_head should be clamped since index 6 is gone.
1289        arena.truncate_to(saved);
1290        assert_eq!(arena.slot_count(), 5);
1291        assert_eq!(arena.len(), 5);
1292
1293        // Arena should still be usable: new alloc appends (no dangling free list).
1294        let new_id = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
1295        assert_eq!(new_id.index(), 5); // Appended, not reusing dangling slot
1296        assert_eq!(arena.get(new_id).unwrap().kind, NodeKind::Variable);
1297    }
1298
1299    #[test]
1300    fn test_truncate_to_preserves_retained_free_list() {
1301        let mut arena = NodeArena::new();
1302
1303        // Allocate 8 nodes: indices 0..7
1304        let mut ids = Vec::new();
1305        for _ in 0..8 {
1306            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1307        }
1308        assert_eq!(arena.len(), 8);
1309
1310        // Remove indices 2 and 6 — both on free list.
1311        // Free list: 6 -> 2 -> None (LIFO)
1312        arena.remove(ids[2]);
1313        arena.remove(ids[6]);
1314        assert_eq!(arena.len(), 6);
1315
1316        // Truncate to 5 slots — removes indices 5,6,7.
1317        // Index 6 (vacant) is in truncated region, index 2 (vacant) is retained.
1318        // Free list chain was 6 -> 2. After truncate, only index 2 should survive.
1319        arena.truncate_to(5);
1320
1321        // Should have 4 occupied (indices 0,1,3,4) + 1 vacant (index 2) = 5 slots.
1322        // len should be 4 (we had 6 occupied, truncated 3 slots: 5 occupied + 1 vacant
1323        // in truncated region = indices 5(occ), 6(vac), 7(occ) => dropped 2 occupied).
1324        assert_eq!(arena.slot_count(), 5);
1325        assert_eq!(arena.len(), 4);
1326
1327        // Free list should contain only index 2.
1328        // Verify by allocating — should reuse index 2.
1329        let reused = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
1330        assert_eq!(reused.index(), 2);
1331        assert_eq!(arena.get(reused).unwrap().kind, NodeKind::Variable);
1332        assert_eq!(arena.len(), 5);
1333
1334        // Next alloc should append (no more free slots).
1335        let appended = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1336        assert_eq!(appended.index(), 5);
1337        assert_eq!(arena.len(), 6);
1338    }
1339
1340    #[test]
1341    fn test_slots_read_access() {
1342        let mut arena = NodeArena::new();
1343        let placeholder = test_entry(NodeKind::Variable);
1344        arena.alloc_range(4, &placeholder).unwrap();
1345
1346        let slots = arena.slots();
1347        assert_eq!(slots.len(), 4);
1348
1349        for slot in slots {
1350            assert!(slot.is_occupied());
1351            assert_eq!(slot.generation(), 1);
1352            assert_eq!(slot.get().unwrap().kind, NodeKind::Variable);
1353        }
1354    }
1355
1356    #[test]
1357    fn test_multiple_remove_reuse_cycle() {
1358        let mut arena = NodeArena::new();
1359
1360        // Allocate and remove multiple times to test free list chaining
1361        let mut ids = Vec::new();
1362        for _ in 0..5 {
1363            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1364        }
1365
1366        // Remove in reverse order (builds free list: 4 -> 3 -> 2 -> 1 -> 0)
1367        for &id in ids.iter().rev() {
1368            arena.remove(id);
1369        }
1370
1371        // Reallocate - should use free list in LIFO order
1372        let new_ids: Vec<_> = (0..5)
1373            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
1374            .collect();
1375
1376        // All old IDs should be stale
1377        for &old_id in &ids {
1378            assert!(arena.get(old_id).is_none());
1379        }
1380
1381        // All new IDs should work
1382        for &new_id in &new_ids {
1383            assert!(arena.get(new_id).is_some());
1384        }
1385    }
1386
1387    #[test]
1388    fn test_generation_overflow_at_max_generation() {
1389        // Test that allocation fails when generation reaches MAX_GENERATION
1390        let mut arena = NodeArena::new();
1391
1392        // Allocate a node
1393        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1394        assert_eq!(id.index(), 0);
1395
1396        // Remove it to put on free list
1397        arena.remove(id);
1398
1399        // Manually set the slot's generation to MAX_GENERATION
1400        // This simulates what would happen after ~9.2 quintillion allocations
1401        arena.slots[0].generation = NodeId::MAX_GENERATION;
1402
1403        // Now allocation should fail because incrementing MAX_GENERATION would overflow
1404        let result = arena.alloc(test_entry(NodeKind::Class));
1405        assert!(result.is_err());
1406
1407        let err = result.unwrap_err();
1408        match err {
1409            ArenaError::GenerationOverflow(inner) => {
1410                assert_eq!(inner.index, 0);
1411                assert_eq!(inner.generation, NodeId::MAX_GENERATION);
1412            }
1413            _ => panic!("expected GenerationOverflow, got {err:?}"),
1414        }
1415    }
1416
1417    #[test]
1418    fn test_free_list_corruption_returns_error() {
1419        // Test that free list corruption returns an error instead of panicking
1420        let mut arena = NodeArena::new();
1421
1422        // Allocate and remove a node to put it on free list
1423        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1424        arena.remove(id);
1425
1426        // Corrupt the free list by manually marking the vacant slot as occupied
1427        arena.slots[0].state = SlotState::Occupied(test_entry(NodeKind::Class));
1428
1429        // Allocation should fail gracefully with FreeListCorruption error
1430        let result = arena.alloc(test_entry(NodeKind::Method));
1431        assert!(result.is_err());
1432
1433        let err = result.unwrap_err();
1434        match err {
1435            ArenaError::FreeListCorruption { index } => {
1436                assert_eq!(index, 0);
1437            }
1438            _ => panic!("expected FreeListCorruption, got {err:?}"),
1439        }
1440
1441        // Verify the arena is still in a consistent state (len was rolled back)
1442        assert_eq!(arena.len(), 0);
1443    }
1444
1445    #[test]
1446    fn test_arena_error_display() {
1447        let gen_err = ArenaError::GenerationOverflow(GenerationOverflowError {
1448            index: 42,
1449            generation: 100,
1450        });
1451        let display = format!("{gen_err}");
1452        assert!(display.contains("42"));
1453        assert!(display.contains("100"));
1454
1455        let corruption_err = ArenaError::FreeListCorruption { index: 5 };
1456        let display = format!("{corruption_err}");
1457        assert!(display.contains("free list corruption"));
1458        assert!(display.contains('5'));
1459
1460        let capacity_err = ArenaError::CapacityExceeded;
1461        let display = format!("{capacity_err}");
1462        assert!(display.contains("capacity exceeded"));
1463    }
1464
1465    #[test]
1466    fn test_arena_error_source_generation_overflow() {
1467        use std::error::Error;
1468
1469        let inner = GenerationOverflowError {
1470            index: 0,
1471            generation: NodeId::MAX_GENERATION,
1472        };
1473        let err = ArenaError::GenerationOverflow(inner);
1474        // source() returns Some for GenerationOverflow
1475        assert!(err.source().is_some());
1476    }
1477
1478    #[test]
1479    fn test_arena_error_source_none_for_other_variants() {
1480        use std::error::Error;
1481
1482        let corruption = ArenaError::FreeListCorruption { index: 0 };
1483        assert!(corruption.source().is_none());
1484
1485        let capacity = ArenaError::CapacityExceeded;
1486        assert!(capacity.source().is_none());
1487    }
1488
1489    #[test]
1490    fn test_arena_from_generation_overflow_error() {
1491        let inner = GenerationOverflowError {
1492            index: 10,
1493            generation: 99,
1494        };
1495        let err: ArenaError = inner.into();
1496        assert!(matches!(err, ArenaError::GenerationOverflow(_)));
1497    }
1498
1499    #[test]
1500    fn test_arena_error_clone_and_eq() {
1501        let err = ArenaError::CapacityExceeded;
1502        let cloned = err.clone();
1503        assert_eq!(err, cloned);
1504
1505        let err2 = ArenaError::FreeListCorruption { index: 3 };
1506        let cloned2 = err2.clone();
1507        assert_eq!(err2, cloned2);
1508    }
1509
1510    #[test]
1511    fn test_node_entry_with_visibility() {
1512        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
1513            .with_visibility(StringId::new(5));
1514
1515        assert_eq!(entry.visibility, Some(StringId::new(5)));
1516    }
1517
1518    #[test]
1519    fn test_node_entry_with_async_and_static() {
1520        let entry = NodeEntry::new(NodeKind::Method, test_name(), test_file())
1521            .with_async(true)
1522            .with_static(true);
1523
1524        assert!(entry.is_async);
1525        assert!(entry.is_static);
1526    }
1527
1528    #[test]
1529    fn test_node_entry_with_unsafe_flag() {
1530        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_unsafe(true);
1531
1532        assert!(entry.is_unsafe);
1533    }
1534
1535    #[test]
1536    fn test_node_entry_with_body_hash() {
1537        use crate::graph::body_hash::BodyHash128;
1538        let hash = BodyHash128::from_u128(0u128);
1539        let entry =
1540            NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_body_hash(hash);
1541
1542        assert!(entry.body_hash.is_some());
1543    }
1544
1545    #[test]
1546    fn test_node_entry_defaults() {
1547        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file());
1548        assert_eq!(entry.start_byte, 0);
1549        assert_eq!(entry.end_byte, 0);
1550        assert_eq!(entry.start_line, 0);
1551        assert_eq!(entry.start_column, 0);
1552        assert_eq!(entry.end_line, 0);
1553        assert_eq!(entry.end_column, 0);
1554        assert!(entry.signature.is_none());
1555        assert!(entry.doc.is_none());
1556        assert!(entry.qualified_name.is_none());
1557        assert!(entry.visibility.is_none());
1558        assert!(!entry.is_async);
1559        assert!(!entry.is_static);
1560        assert!(!entry.is_unsafe);
1561        assert!(entry.body_hash.is_none());
1562    }
1563
1564    #[test]
1565    fn test_arena_default_impl() {
1566        let arena = NodeArena::default();
1567        assert_eq!(arena.len(), 0);
1568        assert!(arena.is_empty());
1569    }
1570
1571    #[test]
1572    fn test_get_invalid_id() {
1573        let mut arena = NodeArena::new();
1574        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1575        // get with INVALID should return None
1576        assert!(arena.get(NodeId::INVALID).is_none());
1577    }
1578
1579    #[test]
1580    fn test_get_mut_invalid_id() {
1581        let mut arena = NodeArena::new();
1582        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1583        assert!(arena.get_mut(NodeId::INVALID).is_none());
1584    }
1585
1586    #[test]
1587    fn test_get_mut_wrong_generation() {
1588        let mut arena = NodeArena::new();
1589        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1590        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
1591        assert!(arena.get_mut(wrong_gen_id).is_none());
1592    }
1593
1594    #[test]
1595    fn test_remove_invalid_id() {
1596        let mut arena = NodeArena::new();
1597        assert!(arena.remove(NodeId::INVALID).is_none());
1598    }
1599
1600    #[test]
1601    fn test_slot_get_mut_occupied() {
1602        let mut slot: Slot<i32> = Slot::new_occupied(1, 42);
1603        let val = slot.get_mut().unwrap();
1604        *val = 99;
1605        assert_eq!(slot.get(), Some(&99));
1606    }
1607
1608    #[test]
1609    fn test_slot_get_mut_vacant() {
1610        let mut slot: Slot<i32> = Slot::new_vacant(1, None);
1611        assert!(slot.get_mut().is_none());
1612    }
1613
1614    #[test]
1615    fn test_truncate_to_zero_occupied_dropped() {
1616        let mut arena = NodeArena::new();
1617        let placeholder = test_entry(NodeKind::Function);
1618        arena.alloc_range(5, &placeholder).unwrap();
1619        // Truncate to 0 — all 5 occupied slots are dropped
1620        arena.truncate_to(0);
1621        assert_eq!(arena.len(), 0);
1622        assert_eq!(arena.slot_count(), 0);
1623    }
1624
1625    #[test]
1626    fn test_slot_count_vs_len() {
1627        let mut arena = NodeArena::new();
1628        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1629        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1630        arena.remove(id0);
1631        // slot_count is total slots (including vacant), len is occupied only
1632        assert_eq!(arena.slot_count(), 2);
1633        assert_eq!(arena.len(), 1);
1634    }
1635}