// sqry_core/graph/unified/storage/arena.rs
1//! `NodeArena`: Contiguous node storage with generational indices.
2//!
3//! This module implements `NodeArena`, the primary node storage for the
4//! unified graph architecture. It provides:
5//!
6//! - **O(1) access**: Direct indexing into contiguous storage
7//! - **Generational safety**: Stale IDs return `None` instead of wrong data
8//! - **Free list reuse**: Deleted slots are recycled efficiently
9//!
10//! # Design
11//!
12//! The arena uses a slot-based design:
13//! - Each slot is either occupied (with data) or vacant (with next-free pointer)
14//! - Occupied slots store `NodeEntry` with generation counter
15//! - Vacant slots form a singly-linked free list
16//! - Generation counter increments on each reuse to detect stale IDs
17
18use std::fmt;
19
20use serde::{Deserialize, Serialize};
21
22use super::super::file::id::FileId;
23use super::super::node::id::{GenerationOverflowError, NodeId};
24use super::super::node::kind::NodeKind;
25use super::super::string::id::StringId;
26use crate::graph::body_hash::BodyHash128;
27
/// Error type for arena allocation operations.
///
/// This enum covers all possible failure modes during node allocation,
/// providing graceful error handling instead of panics.
///
/// Implements `Display` and `std::error::Error`, and converts from
/// [`GenerationOverflowError`] via `From` so `?` works in allocation paths.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ArenaError {
    /// Generation counter would overflow, risking stale ID collision.
    GenerationOverflow(GenerationOverflowError),

    /// Free list is corrupted: an occupied slot was found in the free list.
    ///
    /// This indicates a serious internal consistency error, likely caused by
    /// concurrent modification without proper synchronization, memory corruption,
    /// or a bug in the arena implementation.
    FreeListCorruption {
        /// Index of the corrupted slot.
        index: u32,
    },

    /// Arena capacity exceeded: cannot allocate more than `u32::MAX` nodes.
    CapacityExceeded,
}
50
51impl fmt::Display for ArenaError {
52    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
53        match self {
54            ArenaError::GenerationOverflow(e) => write!(f, "{e}"),
55            ArenaError::FreeListCorruption { index } => {
56                write!(
57                    f,
58                    "free list corruption: occupied slot found at index {index}"
59                )
60            }
61            ArenaError::CapacityExceeded => {
62                write!(
63                    f,
64                    "arena capacity exceeded: cannot allocate more than {} nodes",
65                    u32::MAX
66                )
67            }
68        }
69    }
70}
71
72impl std::error::Error for ArenaError {
73    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
74        match self {
75            ArenaError::GenerationOverflow(e) => Some(e),
76            _ => None,
77        }
78    }
79}
80
81impl From<GenerationOverflowError> for ArenaError {
82    fn from(e: GenerationOverflowError) -> Self {
83        ArenaError::GenerationOverflow(e)
84    }
85}
86
/// State of a slot in the arena.
///
/// Vacant slots chain together through `next_free` to form the arena's
/// singly-linked free list; the generation counter lives on [`Slot`], not here.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SlotState<T> {
    /// Slot contains an occupied entry.
    Occupied(T),
    /// Slot is vacant, contains next free index (or None if end of list).
    Vacant {
        /// Index of the next free slot in the free list, or None if end.
        next_free: Option<u32>,
    },
}
98
/// A slot in the arena with generation tracking.
///
/// The generation counter survives removal (the slot keeps it while vacant)
/// so stale `NodeId`s can be detected when the slot is later reused.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Slot<T> {
    /// Current generation for this slot.
    generation: u64,
    /// Slot state (occupied or vacant).
    state: SlotState<T>,
}
107
108impl<T> Slot<T> {
109    /// Creates a new vacant slot with the given generation and next free.
110    #[allow(dead_code)] // Used by Compaction (Step 15)
111    fn new_vacant(generation: u64, next_free: Option<u32>) -> Self {
112        Self {
113            generation,
114            state: SlotState::Vacant { next_free },
115        }
116    }
117
118    /// Creates a new occupied slot with the given generation and data.
119    pub(crate) fn new_occupied(generation: u64, data: T) -> Self {
120        Self {
121            generation,
122            state: SlotState::Occupied(data),
123        }
124    }
125
126    /// Returns true if this slot is occupied.
127    #[inline]
128    pub fn is_occupied(&self) -> bool {
129        matches!(self.state, SlotState::Occupied(_))
130    }
131
132    /// Returns true if this slot is vacant.
133    #[inline]
134    pub fn is_vacant(&self) -> bool {
135        matches!(self.state, SlotState::Vacant { .. })
136    }
137
138    /// Returns the generation of this slot.
139    #[inline]
140    pub fn generation(&self) -> u64 {
141        self.generation
142    }
143
144    /// Returns a reference to the occupied data, if any.
145    pub fn get(&self) -> Option<&T> {
146        match &self.state {
147            SlotState::Occupied(data) => Some(data),
148            SlotState::Vacant { .. } => None,
149        }
150    }
151
152    /// Returns a reference to the slot state (occupied or vacant).
153    #[inline]
154    pub fn state(&self) -> &SlotState<T> {
155        &self.state
156    }
157
158    /// Returns a mutable reference to the occupied data, if any.
159    pub fn get_mut(&mut self) -> Option<&mut T> {
160        match &mut self.state {
161            SlotState::Occupied(data) => Some(data),
162            SlotState::Vacant { .. } => None,
163        }
164    }
165}
166
/// A node entry stored in the arena.
///
/// This is the actual data stored for each node in the graph.
///
/// Field declaration order is significant: this type is serialized
/// positionally (postcard) — see the `body_hash` / `is_unsafe` notes below
/// before adding or reordering fields.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NodeEntry {
    /// The kind of code entity this node represents.
    pub kind: NodeKind,
    /// Interned name of the symbol.
    pub name: StringId,
    /// File containing this node.
    pub file: FileId,
    /// Start byte offset in the file.
    pub start_byte: u32,
    /// End byte offset in the file.
    pub end_byte: u32,
    /// Start line (1-indexed).
    pub start_line: u32,
    /// Start column (0-indexed).
    pub start_column: u32,
    /// End line (1-indexed).
    pub end_line: u32,
    /// End column (0-indexed).
    pub end_column: u32,
    /// Optional signature/type information.
    pub signature: Option<StringId>,
    /// Optional docstring.
    pub doc: Option<StringId>,
    /// Optional qualified name (e.g., module.Class.method).
    pub qualified_name: Option<StringId>,
    /// Optional visibility modifier (public, private, protected, etc.).
    pub visibility: Option<StringId>,
    /// Whether this is an async function/method.
    pub is_async: bool,
    /// Whether this is a static member.
    pub is_static: bool,
    /// 128-bit body hash for duplicate detection.
    ///
    /// Computed from the raw body bytes of hashable symbol kinds:
    /// Function, Method, Class, Struct, Enum, Interface, Trait, Module.
    ///
    /// CRITICAL: This field MUST remain BEFORE `is_unsafe` for
    /// postcard serialization compatibility with older snapshots.
    /// Uses `#[serde(default)]` to read older V2 snapshots that lack this field.
    /// DO NOT use `skip_serializing_if` - postcard is positional and skipping corrupts the stream.
    #[serde(default)]
    pub body_hash: Option<BodyHash128>,
    /// Whether this is an unsafe function (Rust, C, etc.).
    ///
    /// For FFI functions, this indicates whether the function is declared with
    /// `unsafe` modifier (Haskell), `unsafe` keyword (Rust), or similar safety
    /// markers in other languages.
    ///
    /// CRITICAL: This field is placed AFTER `body_hash` to maintain postcard
    /// serialization compatibility. Adding fields before `body_hash` would
    /// corrupt existing snapshots due to positional deserialization.
    /// Uses `#[serde(default)]` to read older snapshots that lack this field.
    #[serde(default)]
    pub is_unsafe: bool,
}
226
227impl NodeEntry {
228    /// Creates a new node entry with minimal required fields.
229    #[must_use]
230    pub fn new(kind: NodeKind, name: StringId, file: FileId) -> Self {
231        Self {
232            kind,
233            name,
234            file,
235            start_byte: 0,
236            end_byte: 0,
237            start_line: 0,
238            start_column: 0,
239            end_line: 0,
240            end_column: 0,
241            signature: None,
242            doc: None,
243            qualified_name: None,
244            visibility: None,
245            is_async: false,
246            is_static: false,
247            is_unsafe: false,
248            body_hash: None,
249        }
250    }
251
252    /// Sets the byte range for this node.
253    #[must_use]
254    pub fn with_byte_range(mut self, start: u32, end: u32) -> Self {
255        self.start_byte = start;
256        self.end_byte = end;
257        self
258    }
259
260    /// Sets the line/column range for this node.
261    #[must_use]
262    pub fn with_location(
263        mut self,
264        start_line: u32,
265        start_column: u32,
266        end_line: u32,
267        end_column: u32,
268    ) -> Self {
269        self.start_line = start_line;
270        self.start_column = start_column;
271        self.end_line = end_line;
272        self.end_column = end_column;
273        self
274    }
275
276    /// Sets the signature for this node.
277    #[must_use]
278    pub fn with_signature(mut self, signature: StringId) -> Self {
279        self.signature = Some(signature);
280        self
281    }
282
283    /// Sets the docstring for this node.
284    #[must_use]
285    pub fn with_doc(mut self, doc: StringId) -> Self {
286        self.doc = Some(doc);
287        self
288    }
289
290    /// Sets the qualified name for this node.
291    #[must_use]
292    pub fn with_qualified_name(mut self, qualified_name: StringId) -> Self {
293        self.qualified_name = Some(qualified_name);
294        self
295    }
296
297    /// Sets the visibility modifier for this node.
298    #[must_use]
299    pub fn with_visibility(mut self, visibility: StringId) -> Self {
300        self.visibility = Some(visibility);
301        self
302    }
303
304    /// Sets the async flag for this node.
305    #[must_use]
306    pub fn with_async(mut self, is_async: bool) -> Self {
307        self.is_async = is_async;
308        self
309    }
310
311    /// Sets the static flag for this node.
312    #[must_use]
313    pub fn with_static(mut self, is_static: bool) -> Self {
314        self.is_static = is_static;
315        self
316    }
317
318    /// Sets the unsafe flag for this node.
319    #[must_use]
320    pub fn with_unsafe(mut self, is_unsafe: bool) -> Self {
321        self.is_unsafe = is_unsafe;
322        self
323    }
324
325    /// Sets the body hash for this node.
326    ///
327    /// The body hash is a 128-bit hash computed from the raw body bytes
328    /// of the symbol. Used for duplicate code detection.
329    #[must_use]
330    pub fn with_body_hash(mut self, hash: BodyHash128) -> Self {
331        self.body_hash = Some(hash);
332        self
333    }
334}
335
/// Contiguous node storage with generational indices.
///
/// `NodeArena` provides O(1) access to nodes by `NodeId`, with stale
/// reference detection via generation counters.
///
/// # Free List Management
///
/// When a node is removed, its slot is added to the free list for reuse.
/// This avoids memory fragmentation and keeps indices stable.
///
/// # Invariants
///
/// - `len` always equals the number of `Occupied` slots in `slots`.
/// - `free_head` and each `next_free` link point at `Vacant` slots inside
///   `slots`; violations are surfaced by `alloc` as
///   `ArenaError::FreeListCorruption`.
///
/// # Example
///
/// ```rust,ignore
/// let mut arena = NodeArena::new();
/// let id = arena.alloc(NodeEntry::new(NodeKind::Function, name, file))?;
///
/// let node = arena.get(id).expect("node exists");
/// assert_eq!(node.kind, NodeKind::Function);
///
/// arena.remove(id)?;
/// assert!(arena.get(id).is_none()); // Stale ID returns None
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeArena {
    /// Slots for node storage.
    slots: Vec<Slot<NodeEntry>>,
    /// Head of the free list (index of first free slot), or `None` when no
    /// slot is vacant.
    free_head: Option<u32>,
    /// Number of currently occupied slots.
    len: usize,
}
367
impl NodeArena {
    /// Creates a new empty arena.
    ///
    /// No slot storage is allocated until the first insertion.
    #[must_use]
    pub fn new() -> Self {
        Self {
            slots: Vec::new(),
            free_head: None,
            len: 0,
        }
    }

    /// Creates a new arena with the specified initial capacity.
    ///
    /// Pre-reserves slot storage (no slots are occupied yet) to avoid
    /// reallocation during bulk inserts.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            slots: Vec::with_capacity(capacity),
            free_head: None,
            len: 0,
        }
    }

    /// Returns the number of occupied slots.
    #[inline]
    #[must_use]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the arena is empty.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the allocated capacity (slots that can be stored without reallocation).
    #[inline]
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.slots.capacity()
    }

    /// Returns the total number of slots (occupied + vacant).
    ///
    /// This is different from `len()` which counts only occupied slots,
    /// and `capacity()` which counts pre-allocated slots.
    #[inline]
    #[must_use]
    pub fn slot_count(&self) -> usize {
        self.slots.len()
    }

    /// Allocates a new node and returns its `NodeId`.
    ///
    /// If there are free slots available, one is reused. Otherwise, a new
    /// slot is appended to the storage. Reused slots get a bumped generation
    /// so previously issued IDs for that index become stale.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError`] if:
    /// - Generation counter would overflow (`GenerationOverflow`)
    /// - Free list is corrupted (`FreeListCorruption`)
    /// - Arena capacity exceeded (`CapacityExceeded`)
    pub fn alloc(&mut self, entry: NodeEntry) -> Result<NodeId, ArenaError> {
        // Optimistically count the new node; every error path below rolls
        // this back before returning.
        self.len += 1;

        if let Some(free_idx) = self.free_head {
            // Check bounds before accessing slot - corrupted free list could point out of bounds
            let slot_index = free_idx as usize;
            if slot_index >= self.slots.len() {
                self.len -= 1; // Rollback
                return Err(ArenaError::FreeListCorruption { index: free_idx });
            }

            // Reuse a free slot (bounds now guaranteed valid)
            let slot = &mut self.slots[slot_index];

            // Update free list head
            self.free_head = match &slot.state {
                SlotState::Vacant { next_free } => *next_free,
                SlotState::Occupied(_) => {
                    // Free list corruption detected - return error instead of panicking
                    self.len -= 1; // Rollback
                    return Err(ArenaError::FreeListCorruption { index: free_idx });
                }
            };

            // Increment generation for reuse detection (respect MAX_GENERATION limit)
            // Use NodeId::try_increment_generation which checks against MAX_GENERATION, not u64::MAX
            let temp_id = NodeId::new(free_idx, slot.generation);
            let new_generation = temp_id.try_increment_generation().map_err(|mut e| {
                self.len -= 1; // Rollback
                e.index = free_idx; // Ensure index is correct
                ArenaError::GenerationOverflow(e)
            })?;

            slot.generation = new_generation;
            slot.state = SlotState::Occupied(entry);

            Ok(NodeId::new(free_idx, new_generation))
        } else {
            // Append a new slot
            let index = self.slots.len();

            // Check capacity before allocation using checked conversion.
            // NOTE(review): index == u32::MAX is accepted here — confirm that
            // NodeId does not reserve u32::MAX as its INVALID sentinel index.
            let Ok(index_u32) = u32::try_from(index) else {
                self.len -= 1; // Rollback
                return Err(ArenaError::CapacityExceeded);
            };

            // Fresh slots start at generation 1 (0 is never issued).
            let slot = Slot::new_occupied(1, entry);
            self.slots.push(slot);

            Ok(NodeId::new(index_u32, 1))
        }
    }

    /// Returns a reference to the node with the given ID.
    ///
    /// Returns `None` if the ID is invalid or stale (generation mismatch).
    #[must_use]
    pub fn get(&self, id: NodeId) -> Option<&NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get(index)?;

        // Check generation match - a mismatch means the slot was reused
        // since this ID was issued.
        if slot.generation != id.generation() {
            return None;
        }

        slot.get()
    }

    /// Returns a mutable reference to the node with the given ID.
    ///
    /// Returns `None` if the ID is invalid or stale.
    #[must_use]
    pub fn get_mut(&mut self, id: NodeId) -> Option<&mut NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get_mut(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        slot.get_mut()
    }

    /// Checks if the given ID is valid (not stale).
    #[must_use]
    pub fn contains(&self, id: NodeId) -> bool {
        self.get(id).is_some()
    }

    /// Removes the node with the given ID.
    ///
    /// Returns the removed entry, or `None` if the ID was invalid/stale.
    /// This method is idempotent: calling it twice with the same ID will
    /// return `None` on the second call without corrupting the arena.
    ///
    /// The slot keeps its generation while vacant; the next `alloc` that
    /// reuses it bumps the generation, invalidating this ID.
    pub fn remove(&mut self, id: NodeId) -> Option<NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get_mut(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        // Only proceed if slot is occupied - this makes remove idempotent
        // and prevents double-decrement of len / double-enqueue to free list
        if let SlotState::Occupied(_) = &slot.state {
            let old_state = std::mem::replace(
                &mut slot.state,
                SlotState::Vacant {
                    next_free: self.free_head,
                },
            );

            // Update free list and len only when actually removing
            self.free_head = Some(id.index());
            self.len -= 1;

            if let SlotState::Occupied(entry) = old_state {
                return Some(entry);
            }
        }

        None
    }

    /// Iterates over all occupied entries with their IDs.
    pub fn iter(&self) -> impl Iterator<Item = (NodeId, &NodeEntry)> {
        self.slots.iter().enumerate().filter_map(|(index, slot)| {
            if let SlotState::Occupied(entry) = &slot.state {
                // try_from can only fail if the slot-count invariant were
                // violated; `ok()?` defensively skips rather than panics.
                let index_u32 = u32::try_from(index).ok()?;
                Some((NodeId::new(index_u32, slot.generation), entry))
            } else {
                None
            }
        })
    }

    /// Iterates over all occupied entries with mutable access.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (NodeId, &mut NodeEntry)> {
        self.slots
            .iter_mut()
            .enumerate()
            .filter_map(|(index, slot)| {
                // Read the generation before mutably borrowing the state.
                let generation = slot.generation;
                if let SlotState::Occupied(entry) = &mut slot.state {
                    let index_u32 = u32::try_from(index).ok()?;
                    Some((NodeId::new(index_u32, generation), entry))
                } else {
                    None
                }
            })
    }

    /// Clears all nodes from the arena.
    ///
    /// This resets the arena to an empty state, but retains allocated capacity.
    ///
    /// NOTE(review): dropping all slots also discards their generation
    /// counters, so a `NodeId` issued before `clear()` can alias a node
    /// allocated afterwards (generations restart at 1). Confirm callers
    /// never retain IDs across a clear.
    pub fn clear(&mut self) {
        self.slots.clear();
        self.free_head = None;
        self.len = 0;
    }

    /// Reserves capacity for at least `additional` more nodes.
    pub fn reserve(&mut self, additional: usize) {
        self.slots.reserve(additional);
    }

    /// Returns the slot at the given index, if any.
    ///
    /// This is a low-level method for internal use. Prefer `get()` for normal access.
    /// Performs no generation check.
    #[must_use]
    pub fn slot(&self, index: u32) -> Option<&Slot<NodeEntry>> {
        self.slots.get(index as usize)
    }

    /// Pre-allocates a contiguous range of occupied slots for parallel commit.
    ///
    /// Returns the start index of the allocated range. All slots are initialized
    /// as `Occupied` with clones of the `placeholder` entry at generation 1.
    /// The free list is **not** touched, preserving its invariants.
    ///
    /// A `count` of zero is a no-op and returns the current slot count as the
    /// start index.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError::CapacityExceeded`] if the resulting slot count
    /// would exceed `u32::MAX`.
    pub fn alloc_range(&mut self, count: u32, placeholder: &NodeEntry) -> Result<u32, ArenaError> {
        if count == 0 {
            // No-op: return current slot count as start index (safe because
            // slot_count <= u32::MAX is maintained as an invariant).
            return Ok(u32::try_from(self.slots.len())
                .unwrap_or_else(|_| unreachable!("slot_count invariant violated")));
        }

        let start = self.slots.len();
        // checked_add guards against usize overflow on 32-bit targets.
        let new_total = start
            .checked_add(count as usize)
            .ok_or(ArenaError::CapacityExceeded)?;

        // Verify new total fits in u32 (arena index space).
        if new_total > u32::MAX as usize + 1 {
            return Err(ArenaError::CapacityExceeded);
        }

        let start_u32 = u32::try_from(start).map_err(|_| ArenaError::CapacityExceeded)?;

        // Reserve and fill in one go.
        self.slots.reserve(count as usize);
        for _ in 0..count {
            self.slots.push(Slot::new_occupied(1, placeholder.clone()));
        }

        // Increment occupied count immediately — arena state is always consistent.
        self.len += count as usize;

        Ok(start_u32)
    }

    /// Returns a mutable sub-slice of the internal slot storage.
    ///
    /// This is intended for parallel writes after [`alloc_range`](Self::alloc_range):
    /// each thread can write to its own non-overlapping portion of the slice.
    ///
    /// NOTE(review): callers must keep these slots `Occupied` — replacing a
    /// slot's state with `Vacant` through this slice would desynchronize
    /// `len` and bypass the free list.
    ///
    /// # Panics
    ///
    /// Panics if `start + count` exceeds the slot count.
    #[must_use]
    pub fn bulk_slice_mut(&mut self, start: u32, count: u32) -> &mut [Slot<NodeEntry>] {
        let begin = start as usize;
        let end = begin + count as usize;
        &mut self.slots[begin..end]
    }

    /// Truncates the slot storage to `saved_slot_count` slots.
    ///
    /// Used to roll back a failed bulk allocation. Any slots beyond
    /// `saved_slot_count` are dropped, and `len` is adjusted to reflect
    /// only the occupied slots that remain.
    ///
    /// # Panics
    ///
    /// Panics if `saved_slot_count` is greater than the current slot count
    /// (cannot grow via truncation).
    pub fn truncate_to(&mut self, saved_slot_count: usize) {
        assert!(
            saved_slot_count <= self.slots.len(),
            "truncate_to({saved_slot_count}) exceeds current slot count ({})",
            self.slots.len(),
        );

        // Count occupied slots that will be dropped.
        let dropped_occupied = self.slots[saved_slot_count..]
            .iter()
            .filter(|s| s.is_occupied())
            .count();

        self.slots.truncate(saved_slot_count);
        self.len -= dropped_occupied;

        // Rebuild free list: remove any entries that pointed into the truncated
        // region. We walk the chain and keep only links to retained slots.
        self.rebuild_free_list_after_truncate(saved_slot_count);
    }

    /// Rebuilds the free list after a truncation, dropping any links to slots
    /// at or beyond `cutoff`.
    ///
    /// Scans retained slots for vacant entries and re-links them into a fresh
    /// free list. This is O(cutoff) but `truncate_to` is a rare rollback path.
    fn rebuild_free_list_after_truncate(&mut self, cutoff: usize) {
        // Simple approach: scan retained slots for vacant entries.
        // This avoids issues with interleaved chains (e.g., head -> 6 -> 0)
        // where truncated indices appear mid-chain.
        let mut new_head: Option<u32> = None;
        // Walk backwards so the resulting list is in ascending index order
        // (head = lowest free index), matching the pattern of fresh arenas.
        for i in (0..cutoff).rev() {
            if self.slots[i].is_vacant() {
                let i_u32 = u32::try_from(i)
                    .unwrap_or_else(|_| unreachable!("free-list index exceeds u32 invariant"));
                // Re-link: each vacant slot points at the previous (higher)
                // head, preserving generation counters untouched.
                self.slots[i].state = SlotState::Vacant {
                    next_free: new_head,
                };
                new_head = Some(i_u32);
            }
        }
        self.free_head = new_head;
    }

    /// Returns a read-only view of all slots (occupied and vacant).
    ///
    /// Useful for post-build passes that need to scan every slot without
    /// the overhead of generation checks.
    #[must_use]
    pub fn slots(&self) -> &[Slot<NodeEntry>] {
        &self.slots
    }
}
746
747impl Default for NodeArena {
748    fn default() -> Self {
749        Self::new()
750    }
751}
752
753impl fmt::Display for NodeArena {
754    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
755        write!(
756            f,
757            "NodeArena(len={}, capacity={})",
758            self.len,
759            self.capacity()
760        )
761    }
762}
763
764#[cfg(test)]
765mod tests {
766    use super::*;
767
768    fn test_file() -> FileId {
769        FileId::new(1)
770    }
771
772    fn test_name() -> StringId {
773        StringId::new(1)
774    }
775
776    fn test_entry(kind: NodeKind) -> NodeEntry {
777        NodeEntry::new(kind, test_name(), test_file())
778    }
779
780    #[test]
781    fn test_arena_new() {
782        let arena = NodeArena::new();
783        assert_eq!(arena.len(), 0);
784        assert!(arena.is_empty());
785        assert_eq!(arena.capacity(), 0);
786    }
787
788    #[test]
789    fn test_arena_with_capacity() {
790        let arena = NodeArena::with_capacity(100);
791        assert_eq!(arena.len(), 0);
792        assert!(arena.capacity() >= 100);
793    }
794
795    #[test]
796    fn test_alloc_and_get() {
797        let mut arena = NodeArena::new();
798        let entry = test_entry(NodeKind::Function);
799
800        let id = arena.alloc(entry).unwrap();
801        assert!(!id.is_invalid());
802        assert_eq!(arena.len(), 1);
803
804        let node = arena.get(id).unwrap();
805        assert_eq!(node.kind, NodeKind::Function);
806    }
807
808    #[test]
809    fn test_alloc_multiple() {
810        let mut arena = NodeArena::new();
811
812        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
813        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
814        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
815
816        assert_eq!(arena.len(), 3);
817        assert_ne!(id1, id2);
818        assert_ne!(id2, id3);
819
820        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Function);
821        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
822        assert_eq!(arena.get(id3).unwrap().kind, NodeKind::Method);
823    }
824
825    #[test]
826    fn test_get_mut() {
827        let mut arena = NodeArena::new();
828        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
829
830        {
831            let node = arena.get_mut(id).unwrap();
832            node.start_line = 42;
833        }
834
835        assert_eq!(arena.get(id).unwrap().start_line, 42);
836    }
837
838    #[test]
839    fn test_contains() {
840        let mut arena = NodeArena::new();
841        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
842
843        assert!(arena.contains(id));
844        assert!(!arena.contains(NodeId::INVALID));
845    }
846
847    #[test]
848    fn test_remove() {
849        let mut arena = NodeArena::new();
850        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
851
852        assert_eq!(arena.len(), 1);
853        assert!(arena.contains(id));
854
855        let removed = arena.remove(id).unwrap();
856        assert_eq!(removed.kind, NodeKind::Function);
857        assert_eq!(arena.len(), 0);
858        assert!(!arena.contains(id));
859    }
860
861    #[test]
862    fn test_stale_id_after_remove() {
863        let mut arena = NodeArena::new();
864        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
865
866        // Remove and reallocate
867        arena.remove(id1);
868        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
869
870        // Old ID should be stale
871        assert!(arena.get(id1).is_none());
872        // New ID should work
873        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
874        // They should have the same index but different generations
875        assert_eq!(id1.index(), id2.index());
876        assert_ne!(id1.generation(), id2.generation());
877    }
878
879    #[test]
880    fn test_remove_idempotent() {
881        let mut arena = NodeArena::new();
882        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
883        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
884
885        // First remove should succeed
886        assert!(arena.remove(id1).is_some());
887        assert_eq!(arena.len(), 1);
888
889        // Second remove of same ID should be idempotent (no side effects)
890        assert!(arena.remove(id1).is_none());
891        assert_eq!(arena.len(), 1); // len unchanged
892
893        // Third remove should still be safe
894        assert!(arena.remove(id1).is_none());
895        assert_eq!(arena.len(), 1); // len still unchanged
896
897        // Other entries should be unaffected
898        assert!(arena.contains(id2));
899
900        // New allocation should work correctly
901        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
902        assert_eq!(id3.index(), id1.index()); // Reuses the removed slot
903        assert_eq!(arena.len(), 2);
904    }
905
906    #[test]
907    fn test_free_list_reuse() {
908        let mut arena = NodeArena::new();
909
910        // Allocate 3 nodes
911        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
912        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
913        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
914
915        // Remove middle node
916        arena.remove(id2);
917        assert_eq!(arena.len(), 2);
918        assert_eq!(arena.slot_count(), 3);
919
920        // Allocate new node - should reuse id2's slot
921        let id4 = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
922        assert_eq!(id4.index(), id2.index());
923        assert_eq!(arena.len(), 3);
924        assert_eq!(arena.slot_count(), 3); // No growth - slot reused
925
926        // Old id2 is stale
927        assert!(arena.get(id2).is_none());
928        // New id4 works
929        assert_eq!(arena.get(id4).unwrap().kind, NodeKind::Variable);
930        // Other IDs still valid
931        assert!(arena.contains(id1));
932        assert!(arena.contains(id3));
933    }
934
935    #[test]
936    fn test_invalid_id() {
937        let arena = NodeArena::new();
938        assert!(arena.get(NodeId::INVALID).is_none());
939    }
940
941    #[test]
942    fn test_out_of_bounds_id() {
943        let arena = NodeArena::new();
944        let fake_id = NodeId::new(999, 1);
945        assert!(arena.get(fake_id).is_none());
946    }
947
948    #[test]
949    fn test_wrong_generation() {
950        let mut arena = NodeArena::new();
951        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
952
953        // Create ID with wrong generation
954        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
955        assert!(arena.get(wrong_gen_id).is_none());
956    }
957
958    #[test]
959    fn test_iter() {
960        let mut arena = NodeArena::new();
961        arena.alloc(test_entry(NodeKind::Function)).unwrap();
962        arena.alloc(test_entry(NodeKind::Class)).unwrap();
963        arena.alloc(test_entry(NodeKind::Method)).unwrap();
964
965        let entries: Vec<_> = arena.iter().collect();
966        assert_eq!(entries.len(), 3);
967
968        let kinds: Vec<_> = entries.iter().map(|(_, e)| e.kind).collect();
969        assert!(kinds.contains(&NodeKind::Function));
970        assert!(kinds.contains(&NodeKind::Class));
971        assert!(kinds.contains(&NodeKind::Method));
972    }
973
974    #[test]
975    fn test_iter_with_removed() {
976        let mut arena = NodeArena::new();
977        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
978        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
979        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
980
981        arena.remove(id2);
982
983        let entries: Vec<_> = arena.iter().collect();
984        assert_eq!(entries.len(), 2);
985
986        let collected_ids: Vec<_> = entries.iter().map(|(id, _)| *id).collect();
987        assert!(collected_ids.contains(&id1));
988        assert!(!collected_ids.contains(&id2));
989        assert!(collected_ids.contains(&id3));
990    }
991
992    #[test]
993    fn test_iter_mut() {
994        let mut arena = NodeArena::new();
995        arena.alloc(test_entry(NodeKind::Function)).unwrap();
996        arena.alloc(test_entry(NodeKind::Class)).unwrap();
997
998        for (_, entry) in arena.iter_mut() {
999            entry.start_line = 100;
1000        }
1001
1002        for (_, entry) in arena.iter() {
1003            assert_eq!(entry.start_line, 100);
1004        }
1005    }
1006
1007    #[test]
1008    fn test_clear() {
1009        let mut arena = NodeArena::new();
1010        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1011        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1012
1013        assert_eq!(arena.len(), 2);
1014        arena.clear();
1015        assert_eq!(arena.len(), 0);
1016        assert!(arena.is_empty());
1017    }
1018
1019    #[test]
1020    fn test_reserve() {
1021        let mut arena = NodeArena::new();
1022        arena.reserve(1000);
1023        assert!(arena.capacity() >= 1000);
1024    }
1025
1026    #[test]
1027    fn test_display() {
1028        let mut arena = NodeArena::new();
1029        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1030
1031        let display = format!("{arena}");
1032        assert!(display.contains("NodeArena"));
1033        assert!(display.contains("len=1"));
1034    }
1035
1036    #[test]
1037    fn test_node_entry_builder() {
1038        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
1039            .with_byte_range(10, 100)
1040            .with_location(1, 0, 5, 10)
1041            .with_signature(StringId::new(2))
1042            .with_doc(StringId::new(3))
1043            .with_qualified_name(StringId::new(4));
1044
1045        assert_eq!(entry.kind, NodeKind::Function);
1046        assert_eq!(entry.start_byte, 10);
1047        assert_eq!(entry.end_byte, 100);
1048        assert_eq!(entry.start_line, 1);
1049        assert_eq!(entry.start_column, 0);
1050        assert_eq!(entry.end_line, 5);
1051        assert_eq!(entry.end_column, 10);
1052        assert_eq!(entry.signature, Some(StringId::new(2)));
1053        assert_eq!(entry.doc, Some(StringId::new(3)));
1054        assert_eq!(entry.qualified_name, Some(StringId::new(4)));
1055    }
1056
1057    #[test]
1058    fn test_slot_state() {
1059        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1060        assert!(occupied.is_occupied());
1061        assert!(!occupied.is_vacant());
1062        assert_eq!(occupied.get(), Some(&42));
1063
1064        let vacant: Slot<i32> = Slot::new_vacant(2, Some(5));
1065        assert!(!vacant.is_occupied());
1066        assert!(vacant.is_vacant());
1067        assert_eq!(vacant.get(), None);
1068    }
1069
1070    #[test]
1071    fn test_slot_generation() {
1072        let slot: Slot<i32> = Slot::new_occupied(42, 100);
1073        assert_eq!(slot.generation(), 42);
1074    }
1075
1076    #[test]
1077    fn test_slot_state_accessor() {
1078        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1079        assert!(matches!(occupied.state(), SlotState::Occupied(42)));
1080
1081        let vacant: Slot<i32> = Slot::new_vacant(1, Some(3));
1082        assert!(matches!(
1083            vacant.state(),
1084            SlotState::Vacant { next_free: Some(3) }
1085        ));
1086    }
1087
1088    // ---- Bulk API tests ----
1089
1090    #[test]
1091    fn test_alloc_range_basic() {
1092        let mut arena = NodeArena::new();
1093        let placeholder = test_entry(NodeKind::Function);
1094
1095        let start = arena.alloc_range(5, &placeholder).unwrap();
1096        assert_eq!(start, 0);
1097        assert_eq!(arena.len(), 5);
1098        assert_eq!(arena.slot_count(), 5);
1099
1100        // All slots should be occupied with generation 1.
1101        for i in 0..5u32 {
1102            let id = NodeId::new(i, 1);
1103            let entry = arena.get(id).expect("slot should be occupied");
1104            assert_eq!(entry.kind, NodeKind::Function);
1105        }
1106    }
1107
1108    #[test]
1109    fn test_alloc_range_after_existing() {
1110        let mut arena = NodeArena::new();
1111
1112        // Allocate one node via normal path.
1113        let id0 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1114        assert_eq!(id0.index(), 0);
1115        assert_eq!(arena.len(), 1);
1116
1117        // Bulk allocate 3 more.
1118        let placeholder = test_entry(NodeKind::Method);
1119        let start = arena.alloc_range(3, &placeholder).unwrap();
1120        assert_eq!(start, 1);
1121        assert_eq!(arena.len(), 4);
1122        assert_eq!(arena.slot_count(), 4);
1123
1124        // Original node intact.
1125        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Class);
1126
1127        // Bulk-allocated nodes accessible.
1128        for i in 1..4u32 {
1129            let id = NodeId::new(i, 1);
1130            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
1131        }
1132    }
1133
1134    #[test]
1135    fn test_alloc_range_zero_is_noop() {
1136        let mut arena = NodeArena::new();
1137
1138        // Allocate one normally first.
1139        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1140        let len_before = arena.len();
1141        let slot_count_before = arena.slot_count();
1142
1143        let start = arena.alloc_range(0, &test_entry(NodeKind::Class)).unwrap();
1144        assert_eq!(start, slot_count_before as u32);
1145        assert_eq!(arena.len(), len_before);
1146        assert_eq!(arena.slot_count(), slot_count_before);
1147    }
1148
1149    #[test]
1150    fn test_bulk_slice_mut() {
1151        let mut arena = NodeArena::new();
1152        let placeholder = test_entry(NodeKind::Function);
1153        let start = arena.alloc_range(3, &placeholder).unwrap();
1154
1155        // Overwrite slot 1 via bulk_slice_mut.
1156        {
1157            let slice = arena.bulk_slice_mut(start, 3);
1158            assert_eq!(slice.len(), 3);
1159
1160            // Replace the middle slot with a Class entry.
1161            slice[1] = Slot::new_occupied(1, test_entry(NodeKind::Class));
1162        }
1163
1164        // Verify the overwrite via get().
1165        let id1 = NodeId::new(1, 1);
1166        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Class);
1167
1168        // Other slots unchanged.
1169        let id0 = NodeId::new(0, 1);
1170        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Function);
1171        let id2 = NodeId::new(2, 1);
1172        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Function);
1173    }
1174
1175    #[test]
1176    fn test_truncate_to() {
1177        let mut arena = NodeArena::new();
1178        let placeholder = test_entry(NodeKind::Function);
1179
1180        // Allocate initial range.
1181        arena.alloc_range(3, &placeholder).unwrap();
1182        assert_eq!(arena.len(), 3);
1183        assert_eq!(arena.slot_count(), 3);
1184
1185        // Save state.
1186        let saved = arena.slot_count();
1187
1188        // Allocate more.
1189        arena.alloc_range(4, &placeholder).unwrap();
1190        assert_eq!(arena.len(), 7);
1191        assert_eq!(arena.slot_count(), 7);
1192
1193        // Truncate back.
1194        arena.truncate_to(saved);
1195        assert_eq!(arena.len(), 3);
1196        assert_eq!(arena.slot_count(), 3);
1197
1198        // Original slots still accessible.
1199        for i in 0..3u32 {
1200            let id = NodeId::new(i, 1);
1201            assert!(arena.get(id).is_some());
1202        }
1203    }
1204
1205    #[test]
1206    fn test_alloc_range_with_free_list() {
1207        let mut arena = NodeArena::new();
1208
1209        // Allocate two nodes, then remove the first to create a free list entry.
1210        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1211        let _id1 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1212        arena.remove(id0);
1213
1214        // State: len=1, slot_count=2, free_head=Some(0)
1215        assert_eq!(arena.len(), 1);
1216        assert_eq!(arena.slot_count(), 2);
1217
1218        // alloc_range should append AFTER slot_count, not reuse free list.
1219        let start = arena.alloc_range(3, &test_entry(NodeKind::Method)).unwrap();
1220        assert_eq!(start, 2); // Appended after existing 2 slots
1221        assert_eq!(arena.len(), 4); // 1 existing + 3 new
1222        assert_eq!(arena.slot_count(), 5); // 2 existing + 3 new
1223
1224        // Free list should still be intact — slot 0 still vacant.
1225        let slot0 = arena.slot(0).unwrap();
1226        assert!(slot0.is_vacant());
1227
1228        // Bulk-allocated nodes accessible.
1229        for i in 2..5u32 {
1230            let id = NodeId::new(i, 1);
1231            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
1232        }
1233    }
1234
1235    #[test]
1236    fn test_truncate_to_clamps_free_head() {
1237        let mut arena = NodeArena::new();
1238
1239        // Allocate 5 nodes.
1240        let mut ids = Vec::new();
1241        for _ in 0..5 {
1242            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1243        }
1244
1245        // Save state at 5 slots.
1246        let saved = arena.slot_count();
1247
1248        // Allocate 3 more, then remove one of them to put it on the free list.
1249        let extra_ids: Vec<_> = (0..3)
1250            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
1251            .collect();
1252        arena.remove(extra_ids[1]); // free_head now points to index 6
1253
1254        // Truncate back — free_head should be clamped since index 6 is gone.
1255        arena.truncate_to(saved);
1256        assert_eq!(arena.slot_count(), 5);
1257        assert_eq!(arena.len(), 5);
1258
1259        // Arena should still be usable: new alloc appends (no dangling free list).
1260        let new_id = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
1261        assert_eq!(new_id.index(), 5); // Appended, not reusing dangling slot
1262        assert_eq!(arena.get(new_id).unwrap().kind, NodeKind::Variable);
1263    }
1264
1265    #[test]
1266    fn test_truncate_to_preserves_retained_free_list() {
1267        let mut arena = NodeArena::new();
1268
1269        // Allocate 8 nodes: indices 0..7
1270        let mut ids = Vec::new();
1271        for _ in 0..8 {
1272            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1273        }
1274        assert_eq!(arena.len(), 8);
1275
1276        // Remove indices 2 and 6 — both on free list.
1277        // Free list: 6 -> 2 -> None (LIFO)
1278        arena.remove(ids[2]);
1279        arena.remove(ids[6]);
1280        assert_eq!(arena.len(), 6);
1281
1282        // Truncate to 5 slots — removes indices 5,6,7.
1283        // Index 6 (vacant) is in truncated region, index 2 (vacant) is retained.
1284        // Free list chain was 6 -> 2. After truncate, only index 2 should survive.
1285        arena.truncate_to(5);
1286
1287        // Should have 4 occupied (indices 0,1,3,4) + 1 vacant (index 2) = 5 slots.
1288        // len should be 4 (we had 6 occupied, truncated 3 slots: 5 occupied + 1 vacant
1289        // in truncated region = indices 5(occ), 6(vac), 7(occ) => dropped 2 occupied).
1290        assert_eq!(arena.slot_count(), 5);
1291        assert_eq!(arena.len(), 4);
1292
1293        // Free list should contain only index 2.
1294        // Verify by allocating — should reuse index 2.
1295        let reused = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
1296        assert_eq!(reused.index(), 2);
1297        assert_eq!(arena.get(reused).unwrap().kind, NodeKind::Variable);
1298        assert_eq!(arena.len(), 5);
1299
1300        // Next alloc should append (no more free slots).
1301        let appended = arena.alloc(test_entry(NodeKind::Class)).unwrap();
1302        assert_eq!(appended.index(), 5);
1303        assert_eq!(arena.len(), 6);
1304    }
1305
1306    #[test]
1307    fn test_slots_read_access() {
1308        let mut arena = NodeArena::new();
1309        let placeholder = test_entry(NodeKind::Variable);
1310        arena.alloc_range(4, &placeholder).unwrap();
1311
1312        let slots = arena.slots();
1313        assert_eq!(slots.len(), 4);
1314
1315        for slot in slots {
1316            assert!(slot.is_occupied());
1317            assert_eq!(slot.generation(), 1);
1318            assert_eq!(slot.get().unwrap().kind, NodeKind::Variable);
1319        }
1320    }
1321
1322    #[test]
1323    fn test_multiple_remove_reuse_cycle() {
1324        let mut arena = NodeArena::new();
1325
1326        // Allocate and remove multiple times to test free list chaining
1327        let mut ids = Vec::new();
1328        for _ in 0..5 {
1329            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1330        }
1331
1332        // Remove in reverse order (builds free list: 4 -> 3 -> 2 -> 1 -> 0)
1333        for &id in ids.iter().rev() {
1334            arena.remove(id);
1335        }
1336
1337        // Reallocate - should use free list in LIFO order
1338        let new_ids: Vec<_> = (0..5)
1339            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
1340            .collect();
1341
1342        // All old IDs should be stale
1343        for &old_id in &ids {
1344            assert!(arena.get(old_id).is_none());
1345        }
1346
1347        // All new IDs should work
1348        for &new_id in &new_ids {
1349            assert!(arena.get(new_id).is_some());
1350        }
1351    }
1352
1353    #[test]
1354    fn test_generation_overflow_at_max_generation() {
1355        // Test that allocation fails when generation reaches MAX_GENERATION
1356        let mut arena = NodeArena::new();
1357
1358        // Allocate a node
1359        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1360        assert_eq!(id.index(), 0);
1361
1362        // Remove it to put on free list
1363        arena.remove(id);
1364
1365        // Manually set the slot's generation to MAX_GENERATION
1366        // This simulates what would happen after ~9.2 quintillion allocations
1367        arena.slots[0].generation = NodeId::MAX_GENERATION;
1368
1369        // Now allocation should fail because incrementing MAX_GENERATION would overflow
1370        let result = arena.alloc(test_entry(NodeKind::Class));
1371        assert!(result.is_err());
1372
1373        let err = result.unwrap_err();
1374        match err {
1375            ArenaError::GenerationOverflow(inner) => {
1376                assert_eq!(inner.index, 0);
1377                assert_eq!(inner.generation, NodeId::MAX_GENERATION);
1378            }
1379            _ => panic!("expected GenerationOverflow, got {err:?}"),
1380        }
1381    }
1382
1383    #[test]
1384    fn test_free_list_corruption_returns_error() {
1385        // Test that free list corruption returns an error instead of panicking
1386        let mut arena = NodeArena::new();
1387
1388        // Allocate and remove a node to put it on free list
1389        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1390        arena.remove(id);
1391
1392        // Corrupt the free list by manually marking the vacant slot as occupied
1393        arena.slots[0].state = SlotState::Occupied(test_entry(NodeKind::Class));
1394
1395        // Allocation should fail gracefully with FreeListCorruption error
1396        let result = arena.alloc(test_entry(NodeKind::Method));
1397        assert!(result.is_err());
1398
1399        let err = result.unwrap_err();
1400        match err {
1401            ArenaError::FreeListCorruption { index } => {
1402                assert_eq!(index, 0);
1403            }
1404            _ => panic!("expected FreeListCorruption, got {err:?}"),
1405        }
1406
1407        // Verify the arena is still in a consistent state (len was rolled back)
1408        assert_eq!(arena.len(), 0);
1409    }
1410
1411    #[test]
1412    fn test_arena_error_display() {
1413        let gen_err = ArenaError::GenerationOverflow(GenerationOverflowError {
1414            index: 42,
1415            generation: 100,
1416        });
1417        let display = format!("{gen_err}");
1418        assert!(display.contains("42"));
1419        assert!(display.contains("100"));
1420
1421        let corruption_err = ArenaError::FreeListCorruption { index: 5 };
1422        let display = format!("{corruption_err}");
1423        assert!(display.contains("free list corruption"));
1424        assert!(display.contains("5"));
1425
1426        let capacity_err = ArenaError::CapacityExceeded;
1427        let display = format!("{capacity_err}");
1428        assert!(display.contains("capacity exceeded"));
1429    }
1430
1431    #[test]
1432    fn test_arena_error_source_generation_overflow() {
1433        use std::error::Error;
1434
1435        let inner = GenerationOverflowError {
1436            index: 0,
1437            generation: NodeId::MAX_GENERATION,
1438        };
1439        let err = ArenaError::GenerationOverflow(inner);
1440        // source() returns Some for GenerationOverflow
1441        assert!(err.source().is_some());
1442    }
1443
1444    #[test]
1445    fn test_arena_error_source_none_for_other_variants() {
1446        use std::error::Error;
1447
1448        let corruption = ArenaError::FreeListCorruption { index: 0 };
1449        assert!(corruption.source().is_none());
1450
1451        let capacity = ArenaError::CapacityExceeded;
1452        assert!(capacity.source().is_none());
1453    }
1454
1455    #[test]
1456    fn test_arena_from_generation_overflow_error() {
1457        let inner = GenerationOverflowError {
1458            index: 10,
1459            generation: 99,
1460        };
1461        let err: ArenaError = inner.into();
1462        assert!(matches!(err, ArenaError::GenerationOverflow(_)));
1463    }
1464
1465    #[test]
1466    fn test_arena_error_clone_and_eq() {
1467        let err = ArenaError::CapacityExceeded;
1468        let cloned = err.clone();
1469        assert_eq!(err, cloned);
1470
1471        let err2 = ArenaError::FreeListCorruption { index: 3 };
1472        let cloned2 = err2.clone();
1473        assert_eq!(err2, cloned2);
1474    }
1475
1476    #[test]
1477    fn test_node_entry_with_visibility() {
1478        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
1479            .with_visibility(StringId::new(5));
1480
1481        assert_eq!(entry.visibility, Some(StringId::new(5)));
1482    }
1483
1484    #[test]
1485    fn test_node_entry_with_async_and_static() {
1486        let entry = NodeEntry::new(NodeKind::Method, test_name(), test_file())
1487            .with_async(true)
1488            .with_static(true);
1489
1490        assert!(entry.is_async);
1491        assert!(entry.is_static);
1492    }
1493
1494    #[test]
1495    fn test_node_entry_with_unsafe_flag() {
1496        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_unsafe(true);
1497
1498        assert!(entry.is_unsafe);
1499    }
1500
1501    #[test]
1502    fn test_node_entry_with_body_hash() {
1503        use crate::graph::body_hash::BodyHash128;
1504        let hash = BodyHash128::from_u128(0u128);
1505        let entry =
1506            NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_body_hash(hash);
1507
1508        assert!(entry.body_hash.is_some());
1509    }
1510
1511    #[test]
1512    fn test_node_entry_defaults() {
1513        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file());
1514        assert_eq!(entry.start_byte, 0);
1515        assert_eq!(entry.end_byte, 0);
1516        assert_eq!(entry.start_line, 0);
1517        assert_eq!(entry.start_column, 0);
1518        assert_eq!(entry.end_line, 0);
1519        assert_eq!(entry.end_column, 0);
1520        assert!(entry.signature.is_none());
1521        assert!(entry.doc.is_none());
1522        assert!(entry.qualified_name.is_none());
1523        assert!(entry.visibility.is_none());
1524        assert!(!entry.is_async);
1525        assert!(!entry.is_static);
1526        assert!(!entry.is_unsafe);
1527        assert!(entry.body_hash.is_none());
1528    }
1529
1530    #[test]
1531    fn test_arena_default_impl() {
1532        let arena = NodeArena::default();
1533        assert_eq!(arena.len(), 0);
1534        assert!(arena.is_empty());
1535    }
1536
1537    #[test]
1538    fn test_get_invalid_id() {
1539        let mut arena = NodeArena::new();
1540        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1541        // get with INVALID should return None
1542        assert!(arena.get(NodeId::INVALID).is_none());
1543    }
1544
1545    #[test]
1546    fn test_get_mut_invalid_id() {
1547        let mut arena = NodeArena::new();
1548        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1549        assert!(arena.get_mut(NodeId::INVALID).is_none());
1550    }
1551
1552    #[test]
1553    fn test_get_mut_wrong_generation() {
1554        let mut arena = NodeArena::new();
1555        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1556        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
1557        assert!(arena.get_mut(wrong_gen_id).is_none());
1558    }
1559
1560    #[test]
1561    fn test_remove_invalid_id() {
1562        let mut arena = NodeArena::new();
1563        assert!(arena.remove(NodeId::INVALID).is_none());
1564    }
1565
1566    #[test]
1567    fn test_slot_get_mut_occupied() {
1568        let mut slot: Slot<i32> = Slot::new_occupied(1, 42);
1569        let val = slot.get_mut().unwrap();
1570        *val = 99;
1571        assert_eq!(slot.get(), Some(&99));
1572    }
1573
1574    #[test]
1575    fn test_slot_get_mut_vacant() {
1576        let mut slot: Slot<i32> = Slot::new_vacant(1, None);
1577        assert!(slot.get_mut().is_none());
1578    }
1579
1580    #[test]
1581    fn test_truncate_to_zero_occupied_dropped() {
1582        let mut arena = NodeArena::new();
1583        let placeholder = test_entry(NodeKind::Function);
1584        arena.alloc_range(5, &placeholder).unwrap();
1585        // Truncate to 0 — all 5 occupied slots are dropped
1586        arena.truncate_to(0);
1587        assert_eq!(arena.len(), 0);
1588        assert_eq!(arena.slot_count(), 0);
1589    }
1590
1591    #[test]
1592    fn test_slot_count_vs_len() {
1593        let mut arena = NodeArena::new();
1594        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1595        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1596        arena.remove(id0);
1597        // slot_count is total slots (including vacant), len is occupied only
1598        assert_eq!(arena.slot_count(), 2);
1599        assert_eq!(arena.len(), 1);
1600    }
1601}