Skip to main content

sqry_core/graph/unified/storage/
arena.rs

//! `NodeArena`: Contiguous node storage with generational indices.
//!
//! This module implements `NodeArena`, the primary node storage for the
//! unified graph architecture. It provides:
//!
//! - **O(1) access**: Direct indexing into contiguous storage
//! - **Generational safety**: Stale IDs return `None` instead of wrong data
//! - **Free list reuse**: Deleted slots are recycled efficiently
//!
//! # Design
//!
//! The arena uses a slot-based design:
//! - Each slot is either occupied (with data) or vacant (with next-free pointer)
//! - Occupied slots store `NodeEntry` with generation counter
//! - Vacant slots form a singly-linked free list
//! - Generation counter increments on each reuse to detect stale IDs
18use std::fmt;
19
20use serde::{Deserialize, Serialize};
21
22use super::super::file::id::FileId;
23use super::super::node::id::{GenerationOverflowError, NodeId};
24use super::super::node::kind::NodeKind;
25use super::super::string::id::StringId;
26use crate::graph::body_hash::BodyHash128;
27
/// Error type for arena allocation operations.
///
/// This enum covers all possible failure modes during node allocation,
/// providing graceful error handling instead of panics.
///
/// Variants carry enough context (e.g. the offending slot index) to
/// diagnose the failure without re-inspecting arena state.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ArenaError {
    /// Generation counter would overflow, risking stale ID collision.
    GenerationOverflow(GenerationOverflowError),

    /// Free list is corrupted: an occupied slot was found in the free list.
    ///
    /// This indicates a serious internal consistency error, likely caused by
    /// concurrent modification without proper synchronization, memory corruption,
    /// or a bug in the arena implementation.
    FreeListCorruption {
        /// Index of the corrupted slot.
        index: u32,
    },

    /// Arena capacity exceeded: cannot allocate more than `u32::MAX` nodes.
    CapacityExceeded,
}
50
51impl fmt::Display for ArenaError {
52    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
53        match self {
54            ArenaError::GenerationOverflow(e) => write!(f, "{e}"),
55            ArenaError::FreeListCorruption { index } => {
56                write!(
57                    f,
58                    "free list corruption: occupied slot found at index {index}"
59                )
60            }
61            ArenaError::CapacityExceeded => {
62                write!(
63                    f,
64                    "arena capacity exceeded: cannot allocate more than {} nodes",
65                    u32::MAX
66                )
67            }
68        }
69    }
70}
71
72impl std::error::Error for ArenaError {
73    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
74        match self {
75            ArenaError::GenerationOverflow(e) => Some(e),
76            _ => None,
77        }
78    }
79}
80
81impl From<GenerationOverflowError> for ArenaError {
82    fn from(e: GenerationOverflowError) -> Self {
83        ArenaError::GenerationOverflow(e)
84    }
85}
86
/// State of a slot in the arena.
///
/// This enum is the core of the arena invariant: occupied slots hold live
/// entries, while vacant slots chain together into an intrusive
/// singly-linked free list via `next_free`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SlotState<T> {
    /// Slot contains an occupied entry.
    Occupied(T),
    /// Slot is vacant, contains next free index (or None if end of list).
    Vacant {
        /// Index of the next free slot in the free list, or None if end.
        next_free: Option<u32>,
    },
}
98
/// A slot in the arena with generation tracking.
///
/// The generation is bumped each time the slot is recycled (see
/// `NodeArena::alloc`), so a `NodeId` minted for a previous occupant no
/// longer matches and reads as stale instead of aliasing new data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Slot<T> {
    /// Current generation for this slot.
    generation: u64,
    /// Slot state (occupied or vacant).
    state: SlotState<T>,
}
107
108impl<T> Slot<T> {
109    /// Creates a new vacant slot with the given generation and next free.
110    #[allow(dead_code)] // Used by Compaction (Step 15)
111    fn new_vacant(generation: u64, next_free: Option<u32>) -> Self {
112        Self {
113            generation,
114            state: SlotState::Vacant { next_free },
115        }
116    }
117
118    /// Creates a new occupied slot with the given generation and data.
119    pub(crate) fn new_occupied(generation: u64, data: T) -> Self {
120        Self {
121            generation,
122            state: SlotState::Occupied(data),
123        }
124    }
125
126    /// Returns true if this slot is occupied.
127    #[inline]
128    pub fn is_occupied(&self) -> bool {
129        matches!(self.state, SlotState::Occupied(_))
130    }
131
132    /// Returns true if this slot is vacant.
133    #[inline]
134    pub fn is_vacant(&self) -> bool {
135        matches!(self.state, SlotState::Vacant { .. })
136    }
137
138    /// Returns the generation of this slot.
139    #[inline]
140    pub fn generation(&self) -> u64 {
141        self.generation
142    }
143
144    /// Returns a reference to the occupied data, if any.
145    pub fn get(&self) -> Option<&T> {
146        match &self.state {
147            SlotState::Occupied(data) => Some(data),
148            SlotState::Vacant { .. } => None,
149        }
150    }
151
152    /// Returns a reference to the slot state (occupied or vacant).
153    #[inline]
154    pub fn state(&self) -> &SlotState<T> {
155        &self.state
156    }
157
158    /// Returns a mutable reference to the occupied data, if any.
159    pub fn get_mut(&mut self) -> Option<&mut T> {
160        match &mut self.state {
161            SlotState::Occupied(data) => Some(data),
162            SlotState::Vacant { .. } => None,
163        }
164    }
165}
166
/// A node entry stored in the arena.
///
/// This is the actual data stored for each node in the graph.
///
/// NOTE: the declaration order of fields is part of the postcard wire
/// format (positional serialization) — see the `body_hash` and `is_unsafe`
/// docs below before reordering or inserting fields.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NodeEntry {
    /// The kind of code entity this node represents.
    pub kind: NodeKind,
    /// Interned name of the symbol.
    pub name: StringId,
    /// File containing this node.
    pub file: FileId,
    /// Start byte offset in the file.
    pub start_byte: u32,
    /// End byte offset in the file.
    pub end_byte: u32,
    /// Start line (1-indexed).
    pub start_line: u32,
    /// Start column (0-indexed).
    pub start_column: u32,
    /// End line (1-indexed).
    pub end_line: u32,
    /// End column (0-indexed).
    pub end_column: u32,
    /// Optional signature/type information.
    pub signature: Option<StringId>,
    /// Optional docstring.
    pub doc: Option<StringId>,
    /// Optional qualified name (e.g., module.Class.method).
    pub qualified_name: Option<StringId>,
    /// Optional visibility modifier (public, private, protected, etc.).
    pub visibility: Option<StringId>,
    /// Whether this is an async function/method.
    pub is_async: bool,
    /// Whether this is a static member.
    pub is_static: bool,
    /// 128-bit body hash for duplicate detection.
    ///
    /// Computed from the raw body bytes of hashable symbol kinds:
    /// Function, Method, Class, Struct, Enum, Interface, Trait, Module.
    ///
    /// CRITICAL: This field MUST remain BEFORE `is_unsafe` for
    /// postcard serialization compatibility with older snapshots.
    /// Uses `#[serde(default)]` to read older V2 snapshots that lack this field.
    /// DO NOT use `skip_serializing_if` - postcard is positional and skipping corrupts the stream.
    #[serde(default)]
    pub body_hash: Option<BodyHash128>,
    /// Whether this is an unsafe function (Rust, C, etc.).
    ///
    /// For FFI functions, this indicates whether the function is declared with
    /// `unsafe` modifier (Haskell), `unsafe` keyword (Rust), or similar safety
    /// markers in other languages.
    ///
    /// CRITICAL: This field is placed AFTER `body_hash` to maintain postcard
    /// serialization compatibility. Adding fields before `body_hash` would
    /// corrupt existing snapshots due to positional deserialization.
    /// Uses `#[serde(default)]` to read older snapshots that lack this field.
    #[serde(default)]
    pub is_unsafe: bool,
}
226
227impl NodeEntry {
228    /// Creates a new node entry with minimal required fields.
229    #[must_use]
230    pub fn new(kind: NodeKind, name: StringId, file: FileId) -> Self {
231        Self {
232            kind,
233            name,
234            file,
235            start_byte: 0,
236            end_byte: 0,
237            start_line: 0,
238            start_column: 0,
239            end_line: 0,
240            end_column: 0,
241            signature: None,
242            doc: None,
243            qualified_name: None,
244            visibility: None,
245            is_async: false,
246            is_static: false,
247            is_unsafe: false,
248            body_hash: None,
249        }
250    }
251
252    /// Sets the byte range for this node.
253    #[must_use]
254    pub fn with_byte_range(mut self, start: u32, end: u32) -> Self {
255        self.start_byte = start;
256        self.end_byte = end;
257        self
258    }
259
260    /// Sets the line/column range for this node.
261    #[must_use]
262    pub fn with_location(
263        mut self,
264        start_line: u32,
265        start_column: u32,
266        end_line: u32,
267        end_column: u32,
268    ) -> Self {
269        self.start_line = start_line;
270        self.start_column = start_column;
271        self.end_line = end_line;
272        self.end_column = end_column;
273        self
274    }
275
276    /// Sets the signature for this node.
277    #[must_use]
278    pub fn with_signature(mut self, signature: StringId) -> Self {
279        self.signature = Some(signature);
280        self
281    }
282
283    /// Sets the docstring for this node.
284    #[must_use]
285    pub fn with_doc(mut self, doc: StringId) -> Self {
286        self.doc = Some(doc);
287        self
288    }
289
290    /// Sets the qualified name for this node.
291    #[must_use]
292    pub fn with_qualified_name(mut self, qualified_name: StringId) -> Self {
293        self.qualified_name = Some(qualified_name);
294        self
295    }
296
297    /// Sets the visibility modifier for this node.
298    #[must_use]
299    pub fn with_visibility(mut self, visibility: StringId) -> Self {
300        self.visibility = Some(visibility);
301        self
302    }
303
304    /// Sets the async flag for this node.
305    #[must_use]
306    pub fn with_async(mut self, is_async: bool) -> Self {
307        self.is_async = is_async;
308        self
309    }
310
311    /// Sets the static flag for this node.
312    #[must_use]
313    pub fn with_static(mut self, is_static: bool) -> Self {
314        self.is_static = is_static;
315        self
316    }
317
318    /// Sets the unsafe flag for this node.
319    #[must_use]
320    pub fn with_unsafe(mut self, is_unsafe: bool) -> Self {
321        self.is_unsafe = is_unsafe;
322        self
323    }
324
325    /// Sets the body hash for this node.
326    ///
327    /// The body hash is a 128-bit hash computed from the raw body bytes
328    /// of the symbol. Used for duplicate code detection.
329    #[must_use]
330    pub fn with_body_hash(mut self, hash: BodyHash128) -> Self {
331        self.body_hash = Some(hash);
332        self
333    }
334}
335
/// Contiguous node storage with generational indices.
///
/// `NodeArena` provides O(1) access to nodes by `NodeId`, with stale
/// reference detection via generation counters.
///
/// # Free List Management
///
/// When a node is removed, its slot is added to the free list for reuse.
/// This avoids memory fragmentation and keeps indices stable.
///
/// # Example
///
/// ```rust,ignore
/// let mut arena = NodeArena::new();
/// let id = arena.alloc(NodeEntry::new(NodeKind::Function, name, file))?;
///
/// let node = arena.get(id).expect("node exists");
/// assert_eq!(node.kind, NodeKind::Function);
///
/// arena.remove(id)?;
/// assert!(arena.get(id).is_none()); // Stale ID returns None
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeArena {
    /// Slots for node storage.
    slots: Vec<Slot<NodeEntry>>,
    /// Head of the free list (index of first free slot).
    /// Invariant: points at a `Vacant` slot within bounds, or is `None`.
    free_head: Option<u32>,
    /// Number of currently occupied slots.
    /// Invariant: always equals the count of `Occupied` entries in `slots`.
    len: usize,
}
367
impl NodeArena {
    /// Creates a new empty arena.
    #[must_use]
    pub fn new() -> Self {
        Self {
            slots: Vec::new(),
            free_head: None,
            len: 0,
        }
    }

    /// Creates a new arena with the specified initial capacity.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            slots: Vec::with_capacity(capacity),
            free_head: None,
            len: 0,
        }
    }

    /// Returns the number of occupied slots.
    #[inline]
    #[must_use]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the arena is empty.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the allocated capacity (slots that can be stored without reallocation).
    #[inline]
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.slots.capacity()
    }

    /// Returns the total number of slots (occupied + vacant).
    ///
    /// This is different from `len()` which counts only occupied slots,
    /// and `capacity()` which counts pre-allocated slots.
    #[inline]
    #[must_use]
    pub fn slot_count(&self) -> usize {
        self.slots.len()
    }

    /// Allocates a new node and returns its `NodeId`.
    ///
    /// If there are free slots available, one is reused. Otherwise, a new
    /// slot is appended to the storage.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError`] if:
    /// - Generation counter would overflow (`GenerationOverflow`)
    /// - Free list is corrupted (`FreeListCorruption`)
    /// - Arena capacity exceeded (`CapacityExceeded`)
    pub fn alloc(&mut self, entry: NodeEntry) -> Result<NodeId, ArenaError> {
        // Optimistically count the new node; every error path below must
        // roll this back to keep the `len` invariant intact.
        self.len += 1;

        if let Some(free_idx) = self.free_head {
            // Check bounds before accessing slot - corrupted free list could point out of bounds
            let slot_index = free_idx as usize;
            if slot_index >= self.slots.len() {
                self.len -= 1; // Rollback
                return Err(ArenaError::FreeListCorruption { index: free_idx });
            }

            // Reuse a free slot (bounds now guaranteed valid)
            let slot = &mut self.slots[slot_index];

            // Update free list head
            self.free_head = match &slot.state {
                SlotState::Vacant { next_free } => *next_free,
                SlotState::Occupied(_) => {
                    // Free list corruption detected - return error instead of panicking
                    self.len -= 1; // Rollback
                    return Err(ArenaError::FreeListCorruption { index: free_idx });
                }
            };

            // Increment generation for reuse detection (respect MAX_GENERATION limit)
            // Use NodeId::try_increment_generation which checks against MAX_GENERATION, not u64::MAX
            let temp_id = NodeId::new(free_idx, slot.generation);
            let new_generation = temp_id.try_increment_generation().map_err(|mut e| {
                self.len -= 1; // Rollback
                e.index = free_idx; // Ensure index is correct
                ArenaError::GenerationOverflow(e)
            })?;

            slot.generation = new_generation;
            slot.state = SlotState::Occupied(entry);

            Ok(NodeId::new(free_idx, new_generation))
        } else {
            // Append a new slot
            let index = self.slots.len();

            // Check capacity before allocation using checked conversion.
            // NOTE(review): this admits index == u32::MAX (i.e. u32::MAX + 1
            // slots total), while the docs say "more than u32::MAX nodes" —
            // confirm NodeId does not reserve index u32::MAX for INVALID.
            let Ok(index_u32) = u32::try_from(index) else {
                self.len -= 1; // Rollback
                return Err(ArenaError::CapacityExceeded);
            };

            // Fresh slots start at generation 1 (0 is never issued).
            let slot = Slot::new_occupied(1, entry);
            self.slots.push(slot);

            Ok(NodeId::new(index_u32, 1))
        }
    }

    /// Returns a reference to the node with the given ID.
    ///
    /// Returns `None` if the ID is invalid or stale (generation mismatch).
    #[must_use]
    pub fn get(&self, id: NodeId) -> Option<&NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        // A matching generation on a vacant slot still yields None here.
        slot.get()
    }

    /// Returns a mutable reference to the node with the given ID.
    ///
    /// Returns `None` if the ID is invalid or stale.
    #[must_use]
    pub fn get_mut(&mut self, id: NodeId) -> Option<&mut NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get_mut(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        slot.get_mut()
    }

    /// Checks if the given ID is valid (not stale).
    #[must_use]
    pub fn contains(&self, id: NodeId) -> bool {
        self.get(id).is_some()
    }

    /// Removes the node with the given ID.
    ///
    /// Returns the removed entry, or `None` if the ID was invalid/stale.
    /// This method is idempotent: calling it twice with the same ID will
    /// return `None` on the second call without corrupting the arena.
    ///
    /// Note: the generation is NOT bumped here; it is bumped on reuse in
    /// `alloc`. A removed ID still reads as `None` because the slot is
    /// vacant, and `get()` on a vacant slot yields nothing.
    pub fn remove(&mut self, id: NodeId) -> Option<NodeEntry> {
        if id.is_invalid() {
            return None;
        }

        let index = id.index() as usize;
        let slot = self.slots.get_mut(index)?;

        // Check generation match
        if slot.generation != id.generation() {
            return None;
        }

        // Only proceed if slot is occupied - this makes remove idempotent
        // and prevents double-decrement of len / double-enqueue to free list
        if let SlotState::Occupied(_) = &slot.state {
            let old_state = std::mem::replace(
                &mut slot.state,
                SlotState::Vacant {
                    next_free: self.free_head,
                },
            );

            // Update free list and len only when actually removing
            self.free_head = Some(id.index());
            self.len -= 1;

            if let SlotState::Occupied(entry) = old_state {
                return Some(entry);
            }
        }

        None
    }

    /// Iterates over all occupied entries with their IDs.
    pub fn iter(&self) -> impl Iterator<Item = (NodeId, &NodeEntry)> {
        self.slots.iter().enumerate().filter_map(|(index, slot)| {
            if let SlotState::Occupied(entry) = &slot.state {
                let index_u32 = u32::try_from(index).ok()?;
                Some((NodeId::new(index_u32, slot.generation), entry))
            } else {
                None
            }
        })
    }

    /// Iterates over all occupied entries with mutable access.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (NodeId, &mut NodeEntry)> {
        self.slots
            .iter_mut()
            .enumerate()
            .filter_map(|(index, slot)| {
                // Copy the generation out first so the closure can borrow
                // `slot.state` mutably without conflicting.
                let generation = slot.generation;
                if let SlotState::Occupied(entry) = &mut slot.state {
                    let index_u32 = u32::try_from(index).ok()?;
                    Some((NodeId::new(index_u32, generation), entry))
                } else {
                    None
                }
            })
    }

    /// Clears all nodes from the arena.
    ///
    /// This resets the arena to an empty state, but retains allocated capacity.
    /// Note: generation history is discarded, so IDs from before `clear()`
    /// may alias future allocations at the same index.
    pub fn clear(&mut self) {
        self.slots.clear();
        self.free_head = None;
        self.len = 0;
    }

    /// Reserves capacity for at least `additional` more nodes.
    pub fn reserve(&mut self, additional: usize) {
        self.slots.reserve(additional);
    }

    /// Returns the slot at the given index, if any.
    ///
    /// This is a low-level method for internal use. Prefer `get()` for normal access.
    #[must_use]
    pub fn slot(&self, index: u32) -> Option<&Slot<NodeEntry>> {
        self.slots.get(index as usize)
    }

    /// Pre-allocates a contiguous range of occupied slots for parallel commit.
    ///
    /// Returns the start index of the allocated range. All slots are initialized
    /// as `Occupied` with clones of the `placeholder` entry at generation 1.
    /// The free list is **not** touched, preserving its invariants.
    ///
    /// A `count` of zero is a no-op and returns the current slot count as the
    /// start index.
    ///
    /// # Errors
    ///
    /// Returns [`ArenaError::CapacityExceeded`] if the resulting slot count
    /// would exceed `u32::MAX`.
    pub fn alloc_range(&mut self, count: u32, placeholder: &NodeEntry) -> Result<u32, ArenaError> {
        if count == 0 {
            // No-op: return current slot count as start index (safe because
            // slot_count <= u32::MAX is maintained as an invariant).
            return Ok(u32::try_from(self.slots.len())
                .unwrap_or_else(|_| unreachable!("slot_count invariant violated")));
        }

        let start = self.slots.len();
        let new_total = start
            .checked_add(count as usize)
            .ok_or(ArenaError::CapacityExceeded)?;

        // Verify new total fits in u32 (arena index space).
        // NOTE(review): `> u32::MAX + 1` permits exactly u32::MAX + 1 slots
        // (highest index u32::MAX), matching `alloc` but not the doc above
        // — confirm against NodeId's INVALID encoding.
        if new_total > u32::MAX as usize + 1 {
            return Err(ArenaError::CapacityExceeded);
        }

        let start_u32 = u32::try_from(start).map_err(|_| ArenaError::CapacityExceeded)?;

        // Reserve and fill in one go.
        self.slots.reserve(count as usize);
        for _ in 0..count {
            self.slots.push(Slot::new_occupied(1, placeholder.clone()));
        }

        // Increment occupied count immediately — arena state is always consistent.
        self.len += count as usize;

        Ok(start_u32)
    }

    /// Returns a mutable sub-slice of the internal slot storage.
    ///
    /// This is intended for parallel writes after [`alloc_range`](Self::alloc_range):
    /// each thread can write to its own non-overlapping portion of the slice.
    ///
    /// # Panics
    ///
    /// Panics if `start + count` exceeds the slot count.
    #[must_use]
    pub fn bulk_slice_mut(&mut self, start: u32, count: u32) -> &mut [Slot<NodeEntry>] {
        let begin = start as usize;
        let end = begin + count as usize;
        &mut self.slots[begin..end]
    }

    /// Truncates the slot storage to `saved_slot_count` slots.
    ///
    /// Used to roll back a failed bulk allocation. Any slots beyond
    /// `saved_slot_count` are dropped, and `len` is adjusted to reflect
    /// only the occupied slots that remain.
    ///
    /// # Panics
    ///
    /// Panics if `saved_slot_count` is greater than the current slot count
    /// (cannot grow via truncation).
    pub fn truncate_to(&mut self, saved_slot_count: usize) {
        assert!(
            saved_slot_count <= self.slots.len(),
            "truncate_to({saved_slot_count}) exceeds current slot count ({})",
            self.slots.len(),
        );

        // Count occupied slots that will be dropped.
        let dropped_occupied = self.slots[saved_slot_count..]
            .iter()
            .filter(|s| s.is_occupied())
            .count();

        self.slots.truncate(saved_slot_count);
        self.len -= dropped_occupied;

        // Rebuild free list: remove any entries that pointed into the truncated
        // region. We walk the chain and keep only links to retained slots.
        self.rebuild_free_list_after_truncate(saved_slot_count);
    }

    /// Rebuilds the free list after a truncation, dropping any links to slots
    /// at or beyond `cutoff`.
    ///
    /// Scans retained slots for vacant entries and re-links them into a fresh
    /// free list. This is O(cutoff) but `truncate_to` is a rare rollback path.
    fn rebuild_free_list_after_truncate(&mut self, cutoff: usize) {
        // Simple approach: scan retained slots for vacant entries.
        // This avoids issues with interleaved chains (e.g., head -> 6 -> 0)
        // where truncated indices appear mid-chain.
        let mut new_head: Option<u32> = None;
        // Walk backwards so the resulting list is in ascending index order
        // (head = lowest free index), matching the pattern of fresh arenas.
        for i in (0..cutoff).rev() {
            if self.slots[i].is_vacant() {
                let i_u32 = u32::try_from(i)
                    .unwrap_or_else(|_| unreachable!("free-list index exceeds u32 invariant"));
                self.slots[i].state = SlotState::Vacant {
                    next_free: new_head,
                };
                new_head = Some(i_u32);
            }
        }
        self.free_head = new_head;
    }

    /// Returns a read-only view of all slots (occupied and vacant).
    ///
    /// Useful for post-build passes that need to scan every slot without
    /// the overhead of generation checks.
    #[must_use]
    pub fn slots(&self) -> &[Slot<NodeEntry>] {
        &self.slots
    }
}
746
747impl Default for NodeArena {
748    fn default() -> Self {
749        Self::new()
750    }
751}
752
753impl fmt::Display for NodeArena {
754    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
755        write!(
756            f,
757            "NodeArena(len={}, capacity={})",
758            self.len,
759            self.capacity()
760        )
761    }
762}
763
764impl crate::graph::unified::memory::GraphMemorySize for NodeArena {
765    fn heap_bytes(&self) -> usize {
766        self.slots.capacity() * std::mem::size_of::<Slot<NodeEntry>>()
767    }
768}
769
770#[cfg(test)]
771mod tests {
772    use super::*;
773
774    fn test_file() -> FileId {
775        FileId::new(1)
776    }
777
778    fn test_name() -> StringId {
779        StringId::new(1)
780    }
781
782    fn test_entry(kind: NodeKind) -> NodeEntry {
783        NodeEntry::new(kind, test_name(), test_file())
784    }
785
786    #[test]
787    fn test_arena_new() {
788        let arena = NodeArena::new();
789        assert_eq!(arena.len(), 0);
790        assert!(arena.is_empty());
791        assert_eq!(arena.capacity(), 0);
792    }
793
794    #[test]
795    fn test_arena_with_capacity() {
796        let arena = NodeArena::with_capacity(100);
797        assert_eq!(arena.len(), 0);
798        assert!(arena.capacity() >= 100);
799    }
800
801    #[test]
802    fn test_alloc_and_get() {
803        let mut arena = NodeArena::new();
804        let entry = test_entry(NodeKind::Function);
805
806        let id = arena.alloc(entry).unwrap();
807        assert!(!id.is_invalid());
808        assert_eq!(arena.len(), 1);
809
810        let node = arena.get(id).unwrap();
811        assert_eq!(node.kind, NodeKind::Function);
812    }
813
814    #[test]
815    fn test_alloc_multiple() {
816        let mut arena = NodeArena::new();
817
818        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
819        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
820        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
821
822        assert_eq!(arena.len(), 3);
823        assert_ne!(id1, id2);
824        assert_ne!(id2, id3);
825
826        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Function);
827        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
828        assert_eq!(arena.get(id3).unwrap().kind, NodeKind::Method);
829    }
830
831    #[test]
832    fn test_get_mut() {
833        let mut arena = NodeArena::new();
834        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
835
836        {
837            let node = arena.get_mut(id).unwrap();
838            node.start_line = 42;
839        }
840
841        assert_eq!(arena.get(id).unwrap().start_line, 42);
842    }
843
844    #[test]
845    fn test_contains() {
846        let mut arena = NodeArena::new();
847        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
848
849        assert!(arena.contains(id));
850        assert!(!arena.contains(NodeId::INVALID));
851    }
852
853    #[test]
854    fn test_remove() {
855        let mut arena = NodeArena::new();
856        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
857
858        assert_eq!(arena.len(), 1);
859        assert!(arena.contains(id));
860
861        let removed = arena.remove(id).unwrap();
862        assert_eq!(removed.kind, NodeKind::Function);
863        assert_eq!(arena.len(), 0);
864        assert!(!arena.contains(id));
865    }
866
867    #[test]
868    fn test_stale_id_after_remove() {
869        let mut arena = NodeArena::new();
870        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
871
872        // Remove and reallocate
873        arena.remove(id1);
874        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
875
876        // Old ID should be stale
877        assert!(arena.get(id1).is_none());
878        // New ID should work
879        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Class);
880        // They should have the same index but different generations
881        assert_eq!(id1.index(), id2.index());
882        assert_ne!(id1.generation(), id2.generation());
883    }
884
885    #[test]
886    fn test_remove_idempotent() {
887        let mut arena = NodeArena::new();
888        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
889        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
890
891        // First remove should succeed
892        assert!(arena.remove(id1).is_some());
893        assert_eq!(arena.len(), 1);
894
895        // Second remove of same ID should be idempotent (no side effects)
896        assert!(arena.remove(id1).is_none());
897        assert_eq!(arena.len(), 1); // len unchanged
898
899        // Third remove should still be safe
900        assert!(arena.remove(id1).is_none());
901        assert_eq!(arena.len(), 1); // len still unchanged
902
903        // Other entries should be unaffected
904        assert!(arena.contains(id2));
905
906        // New allocation should work correctly
907        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
908        assert_eq!(id3.index(), id1.index()); // Reuses the removed slot
909        assert_eq!(arena.len(), 2);
910    }
911
    #[test]
    fn test_free_list_reuse() {
        // Removing a node places its slot on the free list; the next alloc
        // reuses that slot instead of growing the slot vector, and the reused
        // slot's old ID becomes stale while neighbors remain valid.
        let mut arena = NodeArena::new();

        // Allocate 3 nodes
        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();

        // Remove middle node
        arena.remove(id2);
        assert_eq!(arena.len(), 2);
        assert_eq!(arena.slot_count(), 3);

        // Allocate new node - should reuse id2's slot
        let id4 = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
        assert_eq!(id4.index(), id2.index());
        assert_eq!(arena.len(), 3);
        assert_eq!(arena.slot_count(), 3); // No growth - slot reused

        // Old id2 is stale
        assert!(arena.get(id2).is_none());
        // New id4 works
        assert_eq!(arena.get(id4).unwrap().kind, NodeKind::Variable);
        // Other IDs still valid
        assert!(arena.contains(id1));
        assert!(arena.contains(id3));
    }
940
941    #[test]
942    fn test_invalid_id() {
943        let arena = NodeArena::new();
944        assert!(arena.get(NodeId::INVALID).is_none());
945    }
946
947    #[test]
948    fn test_out_of_bounds_id() {
949        let arena = NodeArena::new();
950        let fake_id = NodeId::new(999, 1);
951        assert!(arena.get(fake_id).is_none());
952    }
953
954    #[test]
955    fn test_wrong_generation() {
956        let mut arena = NodeArena::new();
957        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
958
959        // Create ID with wrong generation
960        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
961        assert!(arena.get(wrong_gen_id).is_none());
962    }
963
964    #[test]
965    fn test_iter() {
966        let mut arena = NodeArena::new();
967        arena.alloc(test_entry(NodeKind::Function)).unwrap();
968        arena.alloc(test_entry(NodeKind::Class)).unwrap();
969        arena.alloc(test_entry(NodeKind::Method)).unwrap();
970
971        let entries: Vec<_> = arena.iter().collect();
972        assert_eq!(entries.len(), 3);
973
974        let kinds: Vec<_> = entries.iter().map(|(_, e)| e.kind).collect();
975        assert!(kinds.contains(&NodeKind::Function));
976        assert!(kinds.contains(&NodeKind::Class));
977        assert!(kinds.contains(&NodeKind::Method));
978    }
979
980    #[test]
981    fn test_iter_with_removed() {
982        let mut arena = NodeArena::new();
983        let id1 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
984        let id2 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
985        let id3 = arena.alloc(test_entry(NodeKind::Method)).unwrap();
986
987        arena.remove(id2);
988
989        let entries: Vec<_> = arena.iter().collect();
990        assert_eq!(entries.len(), 2);
991
992        let collected_ids: Vec<_> = entries.iter().map(|(id, _)| *id).collect();
993        assert!(collected_ids.contains(&id1));
994        assert!(!collected_ids.contains(&id2));
995        assert!(collected_ids.contains(&id3));
996    }
997
998    #[test]
999    fn test_iter_mut() {
1000        let mut arena = NodeArena::new();
1001        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1002        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1003
1004        for (_, entry) in arena.iter_mut() {
1005            entry.start_line = 100;
1006        }
1007
1008        for (_, entry) in arena.iter() {
1009            assert_eq!(entry.start_line, 100);
1010        }
1011    }
1012
1013    #[test]
1014    fn test_clear() {
1015        let mut arena = NodeArena::new();
1016        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1017        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1018
1019        assert_eq!(arena.len(), 2);
1020        arena.clear();
1021        assert_eq!(arena.len(), 0);
1022        assert!(arena.is_empty());
1023    }
1024
1025    #[test]
1026    fn test_reserve() {
1027        let mut arena = NodeArena::new();
1028        arena.reserve(1000);
1029        assert!(arena.capacity() >= 1000);
1030    }
1031
1032    #[test]
1033    fn test_display() {
1034        let mut arena = NodeArena::new();
1035        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1036
1037        let display = format!("{arena}");
1038        assert!(display.contains("NodeArena"));
1039        assert!(display.contains("len=1"));
1040    }
1041
    #[test]
    fn test_node_entry_builder() {
        // Each `with_*` builder method populates exactly one group of fields;
        // chained together they produce a fully-specified entry.
        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
            .with_byte_range(10, 100)
            .with_location(1, 0, 5, 10)
            .with_signature(StringId::new(2))
            .with_doc(StringId::new(3))
            .with_qualified_name(StringId::new(4));

        assert_eq!(entry.kind, NodeKind::Function);
        assert_eq!(entry.start_byte, 10);
        assert_eq!(entry.end_byte, 100);
        assert_eq!(entry.start_line, 1);
        assert_eq!(entry.start_column, 0);
        assert_eq!(entry.end_line, 5);
        assert_eq!(entry.end_column, 10);
        assert_eq!(entry.signature, Some(StringId::new(2)));
        assert_eq!(entry.doc, Some(StringId::new(3)));
        assert_eq!(entry.qualified_name, Some(StringId::new(4)));
    }
1062
1063    #[test]
1064    fn test_slot_state() {
1065        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1066        assert!(occupied.is_occupied());
1067        assert!(!occupied.is_vacant());
1068        assert_eq!(occupied.get(), Some(&42));
1069
1070        let vacant: Slot<i32> = Slot::new_vacant(2, Some(5));
1071        assert!(!vacant.is_occupied());
1072        assert!(vacant.is_vacant());
1073        assert_eq!(vacant.get(), None);
1074    }
1075
1076    #[test]
1077    fn test_slot_generation() {
1078        let slot: Slot<i32> = Slot::new_occupied(42, 100);
1079        assert_eq!(slot.generation(), 42);
1080    }
1081
1082    #[test]
1083    fn test_slot_state_accessor() {
1084        let occupied: Slot<i32> = Slot::new_occupied(1, 42);
1085        assert!(matches!(occupied.state(), SlotState::Occupied(42)));
1086
1087        let vacant: Slot<i32> = Slot::new_vacant(1, Some(3));
1088        assert!(matches!(
1089            vacant.state(),
1090            SlotState::Vacant { next_free: Some(3) }
1091        ));
1092    }
1093
1094    // ---- Bulk API tests ----
1095
    #[test]
    fn test_alloc_range_basic() {
        // alloc_range appends `count` occupied copies of the placeholder and
        // returns the index of the first one; fresh slots start at generation 1.
        let mut arena = NodeArena::new();
        let placeholder = test_entry(NodeKind::Function);

        let start = arena.alloc_range(5, &placeholder).unwrap();
        assert_eq!(start, 0);
        assert_eq!(arena.len(), 5);
        assert_eq!(arena.slot_count(), 5);

        // All slots should be occupied with generation 1.
        for i in 0..5u32 {
            let id = NodeId::new(i, 1);
            let entry = arena.get(id).expect("slot should be occupied");
            assert_eq!(entry.kind, NodeKind::Function);
        }
    }
1113
    #[test]
    fn test_alloc_range_after_existing() {
        // Bulk allocation appends after existing slots: the returned start
        // index follows the current slot count and existing entries survive.
        let mut arena = NodeArena::new();

        // Allocate one node via normal path.
        let id0 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
        assert_eq!(id0.index(), 0);
        assert_eq!(arena.len(), 1);

        // Bulk allocate 3 more.
        let placeholder = test_entry(NodeKind::Method);
        let start = arena.alloc_range(3, &placeholder).unwrap();
        assert_eq!(start, 1);
        assert_eq!(arena.len(), 4);
        assert_eq!(arena.slot_count(), 4);

        // Original node intact.
        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Class);

        // Bulk-allocated nodes accessible.
        for i in 1..4u32 {
            let id = NodeId::new(i, 1);
            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
        }
    }
1139
    #[test]
    fn test_alloc_range_zero_is_noop() {
        // Requesting a zero-length range changes nothing; the returned start
        // still reflects where a range would have begun (the current slot count).
        let mut arena = NodeArena::new();

        // Allocate one normally first.
        arena.alloc(test_entry(NodeKind::Function)).unwrap();
        let len_before = arena.len();
        let slot_count_before = arena.slot_count();

        let start = arena.alloc_range(0, &test_entry(NodeKind::Class)).unwrap();
        #[allow(clippy::cast_possible_truncation)]
        // Arena slot count fits in u32 for graph storage indices
        let expected_start = slot_count_before as u32;
        assert_eq!(start, expected_start);
        assert_eq!(arena.len(), len_before);
        assert_eq!(arena.slot_count(), slot_count_before);
    }
1157
    #[test]
    fn test_bulk_slice_mut() {
        // bulk_slice_mut exposes raw slots for direct overwrite; writes made
        // through the slice are observable via the normal get() path.
        let mut arena = NodeArena::new();
        let placeholder = test_entry(NodeKind::Function);
        let start = arena.alloc_range(3, &placeholder).unwrap();

        // Overwrite slot 1 via bulk_slice_mut.
        {
            let slice = arena.bulk_slice_mut(start, 3);
            assert_eq!(slice.len(), 3);

            // Replace the middle slot with a Class entry.
            // Generation 1 matches what alloc_range assigned, so existing
            // NodeId::new(1, 1) handles remain valid.
            slice[1] = Slot::new_occupied(1, test_entry(NodeKind::Class));
        }

        // Verify the overwrite via get().
        let id1 = NodeId::new(1, 1);
        assert_eq!(arena.get(id1).unwrap().kind, NodeKind::Class);

        // Other slots unchanged.
        let id0 = NodeId::new(0, 1);
        assert_eq!(arena.get(id0).unwrap().kind, NodeKind::Function);
        let id2 = NodeId::new(2, 1);
        assert_eq!(arena.get(id2).unwrap().kind, NodeKind::Function);
    }
1183
    #[test]
    fn test_truncate_to() {
        // truncate_to rolls the arena back to a previously saved slot count,
        // discarding everything allocated after the save point while keeping
        // earlier slots accessible.
        let mut arena = NodeArena::new();
        let placeholder = test_entry(NodeKind::Function);

        // Allocate initial range.
        arena.alloc_range(3, &placeholder).unwrap();
        assert_eq!(arena.len(), 3);
        assert_eq!(arena.slot_count(), 3);

        // Save state.
        let saved = arena.slot_count();

        // Allocate more.
        arena.alloc_range(4, &placeholder).unwrap();
        assert_eq!(arena.len(), 7);
        assert_eq!(arena.slot_count(), 7);

        // Truncate back.
        arena.truncate_to(saved);
        assert_eq!(arena.len(), 3);
        assert_eq!(arena.slot_count(), 3);

        // Original slots still accessible.
        for i in 0..3u32 {
            let id = NodeId::new(i, 1);
            assert!(arena.get(id).is_some());
        }
    }
1213
    #[test]
    fn test_alloc_range_with_free_list() {
        // alloc_range must append new slots rather than consuming the free
        // list, leaving previously-freed slots vacant and the list intact.
        let mut arena = NodeArena::new();

        // Allocate two nodes, then remove the first to create a free list entry.
        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
        let _id1 = arena.alloc(test_entry(NodeKind::Class)).unwrap();
        arena.remove(id0);

        // State: len=1, slot_count=2, free_head=Some(0)
        assert_eq!(arena.len(), 1);
        assert_eq!(arena.slot_count(), 2);

        // alloc_range should append AFTER slot_count, not reuse free list.
        let start = arena.alloc_range(3, &test_entry(NodeKind::Method)).unwrap();
        assert_eq!(start, 2); // Appended after existing 2 slots
        assert_eq!(arena.len(), 4); // 1 existing + 3 new
        assert_eq!(arena.slot_count(), 5); // 2 existing + 3 new

        // Free list should still be intact — slot 0 still vacant.
        let slot0 = arena.slot(0).unwrap();
        assert!(slot0.is_vacant());

        // Bulk-allocated nodes accessible.
        for i in 2..5u32 {
            let id = NodeId::new(i, 1);
            assert_eq!(arena.get(id).unwrap().kind, NodeKind::Method);
        }
    }
1243
1244    #[test]
1245    fn test_truncate_to_clamps_free_head() {
1246        let mut arena = NodeArena::new();
1247
1248        // Allocate 5 nodes.
1249        let mut ids = Vec::new();
1250        for _ in 0..5 {
1251            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
1252        }
1253
1254        // Save state at 5 slots.
1255        let saved = arena.slot_count();
1256
1257        // Allocate 3 more, then remove one of them to put it on the free list.
1258        let extra_ids: Vec<_> = (0..3)
1259            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
1260            .collect();
1261        arena.remove(extra_ids[1]); // free_head now points to index 6
1262
1263        // Truncate back — free_head should be clamped since index 6 is gone.
1264        arena.truncate_to(saved);
1265        assert_eq!(arena.slot_count(), 5);
1266        assert_eq!(arena.len(), 5);
1267
1268        // Arena should still be usable: new alloc appends (no dangling free list).
1269        let new_id = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
1270        assert_eq!(new_id.index(), 5); // Appended, not reusing dangling slot
1271        assert_eq!(arena.get(new_id).unwrap().kind, NodeKind::Variable);
1272    }
1273
    #[test]
    fn test_truncate_to_preserves_retained_free_list() {
        // When truncation drops a vacant slot that sits in the middle of the
        // free-list chain, the surviving vacant slots must remain reachable:
        // chain 6 -> 2 with slot 6 truncated should leave just slot 2 free.
        let mut arena = NodeArena::new();

        // Allocate 8 nodes: indices 0..7
        let mut ids = Vec::new();
        for _ in 0..8 {
            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
        }
        assert_eq!(arena.len(), 8);

        // Remove indices 2 and 6 — both on free list.
        // Free list: 6 -> 2 -> None (LIFO)
        arena.remove(ids[2]);
        arena.remove(ids[6]);
        assert_eq!(arena.len(), 6);

        // Truncate to 5 slots — removes indices 5,6,7.
        // Index 6 (vacant) is in truncated region, index 2 (vacant) is retained.
        // Free list chain was 6 -> 2. After truncate, only index 2 should survive.
        arena.truncate_to(5);

        // Should have 4 occupied (indices 0,1,3,4) + 1 vacant (index 2) = 5 slots.
        // len should be 4 (we had 6 occupied, truncated 3 slots: 5 occupied + 1 vacant
        // in truncated region = indices 5(occ), 6(vac), 7(occ) => dropped 2 occupied).
        assert_eq!(arena.slot_count(), 5);
        assert_eq!(arena.len(), 4);

        // Free list should contain only index 2.
        // Verify by allocating — should reuse index 2.
        let reused = arena.alloc(test_entry(NodeKind::Variable)).unwrap();
        assert_eq!(reused.index(), 2);
        assert_eq!(arena.get(reused).unwrap().kind, NodeKind::Variable);
        assert_eq!(arena.len(), 5);

        // Next alloc should append (no more free slots).
        let appended = arena.alloc(test_entry(NodeKind::Class)).unwrap();
        assert_eq!(appended.index(), 5);
        assert_eq!(arena.len(), 6);
    }
1314
1315    #[test]
1316    fn test_slots_read_access() {
1317        let mut arena = NodeArena::new();
1318        let placeholder = test_entry(NodeKind::Variable);
1319        arena.alloc_range(4, &placeholder).unwrap();
1320
1321        let slots = arena.slots();
1322        assert_eq!(slots.len(), 4);
1323
1324        for slot in slots {
1325            assert!(slot.is_occupied());
1326            assert_eq!(slot.generation(), 1);
1327            assert_eq!(slot.get().unwrap().kind, NodeKind::Variable);
1328        }
1329    }
1330
    #[test]
    fn test_multiple_remove_reuse_cycle() {
        // Removing every node and reallocating the same number exercises the
        // whole free-list chain; all old IDs must go stale and all new IDs
        // must resolve.
        let mut arena = NodeArena::new();

        // Allocate and remove multiple times to test free list chaining
        let mut ids = Vec::new();
        for _ in 0..5 {
            ids.push(arena.alloc(test_entry(NodeKind::Function)).unwrap());
        }

        // Remove in reverse order (builds free list: 4 -> 3 -> 2 -> 1 -> 0)
        for &id in ids.iter().rev() {
            arena.remove(id);
        }

        // Reallocate - should use free list in LIFO order
        let new_ids: Vec<_> = (0..5)
            .map(|_| arena.alloc(test_entry(NodeKind::Class)).unwrap())
            .collect();

        // All old IDs should be stale
        for &old_id in &ids {
            assert!(arena.get(old_id).is_none());
        }

        // All new IDs should work
        for &new_id in &new_ids {
            assert!(arena.get(new_id).is_some());
        }
    }
1361
    #[test]
    fn test_generation_overflow_at_max_generation() {
        // Test that allocation fails when generation reaches MAX_GENERATION.
        // The slot's generation is forced to the limit by direct field access
        // (the test module can see private internals), since reaching it
        // organically is infeasible.
        let mut arena = NodeArena::new();

        // Allocate a node
        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
        assert_eq!(id.index(), 0);

        // Remove it to put on free list
        arena.remove(id);

        // Manually set the slot's generation to MAX_GENERATION
        // This simulates what would happen after ~9.2 quintillion allocations
        arena.slots[0].generation = NodeId::MAX_GENERATION;

        // Now allocation should fail because incrementing MAX_GENERATION would overflow
        let result = arena.alloc(test_entry(NodeKind::Class));
        assert!(result.is_err());

        let err = result.unwrap_err();
        match err {
            ArenaError::GenerationOverflow(inner) => {
                // The error reports which slot hit the limit and at what value.
                assert_eq!(inner.index, 0);
                assert_eq!(inner.generation, NodeId::MAX_GENERATION);
            }
            _ => panic!("expected GenerationOverflow, got {err:?}"),
        }
    }
1391
    #[test]
    fn test_free_list_corruption_returns_error() {
        // Test that free list corruption returns an error instead of panicking.
        // Corruption is induced by flipping a vacant slot to occupied via
        // direct field access (visible to the test module).
        let mut arena = NodeArena::new();

        // Allocate and remove a node to put it on free list
        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
        arena.remove(id);

        // Corrupt the free list by manually marking the vacant slot as occupied
        arena.slots[0].state = SlotState::Occupied(test_entry(NodeKind::Class));

        // Allocation should fail gracefully with FreeListCorruption error
        let result = arena.alloc(test_entry(NodeKind::Method));
        assert!(result.is_err());

        let err = result.unwrap_err();
        match err {
            ArenaError::FreeListCorruption { index } => {
                assert_eq!(index, 0);
            }
            _ => panic!("expected FreeListCorruption, got {err:?}"),
        }

        // Verify the arena is still in a consistent state (len was rolled back)
        assert_eq!(arena.len(), 0);
    }
1419
1420    #[test]
1421    fn test_arena_error_display() {
1422        let gen_err = ArenaError::GenerationOverflow(GenerationOverflowError {
1423            index: 42,
1424            generation: 100,
1425        });
1426        let display = format!("{gen_err}");
1427        assert!(display.contains("42"));
1428        assert!(display.contains("100"));
1429
1430        let corruption_err = ArenaError::FreeListCorruption { index: 5 };
1431        let display = format!("{corruption_err}");
1432        assert!(display.contains("free list corruption"));
1433        assert!(display.contains('5'));
1434
1435        let capacity_err = ArenaError::CapacityExceeded;
1436        let display = format!("{capacity_err}");
1437        assert!(display.contains("capacity exceeded"));
1438    }
1439
1440    #[test]
1441    fn test_arena_error_source_generation_overflow() {
1442        use std::error::Error;
1443
1444        let inner = GenerationOverflowError {
1445            index: 0,
1446            generation: NodeId::MAX_GENERATION,
1447        };
1448        let err = ArenaError::GenerationOverflow(inner);
1449        // source() returns Some for GenerationOverflow
1450        assert!(err.source().is_some());
1451    }
1452
1453    #[test]
1454    fn test_arena_error_source_none_for_other_variants() {
1455        use std::error::Error;
1456
1457        let corruption = ArenaError::FreeListCorruption { index: 0 };
1458        assert!(corruption.source().is_none());
1459
1460        let capacity = ArenaError::CapacityExceeded;
1461        assert!(capacity.source().is_none());
1462    }
1463
1464    #[test]
1465    fn test_arena_from_generation_overflow_error() {
1466        let inner = GenerationOverflowError {
1467            index: 10,
1468            generation: 99,
1469        };
1470        let err: ArenaError = inner.into();
1471        assert!(matches!(err, ArenaError::GenerationOverflow(_)));
1472    }
1473
1474    #[test]
1475    fn test_arena_error_clone_and_eq() {
1476        let err = ArenaError::CapacityExceeded;
1477        let cloned = err.clone();
1478        assert_eq!(err, cloned);
1479
1480        let err2 = ArenaError::FreeListCorruption { index: 3 };
1481        let cloned2 = err2.clone();
1482        assert_eq!(err2, cloned2);
1483    }
1484
1485    #[test]
1486    fn test_node_entry_with_visibility() {
1487        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file())
1488            .with_visibility(StringId::new(5));
1489
1490        assert_eq!(entry.visibility, Some(StringId::new(5)));
1491    }
1492
1493    #[test]
1494    fn test_node_entry_with_async_and_static() {
1495        let entry = NodeEntry::new(NodeKind::Method, test_name(), test_file())
1496            .with_async(true)
1497            .with_static(true);
1498
1499        assert!(entry.is_async);
1500        assert!(entry.is_static);
1501    }
1502
1503    #[test]
1504    fn test_node_entry_with_unsafe_flag() {
1505        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_unsafe(true);
1506
1507        assert!(entry.is_unsafe);
1508    }
1509
1510    #[test]
1511    fn test_node_entry_with_body_hash() {
1512        use crate::graph::body_hash::BodyHash128;
1513        let hash = BodyHash128::from_u128(0u128);
1514        let entry =
1515            NodeEntry::new(NodeKind::Function, test_name(), test_file()).with_body_hash(hash);
1516
1517        assert!(entry.body_hash.is_some());
1518    }
1519
1520    #[test]
1521    fn test_node_entry_defaults() {
1522        let entry = NodeEntry::new(NodeKind::Function, test_name(), test_file());
1523        assert_eq!(entry.start_byte, 0);
1524        assert_eq!(entry.end_byte, 0);
1525        assert_eq!(entry.start_line, 0);
1526        assert_eq!(entry.start_column, 0);
1527        assert_eq!(entry.end_line, 0);
1528        assert_eq!(entry.end_column, 0);
1529        assert!(entry.signature.is_none());
1530        assert!(entry.doc.is_none());
1531        assert!(entry.qualified_name.is_none());
1532        assert!(entry.visibility.is_none());
1533        assert!(!entry.is_async);
1534        assert!(!entry.is_static);
1535        assert!(!entry.is_unsafe);
1536        assert!(entry.body_hash.is_none());
1537    }
1538
1539    #[test]
1540    fn test_arena_default_impl() {
1541        let arena = NodeArena::default();
1542        assert_eq!(arena.len(), 0);
1543        assert!(arena.is_empty());
1544    }
1545
1546    #[test]
1547    fn test_get_invalid_id() {
1548        let mut arena = NodeArena::new();
1549        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1550        // get with INVALID should return None
1551        assert!(arena.get(NodeId::INVALID).is_none());
1552    }
1553
1554    #[test]
1555    fn test_get_mut_invalid_id() {
1556        let mut arena = NodeArena::new();
1557        arena.alloc(test_entry(NodeKind::Function)).unwrap();
1558        assert!(arena.get_mut(NodeId::INVALID).is_none());
1559    }
1560
1561    #[test]
1562    fn test_get_mut_wrong_generation() {
1563        let mut arena = NodeArena::new();
1564        let id = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1565        let wrong_gen_id = NodeId::new(id.index(), id.generation() + 1);
1566        assert!(arena.get_mut(wrong_gen_id).is_none());
1567    }
1568
1569    #[test]
1570    fn test_remove_invalid_id() {
1571        let mut arena = NodeArena::new();
1572        assert!(arena.remove(NodeId::INVALID).is_none());
1573    }
1574
1575    #[test]
1576    fn test_slot_get_mut_occupied() {
1577        let mut slot: Slot<i32> = Slot::new_occupied(1, 42);
1578        let val = slot.get_mut().unwrap();
1579        *val = 99;
1580        assert_eq!(slot.get(), Some(&99));
1581    }
1582
1583    #[test]
1584    fn test_slot_get_mut_vacant() {
1585        let mut slot: Slot<i32> = Slot::new_vacant(1, None);
1586        assert!(slot.get_mut().is_none());
1587    }
1588
1589    #[test]
1590    fn test_truncate_to_zero_occupied_dropped() {
1591        let mut arena = NodeArena::new();
1592        let placeholder = test_entry(NodeKind::Function);
1593        arena.alloc_range(5, &placeholder).unwrap();
1594        // Truncate to 0 — all 5 occupied slots are dropped
1595        arena.truncate_to(0);
1596        assert_eq!(arena.len(), 0);
1597        assert_eq!(arena.slot_count(), 0);
1598    }
1599
1600    #[test]
1601    fn test_slot_count_vs_len() {
1602        let mut arena = NodeArena::new();
1603        let id0 = arena.alloc(test_entry(NodeKind::Function)).unwrap();
1604        arena.alloc(test_entry(NodeKind::Class)).unwrap();
1605        arena.remove(id0);
1606        // slot_count is total slots (including vacant), len is occupied only
1607        assert_eq!(arena.slot_count(), 2);
1608        assert_eq!(arena.len(), 1);
1609    }
1610}