// cjc_runtime/gc.rs
1//! GC compatibility layer — thin wrapper over `ObjectSlab`.
2//!
3//! The mark-sweep garbage collector has been removed. This module preserves
4//! the `GcRef` and `GcHeap` API for backward compatibility, but all allocation
5//! is now backed by deterministic reference counting via `ObjectSlab`.
6//!
7//! # What changed
8//!
9//! - `GcHeap::alloc()` → delegates to `ObjectSlab::alloc()` (RC-backed)
10//! - `GcHeap::collect()` → no-op (RC handles deallocation)
11//! - `GcHeap::mark()` / `sweep()` → removed (no mark-sweep semantics)
12//! - `GcHeap::get()` → delegates to `ObjectSlab::get()`
13//! - `GcHeap::live_count()` → delegates to `ObjectSlab::live_count()`
14//!
15//! # Determinism
16//!
17//! The `ObjectSlab` provides deterministic LIFO slot reuse. Same allocation
18//! sequence → same slot indices. No stop-the-world pauses.
19
20use std::any::Any;
21use std::fmt;
22
23use crate::object_slab::{ObjectSlab, SlabRef};
24
/// A handle into the GC heap. Lightweight, copyable index.
///
/// Now backed by `SlabRef` (RC-based slab) instead of mark-sweep GC.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct GcRef {
    /// Slot index into the backing `ObjectSlab`; mirrors `SlabRef::index`.
    /// Public for backward compatibility with callers that inspect indices.
    pub index: usize,
}
32
33impl GcRef {
34    fn from_slab(sr: SlabRef) -> Self {
35        GcRef { index: sr.index }
36    }
37
38    fn to_slab(self) -> SlabRef {
39        SlabRef { index: self.index }
40    }
41}
42
/// Backward-compatible GC heap interface backed by `ObjectSlab`.
///
/// All mark-sweep semantics have been removed. Objects are reference-counted
/// and freed deterministically when no references remain.
pub struct GcHeap {
    /// RC-backed slot storage; all alloc/get/free operations delegate here.
    slab: ObjectSlab,
    /// Maintained for API compat; incremented on `collect()` calls.
    collection_count: u64,
}
52
53impl fmt::Debug for GcHeap {
54    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
55        f.debug_struct("GcHeap")
56            .field("live_count", &self.slab.live_count())
57            .field("capacity", &self.slab.capacity())
58            .field("collection_count", &self.collection_count)
59            .finish()
60    }
61}
62
63impl GcHeap {
64    /// Create a new heap. The `_collection_threshold` parameter is accepted
65    /// for API compatibility but ignored (no automatic collection in RC mode).
66    pub fn new(_collection_threshold: usize) -> Self {
67        GcHeap {
68            slab: ObjectSlab::new(),
69            collection_count: 0,
70        }
71    }
72
73    /// Allocate a value on the heap, returning a handle.
74    pub fn alloc<T: Any + 'static>(&mut self, value: T) -> GcRef {
75        GcRef::from_slab(self.slab.alloc(value))
76    }
77
78    /// Allocate with "auto-collection" — for API compat.
79    /// The `_roots` parameter is ignored (no GC to trigger).
80    pub fn alloc_auto<T: Any + 'static>(&mut self, value: T, _roots: &[GcRef]) -> GcRef {
81        self.alloc(value)
82    }
83
84    /// Read a reference to the value behind `gc_ref`, downcasting to `T`.
85    pub fn get<T: Any + 'static>(&self, gc_ref: GcRef) -> Option<&T> {
86        self.slab.get::<T>(gc_ref.to_slab())
87    }
88
89    /// No-op: mark-sweep has been removed. Objects are reference-counted.
90    pub fn collect(&mut self, _roots: &[GcRef]) {
91        self.collection_count += 1;
92        self.slab.collect_noop();
93    }
94
95    /// Number of live objects on the heap.
96    pub fn live_count(&self) -> usize {
97        self.slab.live_count()
98    }
99
100    /// Total capacity (number of slots, including freed ones).
101    pub fn capacity(&self) -> usize {
102        self.slab.capacity()
103    }
104
105    /// Access the free list (for backward compat with tests).
106    pub fn free_list(&self) -> &[usize] {
107        &self.slab.free_list
108    }
109
110    /// Explicitly free a slot.
111    pub fn free(&mut self, gc_ref: GcRef) {
112        self.slab.free(gc_ref.to_slab());
113    }
114}
115
116impl Default for GcHeap {
117    fn default() -> Self {
118        Self::new(1024)
119    }
120}
121
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_and_read_back() {
        // A freshly allocated value is readable and counts as live.
        let mut heap = GcHeap::new(1024);
        let handle = heap.alloc(42i64);
        assert_eq!(heap.get::<i64>(handle), Some(&42));
        assert_eq!(heap.live_count(), 1);
    }

    #[test]
    fn collect_is_noop_objects_survive() {
        // Collect with partial roots — but since it's RC-backed, ALL survive.
        let mut heap = GcHeap::new(1024);
        let first = heap.alloc(10i64);
        let second = heap.alloc(20i64);
        heap.collect(&[first]);
        assert_eq!(heap.live_count(), 2, "RC keeps all objects alive");
        assert_eq!(heap.get::<i64>(first), Some(&10));
        assert_eq!(heap.get::<i64>(second), Some(&20));
    }

    #[test]
    fn explicit_free_and_slot_reuse() {
        let mut heap = GcHeap::new(1024);
        let kept = heap.alloc(1i64);
        let freed = heap.alloc(2i64);
        let _tail = heap.alloc(3i64);

        // Explicitly free the middle allocation; its slot joins the free list.
        heap.free(freed);
        assert_eq!(heap.free_list().len(), 1);

        // The next alloc reuses the most recently freed slot (LIFO).
        let reused = heap.alloc(4i64);
        assert_eq!(reused.index, freed.index, "LIFO slot reuse");
        assert_eq!(heap.get::<i64>(reused), Some(&4));
        assert_eq!(heap.get::<i64>(kept), Some(&1));
    }

    #[test]
    fn type_mismatch_returns_none() {
        // Downcasting to the wrong type yields None; the right type still works.
        let mut heap = GcHeap::new(1024);
        let handle = heap.alloc(42i64);
        assert_eq!(heap.get::<String>(handle), None);
        assert_eq!(heap.get::<i64>(handle), Some(&42));
    }

    #[test]
    fn alloc_auto_compat() {
        // alloc_auto ignores roots in RC mode — nothing gets collected.
        let mut heap = GcHeap::new(2);
        let first = heap.alloc(1i64);
        let _ = heap.alloc(2i64);
        let third = heap.alloc_auto(3i64, &[first]);
        assert_eq!(heap.get::<i64>(first), Some(&1));
        assert_eq!(heap.get::<i64>(third), Some(&3));
        // All objects still alive (RC, not GC).
        assert_eq!(heap.live_count(), 3);
    }

    #[test]
    fn deterministic_slot_order() {
        // Two heaps running the same alloc/free sequence must hand out
        // identical slot indices.
        let mut left = GcHeap::new(1024);
        let mut right = GcHeap::new(1024);

        let l1 = left.alloc(10i64);
        let l2 = left.alloc(20i64);
        left.free(l1);
        let l3 = left.alloc(30i64);

        let r1 = right.alloc(10i64);
        let r2 = right.alloc(20i64);
        right.free(r1);
        let r3 = right.alloc(30i64);

        assert_eq!(l1.index, r1.index);
        assert_eq!(l2.index, r2.index);
        assert_eq!(l3.index, r3.index, "LIFO reuse deterministic");
    }
}