// cjc_runtime/object_slab.rs
//! Object Slab — deterministic RC-backed replacement for mark-sweep GC.
//!
//! Provides indexed allocation of type-erased objects. Each occupied slot
//! holds a strong `Rc<RefCell<Box<dyn Any>>>`, so a stored value lives until
//! its slot is explicitly freed (or the slab itself is dropped); deallocation
//! is therefore deterministic. Freed slots are reused in LIFO order for
//! deterministic placement.
//!
//! # Determinism guarantees
//!
//! - Same allocation sequence produces same slot indices.
//! - Slot reuse is LIFO (last freed = first reused).
//! - No stop-the-world pauses.
//! - No OS memory return during normal execution (slots are recycled).

15use std::any::Any;
16use std::cell::RefCell;
17use std::fmt;
18use std::rc::Rc;
19
/// A handle into the object slab. Lightweight, copyable index.
///
/// This is API-compatible with the old `GcRef` so that `Value::ClassRef`
/// continues to work without changes.
///
/// NOTE(review): handles are not generation-checked — after `free`, the same
/// index can be handed out again, so a stale `SlabRef` silently observes the
/// new occupant rather than failing.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SlabRef {
    /// Zero-based slot index into the slab's internal object vector.
    pub index: usize,
}
28
/// A type-erased object stored in the slab, backed by reference counting.
struct SlabObject {
    // Layering: `Rc` — the slab's own strong reference keeps the value alive
    // while the slot is occupied; `RefCell` — dynamically checked interior
    // mutability for `get`/`get_mut`; `Box<dyn Any>` — erases the concrete
    // type chosen at `alloc` time.
    value: Rc<RefCell<Box<dyn Any>>>,
}
33
34impl fmt::Debug for SlabObject {
35    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
36        f.debug_struct("SlabObject")
37            .field("rc_count", &Rc::strong_count(&self.value))
38            .field("value", &"<dyn Any>")
39            .finish()
40    }
41}
42
/// A deterministic slab allocator for type-erased objects.
///
/// Replaces the old mark-sweep `GcHeap`. The slab holds a strong `Rc` for
/// every occupied slot, so stored values live until their slot is explicitly
/// freed (or the slab is dropped) — deallocation is deterministic, with no
/// collector pauses. The slab provides stable indexed access and
/// deterministic LIFO slot reuse.
#[derive(Debug)]
pub struct ObjectSlab {
    // Slot storage: `Some` = occupied, `None` = freed and awaiting reuse.
    objects: Vec<Option<SlabObject>>,
    /// Free-list for slot reuse (LIFO for determinism).
    ///
    /// NOTE(review): `pub` exposes an invariant-bearing field (entries must
    /// index in-bounds `None` slots, with no duplicates); external mutation
    /// can corrupt allocation — consider `pub(crate)` or a read-only accessor.
    pub free_list: Vec<usize>,
    /// Total allocations performed (for statistics).
    alloc_count: usize,
}
56
57impl ObjectSlab {
58    /// Create a new empty slab.
59    pub fn new() -> Self {
60        ObjectSlab {
61            objects: Vec::new(),
62            free_list: Vec::new(),
63            alloc_count: 0,
64        }
65    }
66
67    /// Allocate a value in the slab, returning a handle.
68    ///
69    /// Reuses a freed slot if available (LIFO), otherwise extends the slab.
70    pub fn alloc<T: Any + 'static>(&mut self, value: T) -> SlabRef {
71        let obj = SlabObject {
72            value: Rc::new(RefCell::new(Box::new(value) as Box<dyn Any>)),
73        };
74
75        let index = if let Some(idx) = self.free_list.pop() {
76            self.objects[idx] = Some(obj);
77            idx
78        } else {
79            self.objects.push(Some(obj));
80            self.objects.len() - 1
81        };
82
83        self.alloc_count += 1;
84        SlabRef { index }
85    }
86
87    /// Read a reference to the value behind `slab_ref`, downcasting to `T`.
88    /// Returns `None` if the slot is empty or the type does not match.
89    pub fn get<T: Any + 'static>(&self, slab_ref: SlabRef) -> Option<&T> {
90        self.objects
91            .get(slab_ref.index)
92            .and_then(|slot| slot.as_ref())
93            .and_then(|obj| {
94                // SAFETY: We hold a shared ref to the slab, so the RefCell
95                // borrow is safe as long as no mutable borrow is active.
96                // We use try_borrow() to be safe.
97                let borrowed = obj.value.try_borrow().ok()?;
98                // We need to extend the lifetime since we hold the slab ref.
99                // This is safe because the slab outlives the returned reference.
100                let any_ref: &dyn Any = &**borrowed;
101                // SAFETY: The slab holds the Rc, which keeps the data alive.
102                // We cast to a raw pointer and back to extend the lifetime
103                // to match the slab borrow.
104                let ptr = any_ref as *const dyn Any;
105                unsafe { &*ptr }.downcast_ref::<T>()
106            })
107    }
108
109    /// Get a mutable reference to the value behind `slab_ref`.
110    pub fn get_mut<T: Any + 'static>(&self, slab_ref: SlabRef) -> Option<std::cell::RefMut<'_, Box<dyn Any>>> {
111        self.objects
112            .get(slab_ref.index)
113            .and_then(|slot| slot.as_ref())
114            .and_then(|obj| obj.value.try_borrow_mut().ok())
115    }
116
117    /// Number of live (non-None) objects in the slab.
118    pub fn live_count(&self) -> usize {
119        self.objects.iter().filter(|s| s.is_some()).count()
120    }
121
122    /// Total number of slots (including freed ones).
123    pub fn capacity(&self) -> usize {
124        self.objects.len()
125    }
126
127    /// Total allocations performed since creation.
128    pub fn alloc_count(&self) -> usize {
129        self.alloc_count
130    }
131
132    /// Explicitly free a slot (return to free-list).
133    ///
134    /// This is optional — objects are also freed when their Rc drops to zero.
135    /// Use this for explicit lifecycle management.
136    pub fn free(&mut self, slab_ref: SlabRef) {
137        if let Some(slot) = self.objects.get_mut(slab_ref.index) {
138            if slot.is_some() {
139                *slot = None;
140                self.free_list.push(slab_ref.index);
141            }
142        }
143    }
144
145    /// No-op collect for backward compatibility with GC API.
146    ///
147    /// The object slab uses reference counting; there is nothing to collect.
148    /// This method exists so that `gc_collect` builtins don't need to be
149    /// removed from user code immediately.
150    pub fn collect_noop(&self) {
151        // Intentionally empty. RC handles deallocation deterministically.
152    }
153}
154
155impl Default for ObjectSlab {
156    fn default() -> Self {
157        Self::new()
158    }
159}
160
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_and_read_back() {
        let mut slab = ObjectSlab::new();
        let handle = slab.alloc(42i64);
        assert_eq!(slab.get::<i64>(handle), Some(&42));
        assert_eq!(slab.live_count(), 1);
    }

    #[test]
    fn free_and_slot_reuse() {
        let mut slab = ObjectSlab::new();
        let first = slab.alloc(1i64);
        let second = slab.alloc(2i64);
        assert_eq!(slab.capacity(), 2);

        // Drop the second object; its slot must land on the free-list.
        slab.free(second);
        assert_eq!(slab.live_count(), 1);
        assert_eq!(slab.free_list.len(), 1);

        // The next allocation must land in the slot `second` vacated.
        let third = slab.alloc(3i64);
        assert_eq!(third.index, second.index, "LIFO reuse");
        assert_eq!(slab.capacity(), 2, "no new slots");
        assert_eq!(slab.get::<i64>(third), Some(&3));
        assert_eq!(slab.get::<i64>(first), Some(&1));
    }

    #[test]
    fn type_mismatch_returns_none() {
        let mut slab = ObjectSlab::new();
        let handle = slab.alloc(42i64);
        assert_eq!(slab.get::<String>(handle), None);
        assert_eq!(slab.get::<i64>(handle), Some(&42));
    }

    #[test]
    fn collect_noop_is_harmless() {
        let mut slab = ObjectSlab::new();
        let handle = slab.alloc(1i64);
        slab.collect_noop();
        // Still alive afterwards: collection is a no-op under RC.
        assert_eq!(slab.live_count(), 1);
        assert_eq!(slab.get::<i64>(handle), Some(&1));
    }

    #[test]
    fn deterministic_slot_order() {
        // Two slabs driven through the identical alloc/free script must
        // agree on every slot index.
        let script = |slab: &mut ObjectSlab| {
            let x1 = slab.alloc(10i64);
            let x2 = slab.alloc(20i64);
            let x3 = slab.alloc(30i64);
            slab.free(x2);
            let x4 = slab.alloc(40i64);
            (x1, x2, x3, x4)
        };

        let mut left = ObjectSlab::new();
        let mut right = ObjectSlab::new();
        let (a1, a2, a3, a4) = script(&mut left);
        let (b1, b2, b3, b4) = script(&mut right);

        assert_eq!(a1.index, b1.index);
        assert_eq!(a2.index, b2.index);
        assert_eq!(a3.index, b3.index);
        assert_eq!(a4.index, b4.index, "LIFO reuse deterministic");
    }

    #[test]
    fn alloc_count_tracking() {
        let mut slab = ObjectSlab::new();
        assert_eq!(slab.alloc_count(), 0);
        let _ = slab.alloc(1i64);
        let _ = slab.alloc(2i64);
        assert_eq!(slab.alloc_count(), 2);
    }
}