// shape_vm/memory.rs
1//! Memory management for Shape VM
2//!
3//! Without `gc` feature: stub using Arc reference counting (no-op GC).
4//! With `gc` feature: delegates to shape-gc's GcHeap for real collection.
5
6use std::cell::RefCell;
7use std::time::{Duration, Instant};
8
/// Garbage collection configuration
///
/// Tunables consumed by `GarbageCollector`; in the stub build they are
/// stored but never acted upon.
#[derive(Debug, Clone)]
pub struct GCConfig {
    /// Heap size allocated up front, in bytes.
    pub initial_heap_size: usize,
    /// Hard upper bound on heap growth, in bytes.
    pub max_heap_size: usize,
    /// Heap-occupancy fraction that triggers a collection.
    pub collection_threshold: f64,
    /// Enable generational collection.
    pub generational: bool,
    /// Enable incremental (pause-bounded) collection.
    pub incremental: bool,
    /// Budget for one incremental step. NOTE(review): the unit is not
    /// stated here — presumably microseconds; confirm against gc integration.
    pub max_increment_time: u64,
    /// Collect per-collection statistics.
    pub enable_stats: bool,
}

impl Default for GCConfig {
    /// Defaults: 1 MiB initial / 64 MiB max heap, collect at 75% occupancy,
    /// generational + incremental enabled, stats disabled.
    fn default() -> Self {
        GCConfig {
            initial_heap_size: 1 << 20, // 1 MiB
            max_heap_size: 64 << 20,    // 64 MiB
            collection_threshold: 0.75,
            generational: true,
            incremental: true,
            max_increment_time: 1000,
            enable_stats: false,
        }
    }
}
34
/// Unique identifier for managed objects (legacy, kept for API compatibility).
///
/// The stub collector in this file ignores all `ObjectId` arguments; with the
/// `gc` feature, ids are presumably produced by the real heap — TODO confirm.
pub type ObjectId = u64;
37
/// Garbage collection statistics
#[derive(Debug, Default, Clone)]
pub struct GCStats {
    // Total number of collections recorded so far.
    pub collections: u64,
    // Cumulative count of objects reclaimed across all collections.
    pub objects_collected: u64,
    // Cumulative bytes reclaimed across all collections.
    pub bytes_collected: u64,
    // Total wall-clock time spent collecting.
    pub total_collection_time: Duration,
    // total_collection_time / collections, refreshed on every record.
    pub avg_collection_time: Duration,
    // Largest heap size observed. NOTE(review): never written by the stub
    // collector in this file — presumably updated by the gc integration.
    pub peak_heap_size: usize,
    // Heap size when stats were last updated.
    pub current_heap_size: usize,
    // Timestamp of the most recent recorded collection, if any.
    pub last_collection: Option<Instant>,
}
50
/// Shape VM Garbage Collector
///
/// Without `gc` feature: stub (all operations are no-ops, Arc handles memory).
/// With `gc` feature: tracks stats from GcHeap collections.
pub struct GarbageCollector {
    // Configuration supplied at construction; read-only thereafter.
    config: GCConfig,
    // Accumulated statistics; RefCell allows updates through `&self`
    // (single-threaded interior mutability — not Sync).
    stats: RefCell<GCStats>,
}
59
60impl GarbageCollector {
61    pub fn new(config: GCConfig) -> Self {
62        Self {
63            config,
64            stats: RefCell::new(GCStats::default()),
65        }
66    }
67
68    pub fn config(&self) -> &GCConfig {
69        &self.config
70    }
71
72    pub fn add_root(&self, _obj_id: ObjectId) {}
73    pub fn remove_root(&self, _obj_id: ObjectId) {}
74    pub fn collect(&self) -> GCResult {
75        GCResult::empty()
76    }
77    /// Incremental collection step (no-op in this stub).
78    ///
79    /// When the `gc` feature is enabled, incremental marking is driven by
80    /// `gc_heap.collect_incremental()` in `gc_integration.rs` -- this stub
81    /// is not called in that path. It exists for API compatibility when
82    /// the `gc` feature is disabled (Arc refcounting handles memory).
83    pub fn collect_incremental(&self) {}
84    pub fn force_collect(&self) -> GCResult {
85        GCResult::empty()
86    }
87    pub fn heap_size(&self) -> usize {
88        0
89    }
90    pub fn object_count(&self) -> usize {
91        0
92    }
93    pub fn stats(&self) -> GCStats {
94        self.stats.borrow().clone()
95    }
96    pub fn contains_object(&self, _obj_id: ObjectId) -> bool {
97        false
98    }
99
100    /// Record a collection in the stats (used by GC integration).
101    pub fn record_collection(&self, result: &GCResult) {
102        let mut stats = self.stats.borrow_mut();
103        stats.collections += 1;
104        stats.objects_collected += result.objects_collected;
105        stats.bytes_collected += result.bytes_collected;
106        stats.total_collection_time += result.duration;
107        if stats.collections > 0 {
108            stats.avg_collection_time = stats.total_collection_time / stats.collections as u32;
109        }
110        stats.last_collection = Some(Instant::now());
111    }
112}
113
/// Write barrier for GC-tracked heap writes (raw u64 bits).
///
/// Invoked whenever a heap pointer held in an existing slot is replaced.
/// `_old` carries the NaN-boxed bits being overwritten; `_new` carries the
/// incoming bits.
///
/// Without the `gc` feature this compiles away entirely (no-op).
/// With the `gc` feature it will push the old reference into the SATB
/// buffer and gray the new reference while incremental marking is active.
#[inline(always)]
pub fn write_barrier_slot(_old: u64, _new: u64) {
    #[cfg(feature = "gc")]
    {
        // Wiring point for shape_gc::barrier::SatbBuffer::enqueue():
        // 1. `_old` may become unreachable (SATB enqueue)
        // 2. `_new` has a new reference (mark gray)
    }
    #[cfg(feature = "gc_barrier_debug")]
    {
        // Count every barrier invocation so coverage tests can compare
        // against the heap-write counter.
        BARRIER_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }
}
135
136/// Write barrier for GC-tracked heap writes (ValueWord level).
137///
138/// Convenience wrapper that extracts raw bits from the old and new ValueWord
139/// values and forwards to `write_barrier_slot`. This is the primary entry
140/// point used by the VM executor for slot overwrites.
141#[inline(always)]
142pub fn write_barrier_vw(old: &shape_value::ValueWord, new: &shape_value::ValueWord) {
143    write_barrier_slot(old.raw_bits(), new.raw_bits());
144}
145
/// Write barrier counter for debug coverage assertions.
///
/// Incremented by every `write_barrier_slot` call when the `gc_barrier_debug`
/// feature is enabled. Tests can compare this against a heap-write counter
/// (`HEAP_WRITE_COUNT`) to verify that no write site is missing a barrier.
/// Relaxed ordering is sufficient: only the final totals are compared.
#[cfg(feature = "gc_barrier_debug")]
pub static BARRIER_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
153
/// Heap-write counter for debug coverage assertions.
///
/// Incremented at every heap-write site (via `record_heap_write`) when the
/// `gc_barrier_debug` feature is enabled. At the end of execution,
/// `BARRIER_COUNT >= HEAP_WRITE_COUNT` must hold, guaranteeing full barrier
/// coverage.
#[cfg(feature = "gc_barrier_debug")]
pub static HEAP_WRITE_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
161
/// Record a heap write for barrier coverage tracking.
///
/// Invoke at every heap-write site when building with `gc_barrier_debug`;
/// the matching barrier call bumps `BARRIER_COUNT`. After execution,
/// `assert_barrier_coverage()` verifies every write was barriered.
/// Without the feature this is a safe no-op that compiles away.
#[inline(always)]
pub fn record_heap_write() {
    #[cfg(feature = "gc_barrier_debug")]
    {
        use std::sync::atomic::Ordering;
        HEAP_WRITE_COUNT.fetch_add(1, Ordering::Relaxed);
    }
}
174
/// Assert that every heap write was accompanied by a write barrier.
///
/// Panics if `BARRIER_COUNT < HEAP_WRITE_COUNT`, indicating a missing barrier.
/// Only compiled under the `gc_barrier_debug` feature; absent otherwise.
#[cfg(feature = "gc_barrier_debug")]
pub fn assert_barrier_coverage() {
    use std::sync::atomic::Ordering;
    let writes = HEAP_WRITE_COUNT.load(Ordering::Relaxed);
    let barriers = BARRIER_COUNT.load(Ordering::Relaxed);
    if barriers < writes {
        // Same message `assert!` would produce for this condition.
        panic!(
            "Write barrier coverage gap: {} heap writes but only {} barriers",
            writes, barriers
        );
    }
}
190
#[cfg(test)]
mod tests {
    use super::*;

    // The raw-bits barrier must accept any bit pattern without panicking,
    // since callers hand it arbitrary NaN-boxed words.
    #[test]
    fn write_barrier_slot_does_not_panic() {
        // Verify the barrier can be called with arbitrary bits without panicking.
        write_barrier_slot(0, 0);
        write_barrier_slot(u64::MAX, 0);
        write_barrier_slot(0, u64::MAX);
        write_barrier_slot(0xFFF8_0000_0000_0000, 0xFFF8_0000_0000_0001);
    }

    // The ValueWord-level wrapper must likewise be panic-free for real values.
    #[test]
    fn write_barrier_vw_does_not_panic() {
        let a = shape_value::ValueWord::none();
        let b = shape_value::ValueWord::from_i64(42);
        write_barrier_vw(&a, &b);
        write_barrier_vw(&b, &a);
    }

    // record_heap_write must be callable in every feature configuration.
    #[test]
    fn record_heap_write_does_not_panic() {
        // Even without gc_barrier_debug, the function should be a safe no-op.
        record_heap_write();
    }
}
218
/// Result of a garbage collection
///
/// Produced by `collect()`/`force_collect()` and consumed by
/// `GarbageCollector::record_collection` to update statistics.
#[derive(Debug, Clone)]
pub struct GCResult {
    /// Number of objects reclaimed by this collection.
    pub objects_collected: u64,
    /// Number of bytes reclaimed by this collection.
    pub bytes_collected: u64,
    /// Wall-clock time the collection took.
    pub duration: Duration,
}

impl GCResult {
    /// Build a result from explicit counts and timing.
    pub fn new(objects_collected: u64, bytes_collected: u64, duration: Duration) -> Self {
        GCResult {
            objects_collected,
            bytes_collected,
            duration,
        }
    }

    /// The "nothing was collected" result returned by the stub collector.
    pub fn empty() -> Self {
        Self::new(0, 0, Duration::ZERO)
    }
}