// shape_gc/allocator.rs
//! Bump allocator with thread-local allocation buffers (TLABs).
//!
//! Fast-path allocation: pointer bump in thread-local buffer (~1 cycle).
//! When the TLAB is exhausted, refill from a Region. When the Region is
//! full, allocate a new Region.

use crate::header::{GcColor, GcHeader};
use crate::marker::Marker;
use crate::region::Region;
use std::alloc::Layout;
use std::cell::UnsafeCell;

/// Size of each thread-local allocation buffer: 32KB.
const TLAB_SIZE: usize = 32 * 1024;

/// Thread-Local Allocation Buffer — a slice of a Region for fast bump allocation.
struct Tlab {
    /// Pointer to the start of this TLAB within a region.
    base: *mut u8,
    /// Current allocation cursor within the TLAB (byte offset from `base`).
    cursor: usize,
    /// Limit of this TLAB (relative to base); allocations must not pass it.
    limit: usize,
    /// Index into the allocator's regions vec that owns this TLAB.
    region_index: usize,
}

impl Tlab {
    /// Create a TLAB covering `size` bytes starting at `base`, owned by the
    /// region at `region_index`.
    fn new(base: *mut u8, size: usize, region_index: usize) -> Self {
        Self {
            base,
            cursor: 0,
            limit: size,
            region_index,
        }
    }

    /// Try to bump-allocate `total_size` bytes in this TLAB.
    ///
    /// Returns `None` when the remaining space is insufficient.
    ///
    /// Fix: the bounds check uses `checked_add` — the original
    /// `self.cursor + total_size` would panic in debug builds (and wrap in
    /// release builds, falsely "succeeding") when `total_size` is near
    /// `usize::MAX`.
    fn try_alloc(&mut self, total_size: usize) -> Option<*mut u8> {
        // Overflow-safe bounds check: fail cleanly instead of wrapping.
        let new_cursor = self.cursor.checked_add(total_size)?;
        if new_cursor > self.limit {
            return None;
        }
        // SAFETY: cursor + total_size <= limit, and the caller constructed
        // this TLAB over a buffer of at least `limit` bytes, so the offset
        // stays inside the underlying allocation.
        let ptr = unsafe { self.base.add(self.cursor) };
        self.cursor = new_cursor;
        Some(ptr)
    }
}
49
/// Bump allocator managing Regions and handing out TLABs.
pub struct BumpAllocator {
    /// All allocated regions.
    // UnsafeCell: mutated through `&self` on the allocation fast path;
    // relies on the single-threaded-use invariant documented below.
    regions: UnsafeCell<Vec<Region>>,
    /// Current TLAB for allocation.
    // `None` until the first slow-path refill; reset to `None` after sweep.
    tlab: UnsafeCell<Option<Tlab>>,
}
57
// Safety: BumpAllocator is only used from a single thread (VM is single-threaded per instance).
// The GC stop-the-world phase has exclusive access.
// NOTE(review): these impls allow a BumpAllocator to cross threads even though
// all UnsafeCell accesses are unsynchronized — soundness rests entirely on the
// invariant stated above; confirm no caller shares one instance concurrently.
unsafe impl Send for BumpAllocator {}
unsafe impl Sync for BumpAllocator {}
62
63impl BumpAllocator {
64    /// Create a new allocator (no regions pre-allocated).
65    pub fn new() -> Self {
66        Self {
67            regions: UnsafeCell::new(Vec::new()),
68            tlab: UnsafeCell::new(None),
69        }
70    }
71
72    /// Allocate memory for an object with the given layout.
73    ///
74    /// Returns a pointer to the object data (after the GcHeader).
75    /// The GcHeader is written automatically.
76    pub fn alloc(&self, layout: Layout) -> *mut u8 {
77        let header_size = std::mem::size_of::<GcHeader>();
78        let obj_size = layout.size();
79        let total = (header_size + obj_size + 7) & !7; // 8-byte aligned
80
81        // Fast path: try TLAB
82        let tlab = unsafe { &mut *self.tlab.get() };
83        if let Some(t) = tlab {
84            if let Some(raw_ptr) = t.try_alloc(total) {
85                // Write header, return pointer past header
86                let header_ptr = raw_ptr as *mut GcHeader;
87                unsafe {
88                    header_ptr.write(GcHeader::new(0, obj_size as u32));
89                }
90                return unsafe { raw_ptr.add(header_size) };
91            }
92        }
93
94        // Slow path: refill TLAB and retry
95        self.alloc_slow(layout, total)
96    }
97
98    /// Slow path: allocate a new TLAB from a region, then allocate from it.
99    fn alloc_slow(&self, layout: Layout, total: usize) -> *mut u8 {
100        let header_size = std::mem::size_of::<GcHeader>();
101        let obj_size = layout.size();
102
103        // Large object (> TLAB size): allocate directly from a region
104        if total > TLAB_SIZE {
105            return self.alloc_large(layout);
106        }
107
108        // Allocate a new region and carve TLAB from it
109        let regions = unsafe { &mut *self.regions.get() };
110        let tlab = unsafe { &mut *self.tlab.get() };
111
112        let new_region = Region::new();
113        let base = new_region.base();
114        let region_index = regions.len();
115        regions.push(new_region);
116
117        // Create a TLAB from the start of the new region
118        let new_tlab = Tlab::new(base, TLAB_SIZE, region_index);
119        *tlab = Some(new_tlab);
120
121        // Now allocate from the fresh TLAB
122        let t = tlab.as_mut().unwrap();
123        let raw_ptr = t
124            .try_alloc(total)
125            .expect("fresh TLAB should have space for allocation");
126        let header_ptr = raw_ptr as *mut GcHeader;
127        unsafe {
128            header_ptr.write(GcHeader::new(0, obj_size as u32));
129        }
130        unsafe { raw_ptr.add(header_size) }
131    }
132
133    /// Allocate a large object directly from a dedicated region.
134    fn alloc_large(&self, layout: Layout) -> *mut u8 {
135        let regions = unsafe { &mut *self.regions.get() };
136        let mut region = Region::new();
137        let ptr = region
138            .try_alloc(layout)
139            .expect("fresh region should fit large allocation");
140        regions.push(region);
141        ptr
142    }
143
144    /// Flush the active TLAB's cursor back into its owning Region so that
145    /// `for_each_object_mut` can walk all TLAB-allocated objects during sweep.
146    fn flush_tlab(&self) {
147        let tlab = unsafe { &mut *self.tlab.get() };
148        if let Some(t) = tlab {
149            let regions = unsafe { &mut *self.regions.get() };
150            if t.region_index < regions.len() {
151                regions[t.region_index].set_cursor(t.cursor);
152            }
153        }
154    }
155
156    /// Public TLAB flush for use by the incremental sweep path in GcHeap.
157    pub fn flush_tlab_for_sweep(&self) {
158        self.flush_tlab();
159    }
160
161    /// Sweep all regions: walk objects, reclaim white objects.
162    /// Returns total bytes collected.
163    pub fn sweep(&self, _marker: &Marker) -> usize {
164        // Flush TLAB cursor so sweep can see all allocated objects.
165        self.flush_tlab();
166
167        let regions = unsafe { &mut *self.regions.get() };
168        let mut total_collected = 0;
169
170        for region in regions.iter_mut() {
171            let mut live_bytes = 0;
172            region.for_each_object_mut(|header, _obj_ptr| {
173                if header.color() == GcColor::White {
174                    // Dead object — mark as reclaimable
175                    total_collected += header.size as usize;
176                } else {
177                    // Live object — reset to white for next cycle
178                    live_bytes += header.size as usize;
179                    header.set_color(GcColor::White);
180                }
181            });
182            region.set_live_bytes(live_bytes);
183        }
184
185        // Invalidate TLAB (it may point into a swept region)
186        let tlab = unsafe { &mut *self.tlab.get() };
187        *tlab = None;
188
189        total_collected
190    }
191
192    /// Total bytes across all regions.
193    pub fn total_region_bytes(&self) -> usize {
194        let regions = unsafe { &*self.regions.get() };
195        regions.len() * crate::region::REGION_SIZE
196    }
197
198    /// Number of regions.
199    pub fn region_count(&self) -> usize {
200        let regions = unsafe { &*self.regions.get() };
201        regions.len()
202    }
203
204    /// Get a reference to all regions (for the marker to iterate).
205    pub fn regions(&self) -> &Vec<Region> {
206        unsafe { &*self.regions.get() }
207    }
208
209    /// Get a mutable reference to all regions.
210    pub fn regions_mut(&self) -> &mut Vec<Region> {
211        unsafe { &mut *self.regions.get() }
212    }
213}
214
215impl Default for BumpAllocator {
216    fn default() -> Self {
217        Self::new()
218    }
219}
220
#[cfg(test)]
mod tests {
    use super::*;

    /// Locate the GcHeader that sits immediately before an object pointer.
    fn header_of(ptr: *mut u8) -> *const GcHeader {
        unsafe { ptr.sub(std::mem::size_of::<GcHeader>()) as *const GcHeader }
    }

    #[test]
    fn test_basic_allocation() {
        let alloc = BumpAllocator::new();
        let layout = Layout::from_size_align(64, 8).unwrap();
        let ptr = alloc.alloc(layout);
        assert!(!ptr.is_null());

        // The header must be readable just before the returned payload,
        // carrying the object size and the initial White color.
        let header = unsafe { &*header_of(ptr) };
        assert_eq!(header.size, 64);
        assert_eq!(header.color(), GcColor::White);
    }

    #[test]
    fn test_multiple_allocations() {
        let alloc = BumpAllocator::new();
        let layout = Layout::from_size_align(32, 8).unwrap();

        let ptrs: Vec<_> = (0..100).map(|_| alloc.alloc(layout)).collect();

        // All 100 pointers must be pairwise distinct.
        let unique: std::collections::HashSet<_> = ptrs.iter().copied().collect();
        assert_eq!(unique.len(), ptrs.len());
    }

    #[test]
    fn test_tlab_refill() {
        let alloc = BumpAllocator::new();
        let layout = Layout::from_size_align(1024, 8).unwrap();

        // 50 × ~1KB exceeds the 32KB TLAB, forcing at least one refill.
        for _ in 0..50 {
            assert!(!alloc.alloc(layout).is_null());
        }
    }
}
269}