use crate::{
    allocator::Allocator,
    gcref::{GcRef, UntypedGcRef, WeakGcRef, WeakSlot},
    global_allocator::{round_up, GlobalAllocator},
    globals::{LARGE_CUTOFF, MEDIUM_CUTOFF},
    header::HeapObjectHeader,
    internal::{
        block_list::BlockList,
        collection_barrier::CollectionBarrier,
        gc_info::{GCInfoIndex, GCInfoTrait},
        stack_bounds::StackBounds,
    },
    large_space::PreciseAllocation,
    marking::SynchronousMarking,
    visitor::Visitor,
    Config,
};
use std::{
    mem::{size_of, swap},
    ptr::{null_mut, NonNull},
    sync::atomic::{self, AtomicUsize},
};

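/// Scope of a collection cycle: either a full-heap collection or an eden
/// (young-generation) collection.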
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub enum CollectionScope {
    Full,
    Eden,
}

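/// A callback that reports additional roots to the collector by tracing them
/// through the supplied [`Visitor`]. Blanket-implemented for any
/// `FnMut(&mut Visitor)` closure below.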
pub trait MarkingConstraint {
    fn execute(&mut self, vis: &mut Visitor);
}

impl<T: FnMut(&mut Visitor)> MarkingConstraint for T {
    fn execute(&mut self, vis: &mut Visitor) {
        self(vis);
    }
}

thread_local! {
    static BOUNDS: StackBounds = StackBounds::current_thread_stack_bounds();
}

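/// The garbage-collected heap. It owns the global allocator, the registered
/// marking constraints, the weak-reference table, and the bookkeeping used to
/// decide when the next collection should run.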
#[repr(C)]
#[allow(dead_code)]
pub struct Heap {
    pub(crate) global: GlobalAllocator,
    pub(crate) gc_prepare_stw_callback: Option<Box<dyn FnMut()>>,
    collection_barrier: CollectionBarrier,
    config: Config,

    pub(crate) constraints: Vec<Box<dyn MarkingConstraint>>,

    generational_gc: bool,
    size_after_last_collect: usize,
    size_after_last_full_collect: usize,
    size_before_last_full_collect: usize,
    size_after_last_eden_collect: usize,
    size_before_last_eden_collect: usize,
    pub(crate) defers: AtomicUsize,
    pub(crate) bytes_allocated_this_cycle: usize,
    pub(crate) max_eden_size: usize,
    max_heap_size: usize,
    total_bytes_visited: usize,
    total_bytes_visited_this_cycle: usize,
    increment_balance: f64,
    should_do_full_collection: bool,
    collection_scope: Option<CollectionScope>,
    last_collection_scope: Option<CollectionScope>,
    pub(crate) weak_references: Vec<GcRef<WeakSlot>>,
    pub(crate) total_gc_count: usize,
}

impl Heap {
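    /// Returns the number of garbage-collection cycles completed so far.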
    pub fn total_gc_cycles_count(&self) -> usize {
        self.total_gc_count
    }
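    /// Invokes `weak_refs` for every registered weak reference and `callback`
    /// for every cell currently allocated in the large-object space.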
    pub fn for_each_cell(
        &mut self,
        mut callback: impl FnMut(*mut HeapObjectHeader),
        mut weak_refs: impl FnMut(GcRef<WeakSlot>),
    ) {
        unsafe {
            for weak in self.weak_references.iter() {
                weak_refs(*weak);
            }

            self.global.large_space.allocations.iter().for_each(|cell| {
                callback((**cell).cell());
            });
        }
    }
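    /// Registers a marking constraint that is executed during every collection.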
    pub fn add_constraint(&mut self, constraint: impl MarkingConstraint + 'static) {
        self.constraints.push(Box::new(constraint));
    }
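    /// Registers the core constraint that conservatively scans the current
    /// thread's stack, from its origin down to an approximation of the stack
    /// pointer, for values that look like GC references.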
    pub fn add_core_constraints(&mut self) {
        self.add_constraint(|visitor: &mut Visitor| unsafe {
            let heap = &mut *visitor.heap();

            heap.global.large_space.prepare_for_conservative_scan();

            let mut from = BOUNDS.with(|b| b.origin);
            let mut to = approximate_stack_pointer();
            if from > to {
                swap(&mut from, &mut to);
            }
            visitor.trace_conservatively(from, to)
        });
    }
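    /// Creates a new heap configured by `config`. The heap is returned boxed,
    /// presumably so that the `line_bitmap` pointers handed to the normal and
    /// overflow allocators below keep pointing at a stable address.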
    pub fn new(config: Config) -> Box<Self> {
        let mut this = Box::new(Self {
            total_gc_count: 0,
            constraints: Vec::new(),
            generational_gc: config.generational,
            defers: AtomicUsize::new(0),
            global: GlobalAllocator::new(&config),
            gc_prepare_stw_callback: None,
            collection_barrier: CollectionBarrier::new(null_mut()),
            weak_references: vec![],
            should_do_full_collection: false,
            size_after_last_collect: 0,
            size_after_last_eden_collect: 0,
            size_after_last_full_collect: 0,
            size_before_last_eden_collect: 0,
            size_before_last_full_collect: 0,
            max_eden_size: config.max_eden_size,
            max_heap_size: config.max_heap_size,
            collection_scope: None,
            last_collection_scope: None,
            total_bytes_visited: 0,
            total_bytes_visited_this_cycle: 0,
            increment_balance: 0.0,
            bytes_allocated_this_cycle: 0,
            config,
        });

        this.global.normal_allocator.line_bitmap = &this.global.line_bitmap;
        this.global.overflow_allocator.line_bitmap = &this.global.line_bitmap;
        this
    }

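    /// Sweeps the given block list, delegating to the global allocator.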
    pub(crate) unsafe fn sweep(&mut self, blocks: BlockList) {
        self.global.sweep(blocks);
    }
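    /// Runs a synchronous collection cycle, unless collection is currently
    /// deferred (see [`DeferPoint`]).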
    pub fn collect_garbage(&mut self) {
        if self.defers.load(atomic::Ordering::SeqCst) > 0 {
            return;
        }
        self.perform_garbage_collection();
    }

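    /// Triggers a collection once the bytes allocated in the current cycle
    /// exceed the eden budget, unless collection is currently deferred.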
    #[allow(dead_code)]
    pub fn collect_if_necessary_or_defer(&mut self) {
        if self.defers.load(atomic::Ordering::Relaxed) > 0 {
            return;
        }
        let bytes_allowed = self.max_eden_size;
        if self.bytes_allocated_this_cycle >= bytes_allowed {
            self.collect_garbage();
        }
    }
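    /// Allocates a weak reference slot pointing at `target`. The slot is
    /// cleared by the collector once `target` is no longer reachable.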
    pub unsafe fn allocate_weak(&mut self, target: UntypedGcRef) -> WeakGcRef {
        let ptr = self.allocate_raw_or_fail(
            size_of::<WeakSlot>() + size_of::<HeapObjectHeader>(),
            WeakSlot::index(),
        );

        ptr.get().cast::<WeakSlot>().write(WeakSlot {
            value: Some(target),
        });
        WeakGcRef {
            slot: ptr.cast_unchecked(),
        }
    }
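    /// Allocates `size` bytes (rounded up to 8) tagged with `index`, picking the
    /// normal, overflow, or large-object allocator based on the size class.
    /// Returns `None` when the chosen small-object allocator cannot satisfy the
    /// request.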
    pub unsafe fn allocate_raw(&mut self, size: usize, index: GCInfoIndex) -> Option<UntypedGcRef> {
        let size = round_up(size, 8);
        let cell = if size >= LARGE_CUTOFF {
            return self.allocate_large(size, index);
        } else if size < MEDIUM_CUTOFF {
            self.global.normal_allocator.allocate(size)
        } else {
            self.global.overflow_allocator.allocate(size)
        };
        cell.map(|x| {
            (*x).set_size(size);
            self.bytes_allocated_this_cycle += size;
            (*x).set_gc_info(index);
            debug_assert!(
                {
                    let mut scan = x as usize;
                    let end = scan + size;
                    let mut f = true;
                    while scan < end {
                        if self.global.live_bitmap.test(scan as _) {
                            f = false;
                            break;
                        }
                        scan += 8;
                    }
                    f
                },
                "object at {:p} was already allocated!",
                x
            );
            self.global.live_bitmap.set(x as _);
            UntypedGcRef {
                header: NonNull::new_unchecked(x),
            }
        })
    }

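    /// Slow path: retries the allocation up to three times, collecting garbage
    /// before each attempt, and aborts the process if every attempt fails.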
    #[cold]
    unsafe fn try_perform_collection_and_allocate_again(
        &mut self,
        gc_info: GCInfoIndex,
        size: usize,
    ) -> UntypedGcRef {
        for _ in 0..3 {
            self.collect_garbage();
            let result = self.allocate_raw(size, gc_info);
            if let Some(result) = result {
                return result;
            }
        }
        eprintln!("Allocation of {} bytes failed: OOM", size);
        std::process::abort();
    }
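    /// Like [`Heap::allocate_raw`], but falls back to the collect-and-retry slow
    /// path instead of returning `None`, aborting on persistent failure.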
    pub unsafe fn allocate_raw_or_fail(&mut self, size: usize, index: GCInfoIndex) -> UntypedGcRef {
        match self.allocate_raw(size, index) {
            Some(mem) => mem,
            None => self.try_perform_collection_and_allocate_again(index, size),
        }
    }

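    /// Allocates `size` bytes in the large-object space as a [`PreciseAllocation`].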
    fn allocate_large(&mut self, size: usize, index: GCInfoIndex) -> Option<UntypedGcRef> {
        unsafe {
            let cell = self.global.large_space.allocate(size);
            self.bytes_allocated_this_cycle += (*PreciseAllocation::from_cell(cell)).cell_size();
            (*cell).set_gc_info(index);
            (*cell).set_size(0);
            Some(UntypedGcRef {
                header: NonNull::new_unchecked(cell),
            })
        }
    }

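    /// Returns whether `hdr` is marked, consulting the mark bitmap for
    /// block-allocated objects and the precise-allocation header for large ones.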
    fn is_marked(&self, hdr: *const HeapObjectHeader) -> bool {
        unsafe {
            if !(*hdr).is_precise() {
                self.global.mark_bitmap.test(hdr as _)
            } else {
                (*PreciseAllocation::from_cell(hdr as _)).is_marked()
            }
        }
    }

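    /// Clears every weak slot whose target was not marked during this cycle.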
    fn update_weak_references(&self) {
        let weak_refs = &self.weak_references;

        for weak in weak_refs.iter() {
            match weak.value {
                Some(value) if self.is_marked(value.header.as_ptr()) => {
                    continue;
                }
                _ => {
                    let mut weak = *weak;
                    weak.value = None;
                }
            }
        }
    }

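    /// Removes weak slots whose own cells were not marked during this cycle.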
    fn reset_weak_references(&mut self) {
        let bitmap = &self.global.mark_bitmap;
        self.weak_references
            .retain(|ref_| bitmap.test(ref_.into_raw() as _));
    }

    fn will_start_collection(&mut self) {
        log_if!(self.config.verbose, " => ");

        self.collection_scope = Some(CollectionScope::Full);
        self.should_do_full_collection = false;
        logln_if!(self.config.verbose, "Collection, ");

        self.size_before_last_full_collect =
            self.size_after_last_collect + self.bytes_allocated_this_cycle;
    }

    fn update_object_counts(&mut self, bytes_visited: usize) {
        self.total_bytes_visited = 0;
        self.total_bytes_visited_this_cycle = bytes_visited;
        self.total_bytes_visited += self.total_bytes_visited_this_cycle;
    }
    fn update_allocation_limits(&mut self) {
        let current_heap_size = self.total_bytes_visited;

        self.max_heap_size =
            (self.config.heap_growth_factor * current_heap_size as f64).ceil() as _;
        self.max_eden_size = self.max_heap_size - current_heap_size;
        self.size_after_last_full_collect = current_heap_size;
        self.size_after_last_collect = current_heap_size;
        self.bytes_allocated_this_cycle = 0;
        logln_if!(
            self.config.verbose,
            " => {}\n => threshold: {}kb",
            current_heap_size,
            self.max_heap_size as f64 / 1024.
        );
        self.total_gc_count += 1;
    }
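    /// Marks `hdr`, using the mark bitmap for block-allocated objects and the
    /// precise-allocation header for large objects, and returns the previous
    /// mark state.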
    pub(crate) fn test_and_set_marked(&self, hdr: *const HeapObjectHeader) -> bool {
        unsafe {
            if self.global.block_allocator.is_in_space(hdr as _) {
                self.global.mark_bitmap.set(hdr as _)
            } else {
                debug_assert!(!self.global.large_space.contains(hdr as _).is_null());
                (*PreciseAllocation::from_cell(hdr as _)).test_and_set_marked()
            }
        }
    }
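    /// The collection pipeline: prepare the spaces for marking, run the
    /// synchronous marker, update and prune weak references, sweep, and finally
    /// recompute the allocation limits for the next cycle.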
    #[inline(never)]
    pub(crate) fn perform_garbage_collection(&mut self) {
        unsafe {
            self.will_start_collection();
            self.global
                .prepare_for_marking(self.collection_scope == Some(CollectionScope::Eden));
            let (live, blocks) = {
                let blocks = self.global.begin_marking();
                let mut marking = SynchronousMarking::new(self);
                (marking.run(), blocks)
            };
            self.update_weak_references();
            self.reset_weak_references();
            self.sweep(blocks);
            self.update_object_counts(live);

            self.global
                .large_space
                .prepare_for_allocation(self.collection_scope == Some(CollectionScope::Eden));
            self.update_allocation_limits();
        }
    }
}

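/// RAII guard that defers garbage collection: it increments `Heap::defers` when
/// created and decrements it again on drop, so collections requested while it
/// is alive are skipped.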
pub struct DeferPoint {
    defers: &'static AtomicUsize,
}

impl DeferPoint {
    pub fn new(heap: &Heap) -> Self {
        let this = Self {
            defers: as_atomic!(&heap.defers; AtomicUsize),
        };
        this.defers.fetch_add(1, atomic::Ordering::SeqCst);
        this
    }
}

impl Drop for DeferPoint {
    fn drop(&mut self) {
        self.defers.fetch_sub(1, atomic::Ordering::SeqCst);
    }
}

impl Drop for Heap {
    fn drop(&mut self) {
        self.global.release_memory();
    }
}

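/// Approximates the current stack pointer by taking the address of a local
/// variable; the result is only meaningful as a conservative bound for stack
/// scanning.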
#[inline(always)]
fn approximate_stack_pointer() -> *mut u8 {
    let mut result = null_mut();
    result = &mut result as *mut *mut u8 as *mut u8;
    result
}