1pub mod allocator;
15pub mod barrier;
16pub mod fixup;
17pub mod generations;
18pub mod header;
19pub mod marker;
20pub mod platform;
21pub mod ptr;
22pub mod region;
23pub mod relocator;
24pub mod roots;
25pub mod safepoint;
26pub mod scheduler;
27pub mod trap_handler;
28
29use std::sync::atomic::{AtomicUsize, Ordering};
30use std::time::{Duration, Instant};
31
32use allocator::BumpAllocator;
33use barrier::SatbBuffer;
34use generations::GenerationalCollector;
35use marker::{MarkPhase, Marker};
36use safepoint::SafepointState;
37use scheduler::{AdaptiveScheduler, CollectionType, HeapMetrics};
38
/// Outcome of a single `GcHeap::collect_incremental` step.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CollectResult {
    /// The cycle has not finished; call `collect_incremental` again.
    InProgress,
    /// Marking terminated and the sweep ran; statistics are attached.
    Complete(SweepStats),
}
47
/// Byte/object accounting produced by a sweep pass.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct SweepStats {
    /// Bytes occupied by objects reclaimed during the sweep.
    pub bytes_collected: usize,
    /// Number of objects reclaimed during the sweep.
    pub objects_collected: usize,
    /// Bytes occupied by objects that survived the sweep.
    pub bytes_retained: usize,
}
58
/// Configuration for GC stress mode (`gc-stress` feature only).
#[cfg(feature = "gc-stress")]
#[derive(Debug, Clone)]
pub struct StressConfig {
    /// Force a full collection on every N-th `alloc_stressed` call.
    pub collect_interval: usize,
}
71
#[cfg(feature = "gc-stress")]
impl Default for StressConfig {
    /// Default stress setting: collect on every 16th stressed allocation.
    fn default() -> Self {
        StressConfig { collect_interval: 16 }
    }
}
80
/// Facade over the GC subsystems: allocation, marking, generations,
/// safepoints, write barriers, and (optionally) adaptive scheduling.
pub struct GcHeap {
    // Region-based bump allocator that backs `alloc`/`alloc_raw`.
    allocator: BumpAllocator,
    // Tri-color marker used by both full and incremental collections.
    marker: Marker,
    // Young/old generation bookkeeping, including the card table.
    generations: GenerationalCollector,
    // Safepoint coordination state (exposed read-only via `safepoint()`).
    safepoint: SafepointState,
    // Snapshot-at-the-beginning buffer fed by the write barriers.
    satb_buffer: SatbBuffer,
    // Bytes allocated since the last collection; atomic so `alloc(&self)`
    // can update it through a shared reference.
    bytes_since_gc: AtomicUsize,
    // Allocation volume that makes `should_collect()` return true.
    gc_threshold: usize,
    // Cumulative collection statistics.
    stats: GcHeapStats,
    // Present only after `enable_adaptive_scheduling*` is called.
    scheduler: Option<AdaptiveScheduler>,
    // Counts `alloc_stressed` calls since stress mode was enabled.
    #[cfg(feature = "gc-stress")]
    stress_counter: usize,
    // `Some(interval)` while stress mode is active.
    #[cfg(feature = "gc-stress")]
    stress_interval: Option<usize>,
}
108
/// Cumulative statistics across all collections on a heap.
#[derive(Debug, Clone, Default)]
pub struct GcHeapStats {
    /// Total collections of any kind (full, incremental, young, old).
    pub collections: u64,
    /// Young-generation collections only.
    pub young_collections: u64,
    /// Old-generation collections only.
    pub old_collections: u64,
    /// Total bytes reclaimed over the heap's lifetime.
    pub total_collected_bytes: u64,
    /// Total wall-clock time spent collecting.
    pub total_collection_time: Duration,
    // NOTE(review): the two fields below are never written in this file —
    // confirm they are maintained elsewhere or wire them up.
    pub peak_heap_bytes: usize,
    pub current_heap_bytes: usize,
    /// When the most recent collection finished, if any.
    pub last_collection: Option<Instant>,
}
121
122impl GcHeap {
123 pub fn new() -> Self {
125 Self::with_threshold(4 * 1024 * 1024) }
127
128 pub fn with_threshold(gc_threshold: usize) -> Self {
130 Self {
131 allocator: BumpAllocator::new(),
132 marker: Marker::new(),
133 generations: GenerationalCollector::new(),
134 safepoint: SafepointState::new(),
135 satb_buffer: SatbBuffer::new(256),
136 bytes_since_gc: AtomicUsize::new(0),
137 gc_threshold,
138 stats: GcHeapStats::default(),
139 scheduler: None,
140 #[cfg(feature = "gc-stress")]
141 stress_counter: 0,
142 #[cfg(feature = "gc-stress")]
143 stress_interval: None,
144 }
145 }
146
147 #[cfg(feature = "gc-stress")]
151 pub fn enable_stress(&mut self, config: StressConfig) {
152 self.stress_interval = Some(config.collect_interval);
153 self.stress_counter = 0;
154 }
155
156 #[cfg(feature = "gc-stress")]
163 pub fn alloc_stressed<T>(
164 &mut self,
165 value: T,
166 trace_roots: &mut dyn FnMut(&mut dyn FnMut(*mut u8)),
167 ) -> *mut T {
168 if let Some(interval) = self.stress_interval {
169 self.stress_counter += 1;
170 if self.stress_counter % interval == 0 {
171 self.collect(trace_roots);
172 }
173 }
174 self.alloc(value)
175 }
176
177 pub fn alloc<T>(&self, value: T) -> *mut T {
184 let layout = std::alloc::Layout::new::<T>();
185 let ptr = self.allocator.alloc(layout);
186 let typed = ptr as *mut T;
187 unsafe { typed.write(value) };
188 self.bytes_since_gc.fetch_add(
189 layout.size() + std::mem::size_of::<header::GcHeader>(),
190 Ordering::Relaxed,
191 );
192 typed
193 }
194
195 pub fn alloc_raw(&self, layout: std::alloc::Layout) -> *mut u8 {
197 let ptr = self.allocator.alloc(layout);
198 self.bytes_since_gc.fetch_add(
199 layout.size() + std::mem::size_of::<header::GcHeader>(),
200 Ordering::Relaxed,
201 );
202 ptr
203 }
204
205 pub fn should_collect(&self) -> bool {
207 self.bytes_since_gc.load(Ordering::Relaxed) >= self.gc_threshold
208 }
209
210 pub fn collect(&mut self, trace_roots: &mut dyn FnMut(&mut dyn FnMut(*mut u8))) {
214 let start = Instant::now();
215
216 self.marker.reset();
218 trace_roots(&mut |ptr| {
219 self.marker.mark_root(ptr);
220 });
221 self.marker.mark_all();
222
223 let collected = self.allocator.sweep(&self.marker);
225
226 let elapsed = start.elapsed();
228 self.stats.collections += 1;
229 self.stats.total_collected_bytes += collected as u64;
230 self.stats.total_collection_time += elapsed;
231 self.stats.last_collection = Some(Instant::now());
232 self.bytes_since_gc.store(0, Ordering::Relaxed);
233 }
234
235 pub fn collect_incremental(
247 &mut self,
248 mark_budget: usize,
249 trace_roots: &mut dyn FnMut(&mut dyn FnMut(*mut u8)),
250 ) -> CollectResult {
251 if self.marker.phase() == MarkPhase::Idle {
252 self.marker.start_marking();
254 trace_roots(&mut |ptr| {
255 self.marker.mark_root(ptr);
256 });
257 }
258
259 let worklist_empty = self.marker.mark_step(mark_budget);
261
262 if worklist_empty {
263 let terminated = self.marker.terminate_marking(&mut self.satb_buffer);
265
266 if terminated {
267 let sweep_stats = self.sweep_regions();
269
270 self.marker.finish_marking();
272 self.stats.collections += 1;
273 self.stats.total_collected_bytes += sweep_stats.bytes_collected as u64;
274 self.stats.last_collection = Some(Instant::now());
275 self.bytes_since_gc.store(0, Ordering::Relaxed);
276
277 return CollectResult::Complete(sweep_stats);
278 }
279 }
280
281 CollectResult::InProgress
282 }
283
284 #[inline(always)]
290 pub fn write_barrier(&mut self, old_ref: *mut u8) {
291 if self.marker.is_marking() {
292 self.satb_buffer.enqueue(old_ref);
293 }
294 }
295
296 #[inline(always)]
316 pub fn write_barrier_combined(&mut self, slot_addr: usize, _new_val: *mut u8) {
317 if let Some(ref mut ct) = self.generations.card_table_mut() {
319 ct.mark_dirty(slot_addr);
320 }
321
322 if self.marker.is_marking() {
324 let old_ptr = unsafe { *(slot_addr as *const *mut u8) };
325 self.satb_buffer.enqueue(old_ptr);
326 }
327 }
328
329 pub fn generations_mut(&mut self) -> &mut GenerationalCollector {
331 &mut self.generations
332 }
333
334 pub fn generations(&self) -> &GenerationalCollector {
336 &self.generations
337 }
338
339 fn sweep_regions(&self) -> SweepStats {
343 self.allocator.flush_tlab_for_sweep();
345
346 let regions = self.allocator.regions_mut();
347 let mut stats = SweepStats::default();
348
349 for region in regions.iter_mut() {
350 let mut live_bytes = 0;
351 region.for_each_object_mut(|hdr, _obj_ptr| {
352 if hdr.color() == header::GcColor::White {
353 stats.bytes_collected += hdr.size as usize;
355 stats.objects_collected += 1;
356 } else {
357 let size = hdr.size as usize;
359 live_bytes += size;
360 stats.bytes_retained += size;
361 hdr.set_color(header::GcColor::White);
362 }
363 });
364 region.set_live_bytes(live_bytes);
365 }
366
367 stats
368 }
369
370 pub fn is_marking(&self) -> bool {
372 self.marker.is_marking()
373 }
374
375 pub fn satb_buffer_mut(&mut self) -> &mut SatbBuffer {
377 &mut self.satb_buffer
378 }
379
380 pub fn mark_phase(&self) -> MarkPhase {
382 self.marker.phase()
383 }
384
385 pub fn stats(&self) -> &GcHeapStats {
387 &self.stats
388 }
389
390 pub fn safepoint(&self) -> &SafepointState {
392 &self.safepoint
393 }
394
395 pub fn allocator(&self) -> &BumpAllocator {
397 &self.allocator
398 }
399
400 pub fn heap_size(&self) -> usize {
402 self.allocator.total_region_bytes()
403 }
404
405 pub fn enable_adaptive_scheduling(&mut self) {
409 self.scheduler = Some(AdaptiveScheduler::new());
410 }
411
412 pub fn enable_adaptive_scheduling_with_config(
414 &mut self,
415 young_utilization_threshold: f64,
416 headroom_factor: f64,
417 ) {
418 self.scheduler = Some(AdaptiveScheduler::with_config(
419 young_utilization_threshold,
420 headroom_factor,
421 ));
422 }
423
424 pub fn scheduler(&self) -> Option<&AdaptiveScheduler> {
426 self.scheduler.as_ref()
427 }
428
429 pub fn scheduler_mut(&mut self) -> Option<&mut AdaptiveScheduler> {
431 self.scheduler.as_mut()
432 }
433
434 pub fn should_collect_adaptive(&self) -> CollectionType {
440 if let Some(ref sched) = self.scheduler {
441 let metrics = HeapMetrics {
442 young_utilization: self.generations.young_utilization(),
443 old_free_bytes: self.generations.old_free_bytes(),
444 bytes_since_gc: self.bytes_since_gc.load(Ordering::Relaxed),
445 gc_threshold: self.gc_threshold,
446 avg_gc_pause_secs: self.avg_gc_pause_secs(),
447 };
448 sched.should_collect(&metrics)
449 } else {
450 if self.bytes_since_gc.load(Ordering::Relaxed) >= self.gc_threshold {
452 CollectionType::Full
453 } else {
454 CollectionType::None
455 }
456 }
457 }
458
459 pub fn avg_gc_pause_secs(&self) -> f64 {
461 if self.stats.collections == 0 {
462 return 0.0;
463 }
464 self.stats.total_collection_time.as_secs_f64() / self.stats.collections as f64
465 }
466
467 pub fn collect_young(&mut self, roots: &[*mut u8]) -> SweepStats {
474 let start = Instant::now();
475
476 let stats = self.generations.collect_young(&mut self.marker, roots);
477
478 let elapsed = start.elapsed();
479 self.stats.collections += 1;
480 self.stats.young_collections += 1;
481 self.stats.total_collected_bytes += stats.bytes_collected as u64;
482 self.stats.total_collection_time += elapsed;
483 self.stats.last_collection = Some(Instant::now());
484 self.bytes_since_gc.store(0, Ordering::Relaxed);
485
486 if let Some(ref mut sched) = self.scheduler {
488 sched.record_young_gc(elapsed);
489 }
490
491 stats
492 }
493
494 pub fn collect_old(&mut self, roots: &[*mut u8]) -> SweepStats {
498 let start = Instant::now();
499
500 let stats = self.generations.collect_old(&mut self.marker, roots);
501
502 let elapsed = start.elapsed();
503 self.stats.collections += 1;
504 self.stats.old_collections += 1;
505 self.stats.total_collected_bytes += stats.bytes_collected as u64;
506 self.stats.total_collection_time += elapsed;
507 self.stats.last_collection = Some(Instant::now());
508 self.bytes_since_gc.store(0, Ordering::Relaxed);
509
510 if let Some(ref mut sched) = self.scheduler {
512 sched.record_old_gc(elapsed);
513 }
514
515 stats
516 }
517
518 pub fn record_allocation(&mut self, bytes: usize) {
523 if let Some(ref mut sched) = self.scheduler {
524 sched.record_allocation(bytes);
525 }
526 }
527
528 pub fn young_gen_utilization(&self) -> f64 {
530 self.generations.young_utilization()
531 }
532
533 pub fn old_gen_free_bytes(&self) -> usize {
535 self.generations.old_free_bytes()
536 }
537}
538
539impl Default for GcHeap {
540 fn default() -> Self {
541 Self::new()
542 }
543}
544
thread_local! {
    // Raw pointer to this thread's heap; null until `set_thread_gc_heap`
    // installs one. The cell does not own the heap.
    static THREAD_GC_HEAP: std::cell::Cell<*mut GcHeap> = const { std::cell::Cell::new(std::ptr::null_mut()) };
}
550
/// Installs `heap` as the current thread's GC heap; null clears it.
///
/// # Safety
/// `heap` must be null, or point to a `GcHeap` that remains valid for every
/// subsequent `thread_gc_heap()` call on this thread (until replaced/cleared).
pub unsafe fn set_thread_gc_heap(heap: *mut GcHeap) {
    THREAD_GC_HEAP.with(|cell| cell.set(heap));
}
558
/// Returns the heap previously registered via `set_thread_gc_heap`.
///
/// Panics if no heap has been installed for the current thread.
///
/// NOTE(review): the `&'static` lifetime is a promise this function cannot
/// check — it is only sound while the registered heap stays alive; consider
/// a shorter-lived handle.
pub fn thread_gc_heap() -> &'static GcHeap {
    THREAD_GC_HEAP.with(|cell| {
        let ptr = cell.get();
        assert!(!ptr.is_null(), "GC heap not initialized for this thread");
        // SAFETY: the non-null pointer was installed by `set_thread_gc_heap`,
        // whose contract requires it to remain valid while registered.
        unsafe { &*ptr }
    })
}
567
#[cfg(test)]
mod tests {
    use super::*;
    use crate::header::{GcColor, GcHeader};

    // Objects are laid out as [GcHeader | payload]; step back from the
    // payload pointer to reach the header.
    fn header_of(ptr: *mut u8) -> &'static GcHeader {
        unsafe {
            let header_ptr = ptr.sub(std::mem::size_of::<GcHeader>()) as *const GcHeader;
            &*header_ptr
        }
    }

    // Mutable variant of `header_of`.
    fn header_of_mut(ptr: *mut u8) -> &'static mut GcHeader {
        unsafe {
            let header_ptr = ptr.sub(std::mem::size_of::<GcHeader>()) as *mut GcHeader;
            &mut *header_ptr
        }
    }

    #[test]
    fn test_alloc_returns_valid_pointer() {
        let heap = GcHeap::new();
        let ptr = heap.alloc(42u64);
        assert!(!ptr.is_null());
        let val = unsafe { *ptr };
        assert_eq!(val, 42u64);
    }

    #[test]
    fn test_alloc_multiple_distinct() {
        let heap = GcHeap::new();
        let a = heap.alloc(1u64);
        let b = heap.alloc(2u64);
        let c = heap.alloc(3u64);
        assert_ne!(a, b);
        assert_ne!(b, c);
        assert_ne!(a, c);
        unsafe {
            assert_eq!(*a, 1);
            assert_eq!(*b, 2);
            assert_eq!(*c, 3);
        }
    }

    #[test]
    fn test_collect_with_empty_roots_clears_all() {
        let mut heap = GcHeap::new();
        let _a = heap.alloc(100u64);
        let _b = heap.alloc(200u64);

        // No roots reported: everything is garbage.
        heap.collect(&mut |_visitor| {
        });

        assert!(heap.stats().collections == 1);
        assert!(heap.stats().total_collected_bytes > 0);
    }

    #[test]
    fn test_live_object_survives_collection() {
        let mut heap = GcHeap::new();
        let ptr = heap.alloc(12345u64);

        heap.collect(&mut |visitor| {
            visitor(ptr as *mut u8);
        });

        let val = unsafe { *ptr };
        assert_eq!(val, 12345);
        // Survivors are recolored white, ready for the next cycle.
        let header = header_of(ptr as *mut u8);
        assert_eq!(header.color(), GcColor::White);
    }

    #[test]
    fn test_multiple_live_objects_survive() {
        let mut heap = GcHeap::new();
        let a = heap.alloc(111u64);
        let b = heap.alloc(222u64);
        let c = heap.alloc(333u64);

        heap.collect(&mut |visitor| {
            visitor(a as *mut u8);
            visitor(b as *mut u8);
            visitor(c as *mut u8);
        });

        unsafe {
            assert_eq!(*a, 111);
            assert_eq!(*b, 222);
            assert_eq!(*c, 333);
        }
    }

    #[test]
    fn test_unreachable_objects_collected() {
        let mut heap = GcHeap::with_threshold(1024);
        let live = heap.alloc(1u64);
        let _dead1 = heap.alloc(2u64);
        let _dead2 = heap.alloc(3u64);

        heap.collect(&mut |visitor| {
            visitor(live as *mut u8);
        });

        let stats = heap.stats();
        assert_eq!(stats.collections, 1);
        // At least the two dead u64 payloads must have been reclaimed.
        assert!(stats.total_collected_bytes >= 2 * std::mem::size_of::<u64>() as u64);

        assert_eq!(unsafe { *live }, 1);
    }

    #[test]
    fn test_alloc_collect_cycle_repeated() {
        let mut heap = GcHeap::with_threshold(1024);

        // Alternate live and garbage allocations over several cycles.
        for cycle in 0..5 {
            let mut live_ptrs = Vec::new();
            for i in 0..10u64 {
                let ptr = heap.alloc(cycle * 100 + i);
                live_ptrs.push(ptr);
            }
            for _ in 0..10 {
                let _ = heap.alloc(0xDEADu64);
            }

            heap.collect(&mut |visitor| {
                for ptr in &live_ptrs {
                    visitor(*ptr as *mut u8);
                }
            });

            for (i, ptr) in live_ptrs.iter().enumerate() {
                let val = unsafe { **ptr };
                assert_eq!(val, cycle * 100 + i as u64);
            }
        }

        let stats = heap.stats();
        assert_eq!(stats.collections, 5);
    }

    #[test]
    fn test_should_collect_threshold() {
        let heap = GcHeap::with_threshold(256);
        assert!(!heap.should_collect());

        // 20 * 64 bytes (plus headers) comfortably exceeds the threshold.
        for _ in 0..20 {
            let _ = heap.alloc([0u8; 64]);
        }
        assert!(heap.should_collect());
    }

    #[test]
    fn test_collect_resets_bytes_counter() {
        let mut heap = GcHeap::with_threshold(256);

        for _ in 0..20 {
            let _ = heap.alloc([0u8; 64]);
        }
        assert!(heap.should_collect());

        heap.collect(&mut |_visitor| {});

        // Collection zeroes `bytes_since_gc`.
        assert!(!heap.should_collect());
    }

    #[test]
    fn test_large_allocation() {
        let heap = GcHeap::new();
        let ptr = heap.alloc([0u8; 64 * 1024]);
        assert!(!ptr.is_null());
        let val = unsafe { &*ptr };
        assert_eq!(val.len(), 64 * 1024);
    }

    #[test]
    fn test_stats_initial_state() {
        let heap = GcHeap::new();
        let stats = heap.stats();
        assert_eq!(stats.collections, 0);
        assert_eq!(stats.total_collected_bytes, 0);
        assert!(stats.last_collection.is_none());
    }

    #[test]
    fn test_stats_after_collection() {
        let mut heap = GcHeap::new();
        let _dead = heap.alloc(42u64);

        heap.collect(&mut |_| {});

        let stats = heap.stats();
        assert_eq!(stats.collections, 1);
        assert!(stats.last_collection.is_some());
    }

    #[test]
    fn test_thread_local_gc_heap() {
        let mut heap = GcHeap::new();
        unsafe { set_thread_gc_heap(&mut heap as *mut _) };
        let tl = thread_gc_heap();
        let ptr = tl.alloc(999u64);
        assert!(!ptr.is_null());
        assert_eq!(unsafe { *ptr }, 999);
        // Clear the registration so the dangling pointer cannot outlive `heap`.
        unsafe { set_thread_gc_heap(std::ptr::null_mut()) };
    }

    #[test]
    fn test_partial_liveness() {
        let mut heap = GcHeap::new();
        let mut ptrs = Vec::new();
        for i in 0..20u64 {
            ptrs.push(heap.alloc(i));
        }

        // Keep only the even-indexed objects alive.
        let live_ptrs: Vec<*mut u64> = ptrs.iter().copied().step_by(2).collect();

        heap.collect(&mut |visitor| {
            for ptr in &live_ptrs {
                visitor(*ptr as *mut u8);
            }
        });

        for (idx, ptr) in live_ptrs.iter().enumerate() {
            let val = unsafe { **ptr };
            assert_eq!(val, (idx * 2) as u64);
        }

        // The 10 odd-indexed u64s should have been reclaimed.
        let stats = heap.stats();
        assert!(stats.total_collected_bytes >= 10 * std::mem::size_of::<u64>() as u64);
    }

    #[test]
    fn test_double_collection_safe() {
        let mut heap = GcHeap::new();
        let ptr = heap.alloc(42u64);

        heap.collect(&mut |visitor| {
            visitor(ptr as *mut u8);
        });
        assert_eq!(unsafe { *ptr }, 42);

        heap.collect(&mut |visitor| {
            visitor(ptr as *mut u8);
        });
        assert_eq!(unsafe { *ptr }, 42);
        assert_eq!(heap.stats().collections, 2);
    }

    #[test]
    fn test_heap_size_grows_with_allocation() {
        let heap = GcHeap::new();
        let size_before = heap.heap_size();
        let _ = heap.alloc(0u64);
        let size_after = heap.heap_size();
        assert!(size_after >= size_before);
        // At least one region must be mapped after the first allocation.
        assert!(size_after >= crate::region::REGION_SIZE);
    }

    #[test]
    fn test_trace_nanboxed_bits_heap_tag() {
        use crate::roots::trace_nanboxed_bits;

        // Heap-pointer tag: high bits 0xFFF8 with a zero sub-tag.
        let fake_ptr: u64 = 0x0000_1234_5678_0000;
        let tagged = 0xFFF8_0000_0000_0000u64 | fake_ptr;

        let mut found_ptrs = Vec::new();
        trace_nanboxed_bits(tagged, &mut |ptr| {
            found_ptrs.push(ptr as u64);
        });

        assert_eq!(found_ptrs.len(), 1);
        assert_eq!(found_ptrs[0], fake_ptr);
    }

    #[test]
    fn test_trace_nanboxed_bits_int_tag_is_noop() {
        use crate::roots::trace_nanboxed_bits;

        // Integer sub-tag (0b001 in bits 48..51) must not be traced.
        let int_tagged = 0xFFF8_0000_0000_0000u64 | (0b001u64 << 48) | 42;

        let mut found = false;
        trace_nanboxed_bits(int_tagged, &mut |_| {
            found = true;
        });
        assert!(!found);
    }

    #[cfg(feature = "gc-stress")]
    #[test]
    fn test_stress_mode_triggers_collection() {
        // High threshold so only stress mode can trigger collections.
        let mut heap = GcHeap::with_threshold(1024 * 1024);
        heap.enable_stress(StressConfig {
            collect_interval: 4,
        });

        let mut root: Option<*mut u64> = None;

        // 12 stressed allocations at interval 4 => exactly 3 collections.
        for i in 0..12u64 {
            let ptr = heap.alloc_stressed(i, &mut |visitor| {
                if let Some(r) = root {
                    visitor(r as *mut u8);
                }
            });
            root = Some(ptr);
        }

        assert_eq!(heap.stats().collections, 3);
    }

    #[cfg(feature = "gc-stress")]
    #[test]
    fn test_stress_mode_live_objects_survive() {
        let mut heap = GcHeap::with_threshold(1024 * 1024);
        heap.enable_stress(StressConfig {
            collect_interval: 2,
        });

        let root = heap.alloc(42u64);

        for i in 0..10u64 {
            let _garbage = heap.alloc_stressed(i * 100, &mut |visitor| {
                visitor(root as *mut u8);
            });
        }

        assert_eq!(unsafe { *root }, 42);
        // 10 allocations at interval 2 => at least 5 collections.
        assert!(heap.stats().collections >= 5);
    }

    #[test]
    fn test_incremental_marking_small_budget() {
        let mut heap = GcHeap::new();

        let mut ptrs = Vec::new();
        for i in 0..10u64 {
            ptrs.push(heap.alloc(i));
        }

        let ptrs_clone = ptrs.clone();

        // A budget of 2 forces marking to span several steps.
        let mut steps = 0;
        loop {
            let result = heap.collect_incremental(2, &mut |visitor| {
                for &ptr in &ptrs_clone {
                    visitor(ptr as *mut u8);
                }
            });
            steps += 1;
            if let CollectResult::Complete(_stats) = result {
                break;
            }
            assert!(steps < 100, "incremental marking did not converge");
        }

        assert!(steps >= 2, "expected multiple steps, got {}", steps);

        for (i, &ptr) in ptrs.iter().enumerate() {
            let val = unsafe { *ptr };
            assert_eq!(val, i as u64);
        }

        // One full cycle, regardless of how many steps it took.
        assert_eq!(heap.stats().collections, 1);
    }

    #[test]
    fn test_satb_barrier_captures_during_marking() {
        let mut heap = GcHeap::new();

        let live = heap.alloc(100u64);
        let overwritten = heap.alloc(200u64);

        // Budget 0: start marking but don't finish this step.
        let first = heap.collect_incremental(0, &mut |visitor| {
            visitor(live as *mut u8);
        });
        assert_eq!(first, CollectResult::InProgress);
        assert!(heap.is_marking());

        // Simulate a mutator overwriting a reference mid-mark.
        heap.write_barrier(overwritten as *mut u8);

        loop {
            let result = heap.collect_incremental(100, &mut |visitor| {
                visitor(live as *mut u8);
            });
            if let CollectResult::Complete(stats) = result {
                assert_eq!(
                    stats.objects_collected, 0,
                    "SATB should have saved the overwritten reference"
                );
                break;
            }
        }
    }

    #[test]
    fn test_mark_termination_drains_satb() {
        let mut heap = GcHeap::new();
        let root = heap.alloc(1u64);
        let saved = heap.alloc(2u64);

        let _ = heap.collect_incremental(0, &mut |visitor| {
            visitor(root as *mut u8);
        });

        // Enqueue directly; termination must drain it before completing.
        heap.satb_buffer_mut().enqueue(saved as *mut u8);

        loop {
            let result = heap.collect_incremental(100, &mut |visitor| {
                visitor(root as *mut u8);
            });
            if let CollectResult::Complete(_) = result {
                break;
            }
        }

        assert_eq!(unsafe { *root }, 1);
        assert_eq!(unsafe { *saved }, 2);
    }

    #[test]
    fn test_incremental_sweep_preserves_live() {
        let mut heap = GcHeap::new();
        let live = heap.alloc(42u64);
        let _dead = heap.alloc(99u64);

        loop {
            let result = heap.collect_incremental(100, &mut |visitor| {
                visitor(live as *mut u8);
            });
            if let CollectResult::Complete(stats) = result {
                assert!(stats.bytes_collected > 0);
                assert_eq!(stats.objects_collected, 1);
                break;
            }
        }

        assert_eq!(unsafe { *live }, 42);
    }

    #[test]
    fn test_full_incremental_cycle() {
        let mut heap = GcHeap::new();

        let mut live_ptrs = Vec::new();
        for i in 0..5u64 {
            live_ptrs.push(heap.alloc(i * 10));
        }
        for _ in 0..5 {
            let _ = heap.alloc(0xDEADu64);
        }

        let roots = live_ptrs.clone();
        let result = loop {
            let r = heap.collect_incremental(2, &mut |visitor| {
                for &ptr in &roots {
                    visitor(ptr as *mut u8);
                }
            });
            if let CollectResult::Complete(stats) = r {
                break stats;
            }
        };

        // 5 dead objects reclaimed, 5 live retained.
        assert_eq!(result.objects_collected, 5);
        assert!(result.bytes_collected >= 5 * std::mem::size_of::<u64>());
        assert!(result.bytes_retained >= 5 * std::mem::size_of::<u64>());

        for (i, &ptr) in live_ptrs.iter().enumerate() {
            assert_eq!(unsafe { *ptr }, (i as u64) * 10);
        }

        // Marker returns to idle once the cycle completes.
        assert_eq!(heap.stats().collections, 1);
        assert!(!heap.is_marking());
        assert_eq!(heap.mark_phase(), MarkPhase::Idle);
    }

    #[test]
    fn test_write_barrier_noop_when_not_marking() {
        let mut heap = GcHeap::new();
        assert!(!heap.is_marking());

        heap.write_barrier(0x1000 as *mut u8);
        assert!(heap.satb_buffer_mut().is_empty());
    }

    #[test]
    fn test_repeated_incremental_cycles() {
        let mut heap = GcHeap::new();
        let root = heap.alloc(42u64);

        for cycle in 0..3 {
            for _ in 0..5 {
                let _ = heap.alloc(0xDEADu64);
            }

            loop {
                let result = heap.collect_incremental(10, &mut |visitor| {
                    visitor(root as *mut u8);
                });
                if let CollectResult::Complete(_) = result {
                    break;
                }
            }

            assert_eq!(heap.stats().collections, cycle + 1);
            assert_eq!(unsafe { *root }, 42);
        }
    }

    #[test]
    fn test_combined_write_barrier_card_table_dirty() {
        let mut heap = GcHeap::new();

        // Synthetic address range; the card table only tracks addresses.
        let base_addr = 0x1_0000usize;
        let size = 4096;
        heap.generations_mut().init_card_table(base_addr, size);

        let slot_addr = base_addr + 100;
        assert!(!heap.generations().card_table().unwrap().is_dirty(slot_addr));

        heap.write_barrier_combined(slot_addr, 0xBEEF as *mut u8);

        assert!(heap.generations().card_table().unwrap().is_dirty(slot_addr));
    }

    #[test]
    fn test_combined_write_barrier_satb_enqueue_during_marking() {
        let mut heap = GcHeap::new();
        let live = heap.alloc(10u64);

        let _ = heap.collect_incremental(0, &mut |visitor| {
            visitor(live as *mut u8);
        });
        assert!(heap.is_marking());

        // A real stack slot so the barrier can read the old value.
        let mut slot: *mut u8 = 0xABCD_0000 as *mut u8;
        let slot_addr = &mut slot as *mut *mut u8 as usize;

        heap.write_barrier_combined(slot_addr, 0x5678 as *mut u8);

        // The *old* slot value was snapshotted, not the new one.
        assert!(!heap.satb_buffer_mut().is_empty());
        let drained = heap.satb_buffer_mut().drain();
        assert_eq!(drained.len(), 1);
        assert_eq!(drained[0], 0xABCD_0000 as *mut u8);
    }

    #[test]
    fn test_combined_write_barrier_satb_noop_when_not_marking() {
        let mut heap = GcHeap::new();
        assert!(!heap.is_marking());

        let base_addr = 0x2_0000usize;
        heap.generations_mut().init_card_table(base_addr, 4096);

        let slot_addr = base_addr + 256;

        heap.write_barrier_combined(slot_addr, 0x1234 as *mut u8);

        // Card marking happens regardless; SATB only while marking.
        assert!(heap.generations().card_table().unwrap().is_dirty(slot_addr));
        assert!(heap.satb_buffer_mut().is_empty());
    }

    #[test]
    fn test_combined_write_barrier_no_card_table() {
        let mut heap = GcHeap::new();
        let slot_addr = 0x3_0000usize;
        // Must not panic when no card table is configured.
        heap.write_barrier_combined(slot_addr, 0x1234 as *mut u8);
        assert!(heap.generations().card_table().is_none());
    }

    #[test]
    fn test_combined_write_barrier_both_card_and_satb() {
        let mut heap = GcHeap::new();
        let live = heap.alloc(42u64);

        let _ = heap.collect_incremental(0, &mut |visitor| {
            visitor(live as *mut u8);
        });
        assert!(heap.is_marking());

        let mut slot: *mut u8 = 0xDEAD_BEEF as *mut u8;
        let slot_addr = &mut slot as *mut *mut u8 as usize;

        // Card table covering the page holding the stack slot.
        let card_base = slot_addr & !0xFFF;
        heap.generations_mut().init_card_table(card_base, 8192);

        heap.write_barrier_combined(slot_addr, 0x1111 as *mut u8);

        assert!(!heap.satb_buffer_mut().is_empty());
        let drained = heap.satb_buffer_mut().drain();
        assert_eq!(drained[0], 0xDEAD_BEEF as *mut u8);

        assert!(heap.generations().card_table().unwrap().is_dirty(slot_addr));
    }
}