//! tidepool_codegen/gc/frame_walker.rs
use crate::stack_map::StackMapRegistry;

/// A collected GC root: the address on the stack where a heap pointer lives.
#[derive(Debug, Clone, Copy)]
pub struct StackRoot {
    /// Location of the stack slot that holds the heap pointer.
    pub stack_slot_addr: *mut u64,
    /// The heap pointer's value as observed during the frame walk.
    pub heap_ptr: *mut u8,
}
12/// Walk JIT frames starting from the given frame pointer, collecting all GC roots.
13///
14/// Uses Cranelift's `frame_size` metadata (the FP-to-SP distance, aka `active_size()`)
15/// to compute SP at each safepoint: `SP = caller_FP - frame_size`. This is the same
16/// approach Wasmtime uses and is correct on both x86_64 and aarch64, regardless of
17/// prologue structure or callee-saved register layout.
18///
19/// # Safety
20/// - `start_fp` must be a valid frame pointer from within a JIT call chain
21/// (typically gc_trigger's FP, read via inline asm).
22/// - `stack_maps` must contain entries for all JIT functions in the call chain.
23/// - All frames in the chain must have frame pointers (`force-frame-pointers = true`).
24#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
25pub unsafe fn walk_frames(start_fp: usize, stack_maps: &StackMapRegistry) -> Vec<StackRoot> {
26 // SAFETY: Caller guarantees start_fp is a valid frame pointer from a JIT call chain,
27 // stack_maps contains entries for all JIT functions, and all frames use frame pointers.
28 let mut roots = Vec::new();
29 let mut fp = start_fp;
30
31 loop {
32 if fp == 0 {
33 break;
34 }
35
36 // [FP+8] = return address (into the caller of this frame's function)
37 let return_addr = *((fp + 8) as *const usize);
38
39 // Check if this return address is in JIT code
40 if !stack_maps.contains_address(return_addr) {
41 // Not a JIT frame — skip it and keep walking.
42 // This handles both pre-JIT frames (gc_trigger → perform_gc)
43 // and JIT→Host→JIT sandwiches (heap_force, trampoline_resolve).
44 let next_fp = *(fp as *const usize);
45 if next_fp == 0 || next_fp == fp || next_fp <= fp {
46 break;
47 }
48 fp = next_fp;
49 continue;
50 }
51
52 // Found a JIT return address. The stack map at this address describes
53 // the caller's GC roots at the point it made the call.
54 if let Some(info) = stack_maps.lookup(return_addr) {
55 // The caller's FP is saved at [current_FP + 0].
56 let caller_fp = *(fp as *const usize);
57 // SP at the safepoint = caller's FP - caller's active frame size.
58 // Cranelift's frame_size is active_size(): the distance from FP down to SP.
59 let sp_at_safepoint = caller_fp - info.frame_size as usize;
60
61 for &offset in &info.offsets {
62 let root_addr = (sp_at_safepoint + offset as usize) as *mut u64;
63 let heap_ptr = *root_addr as *mut u8;
64 roots.push(StackRoot {
65 stack_slot_addr: root_addr,
66 heap_ptr,
67 });
68 }
69 }
70
71 // Walk to next frame: [FP+0] is the saved caller FP
72 let next_fp = *(fp as *const usize);
73
74 // Sanity checks to prevent infinite loops
75 if next_fp == 0 || next_fp == fp || next_fp <= fp {
76 break;
77 }
78 fp = next_fp;
79 }
80
81 roots
82}
84/// Rewrite forwarding pointers in stack slots after GC.
85///
86/// For each root, if the heap object has been moved (forwarding pointer),
87/// update the stack slot to point to the new location.
88///
89/// # Safety
90/// All roots must still be valid stack addresses.
91pub unsafe fn rewrite_roots(roots: &[StackRoot], forwarding_map: &dyn Fn(*mut u8) -> *mut u8) {
92 // SAFETY: Caller guarantees all roots contain valid stack addresses from a recent walk_frames call.
93 for root in roots {
94 let new_ptr = forwarding_map(root.heap_ptr);
95 if new_ptr != root.heap_ptr {
96 *root.stack_slot_addr = new_ptr as u64;
97 }
98 }
99}