//! Host functions exposed to JIT-compiled code (tidepool_codegen/host_fns.rs).
1use crate::context::VMContext;
2use crate::gc::frame_walker::{self, StackRoot};
3use crate::stack_map::StackMapRegistry;
4use std::cell::RefCell;
5use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
6
/// Signature of the test hook that `gc_trigger` invokes with the roots it collected.
type GcHook = fn(&[StackRoot]);
8
/// Runtime errors raised by JIT code via host functions.
///
/// `Copy`/`PartialEq`/`Eq` are derived so callers of `take_runtime_error`
/// can compare errors directly instead of matching on Debug output.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RuntimeError {
    /// kind 0 from `runtime_error`: integer division by zero.
    DivisionByZero,
    /// kind 1 from `runtime_error`: arithmetic overflow.
    Overflow,
}
15
16impl std::fmt::Display for RuntimeError {
17    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
18        match self {
19            RuntimeError::DivisionByZero => write!(f, "division by zero"),
20            RuntimeError::Overflow => write!(f, "arithmetic overflow"),
21        }
22    }
23}
24
thread_local! {
    /// Registry of stack maps for JIT functions.
    /// This is set before calling into JIT code so gc_trigger can access it.
    /// Stored as a raw pointer — see `set_stack_map_registry` for the
    /// lifetime contract the caller must uphold.
    static STACK_MAP_REGISTRY: RefCell<Option<*const StackMapRegistry>> = const { RefCell::new(None) };

    /// Collected roots from the last gc_trigger call.
    /// Used for test inspection.
    static LAST_ROOTS: RefCell<Vec<StackRoot>> = const { RefCell::new(Vec::new()) };

    /// Optional test hook invoked by `gc_trigger` with the freshly collected roots.
    static HOOK: RefCell<Option<GcHook>> = const { RefCell::new(None) };

    /// Runtime error from JIT code. Checked after JIT returns.
    static RUNTIME_ERROR: RefCell<Option<RuntimeError>> = const { RefCell::new(None) };
}
39
/// GC trigger: called by JIT code when alloc_ptr exceeds alloc_limit.
///
/// This function MUST be compiled with frame pointers preserved
/// (the whole crate uses preserve_frame_pointers, and the Rust profile
/// should have force-frame-pointers = true for the gc path).
///
/// The frame walker in gc_trigger reads RBP to walk the JIT stack.
///
/// On non-x86_64 targets only the test counters are updated; the stack-walk
/// block below is compiled out.
#[inline(never)]
pub extern "C" fn gc_trigger(vmctx: *mut VMContext) {
    // Force a frame to be created
    let mut _dummy = [0u64; 2];
    std::hint::black_box(&mut _dummy);

    // Test instrumentation: record the call and the vmctx it was given.
    GC_TRIGGER_CALL_COUNT.fetch_add(1, Ordering::SeqCst);
    GC_TRIGGER_LAST_VMCTX.store(vmctx as usize, Ordering::SeqCst);

    #[cfg(target_arch = "x86_64")]
    {
        let rbp: usize;
        let rsp: usize;
        // SAFETY: the asm only copies rbp/rsp into locals; `nomem`/`nostack`
        // hold because it neither accesses memory nor adjusts the stack.
        // rbp is only meaningful as a frame pointer when frame pointers are
        // preserved (see the doc comment above).
        unsafe {
            std::arch::asm!("mov {}, rbp", out(reg) rbp, options(nomem, nostack));
            std::arch::asm!("mov {}, rsp", out(reg) rsp, options(nomem, nostack));
        }

        STACK_MAP_REGISTRY.with(|reg_cell| {
            if let Some(registry_ptr) = *reg_cell.borrow() {
                // SAFETY: `set_stack_map_registry`'s contract requires the
                // registry to outlive any JIT execution that can reach here.
                let registry = unsafe { &*registry_ptr };
                // Walk frames starting from gc_trigger's own frame.
                // SAFETY: rbp/rsp were read from this frame just above;
                // presumably the walker stops at the first non-JIT frame —
                // TODO(review) confirm against frame_walker::walk_frames.
                let roots = unsafe { frame_walker::walk_frames(rbp, registry, rsp) };

                // Call test hook if present
                HOOK.with(|hook_cell| {
                    if let Some(hook) = *hook_cell.borrow() {
                        hook(&roots);
                    }
                });

                // Stash the roots for later inspection via last_gc_roots().
                LAST_ROOTS.with(|roots_cell| {
                    *roots_cell.borrow_mut() = roots;
                });
            }
        });
    }
}
85
86/// Set a hook to be called during gc_trigger with the collected roots.
87pub fn set_gc_test_hook(hook: GcHook) {
88    HOOK.with(|hook_cell| {
89        *hook_cell.borrow_mut() = Some(hook);
90    });
91}
92
93/// Clear the GC test hook.
94pub fn clear_gc_test_hook() {
95    HOOK.with(|hook_cell| {
96        *hook_cell.borrow_mut() = None;
97    });
98}
99
100/// Set the stack map registry for the current thread.
101///
102/// # Safety
103/// The registry must outlive any JIT code execution that might trigger GC, and should
104/// be cleared (via `clear_stack_map_registry`) before the registry is dropped.
105pub fn set_stack_map_registry(registry: &StackMapRegistry) {
106    STACK_MAP_REGISTRY.with(|reg_cell| {
107        *reg_cell.borrow_mut() = Some(registry as *const _);
108    });
109}
110
111/// Clear the stack map registry for the current thread.
112pub fn clear_stack_map_registry() {
113    STACK_MAP_REGISTRY.with(|reg_cell| {
114        *reg_cell.borrow_mut() = None;
115    });
116}
117
118/// Get collected roots from the last gc_trigger call.
119pub fn last_gc_roots() -> Vec<StackRoot> {
120    LAST_ROOTS.with(|roots_cell| roots_cell.borrow().clone())
121}
122
123/// Heap allocation: called by JIT code for large or slow-path allocations.
124pub extern "C" fn heap_alloc(_vmctx: *mut VMContext, _size: u64) -> *mut u8 {
125    std::ptr::null_mut() // Placeholder for scaffold
126}
127
128/// Force a thunk to WHNF.
129pub extern "C" fn heap_force(_vmctx: *mut VMContext, _thunk: *mut u8) -> *mut u8 {
130    std::ptr::null_mut() // Placeholder for scaffold
131}
132
// Test instrumentation — NOT part of the public API.
// These use atomics to be thread-safe during parallel test execution.
/// Number of times `gc_trigger` has been entered, across all threads.
static GC_TRIGGER_CALL_COUNT: AtomicU64 = AtomicU64::new(0);
/// The vmctx pointer (stored as usize) from the most recent `gc_trigger` call.
static GC_TRIGGER_LAST_VMCTX: AtomicUsize = AtomicUsize::new(0);
137
138/// Reset test counters. Only call from tests.
139pub fn reset_test_counters() {
140    GC_TRIGGER_CALL_COUNT.store(0, Ordering::SeqCst);
141    GC_TRIGGER_LAST_VMCTX.store(0, Ordering::SeqCst);
142    LAST_ROOTS.with(|roots_cell| {
143        roots_cell.borrow_mut().clear();
144    });
145}
146
147/// Get gc_trigger call count. Only call from tests.
148pub fn gc_trigger_call_count() -> u64 {
149    GC_TRIGGER_CALL_COUNT.load(Ordering::SeqCst)
150}
151
152/// Get last vmctx passed to gc_trigger. Only call from tests.
153pub fn gc_trigger_last_vmctx() -> usize {
154    GC_TRIGGER_LAST_VMCTX.load(Ordering::SeqCst)
155}
156
/// Called by JIT code when an unresolved external variable is forced.
/// Returns null to allow execution to continue (will likely segfault later).
/// In debug mode (TIDEPOOL_TRACE), logs and returns null.
///
/// The VarId packs a one-byte tag in the top 8 bits and a 56-bit key below it.
pub extern "C" fn unresolved_var_trap(var_id: u64) -> *mut u8 {
    /// Low 56 bits of a VarId hold the key.
    const KEY_MASK: u64 = (1u64 << 56) - 1;
    let tag_char = char::from((var_id >> 56) as u8);
    let key = var_id & KEY_MASK;
    eprintln!(
        "[FATAL] Forced unresolved external variable: VarId({}) [tag='{}', key={}]",
        var_id, tag_char, key
    );
    eprintln!("  Backtrace: set RUST_BACKTRACE=1 for details");
    std::ptr::null_mut()
}
170
171/// Called by JIT code for runtime errors (divZeroError, overflowError).
172/// Sets a thread-local error flag and returns null. The effect machine
173/// checks this after JIT returns and converts to Yield::Error.
174/// kind: 0 = divZeroError, 1 = overflowError
175pub extern "C" fn runtime_error(kind: u64) -> *mut u8 {
176    let err = match kind {
177        0 => RuntimeError::DivisionByZero,
178        1 => RuntimeError::Overflow,
179        _ => RuntimeError::Overflow,
180    };
181    RUNTIME_ERROR.with(|cell| {
182        *cell.borrow_mut() = Some(err);
183    });
184    std::ptr::null_mut()
185}
186
187/// Check and take any pending runtime error from JIT code.
188pub fn take_runtime_error() -> Option<RuntimeError> {
189    RUNTIME_ERROR.with(|cell| cell.borrow_mut().take())
190}
191
192/// Return the list of host function symbols for JIT registration.
193///
194/// Usage: `CodegenPipeline::new(&host_fn_symbols())`
195pub fn host_fn_symbols() -> Vec<(&'static str, *const u8)> {
196    vec![
197        ("gc_trigger", gc_trigger as *const u8),
198        ("heap_alloc", heap_alloc as *const u8),
199        ("heap_force", heap_force as *const u8),
200        ("unresolved_var_trap", unresolved_var_trap as *const u8),
201        ("runtime_error", runtime_error as *const u8),
202    ]
203}