// shape_jit/translator/compiler.rs
1//! Main compilation logic for BytecodeToIR
2
3use cranelift::codegen::ir::FuncRef;
4use cranelift::prelude::*;
5use std::collections::HashMap;
6
7use crate::context::*;
8use crate::nan_boxing::*;
9use shape_vm::bytecode::{BytecodeProgram, DeoptInfo, InlineFrameInfo, OpCode, Operand};
10use shape_vm::feedback::FeedbackVector;
11use shape_vm::type_tracking::{SlotKind, StorageHint};
12
13use super::loop_analysis;
14use super::types::{BytecodeToIR, CompilationMode, FFIFuncRefs, InlineCandidate};
15use crate::optimizer;
16
17impl<'a, 'b> BytecodeToIR<'a, 'b> {
18    pub(crate) fn new(
19        builder: &'a mut FunctionBuilder<'b>,
20        program: &'a BytecodeProgram,
21        ctx_ptr: Value,
22        ffi: FFIFuncRefs,
23        user_funcs: HashMap<u16, FuncRef>,
24        user_func_arities: HashMap<u16, u16>,
25    ) -> Self {
26        // Pre-compute loop end targets by scanning for matching LoopStart/LoopEnd pairs
27        let mut loop_ends = HashMap::new();
28        let mut loop_starts = Vec::new();
29        for (i, instr) in program.instructions.iter().enumerate() {
30            match instr.opcode {
31                OpCode::LoopStart => loop_starts.push(i),
32                OpCode::LoopEnd => {
33                    if let Some(start_idx) = loop_starts.pop() {
34                        loop_ends.insert(start_idx, i);
35                    }
36                }
37                _ => {}
38            }
39        }
40
41        // Run loop analysis before code generation
42        let loop_info = loop_analysis::analyze_loops(program);
43        let optimization_plan = optimizer::build_function_plan(program, &loop_info);
44
45        // Analyze which functions can be inlined at call sites
46        let inline_candidates = Self::analyze_inline_candidates(program);
47        let mut local_types = HashMap::new();
48        for (idx, hint) in program
49            .top_level_local_storage_hints
50            .iter()
51            .copied()
52            .enumerate()
53        {
54            if hint != StorageHint::Unknown {
55                local_types.insert(idx as u16, hint);
56            }
57        }
58        let mut module_binding_types = HashMap::new();
59        for (idx, hint) in program
60            .module_binding_storage_hints
61            .iter()
62            .copied()
63            .enumerate()
64        {
65            if hint != StorageHint::Unknown {
66                module_binding_types.insert(idx as u16, hint);
67            }
68        }
69
70        Self {
71            builder,
72            program,
73            ctx_ptr,
74            stack_depth: 0,
75            stack_vars: HashMap::new(),
76            locals: HashMap::new(),
77            next_var: 0,
78            blocks: HashMap::new(),
79            current_block_idx: 0,
80            ffi,
81            loop_stack: Vec::new(),
82            loop_ends,
83            exit_block: None,
84            compile_time_sp: 0,
85            merge_blocks: std::collections::HashSet::new(),
86            block_stack_depth: HashMap::new(),
87            pending_data_offset: None,
88            exception_handlers: Vec::new(),
89            current_instr_idx: 0,
90            user_funcs,
91            user_func_arities,
92            stack_types: HashMap::new(),
93            local_types,
94            module_binding_types,
95            typed_stack: super::storage::TypedStack::new(),
96            // Kernel mode fields (unused in standard mode)
97            mode: CompilationMode::Standard,
98            kernel_cursor_index: None,
99            kernel_series_ptrs: None,
100            kernel_state_ptr: None,
101            kernel_config: None,
102            loop_info,
103            optimization_plan,
104            hoisted_locals: HashMap::new(),
105            local_f64_cache: HashMap::new(),
106            // Function inlining
107            inline_candidates,
108            inline_local_base: 0,
109            inline_depth: 0,
110            // Reference tracking
111            ref_stack_slots: HashMap::new(),
112            // Integer unboxing
113            unboxed_int_locals: std::collections::HashSet::new(),
114            unboxed_int_module_bindings: std::collections::HashSet::new(),
115            promoted_module_bindings: HashMap::new(),
116            register_carried_module_bindings: std::collections::HashSet::new(),
117            unboxed_loop_depth: 0,
118            unboxed_scope_stack: Vec::new(),
119            register_carried_loop_depth: 0,
120            pending_rebox: None,
121            pending_rebox_module_bindings: None,
122            pending_flush_module_bindings: None,
123            // Float unboxing
124            unboxed_f64_locals: std::collections::HashSet::new(),
125            f64_local_vars: HashMap::new(),
126            pending_rebox_f64: None,
127            precomputed_f64_for_invariant_int: HashMap::new(),
128            precomputed_f64_scope_stack: Vec::new(),
129            // Skip ranges (empty by default)
130            skip_ranges: Vec::new(),
131            // Array LICM
132            hoisted_array_info: HashMap::new(),
133            hoisted_ref_array_info: HashMap::new(),
134            // Numeric parameter hints (compile-time)
135            numeric_param_hints: std::collections::HashSet::new(),
136            deopt_block: None,
137            deopt_signal_var: None,
138            // Deopt tracking
139            deopt_points: Vec::new(),
140            func_locals_count: 0,
141            deferred_spills: Vec::new(),
142            // Loop unrolling
143            pending_unroll: None,
144            trusted_array_push_local_sites: std::collections::HashSet::new(),
145            trusted_array_push_local_iv_by_site: HashMap::new(),
146            // Shape guard tracking
147            shape_guards_emitted: Vec::new(),
148            // Feedback-guided speculation (populated by Tier 2 requests)
149            feedback: None,
150            // Multi-frame inline deopt
151            compiling_function_id: 0, // Set by caller (compile_optimizing_function)
152            inline_frame_stack: Vec::new(),
153        }
154    }
155
156    /// Create compiler in kernel mode for simulation hot path.
157    ///
158    /// Kernel mode bypasses JITContext and uses direct pointers:
159    /// - cursor_index: Current row in the simulation (usize)
160    /// - series_ptrs: Pointer to series data array (*const *const f64)
161    /// - state_ptr: Pointer to TypedObject state buffer (*mut u8)
162    ///
163    /// This enables >10M ticks/sec by eliminating all indirection.
164    pub(crate) fn new_kernel_mode(
165        builder: &'a mut FunctionBuilder<'b>,
166        program: &'a BytecodeProgram,
167        cursor_index: Value,
168        series_ptrs: Value,
169        state_ptr: Value,
170        ffi: FFIFuncRefs,
171        config: SimulationKernelConfig,
172    ) -> Self {
173        // Pre-compute loop ends (same as standard mode)
174        let mut loop_ends = HashMap::new();
175        let mut loop_starts = Vec::new();
176        for (i, instr) in program.instructions.iter().enumerate() {
177            match instr.opcode {
178                OpCode::LoopStart => loop_starts.push(i),
179                OpCode::LoopEnd => {
180                    if let Some(start_idx) = loop_starts.pop() {
181                        loop_ends.insert(start_idx, i);
182                    }
183                }
184                _ => {}
185            }
186        }
187
188        // Run loop analysis for kernel mode too
189        let loop_info = loop_analysis::analyze_loops(program);
190        let optimization_plan = optimizer::build_function_plan(program, &loop_info);
191        let mut local_types = HashMap::new();
192        for (idx, hint) in program
193            .top_level_local_storage_hints
194            .iter()
195            .copied()
196            .enumerate()
197        {
198            if hint != StorageHint::Unknown {
199                local_types.insert(idx as u16, hint);
200            }
201        }
202        let mut module_binding_types = HashMap::new();
203        for (idx, hint) in program
204            .module_binding_storage_hints
205            .iter()
206            .copied()
207            .enumerate()
208        {
209            if hint != StorageHint::Unknown {
210                module_binding_types.insert(idx as u16, hint);
211            }
212        }
213
214        Self {
215            builder,
216            program,
217            ctx_ptr: cursor_index, // Reuse field (not used as ctx in kernel mode)
218            stack_depth: 0,
219            stack_vars: HashMap::new(),
220            locals: HashMap::new(),
221            next_var: 0,
222            blocks: HashMap::new(),
223            current_block_idx: 0,
224            ffi,
225            loop_stack: Vec::new(),
226            loop_ends,
227            exit_block: None,
228            compile_time_sp: 0,
229            merge_blocks: std::collections::HashSet::new(),
230            block_stack_depth: HashMap::new(),
231            pending_data_offset: None,
232            exception_handlers: Vec::new(),
233            current_instr_idx: 0,
234            user_funcs: HashMap::new(),
235            user_func_arities: HashMap::new(),
236            stack_types: HashMap::new(),
237            local_types,
238            module_binding_types,
239            typed_stack: super::storage::TypedStack::new(),
240            // Kernel mode fields
241            mode: CompilationMode::Kernel,
242            kernel_cursor_index: Some(cursor_index),
243            kernel_series_ptrs: Some(series_ptrs),
244            kernel_state_ptr: Some(state_ptr),
245            kernel_config: Some(config),
246            loop_info,
247            optimization_plan,
248            hoisted_locals: HashMap::new(),
249            local_f64_cache: HashMap::new(),
250            // No inlining in kernel mode (no user functions)
251            inline_candidates: HashMap::new(),
252            inline_local_base: 0,
253            inline_depth: 0,
254            // Reference tracking
255            ref_stack_slots: HashMap::new(),
256            // Integer unboxing
257            unboxed_int_locals: std::collections::HashSet::new(),
258            unboxed_int_module_bindings: std::collections::HashSet::new(),
259            promoted_module_bindings: HashMap::new(),
260            register_carried_module_bindings: std::collections::HashSet::new(),
261            unboxed_loop_depth: 0,
262            unboxed_scope_stack: Vec::new(),
263            register_carried_loop_depth: 0,
264            pending_rebox: None,
265            pending_rebox_module_bindings: None,
266            pending_flush_module_bindings: None,
267            // Float unboxing
268            unboxed_f64_locals: std::collections::HashSet::new(),
269            f64_local_vars: HashMap::new(),
270            pending_rebox_f64: None,
271            precomputed_f64_for_invariant_int: HashMap::new(),
272            precomputed_f64_scope_stack: Vec::new(),
273            // Skip ranges (empty by default)
274            skip_ranges: Vec::new(),
275            // Array LICM
276            hoisted_array_info: HashMap::new(),
277            hoisted_ref_array_info: HashMap::new(),
278            // Numeric parameter hints (compile-time)
279            numeric_param_hints: std::collections::HashSet::new(),
280            deopt_block: None,
281            deopt_signal_var: None,
282            // Deopt tracking
283            deopt_points: Vec::new(),
284            func_locals_count: 0,
285            deferred_spills: Vec::new(),
286            // Loop unrolling
287            pending_unroll: None,
288            trusted_array_push_local_sites: std::collections::HashSet::new(),
289            trusted_array_push_local_iv_by_site: HashMap::new(),
290            // Shape guard tracking
291            shape_guards_emitted: Vec::new(),
292            // Feedback-guided speculation (not used in kernel mode)
293            feedback: None,
294            // Multi-frame inline deopt (not used in kernel mode)
295            compiling_function_id: 0,
296            inline_frame_stack: Vec::new(),
297        }
298    }
299
300    /// Check if an instruction index falls within a skip range.
301    fn is_skipped(&self, idx: usize) -> bool {
302        self.skip_ranges
303            .iter()
304            .any(|&(start, end)| idx >= start && idx < end)
305    }
306
    /// Lower the entire bytecode program into Cranelift IR.
    ///
    /// Two phases: (1) `create_blocks_for_jumps` pre-creates a basic block
    /// for every jump target so forward references can be emitted; (2) a
    /// linear walk over the instructions emits IR per opcode, switching
    /// blocks at recorded boundaries and stitching fallthrough edges.
    /// Afterwards it materializes the deferred deopt spill blocks, the
    /// shared deopt block, and the common exit epilogue that writes the
    /// result into the JIT context.
    ///
    /// Returns the i32 "signal" value: 0 on success, negative on deopt.
    pub(crate) fn compile(&mut self) -> Result<Value, String> {
        // Phase 1: Find all jump targets and create basic blocks
        self.create_blocks_for_jumps();

        // Create an exit block for the epilogue - all paths will jump here.
        // Its single I64 block param carries the NaN-boxed return value.
        let exit_block = self.builder.create_block();
        self.builder.append_block_param(exit_block, types::I64);
        self.exit_block = Some(exit_block);

        // Initialize function signal to success (0). Some guarded helper paths
        // may set this to a negative value and jump to exit_block.
        let deopt_signal_var = Variable::new(self.next_var);
        self.next_var += 1;
        self.builder.declare_var(deopt_signal_var, types::I32);
        let zero_i32 = self.builder.ins().iconst(types::I32, 0);
        self.builder.def_var(deopt_signal_var, zero_i32);
        self.deopt_signal_var = Some(deopt_signal_var);

        // Find the first non-skipped instruction and jump from entry to its block.
        let first_idx = (0..self.program.instructions.len())
            .find(|&i| !self.is_skipped(i))
            .unwrap_or(0);
        if let Some(&block0) = self.blocks.get(&first_idx) {
            // Seed Float64 hints for params known numeric at compile time;
            // sorted for deterministic codegen (HashSet order is unstable).
            if !self.numeric_param_hints.is_empty() {
                let mut params: Vec<u16> = self.numeric_param_hints.iter().copied().collect();
                params.sort_unstable();
                for local_idx in params {
                    self.local_types
                        .entry(local_idx)
                        .or_insert(StorageHint::Float64);
                }
            }
            self.builder.ins().jump(block0, &[]);
            self.block_stack_depth.insert(first_idx, 0);
        }

        // Phase 2: Compile instructions with control flow.
        // Cloned so the loop can call &mut self methods while iterating.
        let instrs = self.program.instructions.clone();
        let mut need_fallthrough = false;
        let mut block_terminated = false;

        for (i, instr) in instrs.iter().enumerate() {
            // Skip function body instructions (compiled separately)
            if self.is_skipped(i) {
                continue;
            }

            // If this index starts a pre-created block, close the previous
            // block (fallthrough edge) and switch emission to the new one.
            if let Some(&block) = self.blocks.get(&i) {
                if need_fallthrough && !block_terminated {
                    self.block_stack_depth.entry(i).or_insert(self.stack_depth);
                    if self.merge_blocks.contains(&i) {
                        // Merge blocks carry the top-of-stack as a block param.
                        let val = self.stack_pop().unwrap_or_else(|| {
                            self.builder.ins().iconst(types::I64, TAG_NULL as i64)
                        });
                        self.builder.ins().jump(block, &[val]);
                    } else {
                        self.builder.ins().jump(block, &[]);
                    }
                }
                self.builder.switch_to_block(block);
                self.current_block_idx = i;
                need_fallthrough = false;
                block_terminated = false;

                // Integer unboxing: rebox raw i64 locals at loop exit.
                // compile_loop_end sets pending_rebox; the rebox code runs at the
                // start of the loop's end_block (the first block switch after LoopEnd).
                if let Some(rebox_locals) = self.pending_rebox.take() {
                    for &local_idx in &rebox_locals {
                        let var = self.get_or_create_local(local_idx);
                        let raw_int = self.builder.use_var(var);
                        let f64_val = self.builder.ins().fcvt_from_sint(types::F64, raw_int);
                        let boxed = self.f64_to_i64(f64_val);
                        self.builder.def_var(var, boxed);
                    }
                }

                // Float unboxing rebox: convert raw f64 → NaN-boxed i64.
                if let Some(rebox_f64s) = self.pending_rebox_f64.take() {
                    for &local_idx in &rebox_f64s {
                        if let Some(&f64_var) = self.f64_local_vars.get(&local_idx) {
                            let f64_val = self.builder.use_var(f64_var);
                            let boxed = self.f64_to_i64(f64_val);
                            let i64_var = self.get_or_create_local(local_idx);
                            self.builder.def_var(i64_var, boxed);
                        }
                        self.f64_local_vars.remove(&local_idx);
                    }
                    // Only clear all f64 vars when no outer scopes remain
                    if self.unboxed_scope_stack.is_empty() {
                        self.f64_local_vars.clear();
                    }
                }

                // Rebox promoted module bindings: convert raw i64 → NaN-boxed
                // and write back to ctx.locals[] memory.
                if let Some(rebox_mbs) = self.pending_rebox_module_bindings.take() {
                    for &mb_idx in &rebox_mbs {
                        if let Some(&var) = self.promoted_module_bindings.get(&mb_idx) {
                            let raw_int = self.builder.use_var(var);
                            let f64_val = self.builder.ins().fcvt_from_sint(types::F64, raw_int);
                            let boxed = self.f64_to_i64(f64_val);
                            // Write back to memory
                            let byte_offset = LOCALS_OFFSET + (mb_idx as i32 * 8);
                            self.builder.ins().store(
                                MemFlags::new(),
                                boxed,
                                self.ctx_ptr,
                                byte_offset,
                            );
                        }
                        self.promoted_module_bindings.remove(&mb_idx);
                        self.register_carried_module_bindings.remove(&mb_idx);
                    }
                }

                // Flush boxed, register-carried module bindings to ctx.locals[] at loop exit.
                if let Some(flush_mbs) = self.pending_flush_module_bindings.take() {
                    for &mb_idx in &flush_mbs {
                        if let Some(&var) = self.promoted_module_bindings.get(&mb_idx) {
                            let val = self.builder.use_var(var);
                            let byte_offset = LOCALS_OFFSET + (mb_idx as i32 * 8);
                            self.builder.ins().store(
                                MemFlags::new(),
                                val,
                                self.ctx_ptr,
                                byte_offset,
                            );
                        }
                        self.promoted_module_bindings.remove(&mb_idx);
                        self.register_carried_module_bindings.remove(&mb_idx);
                    }
                }

                // Restore the compile-time stack model to this block's
                // recorded entry depth.
                if let Some(&expected_depth) = self.block_stack_depth.get(&i) {
                    self.stack_depth = expected_depth;
                    // Clear typed_stack at block boundaries: f64 SSA Values from
                    // predecessor blocks may not dominate this block, so cached
                    // shadows are invalid. The optimization still applies within
                    // basic blocks (where tight inner loops live).
                    self.typed_stack.clear();
                    // Clear local_f64_cache: cached f64 Values from predecessor
                    // blocks may not dominate this block.
                    self.local_f64_cache.clear();
                }

                // Merge blocks receive the joined top-of-stack as a param.
                if self.merge_blocks.contains(&i) {
                    let params = self.builder.block_params(block);
                    if !params.is_empty() {
                        self.stack_push(params[0]);
                    }
                }
            }

            // Instructions after a terminator are dead until the next block
            // boundary re-enters live code.
            if block_terminated {
                continue;
            }

            // Track current instruction index for property lookup in compile_get_prop
            self.current_instr_idx = i;
            self.compile_instruction(instr, i)?;

            match instr.opcode {
                OpCode::Jump
                | OpCode::Return
                | OpCode::ReturnValue
                | OpCode::Break
                | OpCode::Continue
                | OpCode::Throw => {
                    block_terminated = true;
                }
                OpCode::JumpIfFalse | OpCode::JumpIfFalseTrusted | OpCode::JumpIfTrue => {
                    block_terminated = true;
                }
                _ => {
                    // Plain instruction: remember whether the next index
                    // starts a block so we emit an explicit fallthrough jump.
                    need_fallthrough = self.blocks.contains_key(&(i + 1));
                }
            }
        }

        // Program fell off the end without a terminator: route the (possibly
        // empty) top-of-stack value to the exit block, defaulting to null.
        if !block_terminated {
            let default_val = self
                .stack_pop_boxed()
                .unwrap_or_else(|| self.builder.ins().iconst(types::I64, TAG_NULL as i64));
            self.builder.ins().jump(exit_block, &[default_val]);
        }

        for block in self.blocks.values() {
            self.builder.seal_block(*block);
        }

        // Emit deferred per-guard spill blocks.
        // Each block stores live locals + operand stack to ctx_buf,
        // then jumps to the shared deopt block with its deopt_id.
        let deferred = std::mem::take(&mut self.deferred_spills);
        for spill in &deferred {
            self.builder.switch_to_block(spill.block);

            // Store live locals to ctx_buf[LOCALS_OFFSET + bc_idx * 8]
            // Unboxed locals need type-aware storage:
            // - f64 locals: bitcast(I64, f64_val) to get raw bits
            // - int locals: store directly (raw i64 fits in u64)
            // - NaN-boxed: store as-is
            for &(bc_idx, var) in &spill.live_locals {
                let val = self.builder.use_var(var);
                let store_val = if spill.f64_locals.contains(&bc_idx) {
                    // Float-unboxed local: val is Cranelift f64, bitcast to i64 bits
                    // Check if this local has an f64 variable
                    if let Some(&f64_var) = self.f64_local_vars.get(&bc_idx) {
                        let f64_val = self.builder.use_var(f64_var);
                        self.builder
                            .ins()
                            .bitcast(types::I64, MemFlags::new(), f64_val)
                    } else {
                        // Fallback: the regular variable holds NaN-boxed, use as-is
                        val
                    }
                } else {
                    // Int-unboxed or NaN-boxed: store directly.
                    // Int locals hold raw i64, which unmarshal_jit_result handles
                    // with SlotKind::Int64 → ValueWord::from_i64(bits as i64).
                    // NaN-boxed locals store as-is (SlotKind::Unknown passthrough).
                    val
                };
                let offset = LOCALS_OFFSET + (bc_idx as i32) * 8;
                self.builder
                    .ins()
                    .store(MemFlags::trusted(), store_val, self.ctx_ptr, offset);
            }

            // Store on-stack operand values (via stack_vars)
            // NOTE(review): operand-stack spills start at ctx_buf slot 128,
            // which presumes locals never occupy slot 128 or above — confirm
            // against the VM-side unmarshal/deopt handler.
            for i in 0..spill.on_stack_count {
                let var = self.get_or_create_stack_var(i);
                let val = self.builder.use_var(var);
                let offset = LOCALS_OFFSET + (128 + i as i32) * 8;
                self.builder
                    .ins()
                    .store(MemFlags::trusted(), val, self.ctx_ptr, offset);
            }

            // Store extra pre-popped values (passed as block params)
            let block_params = self.builder.block_params(spill.block).to_vec();
            for (j, &param) in block_params.iter().enumerate() {
                let stack_pos = spill.on_stack_count + j;
                let offset = LOCALS_OFFSET + (128 + stack_pos as i32) * 8;
                self.builder
                    .ins()
                    .store(MemFlags::trusted(), param, self.ctx_ptr, offset);
            }

            // Store inline frame locals for multi-frame deopt
            let mut ctx_buf_pos = 128u16 + (spill.on_stack_count + spill.extra_param_count) as u16;
            // Use the ctx_buf_positions from the DeoptInfo inline_frames
            for iframe in &spill.inline_frames {
                for &(_, var) in &iframe.live_locals {
                    let val = self.builder.use_var(var);
                    let offset = LOCALS_OFFSET + (ctx_buf_pos as i32) * 8;
                    self.builder
                        .ins()
                        .store(MemFlags::trusted(), val, self.ctx_ptr, offset);
                    ctx_buf_pos += 1;
                }
            }

            // Jump to shared deopt block
            let deopt = self.get_or_create_deopt_block();
            let deopt_id_val = self
                .builder
                .ins()
                .iconst(types::I32, spill.deopt_id as i64);
            self.builder.ins().jump(deopt, &[deopt_id_val]);
            self.builder.seal_block(spill.block);
        }

        // Shared deopt block: record the deopt id, flag the signal, and fall
        // through to the common exit with a null result.
        if let Some(deopt_block) = self.deopt_block {
            self.builder.switch_to_block(deopt_block);
            let deopt_signal_var = self
                .deopt_signal_var
                .expect("deopt_signal_var must be initialized in compile()");
            let deopt_id_i32 = self.builder.block_params(deopt_block)[0];
            let deopt_id_u64 = self.builder.ins().uextend(types::I64, deopt_id_i32);
            // VM deopt handler reads deopt_id from ctx word 0.
            self.builder
                .ins()
                .store(MemFlags::trusted(), deopt_id_u64, self.ctx_ptr, 0);
            // u32::MAX - 1 is the sentinel deopt signal code.
            let deopt_code = self.builder.ins().iconst(types::I32, (u32::MAX - 1) as i64);
            self.builder.def_var(deopt_signal_var, deopt_code);
            let null_val = self.builder.ins().iconst(types::I64, TAG_NULL as i64);
            self.builder.ins().jump(exit_block, &[null_val]);
            self.builder.seal_block(deopt_block);
        }

        // Epilogue: write the return value to the context's stack slot and
        // set the VM stack pointer to 1 (one result on the stack).
        self.builder.switch_to_block(exit_block);
        self.builder.seal_block(exit_block);

        let ret_val_i64 = self.builder.block_params(exit_block)[0];

        self.builder
            .ins()
            .store(MemFlags::trusted(), ret_val_i64, self.ctx_ptr, STACK_OFFSET);

        let one = self.builder.ins().iconst(types::I64, 1);
        self.builder
            .ins()
            .store(MemFlags::trusted(), one, self.ctx_ptr, STACK_PTR_OFFSET);

        // Return signal (0 success, negative deopt).
        let signal_var = self
            .deopt_signal_var
            .expect("deopt_signal_var must be initialized in compile()");
        let signal = self.builder.use_var(signal_var);
        Ok(signal)
    }
620
621    fn create_blocks_for_jumps(&mut self) {
622        let mut block_starts: std::collections::HashSet<usize> = std::collections::HashSet::new();
623        let mut incoming_edges: HashMap<usize, usize> = HashMap::new();
624
625        for (i, instr) in self.program.instructions.iter().enumerate() {
626            if self.is_skipped(i) {
627                continue;
628            }
629            match instr.opcode {
630                OpCode::Jump => {
631                    if let Some(Operand::Offset(offset)) = &instr.operand {
632                        let target_idx = ((i as i32) + 1 + *offset) as usize;
633                        if !self.is_skipped(target_idx) {
634                            block_starts.insert(target_idx);
635                            *incoming_edges.entry(target_idx).or_insert(0) += 1;
636                        }
637                    }
638                }
639                OpCode::JumpIfFalse | OpCode::JumpIfFalseTrusted | OpCode::JumpIfTrue => {
640                    if let Some(Operand::Offset(offset)) = &instr.operand {
641                        let target_idx = ((i as i32) + 1 + *offset) as usize;
642                        if !self.is_skipped(target_idx) {
643                            block_starts.insert(target_idx);
644                            *incoming_edges.entry(target_idx).or_insert(0) += 1;
645                        }
646                        let next_idx = i + 1;
647                        if !self.is_skipped(next_idx) {
648                            block_starts.insert(next_idx);
649                            *incoming_edges.entry(next_idx).or_insert(0) += 1;
650                        }
651                    }
652                }
653                OpCode::LoopStart | OpCode::LoopEnd => {
654                    block_starts.insert(i);
655                    *incoming_edges.entry(i).or_insert(0) += 1;
656                    let next_idx = i + 1;
657                    if next_idx < self.program.instructions.len() && !self.is_skipped(next_idx) {
658                        block_starts.insert(next_idx);
659                        *incoming_edges.entry(next_idx).or_insert(0) += 1;
660                    }
661                }
662                OpCode::SetupTry => {
663                    if let Some(Operand::Offset(offset)) = &instr.operand {
664                        let catch_idx = ((i as i32) + 1 + *offset) as usize;
665                        if !self.is_skipped(catch_idx) {
666                            block_starts.insert(catch_idx);
667                            *incoming_edges.entry(catch_idx).or_insert(0) += 1;
668                        }
669                    }
670                }
671                _ => {}
672            }
673        }
674
675        // Find the first non-skipped instruction index to use as block 0.
676        // When stdlib is prepended, instruction 0 is in a skip range — we must
677        // start from the first instruction the JIT will actually compile.
678        let first_idx = (0..self.program.instructions.len())
679            .find(|&i| !self.is_skipped(i))
680            .unwrap_or(0);
681        block_starts.insert(first_idx);
682        *incoming_edges.entry(first_idx).or_insert(0) += 1;
683
684        for (i, instr) in self.program.instructions.iter().enumerate() {
685            if self.is_skipped(i) {
686                continue;
687            }
688            let is_terminator = matches!(
689                instr.opcode,
690                OpCode::Jump
691                    | OpCode::Return
692                    | OpCode::ReturnValue
693                    | OpCode::Break
694                    | OpCode::Continue
695                    | OpCode::Throw
696            );
697            let is_conditional = matches!(
698                instr.opcode,
699                OpCode::JumpIfFalse | OpCode::JumpIfFalseTrusted | OpCode::JumpIfTrue
700            );
701
702            if !is_terminator && !is_conditional {
703                let next_idx = i + 1;
704                if next_idx < self.program.instructions.len()
705                    && block_starts.contains(&next_idx)
706                    && !self.is_skipped(next_idx)
707                {
708                    *incoming_edges.entry(next_idx).or_insert(0) += 1;
709                }
710            }
711        }
712
713        if !self.blocks.contains_key(&first_idx) {
714            let block = self.builder.create_block();
715            self.blocks.insert(first_idx, block);
716        }
717
718        for (i, instr) in self.program.instructions.iter().enumerate() {
719            if self.is_skipped(i) {
720                continue;
721            }
722            match instr.opcode {
723                OpCode::Jump
724                | OpCode::JumpIfFalse
725                | OpCode::JumpIfFalseTrusted
726                | OpCode::JumpIfTrue => {
727                    if let Some(Operand::Offset(offset)) = &instr.operand {
728                        let target_idx = ((i as i32) + 1 + *offset) as usize;
729                        if !self.is_skipped(target_idx) && !self.blocks.contains_key(&target_idx) {
730                            let block = self.builder.create_block();
731                            let needs_merge_param = false;
732                            if needs_merge_param {
733                                self.builder.append_block_param(block, types::I64);
734                                self.merge_blocks.insert(target_idx);
735                            }
736                            self.blocks.insert(target_idx, block);
737                        }
738                    }
739                }
740                OpCode::LoopStart | OpCode::LoopEnd => {
741                    if !self.blocks.contains_key(&i) {
742                        let block = self.builder.create_block();
743                        self.blocks.insert(i, block);
744                    }
745                    let next_idx = i + 1;
746                    if next_idx < self.program.instructions.len()
747                        && !self.is_skipped(next_idx)
748                        && !self.blocks.contains_key(&next_idx)
749                    {
750                        let block = self.builder.create_block();
751                        let needs_merge_param = false;
752                        if needs_merge_param {
753                            self.builder.append_block_param(block, types::I64);
754                            self.merge_blocks.insert(next_idx);
755                        }
756                        self.blocks.insert(next_idx, block);
757                    }
758                }
759                OpCode::SetupTry => {
760                    if let Some(Operand::Offset(offset)) = &instr.operand {
761                        let catch_idx = ((i as i32) + 1 + *offset) as usize;
762                        if !self.is_skipped(catch_idx) && !self.blocks.contains_key(&catch_idx) {
763                            let block = self.builder.create_block();
764                            self.builder.append_block_param(block, types::I64);
765                            self.merge_blocks.insert(catch_idx);
766                            self.blocks.insert(catch_idx, block);
767                        }
768                    }
769                }
770                _ => {}
771            }
772            if matches!(
773                instr.opcode,
774                OpCode::JumpIfFalse | OpCode::JumpIfFalseTrusted | OpCode::JumpIfTrue
775            ) {
776                let next_idx = i + 1;
777                if !self.is_skipped(next_idx) && !self.blocks.contains_key(&next_idx) {
778                    let block = self.builder.create_block();
779                    let needs_merge_param = false;
780                    if needs_merge_param {
781                        self.builder.append_block_param(block, types::I64);
782                        self.merge_blocks.insert(next_idx);
783                    }
784                    self.blocks.insert(next_idx, block);
785                }
786            }
787        }
788    }
789
790    pub(crate) fn get_or_create_local(&mut self, idx: u16) -> Variable {
791        // Apply inline base offset to avoid caller/callee local collisions
792        let effective_idx = idx.wrapping_add(self.inline_local_base);
793        if let Some(var) = self.locals.get(&effective_idx) {
794            return *var;
795        }
796
797        let var = Variable::new(self.next_var);
798        self.next_var += 1;
799        self.builder.declare_var(var, types::I64);
800        self.locals.insert(effective_idx, var);
801        var
802    }
803
804    /// Analyze which functions are eligible for inlining at call sites.
805    ///
806    /// A function is an inline candidate if:
807    /// - It has < 80 bytecode instructions
808    /// - It is not a closure (no captured state)
809    /// - It does not use CallValue (closure calls need captured state)
810    /// - It is straight-line (no jumps, loops, or exception handlers)
811    /// Non-leaf functions (with Call/CallMethod/BuiltinCall) ARE allowed.
812    pub(crate) fn analyze_inline_candidates(program: &BytecodeProgram) -> HashMap<u16, InlineCandidate> {
813        let mut candidates = HashMap::new();
814        let num_funcs = program.functions.len();
815        if num_funcs == 0 {
816            return candidates;
817        }
818
819        for (fn_id, func) in program.functions.iter().enumerate() {
820            let fn_id = fn_id as u16;
821
822            // Skip closures — they have captured state
823            if func.is_closure || func.body_length == 0 {
824                continue;
825            }
826
827            let entry_point = func.entry_point;
828            let func_end = entry_point + func.body_length;
829            let instr_count = func.body_length;
830
831            // Skip if too large or out of bounds
832            if instr_count > 80 || instr_count == 0 {
833                continue;
834            }
835            if entry_point >= program.instructions.len() || func_end > program.instructions.len() {
836                continue;
837            }
838
839            let body = &program.instructions[entry_point..func_end];
840
841            // Allow non-leaf functions (functions that call other functions).
842            // Nested calls are handled by compile_call which respects inline_depth.
843            // Only exclude CallValue (closure calls need captured state management
844            // that may not be set up correctly in the inline namespace).
845            let has_closure_calls = body.iter().any(|i| matches!(i.opcode, OpCode::CallValue));
846            if has_closure_calls {
847                continue;
848            }
849
850            // Must be straight-line (no branches, loops, exception handling,
851            // or reference operations that create internal blocks)
852            let has_control_flow = body.iter().any(|i| {
853                matches!(
854                    i.opcode,
855                    OpCode::Jump
856                        | OpCode::JumpIfFalse
857                        | OpCode::JumpIfTrue
858                        | OpCode::LoopStart
859                        | OpCode::LoopEnd
860                        | OpCode::Break
861                        | OpCode::Continue
862                        | OpCode::SetupTry
863                        | OpCode::SetIndexRef  // Creates 4 internal blocks — cannot inline
864                        | OpCode::MakeRef      // Creates stack slots + ref tracking
865                        | OpCode::DerefLoad    // Reference dereference
866                        | OpCode::DerefStore // Reference write-through
867                )
868            });
869            if has_control_flow {
870                continue;
871            }
872
873            candidates.insert(
874                fn_id,
875                InlineCandidate {
876                    entry_point,
877                    instruction_count: instr_count,
878                    arity: func.arity,
879                    locals_count: func.locals_count,
880                },
881            );
882        }
883
884        candidates
885    }
886
887    /// Compile bytecode to kernel IR (simplified linear compilation).
888    ///
889    /// Kernel mode uses a simplified compilation path:
890    /// - Linear instruction stream (no complex control flow for V1)
891    /// - Returns i32 result code (0 = continue, 1 = done, negative = error)
892    /// - All data access goes through kernel_series_ptrs/kernel_state_ptr
893    /// Record a deopt point for a non-speculative guard (shape guards,
894    /// signal propagation, etc.).
895    ///
896    /// For speculative guards (arithmetic, property, call), prefer
897    /// `emit_deopt_point_with_spill()` which creates a per-guard spill
898    /// block that stores live locals and operand stack values to ctx_buf,
899    /// enabling the VM to resume execution at the exact guard failure
900    /// point instead of re-executing from function entry.
901    ///
902    /// `bytecode_ip` is sub-program-local (0-based within the function
903    /// slice); the caller in `compile_optimizing_function` rebases it
904    /// to global program IP after `take_deopt_points()`.
905    ///
906    /// # Returns
907    /// Stable deopt point id (index into `deopt_points`) for this guard site.
908    pub(crate) fn emit_deopt_point(
909        &mut self,
910        bytecode_ip: usize,
911        live_locals: &[u16],
912        local_kinds: &[SlotKind],
913    ) -> usize {
914        let deopt_id = self.deopt_points.len();
915        let deopt_info = DeoptInfo {
916            resume_ip: bytecode_ip,
917            local_mapping: live_locals
918                .iter()
919                .enumerate()
920                .map(|(jit_idx, &bc_idx)| (jit_idx as u16, bc_idx))
921                .collect(),
922            local_kinds: local_kinds.to_vec(),
923            stack_depth: 0, // Filled by caller if needed
924            innermost_function_id: None,
925            inline_frames: Vec::new(),
926        };
927        self.deopt_points.push(deopt_info);
928        deopt_id
929    }
930
    /// Record a deopt point with a per-guard spill block.
    ///
    /// Creates a dedicated Cranelift block that stores all live locals
    /// and operand stack values to ctx_buf, then jumps to the shared
    /// deopt block. The returned `(deopt_id, spill_block)` tuple lets
    /// the caller emit `brif(cond, cont, [], spill_block, [extra_vals])`.
    ///
    /// `extra_stack_values`: Cranelift Values that were popped from the
    /// JIT operand stack before the guard but must be on the interpreter
    /// stack at resume. Passed as block parameters to the spill block.
    ///
    /// Handles unboxed int/f64 locals (marks them with proper SlotKind)
    /// and multi-frame inline deopt (captures caller frame state).
    pub(crate) fn emit_deopt_point_with_spill(
        &mut self,
        bytecode_ip: usize,
        extra_stack_values: &[Value],
    ) -> (usize, Option<Block>) {
        let locals_count = self.func_locals_count;

        // Snapshot live locals for the innermost (current) frame.
        // When inlining, locals use inline_local_base offset keys.
        let inline_base = self.inline_local_base;
        let live_locals: Vec<(u16, Variable)> = self
            .locals
            .iter()
            .filter(|(idx, _)| {
                if inline_base > 0 {
                    // Inlined frame: only include locals in the current inline namespace
                    **idx >= inline_base && **idx < inline_base + 128
                } else {
                    **idx < 128 // cap at DEOPT_STACK_CTX_BASE
                }
            })
            .map(|(idx, var)| {
                // Map back to bytecode-local index (subtract inline base)
                let bc_idx = idx.wrapping_sub(inline_base);
                (bc_idx, *var)
            })
            .collect();

        // Determine SlotKind for each local based on unboxing state
        let local_kinds: Vec<SlotKind> = live_locals
            .iter()
            .map(|&(bc_idx, _)| {
                if self.unboxed_int_locals.contains(&bc_idx) {
                    SlotKind::Int64
                } else if self.unboxed_f64_locals.contains(&bc_idx) {
                    SlotKind::Float64
                } else {
                    SlotKind::NanBoxed // boxed local: NaN-boxed passthrough
                }
            })
            .collect();

        // Track which locals are unboxed for the spill emission
        // (the spill block must re-box these before handing them to the VM).
        let f64_locals: std::collections::HashSet<u16> = live_locals
            .iter()
            .filter(|&&(bc_idx, _)| self.unboxed_f64_locals.contains(&bc_idx))
            .map(|&(bc_idx, _)| bc_idx)
            .collect();
        let int_locals: std::collections::HashSet<u16> = live_locals
            .iter()
            .filter(|&&(bc_idx, _)| self.unboxed_int_locals.contains(&bc_idx))
            .map(|&(bc_idx, _)| bc_idx)
            .collect();

        // Resume-time stack = values still on the JIT operand stack plus the
        // values the caller already popped (passed in as extras).
        let on_stack_count = self.stack_depth;
        let extra_count = extra_stack_values.len();
        let total_stack_depth = on_stack_count + extra_count;

        // Build DeoptInfo with real data
        let deopt_id = self.deopt_points.len();
        let mut local_mapping = Vec::new();
        let mut all_kinds = Vec::new();

        // Locals: identity mapping (ctx_buf_position, bytecode_local_idx)
        for (i, &(bc_idx, _)) in live_locals.iter().enumerate() {
            local_mapping.push((bc_idx, bc_idx));
            all_kinds.push(local_kinds[i]);
        }
        // Operand stack: (DEOPT_STACK_CTX_BASE + i, locals_count + i)
        // Stack values are always NaN-boxed.
        for i in 0..total_stack_depth {
            local_mapping.push((128 + i as u16, locals_count + i as u16));
            all_kinds.push(SlotKind::NanBoxed); // operand stack: NaN-boxed passthrough
        }

        // Build inline_frames for multi-frame deopt
        let mut inline_frames = Vec::new();
        let mut deferred_inline_frames = Vec::new();
        if self.inline_depth > 0 {
            // Capture caller frame(s) from the inline_frame_stack.
            // inline_frame_stack is ordered outermost-first; DeoptInfo uses
            // the same outermost-first ordering ([0]=outermost physical function).
            // Caller frames are laid out in ctx_buf after the innermost frame's
            // locals and operand stack (which occupy up to offset 128 + stack).
            let mut ctx_buf_offset = live_locals.len() as u16 + total_stack_depth as u16 + 128;
            for ictx in self.inline_frame_stack.iter() {
                let frame_mapping: Vec<(u16, u16)> = ictx
                    .locals_snapshot
                    .iter()
                    .enumerate()
                    .map(|(j, &(bc_idx, _))| {
                        let ctx_pos = ctx_buf_offset + j as u16;
                        (ctx_pos, bc_idx)
                    })
                    .collect();
                let frame_kinds = ictx.local_kinds.clone();

                inline_frames.push(InlineFrameInfo {
                    function_id: ictx.function_id,
                    resume_ip: ictx.call_site_ip,
                    local_mapping: frame_mapping,
                    local_kinds: frame_kinds.clone(),
                    stack_depth: ictx.stack_depth as u16,
                });

                // Parallel record for the deferred spill emission: it needs the
                // actual Cranelift variables, not just the bytecode indices.
                deferred_inline_frames.push(super::types::DeferredInlineFrame {
                    live_locals: ictx.locals_snapshot.clone(),
                    local_kinds: frame_kinds,
                    f64_locals: ictx.f64_locals.clone(),
                    int_locals: ictx.int_locals.clone(),
                });

                // Next caller frame starts right after this frame's locals.
                ctx_buf_offset += ictx.locals_snapshot.len() as u16;
            }
        }

        // For multi-frame deopt, record the innermost (inlined callee) function ID
        // so the VM can push a synthetic frame for it.
        let innermost_function_id = if self.inline_depth > 0 {
            // The last entry on inline_frame_stack is the immediate caller.
            // The callee_fn_id of that entry is the function where the guard fired.
            self.inline_frame_stack.last().map(|ctx| ctx.callee_fn_id)
        } else {
            None
        };

        self.deopt_points.push(DeoptInfo {
            resume_ip: bytecode_ip,
            local_mapping,
            local_kinds: all_kinds,
            stack_depth: total_stack_depth as u16,
            innermost_function_id,
            inline_frames,
        });

        // Create per-guard spill block with block params for extra values
        let spill_block = self.builder.create_block();
        for _ in 0..extra_count {
            self.builder.append_block_param(spill_block, types::I64);
        }

        // Defer the spill block body emission to compile() epilogue
        self.deferred_spills.push(super::types::DeferredSpill {
            block: spill_block,
            deopt_id: deopt_id as u32,
            live_locals: live_locals.clone(),
            local_kinds,
            on_stack_count,
            extra_param_count: extra_count,
            f64_locals,
            int_locals,
            inline_frames: deferred_inline_frames,
        });

        (deopt_id, Some(spill_block))
    }
1098
1099    /// Return the deopt points accumulated during compilation.
1100    ///
1101    /// This transfers ownership of the collected deopt metadata out of the
1102    /// compiler so it can be attached to the compilation result.
1103    pub(crate) fn take_deopt_points(&mut self) -> Vec<DeoptInfo> {
1104        std::mem::take(&mut self.deopt_points)
1105    }
1106
    /// Verify deopt point metadata for consistency.
    ///
    /// Checks:
    /// - `local_mapping` and `local_kinds` have equal length
    /// - Unboxed locals are NOT tagged as `SlotKind::Unknown`
    /// - ctx_buf positions are within bounds
    ///
    /// Returns `Err` on validation failure, causing the JIT compile to abort
    /// and the function to fall back to the interpreter.
    pub(crate) fn verify_deopt_points(
        points: &[DeoptInfo],
        unboxed_ints: &std::collections::HashSet<u16>,
        unboxed_f64s: &std::collections::HashSet<u16>,
    ) -> Result<(), String> {
        // VM ctx_buf is 216 u64 words with locals starting at offset 8.
        // Max ctx_pos before overflow: 216 - 8 = 208.
        const CTX_BUF_LOCALS_MAX: u16 = 208;

        for (i, dp) in points.iter().enumerate() {
            // Length mismatch would cause out-of-bounds kind lookups below.
            if dp.local_mapping.len() != dp.local_kinds.len() {
                return Err(format!(
                    "DeoptInfo[{}]: local_mapping len {} != local_kinds len {}",
                    i,
                    dp.local_mapping.len(),
                    dp.local_kinds.len()
                ));
            }
            // Skip empty deopt points (generic fallback)
            if dp.local_mapping.is_empty() {
                continue;
            }
            for (j, &(ctx_pos, bc_idx)) in dp.local_mapping.iter().enumerate() {
                // Safe: lengths were verified equal above.
                let kind = dp.local_kinds[j];

                // ctx_buf bounds check: ctx_pos must fit within VM's ctx_buf
                if ctx_pos >= CTX_BUF_LOCALS_MAX {
                    return Err(format!(
                        "DeoptInfo[{}] mapping[{}]: ctx_pos {} exceeds ctx_buf limit {}",
                        i, j, ctx_pos, CTX_BUF_LOCALS_MAX
                    ));
                }

                // Precise deopt points must not use SlotKind::Unknown.
                // Boxed locals → NanBoxed, unboxed int → Int64, unboxed f64 → Float64.
                if kind == SlotKind::Unknown {
                    return Err(format!(
                        "DeoptInfo[{}] mapping[{}]: slot (ctx_pos={}, bc_idx={}) tagged as Unknown \
                         in precise deopt path — use NanBoxed, Int64, or Float64",
                        i, j, ctx_pos, bc_idx
                    ));
                }

                // Unboxed int locals must be tagged Int64.
                // The `ctx_pos < 128` guard restricts the check to local slots;
                // operand-stack slots (ctx_pos >= 128) are always NaN-boxed.
                if unboxed_ints.contains(&bc_idx)
                    && ctx_pos < 128
                    && kind != SlotKind::Int64
                {
                    return Err(format!(
                        "DeoptInfo[{}] mapping[{}]: unboxed int local {} tagged as {:?}, expected Int64",
                        i, j, bc_idx, kind
                    ));
                }
                // Unboxed f64 locals must be tagged Float64
                if unboxed_f64s.contains(&bc_idx)
                    && ctx_pos < 128
                    && kind != SlotKind::Float64
                {
                    return Err(format!(
                        "DeoptInfo[{}] mapping[{}]: unboxed f64 local {} tagged as {:?}, expected Float64",
                        i, j, bc_idx, kind
                    ));
                }
            }

            // Also verify inline frames
            for (fi, iframe) in dp.inline_frames.iter().enumerate() {
                if iframe.local_mapping.len() != iframe.local_kinds.len() {
                    return Err(format!(
                        "DeoptInfo[{}].inline_frames[{}]: local_mapping len {} != local_kinds len {}",
                        i, fi, iframe.local_mapping.len(), iframe.local_kinds.len()
                    ));
                }
                for (j, &(ctx_pos, bc_idx)) in iframe.local_mapping.iter().enumerate() {
                    if ctx_pos >= CTX_BUF_LOCALS_MAX {
                        return Err(format!(
                            "DeoptInfo[{}].inline_frames[{}] mapping[{}]: ctx_pos {} exceeds ctx_buf limit {}",
                            i, fi, j, ctx_pos, CTX_BUF_LOCALS_MAX
                        ));
                    }
                    // Defensive `.get()` here (vs direct index above): treat a
                    // missing kind as Unknown so it is rejected below.
                    let kind = iframe.local_kinds.get(j).copied().unwrap_or(SlotKind::Unknown);
                    if kind == SlotKind::Unknown {
                        return Err(format!(
                            "DeoptInfo[{}].inline_frames[{}] mapping[{}]: slot (ctx_pos={}, bc_idx={}) \
                             tagged as Unknown in precise path",
                            i, fi, j, ctx_pos, bc_idx
                        ));
                    }
                }
            }
        }
        Ok(())
    }
1209
1210    /// Attach a feedback vector snapshot for feedback-guided speculation.
1211    ///
1212    /// When set, the compiler consults IC feedback at each eligible bytecode
1213    /// site (call, property access, arithmetic) to emit speculative guards
1214    /// with typed fast paths. Guard failures branch to the deopt block.
1215    pub(crate) fn set_feedback(&mut self, feedback: FeedbackVector) {
1216        self.feedback = Some(feedback);
1217    }
1218
1219    /// Return the shape guard IDs accumulated during compilation.
1220    ///
1221    /// These should be registered as shape dependencies with the DeoptTracker
1222    /// so that shape transitions can invalidate stale JIT code.
1223    pub(crate) fn take_shape_guards(&mut self) -> Vec<shape_value::shape_graph::ShapeId> {
1224        std::mem::take(&mut self.shape_guards_emitted)
1225    }
1226
1227    pub fn compile_kernel(&mut self) -> Result<Value, String> {
1228        assert!(
1229            self.mode == CompilationMode::Kernel,
1230            "compile_kernel() requires kernel mode"
1231        );
1232
1233        // Kernel mode: simple linear instruction stream
1234        // For V1, we don't support complex control flow in kernels
1235        let instrs = self.program.instructions.clone();
1236        for (idx, instr) in instrs.iter().enumerate() {
1237            self.current_instr_idx = idx;
1238            self.compile_instruction(instr, idx)?;
1239        }
1240
1241        // Return result: 0 (continue) or value from stack converted to i32
1242        let result = if self.stack_depth > 0 {
1243            let val = self.stack_pop().unwrap();
1244            // Convert NaN-boxed value to i32 result code
1245            // If it's a number, truncate to i32; otherwise return 0
1246            self.builder.ins().ireduce(types::I32, val)
1247        } else {
1248            self.builder.ins().iconst(types::I32, 0)
1249        };
1250
1251        Ok(result)
1252    }
1253}
1254
// Unit tests live in a sibling file (via `#[path]`) to keep this module
// focused on compilation logic.
#[cfg(test)]
#[path = "compiler_tests.rs"]
mod tests;