#![cfg(feature = "jit")]
use fusevm::{ChunkBuilder, JitCompiler, Op, TraceMetadata, VMResult, Value, VM};
/// Builds a chunk that counts slot 0 from 0 up to `limit` and leaves the
/// final counter on the stack. Returns the chunk and the loop-anchor
/// position (the back-edge target).
fn build_counter_loop(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    // i = 0
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    // loop anchor: ++i; if i < limit goto anchor
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let backedge = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(backedge, anchor);
    // result: the final counter value
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
/// Grows the current frame's slot vector to at least `n` entries,
/// filling any new slots with `Int(0)`.
fn ensure_slots(vm: &mut VM, n: usize) {
    let frame = vm.frames.last_mut().unwrap();
    let have = frame.slots.len();
    if have < n {
        frame
            .slots
            .extend(std::iter::repeat_with(|| Value::Int(0)).take(n - have));
    }
}
#[test]
fn trace_compiles_and_runs_hot_counter() {
    // 200 iterations is comfortably above the hotness threshold.
    let (chunk, anchor) = build_counter_loop(200);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 200, "loop should count to limit");
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace at anchor {} should have compiled after hot loop",
        anchor
    );
    assert!(
        !compiler.trace_is_blacklisted(&chunk, anchor),
        "trace should not be blacklisted on golden path"
    );
}
#[test]
fn cold_loop_below_threshold_does_not_compile() {
    // 20 iterations stays under the hotness threshold: no trace expected.
    let (chunk, anchor) = build_counter_loop(20);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let _ = machine.run();
    let compiler = JitCompiler::new();
    assert!(
        !compiler.trace_is_compiled(&chunk, anchor),
        "cold loop should not produce a compiled trace"
    );
}
#[test]
fn tracing_disabled_by_default() {
    // Very hot loop, but enable_tracing_jit() is never called.
    let (chunk, anchor) = build_counter_loop(500);
    let mut machine = VM::new(chunk.clone());
    ensure_slots(&mut machine, 1);
    let _ = machine.run();
    let compiler = JitCompiler::new();
    assert!(
        !compiler.trace_is_compiled(&chunk, anchor),
        "tracing JIT must be opt-in: a VM with default settings should never compile a trace"
    );
}
#[test]
fn second_run_reuses_compiled_trace() {
    let (chunk, anchor) = build_counter_loop(200);
    // First run warms and installs the trace; the VM is then dropped.
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace should be in cache after first run"
    );
    // Second run on a fresh VM must still produce the right answer.
    let mut rerun = VM::new(chunk.clone());
    rerun.enable_tracing_jit();
    ensure_slots(&mut rerun, 1);
    let final_i = match rerun.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 200);
}
#[test]
fn float_slot_at_anchor_triggers_guard_mismatch() {
    // Warm a plain int counter loop so its trace is installed.
    let (chunk, anchor) = build_counter_loop(150);
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    assert!(compiler.trace_is_compiled(&chunk, anchor));
    // Now build a loop whose frame holds a Float in slot 0 while the
    // counter lives in slot 1 — the trace itself never touches slot 0.
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadFloat(0.0), 1);
    builder.emit(Op::SetSlot(0), 1);
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(1), 1);
    let float_anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(1), 1);
    builder.emit(Op::GetSlot(1), 1);
    builder.emit(Op::LoadInt(120), 1);
    builder.emit(Op::NumLt, 1);
    let backedge = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(backedge, float_anchor);
    builder.emit(Op::GetSlot(1), 1);
    let float_chunk = builder.build();
    let mut machine = VM::new(float_chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 2);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 120, "loop must still produce correct result");
    assert!(
        compiler.trace_is_compiled(&float_chunk, float_anchor),
        "trace touching only int slot should compile despite a float slot 0 \
         in the frame — entry guard only covers slots the trace references"
    );
}
#[test]
// NOTE(review): the test name says the body "aborts recording", yet the final
// assertion requires the trace to BE compiled. Either the name is stale
// (Pop/Nop turned out to be trace-eligible) or the assertion polarity is
// inverted — confirm against JitCompiler's eligibility rules and fix one side.
fn ineligible_loop_body_aborts_recording() {
    // Loop body: ++slot0; push 0; pop; nop; then the usual close test at 80.
    let mut b = ChunkBuilder::new();
    b.emit(Op::LoadInt(0), 1);
    b.emit(Op::SetSlot(0), 1);
    let anchor = b.current_pos();
    b.emit(Op::PreIncSlotVoid(0), 1);
    b.emit(Op::LoadInt(0), 1);
    // Stack-neutral filler ops inside the body.
    b.emit(Op::Pop, 1); b.emit(Op::Nop, 1); b.emit(Op::GetSlot(0), 1);
    b.emit(Op::LoadInt(80), 1);
    b.emit(Op::NumLt, 1);
    let jmp = b.emit(Op::JumpIfTrue(0), 1);
    b.patch_jump(jmp, anchor);
    b.emit(Op::GetSlot(0), 1);
    let chunk = b.build();
    let mut vm = VM::new(chunk.clone());
    vm.enable_tracing_jit();
    ensure_slots(&mut vm, 1);
    let result = vm.run();
    // Correctness of the interpreted result is checked regardless of JIT state.
    assert_eq!(
        match result {
            VMResult::Ok(Value::Int(n)) => n,
            _ => unreachable!(),
        },
        80
    );
    let jit = JitCompiler::new();
    assert!(jit.trace_is_compiled(&chunk, anchor));
}
#[test]
fn is_trace_eligible_rejects_non_closing_last_op() {
    let compiler = JitCompiler::new();
    // Last op is not a jump at all.
    let no_close = vec![Op::LoadInt(1), Op::LoadInt(2), Op::Add];
    assert!(!compiler.is_trace_eligible(&no_close, 0));
    // Last op jumps somewhere other than the anchor.
    let wrong_target = vec![Op::LoadInt(1), Op::JumpIfTrue(99)];
    assert!(!compiler.is_trace_eligible(&wrong_target, 0));
    // Canonical counter-loop body that closes back to the anchor.
    let good = vec![
        Op::PreIncSlotVoid(0),
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(compiler.is_trace_eligible(&good, 0));
}
#[test]
fn is_trace_eligible_rejects_internal_backward_jumps() {
    let compiler = JitCompiler::new();
    // A conditional jump back to the anchor in the middle of the body
    // (index 1) must disqualify the trace.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::JumpIfTrue(0),
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(!compiler.is_trace_eligible(&body, 0));
}
/// Builds a counter loop whose body calls a zero-arg helper ("seven")
/// that just returns the constant 7 and discards it. Layout: a jump over
/// the helper body, then the main loop. Returns the chunk and loop anchor.
fn build_loop_with_constant_helper(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    let name = builder.add_name("seven");
    let skip = builder.emit(Op::Jump(0), 1);
    // helper body: return 7
    let helper_entry = builder.current_pos();
    builder.emit(Op::LoadInt(7), 1);
    builder.emit(Op::ReturnValue, 1);
    builder.add_sub_entry(name, helper_entry);
    let main_start = builder.current_pos();
    builder.patch_jump(skip, main_start);
    // main: i = 0; loop { ++i; seven(); pop; } while i < limit
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    builder.emit(Op::Call(name, 0), 1);
    builder.emit(Op::Pop, 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let backedge = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(backedge, anchor);
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
#[test]
fn inlined_constant_helper_compiles_and_runs() {
    let (chunk, anchor) = build_loop_with_constant_helper(180);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 180);
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace should compile with inlined helper call"
    );
    assert!(!compiler.trace_is_blacklisted(&chunk, anchor));
}
/// Builds a counter loop whose body calls a one-arg helper ("double") that
/// returns its argument times two; the result is discarded. Returns the
/// chunk and the loop anchor.
fn build_loop_with_argpassing_helper(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    let name = builder.add_name("double");
    let skip = builder.emit(Op::Jump(0), 1);
    // helper body: arg -> slot 0; return slot0 * 2
    let helper_entry = builder.current_pos();
    builder.emit(Op::SetSlot(0), 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(2), 1);
    builder.emit(Op::Mul, 1);
    builder.emit(Op::ReturnValue, 1);
    builder.add_sub_entry(name, helper_entry);
    let main_start = builder.current_pos();
    builder.patch_jump(skip, main_start);
    // main: i = 0; loop { ++i; double(i); pop; } while i < limit
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::Call(name, 1), 1);
    builder.emit(Op::Pop, 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let backedge = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(backedge, anchor);
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
#[test]
fn inlined_arg_passing_helper_runs_correctly() {
    let (chunk, anchor) = build_loop_with_argpassing_helper(160);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 160);
    let compiler = JitCompiler::new();
    assert!(compiler.trace_is_compiled(&chunk, anchor));
}
#[test]
fn call_builtin_in_loop_aborts_recording() {
    // Builtin stub registered under id 7; always returns Int(0).
    fn zero_builtin(_vm: &mut VM, _argc: u8) -> Value {
        Value::Int(0)
    }
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    // Builtin call inside the hot body — should poison trace recording.
    builder.emit(Op::CallBuiltin(7, 0), 1);
    builder.emit(Op::Pop, 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(120), 1);
    builder.emit(Op::NumLt, 1);
    let backedge = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(backedge, anchor);
    builder.emit(Op::GetSlot(0), 1);
    let chunk = builder.build();
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    machine.register_builtin(7, zero_builtin);
    ensure_slots(&mut machine, 1);
    let _ = machine.run();
    let compiler = JitCompiler::new();
    assert!(
        !compiler.trace_is_compiled(&chunk, anchor),
        "Op::CallBuiltin in the loop body must abort recording"
    );
}
#[test]
fn is_trace_eligible_rejects_unbalanced_frames() {
    let compiler = JitCompiler::new();
    // Call with no matching ReturnValue before the close: frame overflow.
    let overflow = vec![
        Op::PreIncSlotVoid(0),
        Op::Call(0, 0),
        Op::LoadInt(0),
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(!compiler.is_trace_eligible(&overflow, 0));
    // ReturnValue with no matching Call: frame underflow.
    let underflow = vec![Op::PreIncSlotVoid(0), Op::ReturnValue, Op::JumpIfTrue(0)];
    assert!(!compiler.is_trace_eligible(&underflow, 0));
}
#[test]
fn is_trace_eligible_accepts_callee_with_internal_branch() {
    let compiler = JitCompiler::new();
    // The inlined callee carries a forward branch of its own; the Call /
    // ReturnValue pair is still balanced, so the trace remains eligible.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::Call(0, 0),
        Op::LoadInt(1),
        Op::JumpIfFalse(99),
        Op::LoadInt(0),
        Op::ReturnValue,
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(compiler.is_trace_eligible(&body, 0));
}
#[test]
fn is_trace_eligible_accepts_balanced_inlined_call() {
    let compiler = JitCompiler::new();
    // Inlined one-arg callee body (SetSlot..ReturnValue) bracketed by a
    // balanced Call — the whole recording should remain eligible.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::Call(0, 1),
        Op::SetSlot(0),
        Op::LoadInt(2),
        Op::GetSlot(0),
        Op::Mul,
        Op::ReturnValue,
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(compiler.is_trace_eligible(&body, 0));
}
#[test]
fn is_trace_eligible_rejects_callbuiltin() {
    let compiler = JitCompiler::new();
    // Any CallBuiltin anywhere in the recording disqualifies it.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::CallBuiltin(0, 0),
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(!compiler.is_trace_eligible(&body, 0));
}
#[test]
fn is_trace_eligible_accepts_caller_internal_branch() {
    let compiler = JitCompiler::new();
    // A forward conditional branch inside the caller frame is allowed.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::LoadInt(1),
        Op::JumpIfFalse(99),
        Op::Nop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(compiler.is_trace_eligible(&body, 0));
}
#[test]
fn is_trace_eligible_rejects_keep_variants() {
    let compiler = JitCompiler::new();
    // "Keep" jump variants (which leave the condition on the stack) are
    // not supported inside a recording.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::LoadInt(1),
        Op::JumpIfTrueKeep(99),
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(!compiler.is_trace_eligible(&body, 0));
}
#[test]
fn is_trace_eligible_rejects_caller_backward_jump_to_anchor() {
    let compiler = JitCompiler::new();
    // An unconditional jump back to the anchor mid-body must be rejected.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::Jump(0),
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(!compiler.is_trace_eligible(&body, 0));
}
/// Builds a counter loop containing an internal branch whose outcome is
/// stable after the first increment (`i > 0` is always true), exercising
/// caller-frame branches during trace recording. Returns chunk + anchor.
fn build_loop_with_stable_branch(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    // if i > 0 { nop }  — taken on every iteration
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::NumGt, 1);
    let if_jmp = builder.emit(Op::JumpIfFalse(0), 1);
    builder.emit(Op::Nop, 1);
    let after_if = builder.current_pos();
    builder.patch_jump(if_jmp, after_if);
    // close: while i < limit goto anchor
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let close = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(close, anchor);
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
#[test]
fn loop_with_caller_internal_branch_compiles_and_runs() {
    let (chunk, anchor) = build_loop_with_stable_branch(140);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 140);
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace with internal caller-frame branch should compile in phase 3"
    );
    assert!(!compiler.trace_is_blacklisted(&chunk, anchor));
}
/// Builds a counter loop with a branch conditioned on slot 1 (set by the
/// caller before running): when slot 1 is truthy the loop increments slot 0
/// an extra time per iteration. Returns the chunk and the loop anchor.
fn build_loop_with_data_dependent_branch(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    // if slot1 { ++slot0 }  — data-dependent, set externally
    builder.emit(Op::GetSlot(1), 1);
    let if_jmp = builder.emit(Op::JumpIfFalse(0), 1);
    builder.emit(Op::PreIncSlotVoid(0), 1);
    let after_if = builder.current_pos();
    builder.patch_jump(if_jmp, after_if);
    // close: while slot0 < limit goto anchor
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let close = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(close, anchor);
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
// Builds a counter loop whose body calls a one-arg helper ("clamp_pos") that
// itself branches: it returns its argument when positive, otherwise 0. Two
// forward jumps inside the helper are patched to form the if/else diamond.
// Returns the chunk and the loop-anchor position.
fn build_loop_with_branching_helper(limit: i64) -> (fusevm::Chunk, usize) {
    let mut b = ChunkBuilder::new();
    let name = b.add_name("clamp_pos");
    // Jump over the helper body so top-level execution starts at main.
    let skip = b.emit(Op::Jump(0), 1);
    let helper_entry = b.current_pos();
    // helper: arg -> slot 0
    b.emit(Op::SetSlot(0), 1);
    // if arg > 0 ...
    b.emit(Op::GetSlot(0), 1);
    b.emit(Op::LoadInt(0), 1);
    b.emit(Op::NumGt, 1);
    let jif = b.emit(Op::JumpIfFalse(0), 1);
    // then-arm: result is the argument itself
    b.emit(Op::GetSlot(0), 1);
    let after_if_jmp = b.emit(Op::Jump(0), 1);
    // else-arm: result is 0
    let else_arm = b.current_pos();
    b.patch_jump(jif, else_arm);
    b.emit(Op::LoadInt(0), 1);
    // join point: both arms fall into ReturnValue
    let after_if = b.current_pos();
    b.patch_jump(after_if_jmp, after_if);
    b.emit(Op::ReturnValue, 1);
    b.add_sub_entry(name, helper_entry);
    let main_start = b.current_pos();
    b.patch_jump(skip, main_start);
    // main: i = 0; loop { ++i; clamp_pos(i); pop; } while i < limit
    b.emit(Op::LoadInt(0), 1);
    b.emit(Op::SetSlot(0), 1);
    let anchor = b.current_pos();
    b.emit(Op::PreIncSlotVoid(0), 1);
    b.emit(Op::GetSlot(0), 1); b.emit(Op::Call(name, 1), 1);
    b.emit(Op::Pop, 1);
    b.emit(Op::GetSlot(0), 1);
    b.emit(Op::LoadInt(limit), 1);
    b.emit(Op::NumLt, 1);
    let close = b.emit(Op::JumpIfTrue(0), 1);
    b.patch_jump(close, anchor);
    // final result: the counter
    b.emit(Op::GetSlot(0), 1);
    (b.build(), anchor)
}
/// Builds a counter loop that parks an extra Int(1) on the stack across an
/// internal branch; both arms pop it before the close test. Exercises a
/// non-empty abstract stack at a branch point. Returns chunk + anchor.
fn build_loop_with_stack_at_branch(limit: i64) -> (fusevm::Chunk, usize) {
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    // Park a value on the stack before branching.
    builder.emit(Op::LoadInt(1), 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::NumGt, 1);
    let take_branch = builder.emit(Op::JumpIfTrue(0), 1);
    // fallthrough arm: drop the parked value, skip the other arm
    builder.emit(Op::Pop, 1);
    let skip_alt = builder.emit(Op::Jump(0), 1);
    // taken arm: also drops the parked value
    let alt = builder.current_pos();
    builder.patch_jump(take_branch, alt);
    builder.emit(Op::Pop, 1);
    // rejoin, then the close test
    let rejoin = builder.current_pos();
    builder.patch_jump(skip_alt, rejoin);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(limit), 1);
    builder.emit(Op::NumLt, 1);
    let close = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(close, anchor);
    builder.emit(Op::GetSlot(0), 1);
    (builder.build(), anchor)
}
#[test]
fn loop_with_stack_at_internal_branch_compiles_and_runs() {
    let (chunk, anchor) = build_loop_with_stack_at_branch(150);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 150);
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace with non-empty abstract stack at branch should compile in phase 5"
    );
}
#[test]
fn is_trace_eligible_accepts_branch_with_int_stack() {
    let compiler = JitCompiler::new();
    // A branch taken while an Int sits on the abstract stack is acceptable.
    let body = vec![
        Op::PreIncSlotVoid(0),
        Op::LoadInt(7),
        Op::GetSlot(0),
        Op::LoadInt(0),
        Op::NumGt,
        Op::JumpIfTrue(99),
        Op::Pop,
        Op::GetSlot(0),
        Op::LoadInt(10),
        Op::NumLt,
        Op::JumpIfTrue(0),
    ];
    assert!(compiler.is_trace_eligible(&body, 0));
}
#[test]
// NOTE(review): this test never asserts anything about the side-exit count —
// the value is fetched and discarded on the last line, so the "no bump when
// handled" claim in the name is not actually verified. Decide the expected
// count for the chained-dispatch case and assert it.
fn chained_dispatch_observable_via_side_exit_count_no_bump_when_handled() {
    let (chunk, anchor) = build_loop_with_data_dependent_branch(220);
    // Warm-up run with slot 1 = 1 so recording takes the "then" arm.
    {
        let mut vm = VM::new(chunk.clone());
        vm.enable_tracing_jit();
        ensure_slots(&mut vm, 2);
        vm.frames.last_mut().unwrap().slots[1] = Value::Int(1);
        let _ = vm.run();
    }
    let jit = JitCompiler::new();
    assert!(jit.trace_is_compiled(&chunk, anchor));
    // Second run with slot 1 left at 0, so the recorded branch flips.
    let mut vm2 = VM::new(chunk.clone());
    vm2.enable_tracing_jit();
    ensure_slots(&mut vm2, 2);
    let _ = vm2.run();
    // Count is queried but intentionally(?) unchecked — see NOTE above.
    let _ = jit.trace_side_exit_count(&chunk, anchor);
}
#[test]
fn trace_loop_anchors_returns_metadata() {
    let (chunk, anchor) = build_counter_loop(140);
    // Warm the loop so a trace gets installed.
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    let pair = compiler.trace_loop_anchors(&chunk, anchor);
    assert!(
        pair.is_some(),
        "anchors should be queryable for installed trace"
    );
    let (recorded_anchor, fallthrough) = pair.unwrap();
    assert_eq!(recorded_anchor, anchor);
    // The loop's fall-through exit lies past the anchor in the op stream.
    assert!(fallthrough > anchor);
}
#[test]
fn side_trace_install_with_kind_distinct_record_and_close() {
    // Warm a trace, export its metadata, then re-install it under a
    // synthetic record anchor distinct from the close anchor.
    let (chunk, anchor) = build_counter_loop(120);
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    let meta = compiler
        .trace_export(&chunk, anchor)
        .expect("export should succeed");
    let record_anchor = anchor.wrapping_add(1000);
    let accepted = compiler.trace_install_with_kind(
        &chunk,
        record_anchor,
        meta.anchor_ip,
        meta.fallthrough_ip,
        &meta.ops,
        &meta.recorded_ips,
        &meta.slot_kinds_at_anchor,
    );
    assert!(
        accepted,
        "trace_install_with_kind should accept record/close anchor split"
    );
    assert!(
        compiler.trace_is_compiled(&chunk, record_anchor),
        "installed trace should be queryable at the synthetic record_anchor"
    );
}
#[test]
fn loop_with_float_stack_at_branch_compiles_and_runs() {
    // Same shape as build_loop_with_stack_at_branch, but the value parked
    // on the stack across the branch is a Float rather than an Int.
    let mut builder = ChunkBuilder::new();
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::SetSlot(0), 1);
    let anchor = builder.current_pos();
    builder.emit(Op::PreIncSlotVoid(0), 1);
    builder.emit(Op::LoadFloat(0.5), 1);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(0), 1);
    builder.emit(Op::NumGt, 1);
    let take_branch = builder.emit(Op::JumpIfTrue(0), 1);
    builder.emit(Op::Pop, 1);
    let skip_alt = builder.emit(Op::Jump(0), 1);
    let alt = builder.current_pos();
    builder.patch_jump(take_branch, alt);
    builder.emit(Op::Pop, 1);
    let rejoin = builder.current_pos();
    builder.patch_jump(skip_alt, rejoin);
    builder.emit(Op::GetSlot(0), 1);
    builder.emit(Op::LoadInt(120), 1);
    builder.emit(Op::NumLt, 1);
    let close = builder.emit(Op::JumpIfTrue(0), 1);
    builder.patch_jump(close, anchor);
    builder.emit(Op::GetSlot(0), 1);
    let chunk = builder.build();
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int, got {:?}", other),
    };
    assert_eq!(final_i, 120);
    let jit_compiler = JitCompiler::new();
    assert!(
        jit_compiler.trace_is_compiled(&chunk, anchor),
        "trace with Float on abstract stack at branch should compile in phase 5b"
    );
}
#[test]
fn side_exit_count_observable_via_jit_compiler() {
    let (chunk, anchor) = build_loop_with_data_dependent_branch(120);
    // Record with slot 1 = 1 (then-arm taken).
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 2);
        warmup.frames.last_mut().unwrap().slots[1] = Value::Int(1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    assert!(compiler.trace_is_compiled(&chunk, anchor));
    // Replay with slot 1 = 0 so the recorded branch flips every iteration.
    let mut replay = VM::new(chunk.clone());
    replay.enable_tracing_jit();
    ensure_slots(&mut replay, 2);
    let _ = replay.run();
    let side_exits = compiler.trace_side_exit_count(&chunk, anchor);
    assert!(
        side_exits > 0,
        "expected mid-trace side-exits to be observable; got {}",
        side_exits
    );
}
#[test]
fn trace_metadata_roundtrip_via_export_import() {
    // Exported metadata must survive a JSON round-trip and re-import.
    let (chunk, anchor) = build_counter_loop(180);
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    let meta: TraceMetadata = compiler
        .trace_export(&chunk, anchor)
        .expect("trace should be exportable after install");
    let serialized = serde_json::to_string(&meta).expect("TraceMetadata should serialize");
    let roundtripped: TraceMetadata =
        serde_json::from_str(&serialized).expect("TraceMetadata should deserialize");
    assert_eq!(roundtripped.chunk_op_hash, chunk.op_hash);
    assert_eq!(roundtripped.anchor_ip, anchor);
    assert_eq!(roundtripped.ops, meta.ops);
    assert_eq!(roundtripped.recorded_ips, meta.recorded_ips);
    assert!(compiler.trace_import(&chunk, &roundtripped));
}
#[test]
fn trace_import_rejects_chunk_hash_mismatch() {
    let (chunk_a, anchor) = build_counter_loop(120);
    {
        let mut warmup = VM::new(chunk_a.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 1);
        let _ = warmup.run();
    }
    let compiler = JitCompiler::new();
    let mut meta = compiler
        .trace_export(&chunk_a, anchor)
        .expect("trace should be exportable");
    // Corrupt the hash so the metadata no longer matches the chunk.
    meta.chunk_op_hash = meta.chunk_op_hash.wrapping_add(1);
    assert!(
        !compiler.trace_import(&chunk_a, &meta),
        "import must reject when chunk_op_hash mismatches"
    );
}
#[test]
fn callee_with_internal_branch_compiles_and_runs() {
    let (chunk, anchor) = build_loop_with_branching_helper(120);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let final_i = match machine.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int result, got {:?}", other),
    };
    assert_eq!(final_i, 120);
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace inlining a branching callee should compile in phase 4"
    );
    assert!(!compiler.trace_is_blacklisted(&chunk, anchor));
}
#[test]
fn deopt_from_callee_materializes_frame_correctly() {
    let (chunk, anchor) = build_loop_with_branching_helper(80);
    let mut machine = VM::new(chunk.clone());
    machine.enable_tracing_jit();
    ensure_slots(&mut machine, 1);
    let _ = machine.run();
    let compiler = JitCompiler::new();
    assert!(compiler.trace_is_compiled(&chunk, anchor));
    // Only the root frame may remain once the loop finishes.
    assert_eq!(
        machine.frames.len(),
        1,
        "frame stack must be balanced after loop completes (no leaked synthetic frames)"
    );
}
#[test]
fn side_exit_fires_when_branch_flips() {
    let (chunk, anchor) = build_loop_with_data_dependent_branch(160);
    // Recording run: slot 1 = 1, so the then-arm is baked into the trace.
    {
        let mut warmup = VM::new(chunk.clone());
        warmup.enable_tracing_jit();
        ensure_slots(&mut warmup, 2);
        warmup.frames.last_mut().unwrap().slots[1] = Value::Int(1);
        let final_i = match warmup.run() {
            VMResult::Ok(Value::Int(n)) => n,
            other => panic!("expected Int, got {:?}", other),
        };
        assert_eq!(final_i, 160);
    }
    let compiler = JitCompiler::new();
    assert!(
        compiler.trace_is_compiled(&chunk, anchor),
        "trace must install during the recording run"
    );
    // Replay run: slot 1 stays 0, so every iteration side-exits the trace.
    let mut replay = VM::new(chunk.clone());
    replay.enable_tracing_jit();
    ensure_slots(&mut replay, 2);
    let final_i = match replay.run() {
        VMResult::Ok(Value::Int(n)) => n,
        other => panic!("expected Int, got {:?}", other),
    };
    assert_eq!(
        final_i, 160,
        "side-exit cleanup must restore correct slot state for the interpreter"
    );
}