//! tidepool_codegen/alloc.rs
//!
//! Inline Cranelift IR emission for the bump-pointer allocation fast path.
1use cranelift_codegen::ir::{self, types, InstBuilder, Value, MemFlags};
2use cranelift_frontend::FunctionBuilder;
3
/// Byte offset of the `alloc_ptr` field within VMContext (byte 0).
/// This is the bump pointer: the address of the next free byte in the heap.
/// It is loaded on every allocation and stored back after a successful bump.
const VMCTX_ALLOC_PTR_OFFSET: i32 = 0;
/// Byte offset of the `alloc_limit` field within VMContext (byte 8).
/// One-past-the-end of the currently usable heap region; a bumped pointer
/// strictly greater than this triggers the GC slow path.
const VMCTX_ALLOC_LIMIT_OFFSET: i32 = 8;
/// Byte offset of the `gc_trigger` field within VMContext (byte 16).
/// Holds a raw function pointer that is loaded and called indirectly
/// (with the VMContext as its only argument) on the allocation slow path.
const VMCTX_GC_TRIGGER_OFFSET: i32 = 16;
10
11/// Emit the alloc fast-path as inline Cranelift IR.
12///
13/// This is a bump-pointer allocation:
14/// 1. Load alloc_ptr from VMContext
15/// 2. new_ptr = alloc_ptr + size (8-byte aligned)
16/// 3. If new_ptr > alloc_limit: call gc_trigger (cold path), then retry
17/// 4. Store new_ptr as alloc_ptr
18/// 5. Return old alloc_ptr (start of allocated region)
19///
20/// `vmctx_val` is the SSA value holding the VMContext pointer.
21/// `size` is the number of bytes to allocate (will be rounded up to 8-byte alignment).
22/// `gc_trigger_sig` is the signature reference for the gc_trigger call.
23///
24/// Returns the SSA value pointing to the start of the allocated memory.
25pub fn emit_alloc_fast_path(
26    builder: &mut FunctionBuilder,
27    vmctx_val: Value,
28    size: u64,
29    gc_trigger_sig: ir::SigRef,
30) -> Value {
31    let aligned_size = (size + 7) & !7;
32    // SAFETY: We use `MemFlags::trusted()` because all VMContext accesses here are
33    // guaranteed safe by construction:
34    // - `vmctx_val` is the first parameter to this function and is always a valid
35    //   pointer to VMContext, provided by the embedding runtime under a fixed ABI.
36    // - VMContext is properly aligned, and the field offsets used below
37    //   (`VMCTX_ALLOC_PTR_OFFSET`, `VMCTX_ALLOC_LIMIT_OFFSET`,
38    //   `VMCTX_GC_TRIGGER_OFFSET`) correspond to a frozen layout that is
39    //   validated via const assertions.
40    // - All loads/stores are 64-bit and respect the alignment and bounds of these fields.
41    let flags = MemFlags::trusted();
42
43    // Load current alloc_ptr
44    let alloc_ptr = builder.ins().load(types::I64, flags, vmctx_val, VMCTX_ALLOC_PTR_OFFSET);
45
46    // Compute new_ptr = alloc_ptr + aligned_size
47    let size_val = builder.ins().iconst(types::I64, aligned_size as i64);
48    let new_ptr = builder.ins().iadd(alloc_ptr, size_val);
49
50    // Load alloc_limit
51    let alloc_limit = builder.ins().load(types::I64, flags, vmctx_val, VMCTX_ALLOC_LIMIT_OFFSET);
52
53    // Compare: new_ptr > alloc_limit
54    let overflow = builder.ins().icmp(ir::condcodes::IntCC::UnsignedGreaterThan, new_ptr, alloc_limit);
55
56    let slow_block = builder.create_block();
57    let fast_store_block = builder.create_block();
58    let continue_block = builder.create_block();
59    builder.append_block_param(continue_block, types::I64); // result ptr
60
61    builder.ins().brif(overflow, slow_block, &[], fast_store_block, &[]);
62
63    // --- Fast path: store new_ptr, jump to continue with old alloc_ptr ---
64    builder.switch_to_block(fast_store_block);
65    builder.seal_block(fast_store_block);
66    builder.ins().store(flags, new_ptr, vmctx_val, VMCTX_ALLOC_PTR_OFFSET);
67    builder.ins().jump(continue_block, &[alloc_ptr]);
68
69    // --- Slow path: call gc_trigger, retry alloc ---
70    builder.switch_to_block(slow_block);
71    builder.seal_block(slow_block);
72
73    let gc_trigger_ptr = builder.ins().load(types::I64, flags, vmctx_val, VMCTX_GC_TRIGGER_OFFSET);
74    builder.ins().call_indirect(gc_trigger_sig, gc_trigger_ptr, &[vmctx_val]);
75
76    // After GC: reload alloc_ptr and alloc_limit, bump, check, then store or trap.
77    let post_gc_ptr = builder.ins().load(types::I64, flags, vmctx_val, VMCTX_ALLOC_PTR_OFFSET);
78    let post_gc_limit = builder.ins().load(types::I64, flags, vmctx_val, VMCTX_ALLOC_LIMIT_OFFSET);
79    let post_gc_new = builder.ins().iadd(post_gc_ptr, size_val);
80    let post_gc_overflow = builder
81        .ins()
82        .icmp(ir::condcodes::IntCC::UnsignedGreaterThan, post_gc_new, post_gc_limit);
83
84    let slow_fail_block = builder.create_block();
85    let slow_store_block = builder.create_block();
86
87    builder
88        .ins()
89        .brif(post_gc_overflow, slow_fail_block, &[], slow_store_block, &[]);
90
91    // Slow path success: store new alloc_ptr and continue.
92    builder.switch_to_block(slow_store_block);
93    builder.seal_block(slow_store_block);
94    builder.ins().store(flags, post_gc_new, vmctx_val, VMCTX_ALLOC_PTR_OFFSET);
95    builder.ins().jump(continue_block, &[post_gc_ptr]);
96
97    // Slow path failure: allocation still does not fit, trap.
98    builder.switch_to_block(slow_fail_block);
99    builder.seal_block(slow_fail_block);
100    // Use a generic trap code for heap overflow.
101    builder.ins().trap(ir::TrapCode::unwrap_user(1));
102
103    // --- Continue: result is the old alloc_ptr from whichever path ---
104    builder.switch_to_block(continue_block);
105    builder.seal_block(continue_block);
106
107    builder.block_params(continue_block)[0]
108}