//! Bounds-check generation for Wasm linear-memory (and GC heap) accesses.
//!
//! Given a heap, an index into it, and a static or dynamic offset and access
//! size, the helpers here emit CLIF that either proves the access in bounds,
//! relies on virtual-memory guard pages, or traps (or redirects the access to
//! address zero) when it is out of bounds, and then computes the final native
//! address of the access.

use crate::{
Reachability,
func_environ::FuncEnvironment,
translate::{HeapData, TargetEnvironment},
};
use Reachability::*;
use cranelift_codegen::{
cursor::{Cursor, FuncCursor},
ir::{self, InstBuilder, RelSourceLoc, condcodes::IntCC},
ir::{Expr, Fact},
};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::Unsigned;

/// The kind of bounds check to perform when translating a heap access.
#[derive(Debug)]
pub enum BoundsCheck {
    /// Check that `index .. index + offset + access_size` is in bounds of the
    /// heap.
    StaticOffset { offset: u32, access_size: u8 },

    /// Check that an object of `object_size` bytes at `index` is in bounds and
    /// compute the address of the field at the static `offset` within it;
    /// `offset + access_size` must not exceed `object_size`.
    #[cfg(feature = "gc")]
    StaticObjectField {
        offset: u32,
        access_size: u8,
        object_size: u32,
    },

    /// Like `StaticObjectField`, but the field offset and object size are
    /// dynamic values: the whole object is bounds checked and the address of
    /// the field at `offset` within it is computed.
    #[cfg(feature = "gc")]
    DynamicObjectField {
        offset: ir::Value,
        object_size: ir::Value,
    },
}
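
/// Emit whatever bounds checks `bounds_check` describes for an access at
/// `index` into `heap` and return the native address of the access.
///
/// Returns `Unreachable` if the access is statically out of bounds and an
/// unconditional trap was emitted, in which case the current block can no
/// longer be reached.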
pub fn bounds_check_and_compute_addr(
builder: &mut FunctionBuilder,
env: &mut FuncEnvironment<'_>,
heap: &HeapData,
index: ir::Value,
bounds_check: BoundsCheck,
trap: ir::TrapCode,
) -> Reachability<ir::Value> {
match bounds_check {
BoundsCheck::StaticOffset {
offset,
access_size,
} => bounds_check_field_access(builder, env, heap, index, offset, access_size, trap),
#[cfg(feature = "gc")]
BoundsCheck::StaticObjectField {
offset,
access_size,
object_size,
} => {
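            // The field access must lie within the object. If the object's
            // size fits in a `u8`, bounds check the whole object at once and
            // derive the field address from the object's base; otherwise fall
            // back to bounds checking just this field access.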
let offset_and_access_size = offset.checked_add(access_size.into()).unwrap();
assert!(offset_and_access_size <= object_size);
if let Ok(object_size) = u8::try_from(object_size) {
let obj_ptr = match bounds_check_field_access(
builder,
env,
heap,
index,
0,
object_size,
trap,
) {
Reachable(v) => v,
u @ Unreachable => return u,
};
let offset = builder.ins().iconst(env.pointer_type(), i64::from(offset));
let field_ptr = builder.ins().iadd(obj_ptr, offset);
return Reachable(field_ptr);
}
bounds_check_field_access(builder, env, heap, index, offset, access_size, trap)
}
#[cfg(feature = "gc")]
BoundsCheck::DynamicObjectField {
offset,
object_size,
} => {
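            // Check that the whole object, `index .. index + object_size`, is
            // in bounds (trapping if the addition overflows), then derive the
            // field's address by walking backwards from the pointer just past
            // the end of the object.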
assert_eq!(heap.index_type(), ir::types::I32);
assert_eq!(builder.func.dfg.value_type(index), ir::types::I32);
assert_eq!(builder.func.dfg.value_type(offset), ir::types::I32);
assert_eq!(builder.func.dfg.value_type(object_size), ir::types::I32);
let index_and_object_size = builder.ins().uadd_overflow_trap(index, object_size, trap);
let ptr_just_after_obj = match bounds_check_field_access(
builder,
env,
heap,
index_and_object_size,
0,
0,
trap,
) {
Reachable(v) => v,
u @ Unreachable => return u,
};
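
            // field_ptr = (base + index + object_size) - (object_size - offset)
            //           = base + index + offset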
let backwards_offset = builder.ins().isub(object_size, offset);
let backwards_offset = cast_index_to_pointer_ty(
backwards_offset,
ir::types::I32,
env.pointer_type(),
false,
&mut builder.cursor(),
trap,
);
let field_ptr = builder.ins().isub(ptr_just_after_obj, backwards_offset);
Reachable(field_ptr)
}
}
}
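
/// Bounds check a `heap` access of `access_size` bytes at `index + offset` and
/// compute its native address, choosing the cheapest check that the memory's
/// configuration allows (eliding it entirely, relying on guard pages, or
/// comparing against the dynamic heap bound).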
fn bounds_check_field_access(
builder: &mut FunctionBuilder,
env: &mut FuncEnvironment<'_>,
heap: &HeapData,
index: ir::Value,
offset: u32,
access_size: u8,
trap: ir::TrapCode,
) -> Reachability<ir::Value> {
let pointer_bit_width = u16::try_from(env.pointer_type().bits()).unwrap();
let bound_gv = heap.bound;
let orig_index = index;
let clif_memory_traps_enabled = env.clif_memory_traps_enabled();
let spectre_mitigations_enabled =
env.heap_access_spectre_mitigation() && clif_memory_traps_enabled;
let pcc = env.proof_carrying_code();
let host_page_size_log2 = env.target_config().page_size_align_log2;
let can_use_virtual_memory = heap
.memory
.can_use_virtual_memory(env.tunables(), host_page_size_log2)
&& clif_memory_traps_enabled;
let can_elide_bounds_check = heap
.memory
.can_elide_bounds_check(env.tunables(), host_page_size_log2)
&& clif_memory_traps_enabled;
let memory_guard_size = env.tunables().memory_guard_size;
let memory_reservation = env.tunables().memory_reservation;
let offset_and_size = offset_plus_size(offset, access_size);
let statically_in_bounds = statically_in_bounds(&builder.func, heap, index, offset_and_size);
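
    // Convert the index to the native pointer type, trapping if a 64-bit
    // index does not fit in a 32-bit pointer.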
let index = cast_index_to_pointer_ty(
index,
heap.index_type(),
env.pointer_type(),
heap.pcc_memory_type.is_some(),
&mut builder.cursor(),
trap,
);
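
    // Decide what to do when the access is out of bounds: fold the check into
    // the address via a (possibly Spectre-guarded) select that redirects the
    // access to address zero, or emit an explicit conditional trap.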
let oob_behavior = if spectre_mitigations_enabled {
OobBehavior::ConditionallyLoadFromZero {
select_spectre_guard: true,
}
} else if env.load_from_zero_allowed() {
OobBehavior::ConditionallyLoadFromZero {
select_spectre_guard: false,
}
} else {
OobBehavior::ExplicitTrap
};
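
    // Emit the out-of-bounds comparison itself and, when proof-carrying code
    // is enabled, attach facts describing the comparison to the involved
    // values so the access can be verified later.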
let make_compare = |builder: &mut FunctionBuilder,
compare_kind: IntCC,
lhs: ir::Value,
lhs_off: Option<i64>,
rhs: ir::Value,
rhs_off: Option<i64>| {
let result = builder.ins().icmp(compare_kind, lhs, rhs);
if pcc {
builder.func.dfg.facts[orig_index] = Some(Fact::Def { value: orig_index });
if index != orig_index {
builder.func.dfg.facts[index] = Some(Fact::value(pointer_bit_width, orig_index));
}
builder.func.dfg.facts[lhs] = Some(Fact::value_offset(
pointer_bit_width,
orig_index,
lhs_off.unwrap(),
));
if let Some(rhs) = builder.func.dfg.facts[rhs]
.as_ref()
.and_then(|f| f.as_symbol())
{
builder.func.dfg.facts[result] = Some(Fact::Compare {
kind: compare_kind,
lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
rhs: Expr::offset(rhs, rhs_off.unwrap()).unwrap(),
});
}
if let Some(k) = builder.func.dfg.facts[rhs]
.as_ref()
.and_then(|f| f.as_const(pointer_bit_width))
{
builder.func.dfg.facts[result] = Some(Fact::Compare {
kind: compare_kind,
lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
rhs: Expr::constant((k as i64).checked_add(rhs_off.unwrap()).unwrap()),
});
}
}
result
};
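
    // Special case: trap immediately if `offset + access_size` exceeds the
    // memory's maximum size, since the access is out of bounds regardless of
    // the index.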
if offset_and_size > heap.memory.maximum_byte_size().unwrap_or(u64::MAX) {
env.before_unconditionally_trapping_memory_access(builder);
env.trap(builder, trap);
return Unreachable;
}
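
    // Likewise, trap if `offset + access_size` cannot even be represented in
    // the target's address space.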
if pointer_bit_width < 64 && offset_and_size >= (1 << pointer_bit_width) {
env.before_unconditionally_trapping_memory_access(builder);
env.trap(builder, trap);
return Unreachable;
}
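
    // Special case: the bounds check can be elided entirely for 32-bit
    // indexed memories when the reservation plus guard region covers every
    // reachable address:
    //
    //     index <= u32::MAX
    //           <= memory_reservation + memory_guard_size - (offset + access_size)
    //
    // so any access either lands in the accessible region or faults in the
    // guard pages.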
if can_elide_bounds_check
&& u64::from(u32::MAX) <= memory_reservation + memory_guard_size - offset_and_size
{
assert!(heap.index_type() == ir::types::I32);
assert!(
can_use_virtual_memory,
"static memories require the ability to use virtual memory"
);
return Reachable(compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
AddrPcc::static32(heap.pcc_memory_type, memory_reservation + memory_guard_size),
));
}
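
    // Special case: the index is a constant and the whole access is
    // statically within the memory's minimum size, so no check is needed.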
if statically_in_bounds {
return Reachable(compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
AddrPcc::static32(heap.pcc_memory_type, memory_reservation + memory_guard_size),
));
}
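
    // Special case: the memory cannot move, so its whole `memory_reservation`
    // region stays mapped. Compare the index against the constant
    //
    //     memory_reservation - (offset + access_size)
    //
    // instead of loading the dynamic bound, and let the virtual-memory
    // subsystem catch accesses beyond the memory's current size.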
if can_use_virtual_memory
&& heap.memory.minimum_byte_size().unwrap_or(u64::MAX) <= memory_reservation
&& !heap.memory.memory_may_move(env.tunables())
{
let adjusted_bound = memory_reservation.checked_sub(offset_and_size).unwrap();
let adjusted_bound_value = builder
.ins()
.iconst(env.pointer_type(), adjusted_bound as i64);
if pcc {
builder.func.dfg.facts[adjusted_bound_value] =
Some(Fact::constant(pointer_bit_width, adjusted_bound));
}
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
adjusted_bound_value,
Some(0),
);
return Reachable(explicit_check_oob_condition_and_compute_addr(
env,
builder,
heap,
index,
offset,
access_size,
oob_behavior,
AddrPcc::static32(heap.pcc_memory_type, memory_reservation),
oob,
trap,
));
}
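
    // Special case for when `offset + access_size == 1`:
    //
    //         index + 1 > bound
    //     ==> index >= bound
    //
    // (This shortcut is not used on Pulley targets.)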
if offset_and_size == 1 && !env.is_pulley() {
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThanOrEqual,
index,
Some(0),
bound,
Some(0),
);
return Reachable(explicit_check_oob_condition_and_compute_addr(
env,
builder,
heap,
index,
offset,
access_size,
oob_behavior,
AddrPcc::dynamic(heap.pcc_memory_type, bound_gv),
oob,
trap,
));
}
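
    // Special case: the guard region is large enough to cover
    // `offset + access_size`, so it suffices to check
    //
    //     index > bound
    //
    // and let any access that spills into the guard pages fault.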
if can_use_virtual_memory && offset_and_size <= memory_guard_size {
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
bound,
Some(0),
);
return Reachable(explicit_check_oob_condition_and_compute_addr(
env,
builder,
heap,
index,
offset,
access_size,
oob_behavior,
AddrPcc::dynamic(heap.pcc_memory_type, bound_gv),
oob,
trap,
));
}
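
    // Special case for when `offset + access_size <= min_size`: since
    // `bound >= min_size`, the subtraction
    //
    //     bound - (offset + access_size)
    //
    // cannot underflow, so compare `index` directly against that adjusted
    // bound.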
if offset_and_size <= heap.memory.minimum_byte_size().unwrap_or(u64::MAX) {
let bound = get_dynamic_heap_bound(builder, env, heap);
let adjustment = offset_and_size as i64;
let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment);
if pcc {
builder.func.dfg.facts[adjustment_value] =
Some(Fact::constant(pointer_bit_width, offset_and_size));
}
let adjusted_bound = builder.ins().isub(bound, adjustment_value);
if pcc {
builder.func.dfg.facts[adjusted_bound] = Some(Fact::global_value_offset(
pointer_bit_width,
bound_gv,
-adjustment,
));
}
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
adjusted_bound,
Some(adjustment),
);
return Reachable(explicit_check_oob_condition_and_compute_addr(
env,
builder,
heap,
index,
offset,
access_size,
oob_behavior,
AddrPcc::dynamic(heap.pcc_memory_type, bound_gv),
oob,
trap,
));
}
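
    // General case: compute `index + offset + access_size`, trapping if that
    // addition overflows, and compare the result against the dynamic bound.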
let access_size_val = builder
.ins()
.iconst(env.pointer_type(), offset_and_size as i64);
if pcc {
builder.func.dfg.facts[access_size_val] =
Some(Fact::constant(pointer_bit_width, offset_and_size));
}
let adjusted_index = env.uadd_overflow_trap(builder, index, access_size_val, trap);
if pcc {
builder.func.dfg.facts[adjusted_index] = Some(Fact::value_offset(
pointer_bit_width,
index,
i64::try_from(offset_and_size).unwrap(),
));
}
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
adjusted_index,
i64::try_from(offset_and_size).ok(),
bound,
Some(0),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
env,
builder,
heap,
index,
offset,
access_size,
oob_behavior,
AddrPcc::dynamic(heap.pcc_memory_type, bound_gv),
oob,
trap,
))
}
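
/// Get the heap's bound as an `ir::Value`, using a constant when the heap has
/// a static size and proof-carrying code is disabled (PCC needs the symbolic
/// global value for its facts).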
fn get_dynamic_heap_bound(
builder: &mut FunctionBuilder,
env: &mut FuncEnvironment<'_>,
heap: &HeapData,
) -> ir::Value {
let enable_pcc = heap.pcc_memory_type.is_some();
let (value, gv) = match heap.memory.static_heap_size() {
Some(max_size) if !enable_pcc => (
builder.ins().iconst(env.pointer_type(), max_size as i64),
heap.bound,
),
_ => (
builder.ins().global_value(env.pointer_type(), heap.bound),
heap.bound,
),
};
if enable_pcc {
builder.func.dfg.facts[value] = Some(Fact::global_value(
u16::try_from(env.pointer_type().bits()).unwrap(),
gv,
));
}
value
}
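
/// Cast the Wasm heap index to the native pointer type, zero-extending a
/// narrower index and trapping if a wider (64-bit) index does not fit in a
/// 32-bit pointer.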
fn cast_index_to_pointer_ty(
index: ir::Value,
index_ty: ir::Type,
pointer_ty: ir::Type,
pcc: bool,
pos: &mut FuncCursor,
trap: ir::TrapCode,
) -> ir::Value {
if index_ty == pointer_ty {
return index;
}
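
    // A 64-bit index on a 32-bit target: the access is out of bounds whenever
    // the upper 32 bits are non-zero, so trap on them and keep the low bits.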
if index_ty.bits() > pointer_ty.bits() {
assert_eq!(index_ty, ir::types::I64);
assert_eq!(pointer_ty, ir::types::I32);
let low_bits = pos.ins().ireduce(pointer_ty, index);
let c32 = pos.ins().iconst(pointer_ty, 32);
let high_bits = pos.ins().ushr(index, c32);
let high_bits = pos.ins().ireduce(pointer_ty, high_bits);
pos.ins().trapnz(high_bits, trap);
return low_bits;
}
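
    // Zero-extend the narrower index, record its value range for PCC, and add
    // a value-label alias so debug info can still track the original index.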
let extended_index = pos.ins().uextend(pointer_ty, index);
if pcc {
pos.func.dfg.facts[extended_index] = Some(Fact::max_range_for_width_extended(
u16::try_from(index_ty.bits()).unwrap(),
u16::try_from(pointer_ty.bits()).unwrap(),
));
}
let loc = pos.srcloc();
let loc = RelSourceLoc::from_base_offset(pos.func.params.base_srcloc(), loc);
pos.func
.stencil
.dfg
.add_value_label_alias(extended_index, loc, index);
extended_index
}
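
/// Which proof-carrying-code facts to attach while computing a heap address:
/// either a statically sized memory of the given byte size, or a dynamically
/// sized memory bounded by the given global value.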
#[derive(Clone, Copy, Debug)]
enum AddrPcc {
Static32(ir::MemoryType, u64),
Dynamic(ir::MemoryType, ir::GlobalValue),
}
impl AddrPcc {
fn static32(memory_type: Option<ir::MemoryType>, size: u64) -> Option<Self> {
memory_type.map(|ty| AddrPcc::Static32(ty, size))
}
fn dynamic(memory_type: Option<ir::MemoryType>, bound: ir::GlobalValue) -> Option<Self> {
memory_type.map(|ty| AddrPcc::Dynamic(ty, bound))
}
}

/// What to do when an access is out of bounds.
enum OobBehavior {
    /// Emit an explicit, conditional trap instruction.
    ExplicitTrap,
    /// Fold the out-of-bounds condition into the address computation, turning
    /// the access into a load/store of address zero (which then faults).
    ConditionallyLoadFromZero {
        /// Whether to use `select_spectre_guard` rather than a plain `select`
        /// when choosing between the real address and zero.
        select_spectre_guard: bool,
    },
}
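
/// Emit the consequence of the given out-of-bounds condition (an explicit trap
/// or a conditional redirect to address zero) and compute the access's final
/// native address.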
fn explicit_check_oob_condition_and_compute_addr(
env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
heap: &HeapData,
index: ir::Value,
offset: u32,
access_size: u8,
oob_behavior: OobBehavior,
pcc: Option<AddrPcc>,
oob_condition: ir::Value,
trap: ir::TrapCode,
) -> ir::Value {
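    // With `ExplicitTrap` the check is emitted up front; otherwise the address
    // is computed and then conditionally replaced with zero below.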
if let OobBehavior::ExplicitTrap = oob_behavior {
env.trapnz(builder, oob_condition, trap);
}
let addr_ty = env.pointer_type();
let mut addr = compute_addr(&mut builder.cursor(), heap, addr_ty, index, offset, pcc);
if let OobBehavior::ConditionallyLoadFromZero {
select_spectre_guard,
} = oob_behavior
{
assert!(env.load_from_zero_allowed());
let null = builder.ins().iconst(addr_ty, 0);
addr = if select_spectre_guard {
builder
.ins()
.select_spectre_guard(oob_condition, null, addr)
} else {
builder.ins().select(oob_condition, null, addr)
};
match pcc {
None => {}
Some(AddrPcc::Static32(ty, size)) => {
builder.func.dfg.facts[null] =
Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
builder.func.dfg.facts[addr] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: size.checked_sub(u64::from(access_size)).unwrap(),
nullable: true,
});
}
Some(AddrPcc::Dynamic(ty, gv)) => {
builder.func.dfg.facts[null] =
Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
builder.func.dfg.facts[addr] = Some(Fact::DynamicMem {
ty,
min: Expr::constant(0),
max: Expr::offset(
&Expr::global_value(gv),
i64::try_from(env.tunables().memory_guard_size)
.unwrap()
.checked_sub(i64::from(access_size))
.unwrap(),
)
.unwrap(),
nullable: true,
});
}
}
}
addr
}
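
/// Emit code for `heap_base + index + offset`, attaching the appropriate
/// proof-carrying-code facts along the way when requested.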
fn compute_addr(
pos: &mut FuncCursor,
heap: &HeapData,
addr_ty: ir::Type,
index: ir::Value,
offset: u32,
pcc: Option<AddrPcc>,
) -> ir::Value {
debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);
let heap_base = pos.ins().global_value(addr_ty, heap.base);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _size)) => {
pos.func.dfg.facts[heap_base] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: 0,
nullable: false,
});
}
Some(AddrPcc::Dynamic(ty, _limit)) => {
pos.func.dfg.facts[heap_base] = Some(Fact::dynamic_base_ptr(ty));
}
}
let base_and_index = pos.ins().iadd(heap_base, index);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
if let Some(idx) = pos.func.dfg.facts[index]
.as_ref()
.and_then(|f| f.as_symbol())
.cloned()
{
pos.func.dfg.facts[base_and_index] = Some(Fact::DynamicMem {
ty,
min: idx.clone(),
max: idx,
nullable: false,
});
} else {
pos.func.dfg.facts[base_and_index] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: u64::from(u32::MAX),
nullable: false,
});
}
}
}
if offset == 0 {
base_and_index
} else {
let offset_val = pos.ins().iconst(addr_ty, i64::from(offset));
if pcc.is_some() {
pos.func.dfg.facts[offset_val] = Some(Fact::constant(
u16::try_from(addr_ty.bits()).unwrap(),
u64::from(offset),
));
}
let result = pos.ins().iadd(base_and_index, offset_val);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
if let Some(idx) = pos.func.dfg.facts[index]
.as_ref()
.and_then(|f| f.as_symbol())
{
pos.func.dfg.facts[result] = Some(Fact::DynamicMem {
ty,
min: idx.clone(),
max: Expr::offset(idx, i64::from(offset)).unwrap(),
nullable: false,
});
} else {
pos.func.dfg.facts[result] = Some(Fact::Mem {
ty,
min_offset: u64::from(offset),
max_offset: u64::from(u32::MAX) + u64::from(offset),
nullable: false,
});
}
}
}
result
}
}
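
/// The sum of a static offset and access size, widened to `u64` so it cannot
/// overflow.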
#[inline]
fn offset_plus_size(offset: u32, size: u8) -> u64 {
offset as u64 + size as u64
}
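
/// Returns whether `index` is a constant such that
/// `index + offset_and_size` is within the memory's minimum byte size, i.e.
/// the access is statically known to be in bounds.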
fn statically_in_bounds(
func: &ir::Function,
heap: &HeapData,
index: ir::Value,
offset_and_size: u64,
) -> bool {
func.dfg
.value_def(index)
.inst()
.and_then(|i| {
let imm = match func.dfg.insts[i] {
ir::InstructionData::UnaryImm {
opcode: ir::Opcode::Iconst,
imm,
} => imm,
_ => return None,
};
let ty = func.dfg.value_type(index);
let index = imm.zero_extend_from_width(ty.bits()).bits().unsigned();
let final_addr = index.checked_add(offset_and_size)?;
Some(final_addr <= heap.memory.minimum_byte_size().unwrap_or(u64::MAX))
})
.unwrap_or(false)
}