use super::Reachability;
use crate::{FuncEnvironment, HeapData, HeapStyle};
use cranelift_codegen::{
    cursor::{Cursor, FuncCursor},
    ir::{self, condcodes::IntCC, Expr, Fact, InstBuilder, RelSourceLoc},
};
use cranelift_frontend::FunctionBuilder;
use wasmtime_types::WasmResult;
use Reachability::*;
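
/// Bounds-check a heap access of `access_size` bytes at `index + offset` and
/// compute its native address.
///
/// Emits whatever comparison, trap, or Spectre-guarded select the heap's
/// style and the statically-known `offset` and `access_size` require, and
/// attaches proof-carrying-code facts when the environment asks for them.
/// Returns `Unreachable` when the access is statically out of bounds and an
/// unconditional trap has been emitted instead.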
pub fn bounds_check_and_compute_addr<Env>(
builder: &mut FunctionBuilder,
env: &mut Env,
heap: &HeapData,
index: ir::Value,
offset: u32,
access_size: u8,
) -> WasmResult<Reachability<ir::Value>>
where
Env: FuncEnvironment + ?Sized,
{
let pointer_bit_width = u16::try_from(env.pointer_type().bits()).unwrap();
let orig_index = index;
let index = cast_index_to_pointer_ty(
index,
heap.index_type,
env.pointer_type(),
heap.memory_type.is_some(),
&mut builder.cursor(),
);
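    // The last byte accessed is at `index + offset + access_size - 1`, so the
    // quantity we bounds-check against is `offset + access_size` (which cannot
    // overflow: a `u32` plus a `u8` always fits in a `u64`).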
let offset_and_size = offset_plus_size(offset, access_size);
let spectre_mitigations_enabled = env.heap_access_spectre_mitigation();
let pcc = env.proof_carrying_code();
let host_page_size_log2 = env.target_config().page_size_align_log2;
let can_use_virtual_memory = heap.page_size_log2 >= host_page_size_log2;
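    // Emit the out-of-bounds comparison itself and, when proof-carrying code
    // is enabled, attach facts describing the operands (as offsets from the
    // original index) and the comparison result.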
let make_compare = |builder: &mut FunctionBuilder,
compare_kind: IntCC,
lhs: ir::Value,
lhs_off: Option<i64>,
rhs: ir::Value,
rhs_off: Option<i64>| {
let result = builder.ins().icmp(compare_kind, lhs, rhs);
if pcc {
builder.func.dfg.facts[orig_index] = Some(Fact::Def { value: orig_index });
if index != orig_index {
builder.func.dfg.facts[index] = Some(Fact::value(pointer_bit_width, orig_index));
}
builder.func.dfg.facts[lhs] = Some(Fact::value_offset(
pointer_bit_width,
orig_index,
lhs_off.unwrap(),
));
if let Some(rhs) = builder.func.dfg.facts[rhs]
.as_ref()
.and_then(|f| f.as_symbol())
{
builder.func.dfg.facts[result] = Some(Fact::Compare {
kind: compare_kind,
lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
rhs: Expr::offset(rhs, rhs_off.unwrap()).unwrap(),
});
}
if let Some(k) = builder.func.dfg.facts[rhs]
.as_ref()
.and_then(|f| f.as_const(pointer_bit_width))
{
builder.func.dfg.facts[result] = Some(Fact::Compare {
kind: compare_kind,
lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
rhs: Expr::constant((k as i64).checked_add(rhs_off.unwrap()).unwrap()),
});
}
}
result
};
Ok(match heap.style {
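        // Dynamic heap, single-byte access: `index >= bound` is exactly the
        // out-of-bounds condition, with no offset arithmetic needed.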
HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => {
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThanOrEqual,
index,
Some(0),
bound,
Some(0),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
access_size,
spectre_mitigations_enabled,
AddrPcc::dynamic(heap.memory_type, bound_gv),
oob,
))
}
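        // Dynamic heap where `offset + access_size` fits within the
        // offset-guard region: it suffices to check `index > bound`, since an
        // index that passes the check can overrun only into guard pages,
        // which fault. This relies on the guard pages actually being
        // unmapped, hence the `can_use_virtual_memory` condition.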
HeapStyle::Dynamic { bound_gv }
if can_use_virtual_memory && offset_and_size <= heap.offset_guard_size =>
{
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
bound,
Some(0),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
access_size,
spectre_mitigations_enabled,
AddrPcc::dynamic(heap.memory_type, bound_gv),
oob,
))
}
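        // Dynamic heap where `offset + access_size` is no larger than the
        // heap's minimum size: compare `index` against
        // `bound - (offset + access_size)`, a subtraction that cannot
        // underflow because the bound is always at least the minimum size.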
HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size.into() => {
let bound = get_dynamic_heap_bound(builder, env, heap);
let adjustment = offset_and_size as i64;
let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment);
if pcc {
builder.func.dfg.facts[adjustment_value] =
Some(Fact::constant(pointer_bit_width, offset_and_size));
}
let adjusted_bound = builder.ins().isub(bound, adjustment_value);
if pcc {
builder.func.dfg.facts[adjusted_bound] = Some(Fact::global_value_offset(
pointer_bit_width,
bound_gv,
-adjustment,
));
}
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
adjusted_bound,
Some(adjustment),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
access_size,
spectre_mitigations_enabled,
AddrPcc::dynamic(heap.memory_type, bound_gv),
oob,
))
}
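        // General dynamic case: add `offset + access_size` to the index with
        // an overflow trap, then check the adjusted index against the bound.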
HeapStyle::Dynamic { bound_gv } => {
let access_size_val = builder
.ins()
.iconst(env.pointer_type(), offset_and_size as i64);
if pcc {
builder.func.dfg.facts[access_size_val] =
Some(Fact::constant(pointer_bit_width, offset_and_size));
}
let adjusted_index = builder.ins().uadd_overflow_trap(
index,
access_size_val,
ir::TrapCode::HeapOutOfBounds,
);
if pcc {
builder.func.dfg.facts[adjusted_index] = Some(Fact::value_offset(
pointer_bit_width,
index,
i64::try_from(offset_and_size).unwrap(),
));
}
let bound = get_dynamic_heap_bound(builder, env, heap);
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
adjusted_index,
i64::try_from(offset_and_size).ok(),
bound,
Some(0),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
access_size,
spectre_mitigations_enabled,
AddrPcc::dynamic(heap.memory_type, bound_gv),
oob,
))
}
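        // Static heap and the access is out of bounds for any index: trap
        // unconditionally.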
HeapStyle::Static { bound } if offset_and_size > bound.into() => {
assert!(
can_use_virtual_memory,
"static memories require the ability to use virtual memory"
);
env.before_unconditionally_trapping_memory_access(builder)?;
builder.ins().trap(ir::TrapCode::HeapOutOfBounds);
Unreachable
}
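        // Static heap with a 32-bit index where even the maximum index stays
        // within `bound + offset_guard_size - (offset + access_size)`: every
        // possible access either is in bounds or lands in the guard region,
        // so no explicit check is needed.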
HeapStyle::Static { bound }
if can_use_virtual_memory
&& heap.index_type == ir::types::I32
&& u64::from(u32::MAX)
<= u64::from(bound) + u64::from(heap.offset_guard_size) - offset_and_size =>
{
assert!(
can_use_virtual_memory,
"static memories require the ability to use virtual memory"
);
Reachable(compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
AddrPcc::static32(
heap.memory_type,
u64::from(bound) + u64::from(heap.offset_guard_size),
),
))
}
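        // General static case: check `index` against
        // `bound - (offset + access_size)`. The subtraction cannot underflow
        // because the statically-out-of-bounds case above already handled
        // `offset + access_size > bound`.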
HeapStyle::Static { bound } => {
assert!(
can_use_virtual_memory,
"static memories require the ability to use virtual memory"
);
let adjusted_bound = u64::from(bound) - offset_and_size;
let adjusted_bound_value = builder
.ins()
.iconst(env.pointer_type(), adjusted_bound as i64);
if pcc {
builder.func.dfg.facts[adjusted_bound_value] =
Some(Fact::constant(pointer_bit_width, adjusted_bound));
}
let oob = make_compare(
builder,
IntCC::UnsignedGreaterThan,
index,
Some(0),
adjusted_bound_value,
Some(0),
);
Reachable(explicit_check_oob_condition_and_compute_addr(
&mut builder.cursor(),
heap,
env.pointer_type(),
index,
offset,
access_size,
spectre_mitigations_enabled,
AddrPcc::static32(heap.memory_type, u64::from(bound)),
oob,
))
}
})
}
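
/// Get the bound of a dynamic heap as an `ir::Value`, attaching a
/// proof-carrying-code fact to it when the heap has a memory type.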
fn get_dynamic_heap_bound<Env>(
builder: &mut FunctionBuilder,
env: &mut Env,
heap: &HeapData,
) -> ir::Value
where
Env: FuncEnvironment + ?Sized,
{
let enable_pcc = heap.memory_type.is_some();
let (value, gv) = match (heap.max_size, &heap.style) {
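        // If the minimum and maximum sizes coincide, the bound is a
        // compile-time constant and can be materialized directly; under PCC
        // we still load the global value so that a fact can be attached to it.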
(Some(max_size), HeapStyle::Dynamic { bound_gv })
if heap.min_size == max_size && !enable_pcc =>
{
(
builder.ins().iconst(env.pointer_type(), max_size as i64),
*bound_gv,
)
}
(_, HeapStyle::Dynamic { bound_gv }) => (
builder.ins().global_value(env.pointer_type(), *bound_gv),
*bound_gv,
),
(_, HeapStyle::Static { .. }) => unreachable!("not a dynamic heap"),
};
if enable_pcc {
builder.func.dfg.facts[value] = Some(Fact::global_value(
u16::try_from(env.pointer_type().bits()).unwrap(),
gv,
));
}
value
}
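
/// Zero-extend `index` to the target's pointer type if it is narrower,
/// attaching a range fact under PCC and recording a value-label alias back to
/// the original index for debug info.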
fn cast_index_to_pointer_ty(
index: ir::Value,
index_ty: ir::Type,
pointer_ty: ir::Type,
pcc: bool,
pos: &mut FuncCursor,
) -> ir::Value {
if index_ty == pointer_ty {
return index;
}
assert!(index_ty.bits() < pointer_ty.bits());
let extended_index = pos.ins().uextend(pointer_ty, index);
if pcc {
pos.func.dfg.facts[extended_index] = Some(Fact::max_range_for_width_extended(
u16::try_from(index_ty.bits()).unwrap(),
u16::try_from(pointer_ty.bits()).unwrap(),
));
}
let loc = pos.srcloc();
let loc = RelSourceLoc::from_base_offset(pos.func.params.base_srcloc(), loc);
pos.func
.stencil
.dfg
.add_value_label_alias(extended_index, loc, index);
extended_index
}
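
/// Proof-carrying-code context for an address computation: the heap's memory
/// type plus either its static size or the global value holding its dynamic
/// bound. `None` at use sites means PCC is disabled for this heap.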
#[derive(Clone, Copy, Debug)]
enum AddrPcc {
Static32(ir::MemoryType, u64),
Dynamic(ir::MemoryType, ir::GlobalValue),
}
impl AddrPcc {
fn static32(memory_type: Option<ir::MemoryType>, size: u64) -> Option<Self> {
memory_type.map(|ty| AddrPcc::Static32(ty, size))
}
fn dynamic(memory_type: Option<ir::MemoryType>, bound: ir::GlobalValue) -> Option<Self> {
memory_type.map(|ty| AddrPcc::Dynamic(ty, bound))
}
}
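
/// Emit a check of `oob_condition` and compute the native address for the
/// access.
///
/// Without Spectre mitigations this is an explicit conditional trap; with
/// them, the address is computed unconditionally and replaced with a null
/// pointer via `select_spectre_guard` when the condition holds, so the access
/// faults even under misspeculation.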
fn explicit_check_oob_condition_and_compute_addr(
pos: &mut FuncCursor,
heap: &HeapData,
addr_ty: ir::Type,
index: ir::Value,
offset: u32,
access_size: u8,
spectre_mitigations_enabled: bool,
pcc: Option<AddrPcc>,
oob_condition: ir::Value,
) -> ir::Value {
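    // Without Spectre mitigations, trap directly on the out-of-bounds
    // condition before computing the address.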
if !spectre_mitigations_enabled {
pos.ins()
.trapnz(oob_condition, ir::TrapCode::HeapOutOfBounds);
}
let mut addr = compute_addr(pos, heap, addr_ty, index, offset, pcc);
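    // With Spectre mitigations, guard the computed address: select a null
    // pointer instead whenever the out-of-bounds condition holds, and
    // describe the (possibly null) result with a PCC fact.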
if spectre_mitigations_enabled {
let null = pos.ins().iconst(addr_ty, 0);
addr = pos.ins().select_spectre_guard(oob_condition, null, addr);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, size)) => {
pos.func.dfg.facts[null] =
Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
pos.func.dfg.facts[addr] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: size.checked_sub(u64::from(access_size)).unwrap(),
nullable: true,
});
}
Some(AddrPcc::Dynamic(ty, gv)) => {
pos.func.dfg.facts[null] =
Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
pos.func.dfg.facts[addr] = Some(Fact::DynamicMem {
ty,
min: Expr::constant(0),
max: Expr::offset(
&Expr::global_value(gv),
i64::try_from(heap.offset_guard_size)
.unwrap()
.checked_sub(i64::from(access_size))
.unwrap(),
)
.unwrap(),
nullable: true,
});
}
}
}
addr
}
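
/// Compute `heap_base + index + offset` in the native address type,
/// propagating PCC facts through each intermediate value when requested.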
fn compute_addr(
pos: &mut FuncCursor,
heap: &HeapData,
addr_ty: ir::Type,
index: ir::Value,
offset: u32,
pcc: Option<AddrPcc>,
) -> ir::Value {
debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);
let heap_base = pos.ins().global_value(addr_ty, heap.base);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _size)) => {
pos.func.dfg.facts[heap_base] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: 0,
nullable: false,
});
}
Some(AddrPcc::Dynamic(ty, _limit)) => {
pos.func.dfg.facts[heap_base] = Some(Fact::dynamic_base_ptr(ty));
}
}
let base_and_index = pos.ins().iadd(heap_base, index);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
if let Some(idx) = pos.func.dfg.facts[index]
.as_ref()
.and_then(|f| f.as_symbol())
.cloned()
{
pos.func.dfg.facts[base_and_index] = Some(Fact::DynamicMem {
ty,
min: idx.clone(),
max: idx,
nullable: false,
});
} else {
pos.func.dfg.facts[base_and_index] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: u64::from(u32::MAX),
nullable: false,
});
}
}
}
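    // Fold in the static offset, if any, attaching facts to the constant and
    // to the final address.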
if offset == 0 {
base_and_index
} else {
let offset_val = pos.ins().iconst(addr_ty, i64::from(offset));
if pcc.is_some() {
pos.func.dfg.facts[offset_val] = Some(Fact::constant(
u16::try_from(addr_ty.bits()).unwrap(),
u64::from(offset),
));
}
let result = pos.ins().iadd(base_and_index, offset_val);
match pcc {
None => {}
Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
if let Some(idx) = pos.func.dfg.facts[index]
.as_ref()
.and_then(|f| f.as_symbol())
{
pos.func.dfg.facts[result] = Some(Fact::DynamicMem {
ty,
min: idx.clone(),
max: Expr::offset(idx, i64::from(offset)).unwrap(),
nullable: false,
});
} else {
pos.func.dfg.facts[result] = Some(Fact::Mem {
ty,
min_offset: u64::from(offset),
max_offset: u64::from(u32::MAX) + u64::from(offset),
nullable: false,
});
}
}
}
result
}
}
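
/// Sum of the access's static offset and size; cannot overflow because a
/// `u32` plus a `u8` always fits in a `u64`.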
#[inline]
fn offset_plus_size(offset: u32, size: u8) -> u64 {
offset as u64 + size as u64
}