//! Heap bounds-checking functionality for WebAssembly memory accesses.
//!
//! Bounds checks are critical for the memory safety of WebAssembly, so
//! extreme caution is recommended when working on this area of Winch.

use super::env::HeapData;
use crate::{
    Result,
    abi::vmctx,
    codegen::{CodeGenContext, Emission},
    isa::reg::{Reg, writable},
    masm::{Imm, IntCmpKind, IntScratch, MacroAssembler, OperandSize, RegImm, TrapCode},
    stack::TypedReg,
};
use wasmtime_environ::WasmValType;
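
/// A newtype to represent an immediate offset argument for a heap access.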
#[derive(Debug, Copy, Clone)]
pub(crate) struct ImmOffset(u32);

impl ImmOffset {
    /// Construct an [ImmOffset] from a u32.
    pub fn from_u32(raw: u32) -> Self {
        Self(raw)
    }

    /// Return the underlying u32 value.
    pub fn as_u32(&self) -> u32 {
        self.0
    }
}
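
/// The bounds of a heap access: either known statically at compile time, or
/// loaded into a register at runtime.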
#[derive(Debug, Copy, Clone)]
pub(crate) enum Bounds {
    /// Static bounds, known ahead of time.
    Static(u64),
    /// Dynamic bounds, loaded at runtime.
    Dynamic(TypedReg),
}

impl Bounds {
    /// Construct dynamic [Bounds] from a [TypedReg].
    pub fn from_typed_reg(tr: TypedReg) -> Self {
        Self::Dynamic(tr)
    }

    /// Construct static [Bounds] from a u64.
    pub fn from_u64(raw: u64) -> Self {
        Self::Static(raw)
    }

    /// Return the underlying [TypedReg], panicking if the bounds are static.
    pub fn as_typed_reg(&self) -> TypedReg {
        match self {
            Self::Dynamic(tr) => *tr,
            _ => panic!("expected dynamic bounds"),
        }
    }

    /// Return the underlying u64, panicking if the bounds are dynamic.
    pub fn as_u64(&self) -> u64 {
        match self {
            Self::Static(v) => *v,
            _ => panic!("expected static bounds"),
        }
    }
}
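
/// A newtype over the [TypedReg] holding the index of a heap access.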
#[derive(Debug, Copy, Clone)]
pub(crate) struct Index(TypedReg);

impl Index {
    /// Construct an [Index] from a [TypedReg].
    pub fn from_typed_reg(tr: TypedReg) -> Self {
        Self(tr)
    }

    /// Return the underlying [TypedReg].
    pub fn as_typed_reg(&self) -> TypedReg {
        self.0
    }
}
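
/// Loads the bounds of a heap into a general-purpose register: either the
/// statically-known heap size as an immediate, or the heap's current length
/// loaded through the vmctx.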
pub(crate) fn load_dynamic_heap_bounds<M>(
    context: &mut CodeGenContext<Emission>,
    masm: &mut M,
    heap: &HeapData,
    ptr_size: OperandSize,
) -> Result<Bounds>
where
    M: MacroAssembler,
{
    let dst = context.any_gpr(masm)?;
    match heap.memory.static_heap_size() {
        // Constant size: no need to perform a load.
        Some(size) => masm.mov(writable!(dst), RegImm::i64(size.cast_signed()), ptr_size)?,
        None => {
            masm.with_scratch::<IntScratch, _>(|masm, scratch| {
                // For imported memories, load the pointer to the memory's
                // definition out of the vmctx first.
                let base = if let Some(offset) = heap.import_from {
                    let addr = masm.address_at_vmctx(offset)?;
                    masm.load_ptr(addr, scratch.writable())?;
                    scratch.inner()
                } else {
                    vmctx!(M)
                };
                let addr = masm.address_at_reg(base, heap.current_length_offset)?;
                masm.load_ptr(addr, writable!(dst))
            })?;
        }
    }

    Ok(Bounds::from_typed_reg(TypedReg::new(
        heap.index_type(),
        dst,
    )))
}
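
/// Ensures that the immediate offset of a heap access fits in a u32. If it
/// does, it is returned unchanged as an [ImmOffset]; otherwise the offset is
/// folded into the index register via a checked unsigned addition that traps
/// with [TrapCode::HEAP_OUT_OF_BOUNDS] on overflow, and a zero [ImmOffset]
/// is returned.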
#[inline]
pub(crate) fn ensure_index_and_offset<M: MacroAssembler>(
    masm: &mut M,
    index: Index,
    offset: u64,
    heap_ty_size: OperandSize,
) -> Result<ImmOffset> {
    match u32::try_from(offset) {
        // If the offset fits in a u32, it can be used as-is.
        Ok(offs) => Ok(ImmOffset::from_u32(offs)),
        // Otherwise, fold the offset into the index register, trapping on
        // overflow, and use an immediate offset of 0.
        Err(_) => {
            masm.checked_uadd(
                writable!(index.as_typed_reg().into()),
                index.as_typed_reg().into(),
                Imm::i64(offset as i64),
                heap_ty_size,
                TrapCode::HEAP_OUT_OF_BOUNDS,
            )?;
            Ok(ImmOffset::from_u32(0))
        }
    }
}
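
/// Emits a bounds check for a heap access via the provided check condition,
/// trapping with [TrapCode::HEAP_OUT_OF_BOUNDS] if the access is out of
/// bounds, and returns a register holding the checked heap address. When
/// Spectre mitigation is enabled, the address is additionally replaced with
/// 0 via a conditional move whenever the check condition holds, so that
/// speculative execution past the trap cannot read from the computed
/// address.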
pub(crate) fn load_heap_addr_checked<M, F>(
    masm: &mut M,
    context: &mut CodeGenContext<Emission>,
    ptr_size: OperandSize,
    heap: &HeapData,
    enable_spectre_mitigation: bool,
    bounds: Bounds,
    index: Index,
    offset: ImmOffset,
    mut emit_check_condition: F,
) -> Result<Reg>
where
    M: MacroAssembler,
    F: FnMut(&mut M, Bounds, Index) -> Result<IntCmpKind>,
{
    let cmp_kind = emit_check_condition(masm, bounds, index)?;

    masm.trapif(cmp_kind, TrapCode::HEAP_OUT_OF_BOUNDS)?;
    let addr = context.any_gpr(masm)?;

    load_heap_addr_unchecked(masm, heap, index, offset, addr, ptr_size)?;
    if !enable_spectre_mitigation {
        Ok(addr)
    } else {
        // Conditionally assign 0 to the register holding the heap address
        // if the check condition is met.
        let tmp = context.any_gpr(masm)?;
        masm.mov(writable!(tmp), RegImm::i64(0), ptr_size)?;
        let cmp_kind = emit_check_condition(masm, bounds, index)?;
        masm.cmov(writable!(addr), tmp, cmp_kind, ptr_size)?;
        context.free_reg(tmp);
        Ok(addr)
    }
}
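
/// Loads the requested heap address into the destination register without
/// performing any bounds checks: base + index (+ offset, if non-zero).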
pub(crate) fn load_heap_addr_unchecked<M>(
    masm: &mut M,
    heap: &HeapData,
    index: Index,
    offset: ImmOffset,
    dst: Reg,
    ptr_size: OperandSize,
) -> Result<()>
where
    M: MacroAssembler,
{
    masm.with_scratch::<IntScratch, _>(|masm, scratch| {
        // If the memory is imported, load the pointer to its definition out
        // of the vmctx first.
        let base = if let Some(offset) = heap.import_from {
            masm.load_ptr(masm.address_at_vmctx(offset)?, scratch.writable())?;
            scratch.inner()
        } else {
            vmctx!(M)
        };

        // Load the base of the memory into the `dst` register.
        masm.load_ptr(masm.address_at_reg(base, heap.offset)?, writable!(dst))
    })?;

    let index_typed = index.as_typed_reg();
    let heap_size: OperandSize = heap.index_type().try_into()?;
    // When a 32-bit index is used on a 64-bit target, zero-extend the index
    // while adding it to the base.
    if ptr_size == OperandSize::S64 && heap_size == OperandSize::S32 {
        masm.add_uextend(
            writable!(dst),
            dst,
            index_typed.reg,
            OperandSize::S32,
            ptr_size,
        )?;
    } else {
        // Otherwise the index must already be a full-width i64.
        assert!(index_typed.ty == WasmValType::I64);
        masm.add(writable!(dst), dst, index_typed.reg.into(), ptr_size)?;
    }

    // Finally, add the constant offset, if any.
    if offset.as_u32() > 0 {
        masm.add(
            writable!(dst),
            dst,
            RegImm::i64(offset.as_u32() as i64),
            ptr_size,
        )?;
    }

    Ok(())
}