use super::env::{HeapData, HeapStyle};
use crate::{
abi::{scratch, vmctx},
codegen::CodeGenContext,
isa::reg::Reg,
masm::{IntCmpKind, MacroAssembler, OperandSize, RegImm, TrapCode},
stack::TypedReg,
};
/// A heap-access offset that is guaranteed to fit in a 32-bit immediate.
#[derive(Debug, Copy, Clone)]
pub(crate) struct ImmOffset(u32);

impl ImmOffset {
    /// Wrap a raw `u32` as an immediate offset.
    pub fn from_u32(raw: u32) -> Self {
        ImmOffset(raw)
    }

    /// Return the wrapped offset value.
    pub fn as_u32(&self) -> u32 {
        let ImmOffset(raw) = self;
        *raw
    }
}
/// The bounds of a heap: either known at compile time or held in a register.
#[derive(Debug, Copy, Clone)]
pub(crate) enum Bounds {
    /// Static bounds, known at compile time, in bytes.
    Static(u64),
    /// Dynamic bounds, loaded at runtime into the given register.
    Dynamic(TypedReg),
}

impl Bounds {
    /// Create dynamic bounds held in the given register.
    pub fn from_typed_reg(tr: TypedReg) -> Self {
        Self::Dynamic(tr)
    }

    /// Create static bounds from the given raw byte count.
    pub fn from_u64(raw: u64) -> Self {
        Self::Static(raw)
    }

    /// Return the register holding the dynamic bounds.
    ///
    /// # Panics
    ///
    /// Panics if the bounds are static.
    pub fn as_typed_reg(&self) -> TypedReg {
        match self {
            Self::Dynamic(tr) => *tr,
            // Explicit variant (no `_` catch-all) with a diagnostic message,
            // so a misuse fails loudly and the match stays exhaustive if a
            // variant is ever added.
            Self::Static(_) => panic!("expected dynamic bounds, found static bounds"),
        }
    }

    /// Return the static bounds value, in bytes.
    ///
    /// # Panics
    ///
    /// Panics if the bounds are dynamic.
    pub fn as_u64(&self) -> u64 {
        match self {
            Self::Static(v) => *v,
            Self::Dynamic(_) => panic!("expected static bounds, found dynamic bounds"),
        }
    }
}
/// A newtype wrapping the register that holds a heap-access index.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Index(TypedReg);

impl Index {
    /// Wrap the given register as a heap-access index.
    pub fn from_typed_reg(tr: TypedReg) -> Self {
        Index(tr)
    }

    /// Return the underlying register.
    pub fn as_typed_reg(&self) -> TypedReg {
        let Index(tr) = self;
        *tr
    }
}
/// Load the dynamic bounds of the given heap into a freshly allocated
/// general-purpose register and return them as [`Bounds`].
///
/// Must only be called for heaps with [`HeapStyle::Dynamic`]; static heaps
/// have compile-time known bounds and hit the `unreachable!` arm.
pub(crate) fn load_dynamic_heap_bounds<M>(
    context: &mut CodeGenContext,
    masm: &mut M,
    heap: &HeapData,
    ptr_size: OperandSize,
) -> Bounds
where
    M: MacroAssembler,
{
    let bound_reg = context.any_gpr(masm);
    match &heap.style {
        HeapStyle::Dynamic => match heap.max_size {
            // If the heap cannot grow (min == max), its length is a
            // compile-time constant; materialize it as an immediate instead
            // of loading it from memory.
            Some(max_size) if max_size == heap.min_size => {
                masm.mov(RegImm::i64(max_size as i64), bound_reg, ptr_size)
            }
            // Otherwise load the current length from the heap's definition,
            // which lives either behind an import pointer or directly in the
            // vmctx.
            _ => {
                let scratch = scratch!(M);
                let def_base = match heap.import_from {
                    Some(import_offset) => {
                        let import_addr = masm.address_at_vmctx(import_offset);
                        masm.load_ptr(import_addr, scratch);
                        scratch
                    }
                    None => vmctx!(M),
                };
                let len_addr = masm.address_at_reg(def_base, heap.current_length_offset);
                masm.load_ptr(len_addr, bound_reg);
            }
        },
        HeapStyle::Static { .. } => unreachable!("Loading dynamic bounds of a static heap"),
    }
    Bounds::from_typed_reg(TypedReg::new(heap.ty, bound_reg))
}
/// Ensure the requested access offset fits in a 32-bit immediate.
///
/// If `offset` fits in a `u32`, it is returned as an [`ImmOffset`]
/// unchanged. Otherwise the full 64-bit offset is folded into the index
/// register with an overflow-checked add (trapping with
/// [`TrapCode::HeapOutOfBounds`] on overflow) and a zero immediate offset is
/// returned.
#[inline]
pub(crate) fn ensure_index_and_offset<M: MacroAssembler>(
    masm: &mut M,
    index: Index,
    offset: u64,
    heap_ty_size: OperandSize,
) -> ImmOffset {
    if let Ok(imm) = u32::try_from(offset) {
        ImmOffset::from_u32(imm)
    } else {
        // Offset doesn't fit in an immediate: add it into the index register
        // itself, trapping if the addition overflows.
        masm.checked_uadd(
            index.as_typed_reg().into(),
            index.as_typed_reg().into(),
            RegImm::i64(offset as i64),
            heap_ty_size,
            TrapCode::HeapOutOfBounds,
        );
        ImmOffset::from_u32(0)
    }
}
/// Emit a bounds-checked heap address computation and return the register
/// holding the effective address.
///
/// `emit_check_condition` emits the comparison and returns the condition
/// under which the access is out of bounds; an explicit trap is emitted on
/// that condition. When Spectre mitigation is enabled, the condition is
/// emitted a second time after the address computation so the resulting
/// address can be conditionally replaced with zero via `cmov`, preventing
/// speculative out-of-bounds loads.
pub(crate) fn load_heap_addr_checked<M, F>(
    masm: &mut M,
    context: &mut CodeGenContext,
    ptr_size: OperandSize,
    heap: &HeapData,
    enable_spectre_mitigation: bool,
    bounds: Bounds,
    index: Index,
    offset: ImmOffset,
    mut emit_check_condition: F,
) -> Reg
where
    M: MacroAssembler,
    F: FnMut(&mut M, Bounds, Index) -> IntCmpKind,
{
    // Architectural check: trap outright if the access is out of bounds.
    let oob = emit_check_condition(masm, bounds, index);
    masm.trapif(oob, TrapCode::HeapOutOfBounds);

    let addr = context.any_gpr(masm);
    load_heap_addr_unchecked(masm, heap, index, offset, addr, ptr_size);

    if enable_spectre_mitigation {
        // Speculative check: re-emit the comparison (the flags may have been
        // clobbered by the address computation above) and zero out the
        // address on the out-of-bounds condition.
        let zero = context.any_gpr(masm);
        masm.mov(RegImm::i64(0), zero, ptr_size);
        let oob = emit_check_condition(masm, bounds, index);
        masm.cmov(zero, addr, oob, ptr_size);
        context.free_reg(zero);
    }
    addr
}
/// Compute the effective heap address `base + index + offset` into `dst`
/// without any bounds checking.
///
/// The heap base is loaded from the heap's definition, which lives either
/// behind an import pointer or directly in the vmctx. The immediate offset
/// is only added when non-zero, saving an instruction in the common case.
pub(crate) fn load_heap_addr_unchecked<M>(
    masm: &mut M,
    heap: &HeapData,
    index: Index,
    offset: ImmOffset,
    dst: Reg,
    ptr_size: OperandSize,
) where
    M: MacroAssembler,
{
    // Resolve the base of the heap definition: dereference the import
    // pointer if the heap is imported, otherwise use the vmctx directly.
    let def_base = match heap.import_from {
        Some(import_offset) => {
            let scratch = scratch!(M);
            masm.load_ptr(masm.address_at_vmctx(import_offset), scratch);
            scratch
        }
        None => vmctx!(M),
    };

    // dst = heap base
    masm.load_ptr(masm.address_at_reg(def_base, heap.offset), dst);
    // dst += index
    masm.add(dst, dst, index.as_typed_reg().reg.into(), ptr_size);
    // dst += offset, skipped when the immediate is zero.
    if offset.as_u32() > 0 {
        masm.add(dst, dst, RegImm::i64(offset.as_u32() as i64), ptr_size);
    }
}