use crate::Result;
use crate::abi::{self, LocalSlot, align_to};
use crate::codegen::{CodeGenContext, Emission, FuncEnv};
use crate::isa::{
CallingConvention,
reg::{Reg, RegClass, WritableReg, writable},
};
use cranelift_codegen::{
Final, MachBufferFinalized, MachLabel,
binemit::CodeOffset,
ir::{Endianness, MemFlags, RelSourceLoc, SourceLoc, UserExternalNameRef},
};
use std::{fmt::Debug, ops::Range};
use wasmtime_environ::{PtrSize, WasmHeapType, WasmRefType, WasmValType};
pub(crate) use cranelift_codegen::ir::TrapCode;
/// Kind of an integer division: whether operands are treated as signed
/// or unsigned.
#[derive(Eq, PartialEq)]
pub(crate) enum DivKind {
    /// Signed division.
    Signed,
    /// Unsigned division.
    Unsigned,
}
/// Width of an atomic wait operation (presumably maps to wasm
/// `memory.atomic.wait32`/`wait64` — confirm at the call sites).
#[derive(Debug, Clone, Copy)]
pub(crate) enum AtomicWaitKind {
    /// 32-bit wait.
    Wait32,
    /// 64-bit wait.
    Wait64,
}
/// Kind of an integer remainder: whether operands are treated as signed
/// or unsigned.
#[derive(Copy, Clone)]
pub(crate) enum RemKind {
    /// Signed remainder.
    Signed,
    /// Unsigned remainder.
    Unsigned,
}

impl RemKind {
    /// Returns true for the signed variant.
    pub fn is_signed(&self) -> bool {
        match self {
            Self::Signed => true,
            Self::Unsigned => false,
        }
    }
}
/// Kinds of vector minimum supported by the masm.
pub(crate) enum V128MinKind {
    F32x4,
    F64x2,
    I8x16S,
    I8x16U,
    I16x8S,
    I16x8U,
    I32x4S,
    I32x4U,
}

impl V128MinKind {
    /// The width of each lane of the operand vectors.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16S | Self::I8x16U => OperandSize::S8,
            Self::I16x8S | Self::I16x8U => OperandSize::S16,
            Self::I32x4S | Self::I32x4U | Self::F32x4 => OperandSize::S32,
            Self::F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of vector maximum supported by the masm.
pub(crate) enum V128MaxKind {
    F32x4,
    F64x2,
    I8x16S,
    I8x16U,
    I16x8S,
    I16x8U,
    I32x4S,
    I32x4U,
}

impl V128MaxKind {
    /// The width of each lane of the operand vectors.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16S | Self::I8x16U => OperandSize::S8,
            Self::I16x8S | Self::I16x8U => OperandSize::S16,
            Self::I32x4S | Self::I32x4U | Self::F32x4 => OperandSize::S32,
            Self::F64x2 => OperandSize::S64,
        }
    }
}
/// Signedness of a widening (full-width result) multiplication.
#[derive(Eq, PartialEq)]
pub(crate) enum MulWideKind {
    /// Operands are signed.
    Signed,
    /// Operands are unsigned.
    Unsigned,
}
/// The operation to perform in an atomic read-modify-write sequence.
pub(crate) enum RmwOp {
    /// Atomic add.
    Add,
    /// Atomic subtract.
    Sub,
    /// Atomic exchange.
    Xchg,
    /// Atomic bitwise and.
    And,
    /// Atomic bitwise or.
    Or,
    /// Atomic bitwise xor.
    Xor,
}
/// Direction in which [`MacroAssembler::memmove`] copies words; chosen
/// by the caller so that overlapping regions are copied safely.
#[derive(Debug, Clone, Eq, PartialEq)]
pub(crate) enum MemMoveDirection {
    /// Copy starting at the high addresses, moving toward lower ones.
    HighToLow,
    /// Copy starting at the low addresses, moving toward higher ones.
    LowToHigh,
}
/// Whether a float-to-integer truncation is emitted with explicit
/// checks or not.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) enum TruncKind {
    /// The truncation is checked.
    Checked,
    /// The truncation is unchecked.
    Unchecked,
}

impl TruncKind {
    /// Returns true if the truncation kind is checked.
    pub(crate) fn is_checked(&self) -> bool {
        // Use `matches!` for consistency with `is_unchecked` below; the
        // original compared with `==`, needlessly relying on `PartialEq`.
        matches!(self, Self::Checked)
    }

    /// Returns true if the truncation kind is unchecked.
    #[must_use]
    pub(crate) fn is_unchecked(&self) -> bool {
        matches!(self, Self::Unchecked)
    }
}
/// A newtype for a byte offset relative to the stack pointer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, PartialOrd, Ord, Default)]
pub struct SPOffset(u32);

impl SPOffset {
    /// Creates an offset from a raw byte count.
    pub fn from_u32(offs: u32) -> Self {
        SPOffset(offs)
    }

    /// Returns the raw byte offset.
    pub fn as_u32(&self) -> u32 {
        let SPOffset(raw) = *self;
        raw
    }
}
/// A region of the stack identified by its offset from the stack
/// pointer and its size in bytes.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct StackSlot {
    /// The offset of the slot, relative to the stack pointer.
    pub offset: SPOffset,
    /// The byte size of the slot.
    pub size: u32,
}

impl StackSlot {
    /// Creates a stack slot from its offset and size.
    pub fn new(offset: SPOffset, size: u32) -> Self {
        StackSlot { offset, size }
    }
}
/// Type-level selector for the register class of a scratch register;
/// see [`MacroAssembler::with_scratch`].
pub trait ScratchType {
    /// The register class to allocate the scratch register from.
    fn reg_class() -> RegClass;
}

/// Requests an integer-class scratch register.
pub struct IntScratch;
/// Requests a float-class scratch register.
pub struct FloatScratch;

impl ScratchType for IntScratch {
    fn reg_class() -> RegClass {
        RegClass::Int
    }
}

impl ScratchType for FloatScratch {
    fn reg_class() -> RegClass {
        RegClass::Float
    }
}
/// A handle to a scratch register, handed out by
/// [`MacroAssembler::with_scratch`] for the duration of a closure.
#[derive(Debug, Clone, Copy)]
pub struct Scratch(Reg);

impl Scratch {
    /// Wraps the given register as a scratch register.
    pub fn new(r: Reg) -> Self {
        Self(r)
    }

    /// Returns the underlying register, for use as a source.
    #[inline]
    pub fn inner(&self) -> Reg {
        self.0
    }

    /// Returns the underlying register as writable, for use as a
    /// destination.
    #[inline]
    pub fn writable(&self) -> WritableReg {
        writable!(self.0)
    }
}
/// Integer comparison conditions.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum IntCmpKind {
    /// Equal.
    Eq,
    /// Not equal.
    Ne,
    /// Less than, signed.
    LtS,
    /// Less than, unsigned.
    LtU,
    /// Greater than, signed.
    GtS,
    /// Greater than, unsigned.
    GtU,
    /// Less than or equal, signed.
    LeS,
    /// Less than or equal, unsigned.
    LeU,
    /// Greater than or equal, signed.
    GeS,
    /// Greater than or equal, unsigned.
    GeU,
}
/// Floating point comparison conditions.
#[derive(Debug)]
pub(crate) enum FloatCmpKind {
    /// Equal.
    Eq,
    /// Not equal.
    Ne,
    /// Less than.
    Lt,
    /// Greater than.
    Gt,
    /// Less than or equal.
    Le,
    /// Greater than or equal.
    Ge,
}
/// Kinds of shift and rotate operations.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum ShiftKind {
    /// Shift left.
    Shl,
    /// Shift right, signed (arithmetic).
    ShrS,
    /// Shift right, unsigned (logical).
    ShrU,
    /// Rotate left.
    Rotl,
    /// Rotate right.
    Rotr,
}
/// An integer extension with its signedness erased to a runtime tag;
/// the signedness is still carried at the type level by [`Extend`]'s
/// phantom parameter inside each variant.
#[derive(Copy, Clone)]
pub(crate) enum ExtendKind {
    /// A sign extension.
    Signed(Extend<Signed>),
    /// A zero extension.
    Unsigned(Extend<Zero>),
}
/// Type-level marker for sign extension. Uninhabited: only ever used as
/// the phantom tag of [`Extend`].
#[derive(Copy, Clone)]
pub(crate) enum Signed {}

/// Type-level marker for zero extension. Uninhabited: only ever used as
/// the phantom tag of [`Extend`].
#[derive(Copy, Clone)]
pub(crate) enum Zero {}

/// Bound for the phantom tags accepted by [`Extend`].
pub(crate) trait ExtendType {}

impl ExtendType for Signed {}
impl ExtendType for Zero {}
/// An integer width extension, tagged at the type level with its
/// signedness (`T` is [`Signed`] or [`Zero`]).
#[derive(Copy, Clone)]
pub(crate) enum Extend<T: ExtendType> {
    /// 8 bits extended into a 32-bit destination.
    I32Extend8,
    /// 16 bits extended into a 32-bit destination.
    I32Extend16,
    /// 8 bits extended into a 64-bit destination.
    I64Extend8,
    /// 16 bits extended into a 64-bit destination.
    I64Extend16,
    /// 32 bits extended into a 64-bit destination.
    I64Extend32,
    // Never-constructed variant whose only purpose is to mention `T` so
    // the type parameter counts as used; both tag types are uninhabited.
    __Kind(T),
}
impl From<Extend<Zero>> for ExtendKind {
    /// Wraps a zero extension in the tag-erased [`ExtendKind`].
    fn from(value: Extend<Zero>) -> Self {
        Self::Unsigned(value)
    }
}
impl<T: ExtendType> Extend<T> {
    /// The operand size of the extension's source.
    pub fn from_size(&self) -> OperandSize {
        match self {
            Self::I32Extend8 | Self::I64Extend8 => OperandSize::S8,
            Self::I32Extend16 | Self::I64Extend16 => OperandSize::S16,
            Self::I64Extend32 => OperandSize::S32,
            Self::__Kind(_) => unreachable!(),
        }
    }

    /// The operand size of the extension's destination.
    pub fn to_size(&self) -> OperandSize {
        match self {
            Self::I32Extend8 | Self::I32Extend16 => OperandSize::S32,
            Self::I64Extend8 | Self::I64Extend16 | Self::I64Extend32 => OperandSize::S64,
            Self::__Kind(_) => unreachable!(),
        }
    }

    /// The number of bits extended from.
    pub fn from_bits(&self) -> u8 {
        self.from_size().num_bits()
    }

    /// The number of bits extended to.
    pub fn to_bits(&self) -> u8 {
        self.to_size().num_bits()
    }
}
impl From<Extend<Signed>> for ExtendKind {
    /// Wraps a sign extension in the tag-erased [`ExtendKind`].
    fn from(value: Extend<Signed>) -> Self {
        Self::Signed(value)
    }
}
impl ExtendKind {
    /// Returns true if this is a sign extension.
    pub fn signed(&self) -> bool {
        // `matches!` instead of a two-arm match returning bool: idiomatic
        // and consistent with the other predicates in this module (e.g.
        // `RemKind::is_signed`).
        matches!(self, Self::Signed(_))
    }

    /// The number of bits extended from.
    pub fn from_bits(&self) -> u8 {
        match self {
            Self::Signed(s) => s.from_bits(),
            Self::Unsigned(u) => u.from_bits(),
        }
    }

    /// The number of bits extended to.
    pub fn to_bits(&self) -> u8 {
        match self {
            Self::Signed(s) => s.to_bits(),
            Self::Unsigned(u) => u.to_bits(),
        }
    }
}
/// Kinds of vector load-and-extend; variant names encode source lane
/// count/width and signedness (e.g. `E8x8S`: eight 8-bit lanes,
/// sign-extended).
#[derive(Copy, Clone)]
pub(crate) enum V128LoadExtendKind {
    /// Eight 8-bit lanes, signed.
    E8x8S,
    /// Eight 8-bit lanes, unsigned.
    E8x8U,
    /// Four 16-bit lanes, signed.
    E16x4S,
    /// Four 16-bit lanes, unsigned.
    E16x4U,
    /// Two 32-bit lanes, signed.
    E32x2S,
    /// Two 32-bit lanes, unsigned.
    E32x2U,
}
/// Scalar width read from memory by a load-and-splat.
pub(crate) enum SplatLoadKind {
    /// 8 bits.
    S8,
    /// 16 bits.
    S16,
    /// 32 bits.
    S32,
    /// 64 bits.
    S64,
}
/// Kinds of vector splat supported by the masm.
#[derive(Copy, Debug, Clone, Eq, PartialEq)]
pub(crate) enum SplatKind {
    I8x16,
    I16x8,
    I32x4,
    I64x2,
    F32x4,
    F64x2,
}

impl SplatKind {
    /// The width of each lane produced by the splat.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16 => OperandSize::S8,
            Self::I16x8 => OperandSize::S16,
            Self::I32x4 | Self::F32x4 => OperandSize::S32,
            Self::I64x2 | Self::F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of lane extraction supported by the masm; the `S`/`U` suffixes
/// on the small integer lanes record how the extracted value is
/// extended to a scalar.
#[derive(Copy, Debug, Clone, Eq, PartialEq)]
pub(crate) enum ExtractLaneKind {
    I8x16S,
    I8x16U,
    I16x8S,
    I16x8U,
    I32x4,
    I64x2,
    F32x4,
    F64x2,
}

impl ExtractLaneKind {
    /// The width of the extracted lane.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16S | Self::I8x16U => OperandSize::S8,
            Self::I16x8S | Self::I16x8U => OperandSize::S16,
            Self::I32x4 | Self::F32x4 => OperandSize::S32,
            Self::I64x2 | Self::F64x2 => OperandSize::S64,
        }
    }
}
impl From<ExtractLaneKind> for Extend<Signed> {
    /// Maps a lane extraction to the sign extension it implies.
    ///
    /// Only the sign-extending small-lane extracts have a mapping; every
    /// other kind hits `unimplemented!`, so callers are presumably
    /// expected to convert only the `*S` kinds — confirm at call sites.
    fn from(value: ExtractLaneKind) -> Self {
        match value {
            ExtractLaneKind::I8x16S => Extend::I32Extend8,
            ExtractLaneKind::I16x8S => Extend::I32Extend16,
            _ => unimplemented!(),
        }
    }
}
/// Kinds of lane replacement supported by the masm.
pub(crate) enum ReplaceLaneKind {
    I8x16,
    I16x8,
    I32x4,
    I64x2,
    F32x4,
    F64x2,
}

impl ReplaceLaneKind {
    /// The width of the lane being replaced.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16 => OperandSize::S8,
            Self::I16x8 => OperandSize::S16,
            Self::I32x4 | Self::F32x4 => OperandSize::S32,
            Self::I64x2 | Self::F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of memory load supported by the masm.
pub(crate) enum LoadKind {
    /// Plain scalar load of the given width.
    Operand(OperandSize),
    /// Atomic load, optionally combined with an extension.
    Atomic(OperandSize, Option<ExtendKind>),
    /// Load a scalar and splat it into every vector lane.
    Splat(SplatLoadKind),
    /// Scalar load combined with a sign or zero extension.
    ScalarExtend(ExtendKind),
    /// Vector load-and-extend.
    VectorExtend(V128LoadExtendKind),
    /// Load into a single vector lane.
    VectorLane(LaneSelector),
    /// Load a scalar, zeroing the remaining vector bits.
    VectorZero(OperandSize),
}

impl LoadKind {
    /// Returns the [`OperandSize`] of the memory access this load
    /// performs.
    pub(crate) fn derive_operand_size(&self) -> OperandSize {
        match self {
            Self::Operand(size)
            | Self::Atomic(size, None)
            | Self::VectorLane(LaneSelector { size, .. })
            | Self::VectorZero(size) => *size,
            Self::ScalarExtend(ext) | Self::Atomic(_, Some(ext)) => {
                Self::operand_size_for_scalar(ext)
            }
            Self::Splat(kind) => Self::operand_size_for_splat(kind),
            // Vector extends read 64 bits.
            Self::VectorExtend(_) => OperandSize::S64,
        }
    }

    /// Shorthand constructor for a single-lane vector load.
    pub fn vector_lane(lane: u8, size: OperandSize) -> Self {
        Self::VectorLane(LaneSelector { lane, size })
    }

    /// The access width implied by a scalar extension is its source
    /// size.
    fn operand_size_for_scalar(extend_kind: &ExtendKind) -> OperandSize {
        match extend_kind {
            ExtendKind::Signed(s) => s.from_size(),
            ExtendKind::Unsigned(u) => u.from_size(),
        }
    }

    /// The access width of a load-and-splat.
    fn operand_size_for_splat(kind: &SplatLoadKind) -> OperandSize {
        match kind {
            SplatLoadKind::S8 => OperandSize::S8,
            SplatLoadKind::S16 => OperandSize::S16,
            SplatLoadKind::S32 => OperandSize::S32,
            SplatLoadKind::S64 => OperandSize::S64,
        }
    }

    /// Returns true for atomic loads.
    pub(crate) fn is_atomic(&self) -> bool {
        matches!(self, Self::Atomic(..))
    }
}
/// Kinds of memory store supported by the masm.
#[derive(Copy, Clone)]
pub enum StoreKind {
    /// Plain scalar store of the given width.
    Operand(OperandSize),
    /// Atomic store of the given width.
    Atomic(OperandSize),
    /// Store of a single vector lane.
    VectorLane(LaneSelector),
}

impl StoreKind {
    /// Shorthand constructor for a single-lane vector store.
    pub fn vector_lane(lane: u8, size: OperandSize) -> Self {
        Self::VectorLane(LaneSelector { lane, size })
    }
}
/// Identifies a single vector lane: its index and width.
#[derive(Copy, Clone)]
pub struct LaneSelector {
    /// The lane index.
    pub lane: u8,
    /// The width of the lane.
    pub size: OperandSize,
}
/// Kinds of vector integer-to-float conversion supported by the masm.
pub(crate) enum V128ConvertKind {
    I32x4S,
    I32x4U,
    I32x4LowS,
    I32x4LowU,
}

impl V128ConvertKind {
    /// The lane width of the conversion's source vector.
    pub(crate) fn src_lane_size(&self) -> OperandSize {
        // Every supported conversion currently starts from 32-bit lanes.
        match self {
            Self::I32x4S | Self::I32x4U | Self::I32x4LowS | Self::I32x4LowU => OperandSize::S32,
        }
    }

    /// The lane width of the conversion's destination vector; the `Low`
    /// kinds widen to 64-bit lanes.
    pub(crate) fn dst_lane_size(&self) -> OperandSize {
        match self {
            Self::I32x4S | Self::I32x4U => OperandSize::S32,
            Self::I32x4LowS | Self::I32x4LowU => OperandSize::S64,
        }
    }
}
/// Kinds of vector narrowing supported by the masm; destination lanes
/// are half the width of the source lanes.
pub(crate) enum V128NarrowKind {
    I16x8S,
    I16x8U,
    I32x4S,
    I32x4U,
}

impl V128NarrowKind {
    /// The lane width of the narrowed (destination) vector.
    pub(crate) fn dst_lane_size(&self) -> OperandSize {
        match self {
            V128NarrowKind::I16x8S | V128NarrowKind::I16x8U => OperandSize::S8,
            V128NarrowKind::I32x4S | V128NarrowKind::I32x4U => OperandSize::S16,
        }
    }
}
/// Kinds of vector extension: which half of the source vector is
/// extended, and with which signedness.
#[derive(Debug, Copy, Clone)]
pub(crate) enum V128ExtendKind {
    LowI8x16S,
    HighI8x16S,
    LowI8x16U,
    HighI8x16U,
    LowI16x8S,
    HighI16x8S,
    LowI16x8U,
    HighI16x8U,
    LowI32x4S,
    HighI32x4S,
    LowI32x4U,
    HighI32x4U,
}

impl V128ExtendKind {
    /// The lane width of the source vector.
    pub(crate) fn src_lane_size(&self) -> OperandSize {
        use V128ExtendKind::*;
        match self {
            LowI8x16S | LowI8x16U | HighI8x16S | HighI8x16U => OperandSize::S8,
            LowI16x8S | LowI16x8U | HighI16x8S | HighI16x8U => OperandSize::S16,
            LowI32x4S | LowI32x4U | HighI32x4S | HighI32x4U => OperandSize::S32,
        }
    }
}
/// Kinds of vector equality comparison supported by the masm.
pub(crate) enum VectorEqualityKind {
    I8x16,
    I16x8,
    I32x4,
    I64x2,
    F32x4,
    F64x2,
}

impl VectorEqualityKind {
    /// The width of each compared lane.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            VectorEqualityKind::I8x16 => OperandSize::S8,
            VectorEqualityKind::I16x8 => OperandSize::S16,
            VectorEqualityKind::I32x4 | VectorEqualityKind::F32x4 => OperandSize::S32,
            VectorEqualityKind::I64x2 | VectorEqualityKind::F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of vector ordered comparison supported by the masm (used by
/// `v128_lt`/`v128_le`/`v128_gt`/`v128_ge`).
pub(crate) enum VectorCompareKind {
    I8x16S,
    I8x16U,
    I16x8S,
    I16x8U,
    I32x4S,
    I32x4U,
    I64x2S,
    F32x4,
    F64x2,
}

impl VectorCompareKind {
    /// The width of each compared lane.
    pub(crate) fn lane_size(&self) -> OperandSize {
        use VectorCompareKind::*;
        match self {
            I8x16S | I8x16U => OperandSize::S8,
            I16x8S | I16x8U => OperandSize::S16,
            I32x4S | I32x4U | F32x4 => OperandSize::S32,
            I64x2S | F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of vector absolute value supported by the masm.
#[derive(Copy, Debug, Clone, Eq, PartialEq)]
pub(crate) enum V128AbsKind {
    I8x16,
    I16x8,
    I32x4,
    I64x2,
    F32x4,
    F64x2,
}

impl V128AbsKind {
    /// The width of each lane of the operand vector.
    pub(crate) fn lane_size(&self) -> OperandSize {
        use V128AbsKind::*;
        match self {
            I8x16 => OperandSize::S8,
            I16x8 => OperandSize::S16,
            I32x4 | F32x4 => OperandSize::S32,
            I64x2 | F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of vector truncation: either per-lane float truncation toward
/// zero (`F32x4`/`F64x2`) or float-to-int conversion.
pub(crate) enum V128TruncKind {
    F32x4,
    F64x2,
    I32x4FromF32x4S,
    I32x4FromF32x4U,
    I32x4FromF64x2SZero,
    I32x4FromF64x2UZero,
}

impl V128TruncKind {
    /// The lane width of the source vector.
    pub(crate) fn src_lane_size(&self) -> OperandSize {
        use V128TruncKind::*;
        match self {
            F32x4 | I32x4FromF32x4S | I32x4FromF32x4U => OperandSize::S32,
            F64x2 | I32x4FromF64x2SZero | I32x4FromF64x2UZero => OperandSize::S64,
        }
    }

    /// The lane width of the destination vector; every kind except
    /// `F64x2` produces 32-bit lanes.
    pub(crate) fn dst_lane_size(&self) -> OperandSize {
        match self {
            V128TruncKind::F64x2 => OperandSize::S64,
            _ => OperandSize::S32,
        }
    }
}
/// Kinds of vector addition supported by the masm; the `Sat` variants
/// are saturating additions on small integer lanes.
pub(crate) enum V128AddKind {
    F32x4,
    F64x2,
    I8x16,
    /// Saturating signed 8-bit add.
    I8x16SatS,
    /// Saturating unsigned 8-bit add.
    I8x16SatU,
    I16x8,
    /// Saturating signed 16-bit add.
    I16x8SatS,
    /// Saturating unsigned 16-bit add.
    I16x8SatU,
    I32x4,
    I64x2,
}
/// Kinds of vector subtraction supported by the masm; the `Sat`
/// variants are saturating subtractions on small integer lanes.
pub(crate) enum V128SubKind {
    F32x4,
    F64x2,
    I8x16,
    /// Saturating signed 8-bit subtract.
    I8x16SatS,
    /// Saturating unsigned 8-bit subtract.
    I8x16SatU,
    I16x8,
    /// Saturating signed 16-bit subtract.
    I16x8SatS,
    /// Saturating unsigned 16-bit subtract.
    I16x8SatU,
    I32x4,
    I64x2,
}
impl From<V128NegKind> for V128SubKind {
    /// Maps an integer negation kind to the corresponding subtraction
    /// kind (integer negation is presumably lowered as `0 - x` — confirm
    /// in the backends). The float variants have no mapping and hit
    /// `unimplemented!`.
    fn from(value: V128NegKind) -> Self {
        match value {
            V128NegKind::I8x16 => Self::I8x16,
            V128NegKind::I16x8 => Self::I16x8,
            V128NegKind::I32x4 => Self::I32x4,
            V128NegKind::I64x2 => Self::I64x2,
            V128NegKind::F32x4 | V128NegKind::F64x2 => unimplemented!(),
        }
    }
}
/// Kinds of vector multiplication supported by the masm. Note that
/// there is no 8-bit lane variant.
pub(crate) enum V128MulKind {
    F32x4,
    F64x2,
    I16x8,
    I32x4,
    I64x2,
}
/// Kinds of vector negation supported by the masm.
#[derive(Copy, Clone)]
pub(crate) enum V128NegKind {
    F32x4,
    F64x2,
    I8x16,
    I16x8,
    I32x4,
    I64x2,
}

impl V128NegKind {
    /// The width of each lane of the operand vector.
    pub(crate) fn lane_size(&self) -> OperandSize {
        match self {
            Self::I8x16 => OperandSize::S8,
            Self::I16x8 => OperandSize::S16,
            Self::I32x4 | Self::F32x4 => OperandSize::S32,
            Self::I64x2 | Self::F64x2 => OperandSize::S64,
        }
    }
}
/// Kinds of extended pairwise addition supported by the masm; the
/// suffix records the source lane signedness.
pub(crate) enum V128ExtAddKind {
    I8x16S,
    I8x16U,
    I16x8S,
    I16x8U,
}
/// Kinds of extended (widening) multiplication: which half of the
/// source vectors is multiplied, and with which signedness.
#[derive(Debug, Clone, Copy)]
pub(crate) enum V128ExtMulKind {
    LowI8x16S,
    HighI8x16S,
    LowI8x16U,
    HighI8x16U,
    LowI16x8S,
    HighI16x8S,
    LowI16x8U,
    HighI16x8U,
    LowI32x4S,
    HighI32x4S,
    LowI32x4U,
    HighI32x4U,
}
impl From<V128ExtMulKind> for V128ExtendKind {
    /// One-to-one mapping from an extended-multiply kind to the vector
    /// extension that widens its operands.
    fn from(value: V128ExtMulKind) -> Self {
        match value {
            V128ExtMulKind::LowI8x16S => Self::LowI8x16S,
            V128ExtMulKind::HighI8x16S => Self::HighI8x16S,
            V128ExtMulKind::LowI8x16U => Self::LowI8x16U,
            V128ExtMulKind::HighI8x16U => Self::HighI8x16U,
            V128ExtMulKind::LowI16x8S => Self::LowI16x8S,
            V128ExtMulKind::HighI16x8S => Self::HighI16x8S,
            V128ExtMulKind::LowI16x8U => Self::LowI16x8U,
            V128ExtMulKind::HighI16x8U => Self::HighI16x8U,
            V128ExtMulKind::LowI32x4S => Self::LowI32x4S,
            V128ExtMulKind::HighI32x4S => Self::HighI32x4S,
            V128ExtMulKind::LowI32x4U => Self::LowI32x4U,
            V128ExtMulKind::HighI32x4U => Self::HighI32x4U,
        }
    }
}
impl From<V128ExtMulKind> for V128MulKind {
    /// Maps an extended-multiply kind to the multiplication performed on
    /// the widened lanes (source lane width doubled).
    fn from(value: V128ExtMulKind) -> Self {
        match value {
            V128ExtMulKind::LowI8x16S
            | V128ExtMulKind::HighI8x16S
            | V128ExtMulKind::LowI8x16U
            | V128ExtMulKind::HighI8x16U => Self::I16x8,
            V128ExtMulKind::LowI16x8S
            | V128ExtMulKind::HighI16x8S
            | V128ExtMulKind::LowI16x8U
            | V128ExtMulKind::HighI16x8U => Self::I32x4,
            V128ExtMulKind::LowI32x4S
            | V128ExtMulKind::HighI32x4S
            | V128ExtMulKind::LowI32x4U
            | V128ExtMulKind::HighI32x4U => Self::I64x2,
        }
    }
}
/// The size of an operand.
#[derive(Copy, Debug, Clone, Eq, PartialEq)]
pub(crate) enum OperandSize {
    /// 8 bits.
    S8,
    /// 16 bits.
    S16,
    /// 32 bits.
    S32,
    /// 64 bits.
    S64,
    /// 128 bits.
    S128,
}

impl OperandSize {
    /// The number of bits in the operand.
    pub fn num_bits(&self) -> u8 {
        match self {
            OperandSize::S8 => 8,
            OperandSize::S16 => 16,
            OperandSize::S32 => 32,
            OperandSize::S64 => 64,
            OperandSize::S128 => 128,
        }
    }

    /// The number of bytes in the operand.
    pub fn bytes(&self) -> u32 {
        match self {
            Self::S8 => 1,
            Self::S16 => 2,
            Self::S32 => 4,
            Self::S64 => 8,
            Self::S128 => 16,
        }
    }

    /// The binary logarithm of the number of bits in the operand
    /// (e.g. `S8` -> 3, since 2^3 == 8).
    pub fn log2(&self) -> u8 {
        match self {
            OperandSize::S8 => 3,
            OperandSize::S16 => 4,
            OperandSize::S32 => 5,
            OperandSize::S64 => 6,
            OperandSize::S128 => 7,
        }
    }

    /// Builds an [`OperandSize`] from a byte count.
    ///
    /// # Panics
    ///
    /// Panics if `bytes` is not one of 1, 2, 4, 8 or 16.
    pub fn from_bytes(bytes: u8) -> Self {
        use OperandSize::*;
        match bytes {
            // Generalized to also accept 1- and 2-byte widths; these used
            // to panic even though `S8` and `S16` exist.
            1 => S8,
            2 => S16,
            4 => S32,
            8 => S64,
            16 => S128,
            _ => panic!("Invalid bytes {bytes} for OperandSize"),
        }
    }

    /// The extension required to widen `self` to `to`, or `None` when no
    /// such extension exists (same size, narrowing, or 128-bit sizes).
    pub fn extend_to<T: ExtendType>(&self, to: Self) -> Option<Extend<T>> {
        match to {
            OperandSize::S32 => match self {
                OperandSize::S8 => Some(Extend::I32Extend8),
                OperandSize::S16 => Some(Extend::I32Extend16),
                _ => None,
            },
            OperandSize::S64 => match self {
                OperandSize::S8 => Some(Extend::I64Extend8),
                OperandSize::S16 => Some(Extend::I64Extend16),
                OperandSize::S32 => Some(Extend::I64Extend32),
                _ => None,
            },
            _ => None,
        }
    }

    /// NOTE(review): despite the name, the values returned here (8 for
    /// `S32`, 11 for `S64`) are the *exponent* bit counts of IEEE 754
    /// binary32/binary64 — the mantissa widths would be 23 and 52.
    /// Callers appear to rely on these values; confirm intent before
    /// renaming. Panics for non-float-bearing sizes.
    pub fn mantissa_bits(&self) -> u8 {
        match self {
            Self::S32 => 8,
            Self::S64 => 11,
            _ => unimplemented!(),
        }
    }
}
/// An operand that is either a register or an immediate.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) enum RegImm {
    /// A register operand.
    Reg(Reg),
    /// An immediate operand.
    Imm(Imm),
}
/// An immediate operand, stored as raw (unsigned) bit patterns.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) enum Imm {
    /// A 32-bit integer immediate.
    I32(u32),
    /// A 64-bit integer immediate.
    I64(u64),
    /// The bit pattern of a 32-bit float.
    F32(u32),
    /// The bit pattern of a 64-bit float.
    F64(u64),
    /// A 128-bit vector immediate.
    V128(i128),
}

impl Imm {
    /// Builds an I64 immediate from a signed value, stored as raw bits.
    pub fn i64(val: i64) -> Self {
        Self::I64(val as u64)
    }

    /// Builds an I32 immediate from a signed value, stored as raw bits.
    pub fn i32(val: i32) -> Self {
        Self::I32(val as u32)
    }

    /// Builds an F32 immediate from its bit pattern.
    pub fn f32(bits: u32) -> Self {
        Self::F32(bits)
    }

    /// Builds an F64 immediate from its bit pattern.
    pub fn f64(bits: u64) -> Self {
        Self::F64(bits)
    }

    /// Builds a V128 immediate from its bit pattern.
    pub fn v128(bits: i128) -> Self {
        Self::V128(bits)
    }

    /// Interprets the immediate as an `i32` when it fits; float and
    /// vector immediates never do.
    pub fn to_i32(&self) -> Option<i32> {
        match *self {
            Self::I32(v) => Some(v as i32),
            Self::I64(v) => i32::try_from(v as i64).ok(),
            _ => None,
        }
    }

    /// Returns the raw bits zero-extended to 64 bits.
    ///
    /// # Panics
    ///
    /// Panics on `V128` immediates, which do not fit in 64 bits.
    pub fn unwrap_as_u64(&self) -> u64 {
        match *self {
            Self::I32(v) => u64::from(v),
            Self::I64(v) => v,
            Self::F32(v) => u64::from(v),
            Self::F64(v) => v,
            _ => unreachable!(),
        }
    }

    /// The operand size of the immediate.
    pub fn size(&self) -> OperandSize {
        match *self {
            Self::I32(_) | Self::F32(_) => OperandSize::S32,
            Self::I64(_) | Self::F64(_) => OperandSize::S64,
            Self::V128(_) => OperandSize::S128,
        }
    }

    /// The little-endian byte encoding of the immediate.
    pub fn to_bytes(&self) -> Vec<u8> {
        match *self {
            Self::I32(n) => n.to_le_bytes().to_vec(),
            Self::I64(n) => n.to_le_bytes().to_vec(),
            Self::F32(n) => n.to_le_bytes().to_vec(),
            Self::F64(n) => n.to_le_bytes().to_vec(),
            Self::V128(n) => n.to_le_bytes().to_vec(),
        }
    }
}
/// Where a `VMContext` value for a call lives.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum VMContextLoc {
    /// In the given register.
    Reg(Reg),
    /// In the pinned register.
    Pinned,
    /// At a byte offset from the value held in the pinned register.
    OffsetFromPinned(u32),
}
/// The maximum number of context arguments a call can carry.
pub(crate) const MAX_CONTEXT_ARGS: usize = 2;

/// The context arguments prepended to a function call.
#[derive(Clone, Debug)]
pub(crate) enum ContextArgs {
    /// A single `VMContext` argument.
    VMContext([VMContextLoc; 1]),
    /// Callee and caller `VMContext` arguments, in that order.
    CalleeAndCallerVMContext([VMContextLoc; MAX_CONTEXT_ARGS]),
}

impl ContextArgs {
    /// Callee and caller contexts, both taken from the pinned register.
    pub fn pinned_callee_and_caller_vmctx() -> Self {
        Self::CalleeAndCallerVMContext([VMContextLoc::Pinned; MAX_CONTEXT_ARGS])
    }

    /// A single context taken from the pinned register.
    pub fn pinned_vmctx() -> Self {
        Self::VMContext([VMContextLoc::Pinned])
    }

    /// A single context loaded at `offset` from the pinned register.
    pub fn offset_from_pinned_vmctx(offset: u32) -> Self {
        Self::VMContext([VMContextLoc::OffsetFromPinned(offset)])
    }

    /// Callee context in `callee_vmctx`, caller context pinned.
    pub fn with_callee_and_pinned_caller(callee_vmctx: Reg) -> Self {
        Self::CalleeAndCallerVMContext([VMContextLoc::Reg(callee_vmctx), VMContextLoc::Pinned])
    }

    /// The number of context arguments.
    pub fn len(&self) -> usize {
        self.as_slice().len()
    }

    /// A slice view of the context argument locations.
    pub fn as_slice(&self) -> &[VMContextLoc] {
        match self {
            Self::VMContext(locs) => locs.as_slice(),
            Self::CalleeAndCallerVMContext(locs) => locs.as_slice(),
        }
    }
}
/// How a call target is identified: a register holding the target
/// address, or a direct reference to a named function.
#[derive(Copy, Clone, Debug)]
pub(crate) enum CalleeKind {
    /// A function call to a register.
    Indirect(Reg),
    /// A function call to a Cranelift external name reference.
    Direct(UserExternalNameRef),
}

impl CalleeKind {
    /// Creates an indirect callee from a register.
    pub fn indirect(reg: Reg) -> Self {
        Self::Indirect(reg)
    }

    /// Creates a direct callee from an external name reference.
    pub fn direct(name: UserExternalNameRef) -> Self {
        Self::Direct(name)
    }
}
impl RegImm {
    /// Wraps a register operand.
    pub fn reg(r: Reg) -> Self {
        Self::Reg(r)
    }

    /// An I64 immediate operand.
    pub fn i64(val: i64) -> Self {
        Self::Imm(Imm::i64(val))
    }

    /// An I32 immediate operand.
    pub fn i32(val: i32) -> Self {
        Self::Imm(Imm::i32(val))
    }

    /// An F32 immediate operand from its bit pattern.
    pub fn f32(bits: u32) -> Self {
        Self::Imm(Imm::f32(bits))
    }

    /// An F64 immediate operand from its bit pattern.
    pub fn f64(bits: u64) -> Self {
        Self::Imm(Imm::f64(bits))
    }

    /// A V128 immediate operand from its bit pattern.
    pub fn v128(bits: i128) -> Self {
        Self::Imm(Imm::v128(bits))
    }
}
impl From<Reg> for RegImm {
    /// Wraps a register in a [`RegImm`].
    fn from(r: Reg) -> Self {
        Self::Reg(r)
    }
}
/// Float rounding modes. Variant semantics are presumably the usual
/// IEEE rounding directions (nearest, toward +inf, toward -inf, toward
/// zero) — confirm against the backend lowerings.
#[derive(Debug)]
pub enum RoundingMode {
    Nearest,
    Up,
    Down,
    Zero,
}
/// Memory flags for accesses to runtime-managed data: marked trusted
/// (see Cranelift's `MemFlags::trusted`).
pub const TRUSTED_FLAGS: MemFlags = MemFlags::trusted();

/// Memory flags for wasm-controlled accesses: explicitly little-endian,
/// independent of host endianness.
pub const UNTRUSTED_FLAGS: MemFlags = MemFlags::new().with_endianness(Endianness::Little);
/// Generic, machine-independent assembler interface.
///
/// Each ISA backend implements this trait; code generation drives it to
/// emit machine code. Several methods have default implementations
/// built on top of the required primitives.
pub(crate) trait MacroAssembler {
    /// The addressing mode used by this assembler.
    type Address: Copy + Debug;

    /// The pointer representation of the target ISA.
    type Ptr: PtrSize;

    /// The ABI details of the target.
    type ABI: abi::ABI;

    /// Emit the function prologue: frame setup followed by the stack
    /// check, which reads limits through `vmctx`.
    fn prologue(&mut self, vmctx: Reg) -> Result<()> {
        self.frame_setup()?;
        self.check_stack(vmctx)
    }

    /// Generate the frame setup sequence.
    fn frame_setup(&mut self) -> Result<()>;

    /// Generate the frame restore sequence.
    fn frame_restore(&mut self) -> Result<()>;

    /// Emit a stack check against the limits reachable from `vmctx`.
    fn check_stack(&mut self, vmctx: Reg) -> Result<()>;

    /// Emit the function epilogue (frame restore).
    fn epilogue(&mut self) -> Result<()> {
        self.frame_restore()
    }

    /// Reserve `bytes` of stack space.
    fn reserve_stack(&mut self, bytes: u32) -> Result<()>;

    /// Free `bytes` of stack space.
    fn free_stack(&mut self, bytes: u32) -> Result<()>;

    /// Reset the internal stack-pointer offset to `offset`.
    fn reset_stack_pointer(&mut self, offset: SPOffset) -> Result<()>;

    /// Get the address of a local slot.
    fn local_address(&mut self, local: &LocalSlot) -> Result<Self::Address>;

    /// Construct an address relative to the given SP offset.
    fn address_from_sp(&self, offset: SPOffset) -> Result<Self::Address>;

    /// Construct an address at the given SP offset.
    /// NOTE(review): how this differs from `address_from_sp` is
    /// backend-defined and not visible here.
    fn address_at_sp(&self, offset: SPOffset) -> Result<Self::Address>;

    /// Construct an address at `offset` from the VM context.
    fn address_at_vmctx(&self, offset: u32) -> Result<Self::Address>;

    /// Construct an address at `offset` from the given register.
    fn address_at_reg(&self, reg: Reg, offset: u32) -> Result<Self::Address>;

    /// Emit a function call. `f` produces the callee and its calling
    /// convention; `stack_args_size` is the size of stack arguments.
    fn call(
        &mut self,
        stack_args_size: u32,
        f: impl FnMut(&mut Self) -> Result<(CalleeKind, CallingConvention)>,
    ) -> Result<u32>;

    /// Run `f` with a scratch register of the class selected by `T`.
    fn with_scratch<T: ScratchType, R>(&mut self, f: impl FnOnce(&mut Self, Scratch) -> R) -> R;

    /// Run `f` with a scratch register appropriate for holding a value
    /// of type `ty` (int class for integers and funcrefs, float class
    /// for floats and vectors).
    fn with_scratch_for<R>(
        &mut self,
        ty: WasmValType,
        f: impl FnOnce(&mut Self, Scratch) -> R,
    ) -> R {
        match ty {
            WasmValType::I32
            | WasmValType::I64
            | WasmValType::Ref(WasmRefType {
                heap_type: WasmHeapType::Func,
                ..
            }) => self.with_scratch::<IntScratch, _>(f),
            WasmValType::F32 | WasmValType::F64 | WasmValType::V128 => {
                self.with_scratch::<FloatScratch, _>(f)
            }
            // Other reference heap types are not supported here.
            _ => unimplemented!(),
        }
    }

    /// Get the current stack-pointer offset.
    fn sp_offset(&self) -> Result<SPOffset>;

    /// Perform a stack store.
    fn store(&mut self, src: RegImm, dst: Self::Address, size: OperandSize) -> Result<()>;

    /// Perform a pointer-sized stack store.
    fn store_ptr(&mut self, src: Reg, dst: Self::Address) -> Result<()>;

    /// Perform a wasm store with the given store kind.
    fn wasm_store(&mut self, src: Reg, dst: Self::Address, store_kind: StoreKind) -> Result<()>;

    /// Perform a load of the given width.
    fn load(&mut self, src: Self::Address, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Perform a wasm load with the given load kind.
    fn wasm_load(&mut self, src: Self::Address, dst: WritableReg, kind: LoadKind) -> Result<()>;

    /// Perform a pointer-sized load.
    fn load_ptr(&mut self, src: Self::Address, dst: WritableReg) -> Result<()>;

    /// Compute the effective address of `_src` into `_dst`.
    fn compute_addr(
        &mut self,
        _src: Self::Address,
        _dst: WritableReg,
        _size: OperandSize,
    ) -> Result<()>;

    /// Pop a value of the given width from the stack into `dst`.
    fn pop(&mut self, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Perform a move.
    fn mov(&mut self, dst: WritableReg, src: RegImm, size: OperandSize) -> Result<()>;

    /// Perform a conditional move predicated on `cc`.
    fn cmov(&mut self, dst: WritableReg, src: Reg, cc: IntCmpKind, size: OperandSize)
    -> Result<()>;

    /// Copy `bytes` between two SP-relative regions, in the given
    /// direction. `bytes` must be a multiple of 4; whole machine words
    /// are copied through a scratch register and any 4-byte remainder
    /// (possible on 64-bit targets) is copied with a 32-bit load/store.
    fn memmove(
        &mut self,
        src: SPOffset,
        dst: SPOffset,
        bytes: u32,
        direction: MemMoveDirection,
    ) -> Result<()> {
        // The direction must agree with the relative position of the
        // two regions so that overlapping copies are safe.
        match direction {
            MemMoveDirection::LowToHigh => debug_assert!(dst.as_u32() < src.as_u32()),
            MemMoveDirection::HighToLow => debug_assert!(dst.as_u32() > src.as_u32()),
        }
        debug_assert!(bytes % 4 == 0);
        let mut remaining = bytes;
        let word_bytes = <Self::ABI as abi::ABI>::word_bytes();
        let word_bytes = word_bytes as u32;
        let mut dst_offs;
        let mut src_offs;
        match direction {
            MemMoveDirection::LowToHigh => {
                // Start one region-length below the offsets and
                // pre-increment, so the first word copied is the lowest
                // SP offset.
                dst_offs = dst.as_u32() - bytes;
                src_offs = src.as_u32() - bytes;
                self.with_scratch::<IntScratch, _>(|masm, scratch| {
                    while remaining >= word_bytes {
                        remaining -= word_bytes;
                        dst_offs += word_bytes;
                        src_offs += word_bytes;
                        masm.load_ptr(
                            masm.address_from_sp(SPOffset::from_u32(src_offs))?,
                            scratch.writable(),
                        )?;
                        masm.store_ptr(
                            scratch.inner(),
                            masm.address_from_sp(SPOffset::from_u32(dst_offs))?,
                        )?;
                    }
                    wasmtime_environ::error::Ok(())
                })?;
            }
            MemMoveDirection::HighToLow => {
                // Start at the given offsets and post-decrement.
                src_offs = src.as_u32();
                dst_offs = dst.as_u32();
                self.with_scratch::<IntScratch, _>(|masm, scratch| {
                    while remaining >= word_bytes {
                        masm.load_ptr(
                            masm.address_from_sp(SPOffset::from_u32(src_offs))?,
                            scratch.writable(),
                        )?;
                        masm.store_ptr(
                            scratch.inner(),
                            masm.address_from_sp(SPOffset::from_u32(dst_offs))?,
                        )?;
                        remaining -= word_bytes;
                        src_offs -= word_bytes;
                        dst_offs -= word_bytes;
                    }
                    wasmtime_environ::error::Ok(())
                })?;
            }
        }
        if remaining > 0 {
            // Only a half-word (4 bytes on 64-bit targets) can remain,
            // given the `bytes % 4 == 0` invariant above.
            let half_word = word_bytes / 2;
            let ptr_size = OperandSize::from_bytes(half_word as u8);
            debug_assert!(remaining == half_word);
            if direction == MemMoveDirection::LowToHigh {
                dst_offs += half_word;
                src_offs += half_word;
            }
            self.with_scratch::<IntScratch, _>(|masm, scratch| {
                masm.load(
                    masm.address_from_sp(SPOffset::from_u32(src_offs))?,
                    scratch.writable(),
                    ptr_size,
                )?;
                masm.store(
                    scratch.inner().into(),
                    masm.address_from_sp(SPOffset::from_u32(dst_offs))?,
                    ptr_size,
                )?;
                wasmtime_environ::error::Ok(())
            })?;
        }
        Ok(())
    }

    /// Perform `dst = lhs + rhs`.
    fn add(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Unsigned addition that traps with `trap` on overflow.
    fn checked_uadd(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: RegImm,
        size: OperandSize,
        trap: TrapCode,
    ) -> Result<()>;

    /// Perform `dst = lhs - rhs`.
    fn sub(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Perform `dst = lhs * rhs`.
    fn mul(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Perform a float addition.
    fn float_add(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float subtraction.
    fn float_sub(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float multiplication.
    fn float_mul(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float division.
    fn float_div(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float minimum.
    fn float_min(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float maximum.
    fn float_max(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, size: OperandSize) -> Result<()>;

    /// Perform a float copysign.
    fn float_copysign(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        size: OperandSize,
    ) -> Result<()>;

    /// Perform a float absolute value, in place.
    fn float_abs(&mut self, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Perform a float negation, in place.
    fn float_neg(&mut self, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Perform a float rounding with the given mode, calling `fallback`
    /// when the target has no native instruction for it.
    fn float_round<
        F: FnMut(&mut FuncEnv<Self::Ptr>, &mut CodeGenContext<Emission>, &mut Self) -> Result<()>,
    >(
        &mut self,
        mode: RoundingMode,
        env: &mut FuncEnv<Self::Ptr>,
        context: &mut CodeGenContext<Emission>,
        size: OperandSize,
        fallback: F,
    ) -> Result<()>;

    /// Perform a float square root.
    fn float_sqrt(&mut self, dst: WritableReg, src: Reg, size: OperandSize) -> Result<()>;

    /// Perform a bitwise and.
    fn and(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Perform a bitwise or.
    fn or(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Perform a bitwise xor.
    fn xor(&mut self, dst: WritableReg, lhs: Reg, rhs: RegImm, size: OperandSize) -> Result<()>;

    /// Perform a shift/rotate with an immediate shift amount.
    fn shift_ir(
        &mut self,
        dst: WritableReg,
        imm: Imm,
        lhs: Reg,
        kind: ShiftKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Perform a shift/rotate with operands taken from the codegen
    /// context's value stack.
    fn shift(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        kind: ShiftKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Perform a division with operands from the codegen context.
    fn div(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        kind: DivKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Perform a remainder with operands from the codegen context.
    fn rem(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        kind: RemKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Compare `src1` against `src2`, setting machine flags.
    fn cmp(&mut self, src1: Reg, src2: RegImm, size: OperandSize) -> Result<()>;

    /// Compare and set `dst` to the boolean result of the condition.
    fn cmp_with_set(
        &mut self,
        dst: WritableReg,
        src: RegImm,
        kind: IntCmpKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Float compare, setting `dst` to the boolean result.
    fn float_cmp_with_set(
        &mut self,
        dst: WritableReg,
        src1: Reg,
        src2: Reg,
        kind: FloatCmpKind,
        size: OperandSize,
    ) -> Result<()>;

    /// Count leading zeroes.
    fn clz(&mut self, dst: WritableReg, src: Reg, size: OperandSize) -> Result<()>;

    /// Count trailing zeroes.
    fn ctz(&mut self, dst: WritableReg, src: Reg, size: OperandSize) -> Result<()>;

    /// Push the register to the stack, returning the allocated slot.
    fn push(&mut self, src: Reg, size: OperandSize) -> Result<StackSlot>;

    /// Finalize the assembly, producing the machine code buffer.
    fn finalize(self, base: Option<SourceLoc>) -> Result<MachBufferFinalized<Final>>;

    /// Zero the given register.
    fn zero(&mut self, reg: WritableReg) -> Result<()>;

    /// Population count, with operands from the codegen context.
    fn popcnt(&mut self, context: &mut CodeGenContext<Emission>, size: OperandSize) -> Result<()>;

    /// Wrap a 64-bit value into a 32-bit one.
    fn wrap(&mut self, dst: WritableReg, src: Reg) -> Result<()>;

    /// Perform a sign/zero extension.
    fn extend(&mut self, dst: WritableReg, src: Reg, kind: ExtendKind) -> Result<()>;

    /// Signed float-to-int truncation.
    fn signed_truncate(
        &mut self,
        dst: WritableReg,
        src: Reg,
        src_size: OperandSize,
        dst_size: OperandSize,
        kind: TruncKind,
    ) -> Result<()>;

    /// Unsigned float-to-int truncation, with operands from the codegen
    /// context.
    fn unsigned_truncate(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        src_size: OperandSize,
        dst_size: OperandSize,
        kind: TruncKind,
    ) -> Result<()>;

    /// Signed int-to-float conversion.
    fn signed_convert(
        &mut self,
        dst: WritableReg,
        src: Reg,
        src_size: OperandSize,
        dst_size: OperandSize,
    ) -> Result<()>;

    /// Unsigned int-to-float conversion; `tmp_gpr` is a caller-provided
    /// integer temporary.
    fn unsigned_convert(
        &mut self,
        dst: WritableReg,
        src: Reg,
        tmp_gpr: Reg,
        src_size: OperandSize,
        dst_size: OperandSize,
    ) -> Result<()>;

    /// Reinterpret float bits as an integer.
    fn reinterpret_float_as_int(
        &mut self,
        dst: WritableReg,
        src: Reg,
        size: OperandSize,
    ) -> Result<()>;

    /// Reinterpret integer bits as a float.
    fn reinterpret_int_as_float(
        &mut self,
        dst: WritableReg,
        src: Reg,
        size: OperandSize,
    ) -> Result<()>;

    /// Demote an f64 to an f32.
    fn demote(&mut self, dst: WritableReg, src: Reg) -> Result<()>;

    /// Promote an f32 to an f64.
    fn promote(&mut self, dst: WritableReg, src: Reg) -> Result<()>;

    /// Zero the local-slot memory range `[mem.start, mem.end)`.
    ///
    /// `mem.start` must be at least 4-byte aligned; an unaligned (with
    /// respect to the machine word) leading 4 bytes are zeroed with a
    /// 32-bit store, after which whole words are zeroed — directly for
    /// a single slot, or through a zeroed scratch register for several.
    fn zero_mem_range(&mut self, mem: &Range<u32>) -> Result<()> {
        let word_size = <Self::ABI as abi::ABI>::word_bytes() as u32;
        if mem.is_empty() {
            return Ok(());
        }
        let start = if mem.start % word_size == 0 {
            mem.start
        } else {
            // Zero the 4-byte head so the remaining range is
            // word-aligned.
            assert!(mem.start % 4 == 0);
            let start = align_to(mem.start, word_size);
            let addr: Self::Address = self.local_address(&LocalSlot::i32(start))?;
            self.store(RegImm::i32(0), addr, OperandSize::S32)?;
            assert!(start % word_size == 0);
            start
        };
        let end = align_to(mem.end, word_size);
        let slots = (end - start) / word_size;
        if slots == 1 {
            let slot = LocalSlot::i64(start + word_size);
            let addr: Self::Address = self.local_address(&slot)?;
            self.store(RegImm::i64(0), addr, OperandSize::S64)?;
        } else {
            // Several slots: materialize zero once in a scratch register
            // and store it repeatedly.
            self.with_scratch::<IntScratch, _>(|masm, scratch| {
                masm.zero(scratch.writable())?;
                let zero = RegImm::reg(scratch.inner());
                for step in (start..end).step_by(word_size as usize) {
                    let slot = LocalSlot::i64(step + word_size);
                    let addr: Self::Address = masm.local_address(&slot)?;
                    masm.store(zero, addr, OperandSize::S64)?;
                }
                wasmtime_environ::error::Ok(())
            })?;
        }
        Ok(())
    }

    /// Generate a new label.
    fn get_label(&mut self) -> Result<MachLabel>;

    /// Bind the given label at the current code offset.
    fn bind(&mut self, label: MachLabel) -> Result<()>;

    /// Conditional branch to `taken` when the comparison holds.
    fn branch(
        &mut self,
        kind: IntCmpKind,
        lhs: Reg,
        rhs: RegImm,
        taken: MachLabel,
        size: OperandSize,
    ) -> Result<()>;

    /// Unconditional jump to `target`.
    fn jmp(&mut self, target: MachLabel) -> Result<()>;

    /// Emit a jump-table dispatch on `index` over `targets`, using `tmp`
    /// as a temporary.
    fn jmp_table(&mut self, targets: &[MachLabel], index: Reg, tmp: Reg) -> Result<()>;

    /// Emit an unreachable-code trap.
    fn unreachable(&mut self) -> Result<()>;

    /// Emit an unconditional trap with the given code.
    fn trap(&mut self, code: TrapCode) -> Result<()>;

    /// Trap when condition `cc` (from previously set flags) holds.
    fn trapif(&mut self, cc: IntCmpKind, code: TrapCode) -> Result<()>;

    /// Trap when `src` is zero.
    fn trapz(&mut self, src: Reg, code: TrapCode) -> Result<()>;

    /// Ensure the SP offset matches `target` before a jump, freeing any
    /// excess stack. A current offset at or below the target needs no
    /// adjustment (the subtraction saturates to zero).
    fn ensure_sp_for_jump(&mut self, target: SPOffset) -> Result<()> {
        let bytes = self
            .sp_offset()?
            .as_u32()
            .checked_sub(target.as_u32())
            .unwrap_or(0);
        if bytes > 0 {
            self.free_stack(bytes)?;
        }
        Ok(())
    }

    /// Mark the start of a source location, returning the code offset
    /// and the relative source location.
    fn start_source_loc(&mut self, loc: RelSourceLoc) -> Result<(CodeOffset, RelSourceLoc)>;

    /// Mark the end of the current source location.
    fn end_source_loc(&mut self) -> Result<()>;

    /// The current code offset.
    fn current_code_offset(&self) -> Result<CodeOffset>;

    /// 128-bit addition over lo/hi register pairs.
    fn add128(
        &mut self,
        dst_lo: WritableReg,
        dst_hi: WritableReg,
        lhs_lo: Reg,
        lhs_hi: Reg,
        rhs_lo: Reg,
        rhs_hi: Reg,
    ) -> Result<()>;

    /// 128-bit subtraction over lo/hi register pairs.
    fn sub128(
        &mut self,
        dst_lo: WritableReg,
        dst_hi: WritableReg,
        lhs_lo: Reg,
        lhs_hi: Reg,
        rhs_lo: Reg,
        rhs_hi: Reg,
    ) -> Result<()>;

    /// Widening multiplication, with operands from the codegen context.
    fn mul_wide(&mut self, context: &mut CodeGenContext<Emission>, kind: MulWideKind)
    -> Result<()>;

    /// Vector splat, with operands from the codegen context.
    fn splat(&mut self, context: &mut CodeGenContext<Emission>, size: SplatKind) -> Result<()>;

    /// Vector shuffle of `lhs`/`rhs` by the 16 lane indices in `lanes`.
    fn shuffle(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg, lanes: [u8; 16]) -> Result<()>;

    /// Vector swizzle.
    fn swizzle(&mut self, dst: WritableReg, lhs: Reg, rhs: Reg) -> Result<()>;

    /// Atomic read-modify-write, with operands from the codegen context.
    fn atomic_rmw(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        addr: Self::Address,
        size: OperandSize,
        op: RmwOp,
        flags: MemFlags,
        extend: Option<Extend<Zero>>,
    ) -> Result<()>;

    /// Extract lane `lane` from the vector in `src`.
    fn extract_lane(
        &mut self,
        src: Reg,
        dst: WritableReg,
        lane: u8,
        kind: ExtractLaneKind,
    ) -> Result<()>;

    /// Replace lane `lane` of the vector in `dst` with `src`.
    fn replace_lane(
        &mut self,
        src: RegImm,
        dst: WritableReg,
        lane: u8,
        kind: ReplaceLaneKind,
    ) -> Result<()>;

    /// Atomic compare-and-swap, with operands from the codegen context.
    fn atomic_cas(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        addr: Self::Address,
        size: OperandSize,
        flags: MemFlags,
        extend: Option<Extend<Zero>>,
    ) -> Result<()>;

    /// Vector lane-wise equality.
    fn v128_eq(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorEqualityKind,
    ) -> Result<()>;

    /// Vector lane-wise inequality.
    fn v128_ne(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorEqualityKind,
    ) -> Result<()>;

    /// Vector lane-wise less-than.
    fn v128_lt(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorCompareKind,
    ) -> Result<()>;

    /// Vector lane-wise less-than-or-equal.
    fn v128_le(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorCompareKind,
    ) -> Result<()>;

    /// Vector lane-wise greater-than.
    fn v128_gt(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorCompareKind,
    ) -> Result<()>;

    /// Vector lane-wise greater-than-or-equal.
    fn v128_ge(
        &mut self,
        dst: WritableReg,
        lhs: Reg,
        rhs: Reg,
        kind: VectorCompareKind,
    ) -> Result<()>;

    /// Emit a memory fence.
    fn fence(&mut self) -> Result<()>;

    /// Vector bitwise not, in place.
    fn v128_not(&mut self, dst: WritableReg) -> Result<()>;

    /// Vector bitwise and.
    fn v128_and(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()>;

    /// Vector bitwise and-not.
    fn v128_and_not(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()>;

    /// Vector bitwise or.
    fn v128_or(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()>;

    /// Vector bitwise xor.
    fn v128_xor(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()>;

    /// Vector bitselect: pick bits from `src1`/`src2` per `mask`.
    fn v128_bitselect(&mut self, src1: Reg, src2: Reg, mask: Reg, dst: WritableReg) -> Result<()>;

    /// Set `dst` when any bit of the vector is set.
    fn v128_any_true(&mut self, src: Reg, dst: WritableReg) -> Result<()>;

    /// Vector int-to-float conversion.
    fn v128_convert(&mut self, src: Reg, dst: WritableReg, kind: V128ConvertKind) -> Result<()>;

    /// Vector narrowing of two source vectors into one.
    fn v128_narrow(
        &mut self,
        src1: Reg,
        src2: Reg,
        dst: WritableReg,
        kind: V128NarrowKind,
    ) -> Result<()>;

    /// Vector f64x2 -> f32x4 demotion.
    fn v128_demote(&mut self, src: Reg, dst: WritableReg) -> Result<()>;

    /// Vector f32x4 -> f64x2 promotion.
    fn v128_promote(&mut self, src: Reg, dst: WritableReg) -> Result<()>;

    /// Vector lane extension.
    fn v128_extend(&mut self, src: Reg, dst: WritableReg, kind: V128ExtendKind) -> Result<()>;

    /// Vector addition.
    fn v128_add(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, kind: V128AddKind) -> Result<()>;

    /// Vector subtraction.
    fn v128_sub(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, kind: V128SubKind) -> Result<()>;

    /// Vector multiplication, with operands from the codegen context.
    fn v128_mul(&mut self, context: &mut CodeGenContext<Emission>, kind: V128MulKind)
    -> Result<()>;

    /// Vector absolute value.
    fn v128_abs(&mut self, src: Reg, dst: WritableReg, kind: V128AbsKind) -> Result<()>;

    /// Vector negation, in place.
    fn v128_neg(&mut self, op: WritableReg, kind: V128NegKind) -> Result<()>;

    /// Vector shift, with operands from the codegen context.
    fn v128_shift(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        lane_width: OperandSize,
        kind: ShiftKind,
    ) -> Result<()>;

    /// Vector saturating rounding Q15 multiplication.
    fn v128_q15mulr_sat_s(
        &mut self,
        lhs: Reg,
        rhs: Reg,
        dst: WritableReg,
        size: OperandSize,
    ) -> Result<()>;

    /// Set `dst` when every lane of the vector is non-zero.
    fn v128_all_true(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Extract the high bit of each lane into `dst`.
    fn v128_bitmask(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector truncation, with operands from the codegen context.
    fn v128_trunc(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        kind: V128TruncKind,
    ) -> Result<()>;

    /// Vector lane-wise minimum.
    fn v128_min(&mut self, src1: Reg, src2: Reg, dst: WritableReg, kind: V128MinKind)
    -> Result<()>;

    /// Vector lane-wise maximum.
    fn v128_max(&mut self, src1: Reg, src2: Reg, dst: WritableReg, kind: V128MaxKind)
    -> Result<()>;

    /// Vector extended multiplication, with operands from the context.
    fn v128_extmul(
        &mut self,
        context: &mut CodeGenContext<Emission>,
        kind: V128ExtMulKind,
    ) -> Result<()>;

    /// Vector extended pairwise addition.
    fn v128_extadd_pairwise(
        &mut self,
        src: Reg,
        dst: WritableReg,
        kind: V128ExtAddKind,
    ) -> Result<()>;

    /// Vector dot product.
    fn v128_dot(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg) -> Result<()>;

    /// Vector population count, with operands from the context.
    fn v128_popcnt(&mut self, context: &mut CodeGenContext<Emission>) -> Result<()>;

    /// Vector lane-wise rounding average.
    fn v128_avgr(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector lane-wise division.
    fn v128_div(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector lane-wise square root.
    fn v128_sqrt(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector lane-wise ceiling.
    fn v128_ceil(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector lane-wise floor.
    fn v128_floor(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector lane-wise round-to-nearest.
    fn v128_nearest(&mut self, src: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector pseudo-minimum.
    fn v128_pmin(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;

    /// Vector pseudo-maximum.
    fn v128_pmax(&mut self, lhs: Reg, rhs: Reg, dst: WritableReg, size: OperandSize) -> Result<()>;
}