#![allow(dead_code)]
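//! This module defines aarch64-specific machine instruction types.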
use crate::binemit::CodeOffset;
use crate::ir::types::{
B1, B16, B16X8, B32, B32X4, B64, B64X2, B8, B8X16, F32, F32X4, F64, F64X2, FFLAGS, I16, I16X8,
I32, I32X4, I64, I64X2, I8, I8X16, IFLAGS, R32, R64,
};
use crate::ir::{ExternalName, Opcode, SourceLoc, TrapCode, Type};
use crate::machinst::*;
use crate::{settings, CodegenError, CodegenResult};
use regalloc::{RealRegUniverse, Reg, RegClass, SpillSlot, VirtualReg, Writable};
use regalloc::{RegUsageCollector, RegUsageMapper};
use alloc::boxed::Box;
use alloc::vec::Vec;
use smallvec::{smallvec, SmallVec};
use std::string::{String, ToString};
pub mod regs;
pub use self::regs::*;
pub mod imms;
pub use self::imms::*;
pub mod args;
pub use self::args::*;
pub mod emit;
pub use self::emit::*;
#[cfg(test)]
mod emit_tests;
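/// An ALU operation. This can be paired with several instruction formats
/// below (see `Inst`) in any combination.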
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ALUOp {
Add32,
Add64,
Sub32,
Sub64,
Orr32,
Orr64,
OrrNot32,
OrrNot64,
And32,
And64,
AndNot32,
AndNot64,
Eor32,
Eor64,
EorNot32,
EorNot64,
AddS32,
AddS64,
SubS32,
SubS64,
SubS64XR,
MAdd32,
MAdd64,
MSub32,
MSub64,
SMulH,
UMulH,
SDiv64,
UDiv64,
RotR32,
RotR64,
Lsr32,
Lsr64,
Asr32,
Asr64,
Lsl32,
Lsl64,
}
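/// A floating-point unit (FPU) operation with one arg.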
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp1 {
Abs32,
Abs64,
Neg32,
Neg64,
Sqrt32,
Sqrt64,
Cvt32To64,
Cvt64To32,
}
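/// A floating-point unit (FPU) operation with two args.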
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp2 {
Add32,
Add64,
Sub32,
Sub64,
Mul32,
Mul64,
Div32,
Div64,
Max32,
Max64,
Min32,
Min64,
Sqadd64,
Uqadd64,
Sqsub64,
Uqsub64,
}
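/// A floating-point unit (FPU) operation with one arg and a shift immediate.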
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRI {
UShr32(FPURightShiftImm),
UShr64(FPURightShiftImm),
Sli32(FPULeftShiftImm),
Sli64(FPULeftShiftImm),
}
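/// A floating-point unit (FPU) operation with three args.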
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp3 {
MAdd32,
MAdd64,
}
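/// A conversion from an FP to an integer value.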
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FpuToIntOp {
F32ToU32,
F32ToI32,
F32ToU64,
F32ToI64,
F64ToU32,
F64ToI32,
F64ToU64,
F64ToI64,
}
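/// A conversion from an integer to an FP value.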
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum IntToFpuOp {
U32ToF32,
I32ToF32,
U32ToF64,
I32ToF64,
U64ToF32,
I64ToF32,
U64ToF64,
I64ToF64,
}
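/// Modes for FP rounding ops: round down (floor), round up (ceil), round toward
/// zero (trunc), or round to nearest, for 32- or 64-bit FP values.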
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FpuRoundMode {
Minus32,
Minus64,
Plus32,
Plus64,
Zero32,
Zero64,
Nearest32,
Nearest64,
}
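/// Type of vector element extensions.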
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum VecExtendOp {
Sxtl8,
Sxtl16,
Sxtl32,
Uxtl8,
Uxtl16,
Uxtl32,
}
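/// A vector ALU operation.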
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum VecALUOp {
Sqadd,
Uqadd,
Sqsub,
Uqsub,
Cmeq,
Cmge,
Cmgt,
Cmhs,
Cmhi,
Fcmeq,
Fcmgt,
Fcmge,
And,
Bic,
Orr,
Eor,
Bsl,
Umaxp,
Add,
Sub,
Mul,
Sshl,
Ushl,
}
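/// A vector miscellaneous operation with two registers.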
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum VecMisc2 {
Not,
Neg,
}
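/// An operation across the lanes of a vector.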
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum VecLanesOp {
Uminv,
}
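/// An operation on the bits of a register. This can be paired with several
/// instruction formats below (see `Inst`) in any combination.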
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum BitOp {
RBit32,
RBit64,
Clz32,
Clz64,
Cls32,
Cls64,
}
impl BitOp {
pub fn operand_size(&self) -> OperandSize {
match self {
BitOp::RBit32 | BitOp::Clz32 | BitOp::Cls32 => OperandSize::Size32,
_ => OperandSize::Size64,
}
}
pub fn op_str(&self) -> &'static str {
match self {
BitOp::RBit32 | BitOp::RBit64 => "rbit",
BitOp::Clz32 | BitOp::Clz64 => "clz",
BitOp::Cls32 | BitOp::Cls64 => "cls",
}
}
}
impl From<(Opcode, Type)> for BitOp {
fn from(op_ty: (Opcode, Type)) -> BitOp {
match op_ty {
(Opcode::Bitrev, I32) => BitOp::RBit32,
(Opcode::Bitrev, I64) => BitOp::RBit64,
(Opcode::Clz, I32) => BitOp::Clz32,
(Opcode::Clz, I64) => BitOp::Clz64,
(Opcode::Cls, I32) => BitOp::Cls32,
(Opcode::Cls, I64) => BitOp::Cls64,
_ => unreachable!("Called with non-bit op!: {:?}", op_ty),
}
}
}
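/// Additional information for (direct) `Call` instructions, kept out of line to
/// reduce the size of the `Inst` enum.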
#[derive(Clone, Debug)]
pub struct CallInfo {
pub dest: ExternalName,
pub uses: Vec<Reg>,
pub defs: Vec<Writable<Reg>>,
pub loc: SourceLoc,
pub opcode: Opcode,
}
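/// Additional information for `CallInd` instructions, kept out of line to reduce
/// the size of the `Inst` enum.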
#[derive(Clone, Debug)]
pub struct CallIndInfo {
pub rn: Reg,
pub uses: Vec<Reg>,
pub defs: Vec<Writable<Reg>>,
pub loc: SourceLoc,
pub opcode: Opcode,
}
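/// Additional information for `JTSequence` instructions, kept out of line to
/// reduce the size of the `Inst` enum.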
#[derive(Clone, Debug)]
pub struct JTSequenceInfo {
pub targets: Vec<BranchTarget>,
pub default_target: BranchTarget,
pub targets_for_term: Vec<MachLabel>,
}
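/// Instruction formats.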
#[derive(Clone, Debug)]
pub enum Inst {
Nop0,
Nop4,
AluRRR {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
},
AluRRRR {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
ra: Reg,
},
AluRRImm12 {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
imm12: Imm12,
},
AluRRImmLogic {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
imml: ImmLogic,
},
AluRRImmShift {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
immshift: ImmShift,
},
AluRRRShift {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
shiftop: ShiftOpAndAmt,
},
AluRRRExtend {
alu_op: ALUOp,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
extendop: ExtendOp,
},
BitRR {
op: BitOp,
rd: Writable<Reg>,
rn: Reg,
},
ULoad8 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
SLoad8 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
ULoad16 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
SLoad16 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
ULoad32 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
SLoad32 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
ULoad64 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
Store8 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
Store16 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
Store32 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
Store64 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
StoreP64 {
rt: Reg,
rt2: Reg,
mem: PairMemArg,
},
LoadP64 {
rt: Writable<Reg>,
rt2: Writable<Reg>,
mem: PairMemArg,
},
Mov {
rd: Writable<Reg>,
rm: Reg,
},
Mov32 {
rd: Writable<Reg>,
rm: Reg,
},
MovZ {
rd: Writable<Reg>,
imm: MoveWideConst,
},
MovN {
rd: Writable<Reg>,
imm: MoveWideConst,
},
MovK {
rd: Writable<Reg>,
imm: MoveWideConst,
},
Extend {
rd: Writable<Reg>,
rn: Reg,
signed: bool,
from_bits: u8,
to_bits: u8,
},
CSel {
rd: Writable<Reg>,
cond: Cond,
rn: Reg,
rm: Reg,
},
CSet {
rd: Writable<Reg>,
cond: Cond,
},
CCmpImm {
size: OperandSize,
rn: Reg,
imm: UImm5,
nzcv: NZCV,
cond: Cond,
},
FpuMove64 {
rd: Writable<Reg>,
rn: Reg,
},
FpuMove128 {
rd: Writable<Reg>,
rn: Reg,
},
FpuMoveFromVec {
rd: Writable<Reg>,
rn: Reg,
idx: u8,
size: VectorSize,
},
FpuRR {
fpu_op: FPUOp1,
rd: Writable<Reg>,
rn: Reg,
},
FpuRRR {
fpu_op: FPUOp2,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
},
FpuRRI {
fpu_op: FPUOpRI,
rd: Writable<Reg>,
rn: Reg,
},
FpuRRRR {
fpu_op: FPUOp3,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
ra: Reg,
},
FpuCmp32 {
rn: Reg,
rm: Reg,
},
FpuCmp64 {
rn: Reg,
rm: Reg,
},
FpuLoad32 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
FpuStore32 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
FpuLoad64 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
FpuStore64 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
FpuLoad128 {
rd: Writable<Reg>,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
FpuStore128 {
rd: Reg,
mem: MemArg,
srcloc: Option<SourceLoc>,
},
LoadFpuConst32 {
rd: Writable<Reg>,
const_data: f32,
},
LoadFpuConst64 {
rd: Writable<Reg>,
const_data: f64,
},
LoadFpuConst128 {
rd: Writable<Reg>,
const_data: u128,
},
FpuToInt {
op: FpuToIntOp,
rd: Writable<Reg>,
rn: Reg,
},
IntToFpu {
op: IntToFpuOp,
rd: Writable<Reg>,
rn: Reg,
},
FpuCSel32 {
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
cond: Cond,
},
FpuCSel64 {
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
cond: Cond,
},
FpuRound {
op: FpuRoundMode,
rd: Writable<Reg>,
rn: Reg,
},
MovToVec64 {
rd: Writable<Reg>,
rn: Reg,
},
MovFromVec {
rd: Writable<Reg>,
rn: Reg,
idx: u8,
size: VectorSize,
},
VecDup {
rd: Writable<Reg>,
rn: Reg,
size: VectorSize,
},
VecDupFromFpu {
rd: Writable<Reg>,
rn: Reg,
size: VectorSize,
},
VecExtend {
t: VecExtendOp,
rd: Writable<Reg>,
rn: Reg,
},
VecRRR {
alu_op: VecALUOp,
rd: Writable<Reg>,
rn: Reg,
rm: Reg,
size: VectorSize,
},
VecMisc {
op: VecMisc2,
rd: Writable<Reg>,
rn: Reg,
size: VectorSize,
},
VecLanes {
op: VecLanesOp,
rd: Writable<Reg>,
rn: Reg,
size: VectorSize,
},
MovToNZCV {
rn: Reg,
},
MovFromNZCV {
rd: Writable<Reg>,
},
CondSet {
rd: Writable<Reg>,
cond: Cond,
},
Call {
info: Box<CallInfo>,
},
CallInd {
info: Box<CallIndInfo>,
},
Ret,
EpiloguePlaceholder,
Jump {
dest: BranchTarget,
},
CondBr {
taken: BranchTarget,
not_taken: BranchTarget,
kind: CondBrKind,
},
TrapIf {
kind: CondBrKind,
trap_info: (SourceLoc, TrapCode),
},
IndirectBr {
rn: Reg,
targets: Vec<MachLabel>,
},
Brk,
Udf {
trap_info: (SourceLoc, TrapCode),
},
Adr {
rd: Writable<Reg>,
off: i32,
},
Word4 {
data: u32,
},
Word8 {
data: u64,
},
JTSequence {
info: Box<JTSequenceInfo>,
ridx: Reg,
rtmp1: Writable<Reg>,
rtmp2: Writable<Reg>,
},
LoadConst64 {
rd: Writable<Reg>,
const_data: u64,
},
LoadExtName {
rd: Writable<Reg>,
name: Box<ExternalName>,
srcloc: SourceLoc,
offset: i64,
},
LoadAddr {
rd: Writable<Reg>,
mem: MemArg,
},
VirtualSPOffsetAdj {
offset: i64,
},
EmitIsland {
needed_space: CodeOffset,
},
}
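/// Count the number of zero half-words (16-bit chunks) in `value`; used to pick the
/// cheapest MOVZ/MOVN-based sequence in `Inst::load_constant` below.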
fn count_zero_half_words(mut value: u64) -> usize {
let mut count = 0;
for _ in 0..4 {
if value & 0xffff == 0 {
count += 1;
}
value >>= 16;
}
count
}
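// This guards against unintentional growth of the `Inst` enum: variants with large
// payloads should be boxed (see `CallInfo` etc. above) to keep the common case small.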
#[test]
fn inst_size_test() {
assert_eq!(32, std::mem::size_of::<Inst>());
}
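// A hedged sketch of the four lowering paths taken by `Inst::load_constant` below:
// MOVZ, MOVN, ORR-immediate, and a MOVZ/MOVK sequence. It assumes `writable_xreg`
// from the `regs` module re-exported above.
#[test]
fn load_constant_shape_test() {
let rd = writable_xreg(8);
// Fits in a single MOVZ (one 16-bit chunk, rest zero).
assert_eq!(1, Inst::load_constant(rd, 0x1234).len());
// Fits in a single MOVN (the inverted value is one 16-bit chunk).
assert_eq!(1, Inst::load_constant(rd, 0xffff_ffff_ffff_ffff).len());
// Encodable as a logical immediate (ORR from the zero register).
assert_eq!(1, Inst::load_constant(rd, 0x5555_5555_5555_5555).len());
// General case: one MOVZ plus a MOVK per remaining non-ignored half-word.
assert_eq!(4, Inst::load_constant(rd, 0x0001_0002_0003_0004).len());
}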
impl Inst {
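/// Create a register-to-register move, choosing the encoding by register class
/// (integer, FPU, or full 128-bit vector).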
pub fn mov(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
assert!(to_reg.to_reg().get_class() == from_reg.get_class());
if from_reg.get_class() == RegClass::I64 {
Inst::Mov {
rd: to_reg,
rm: from_reg,
}
} else if from_reg.get_class() == RegClass::V128 {
Inst::FpuMove128 {
rd: to_reg,
rn: from_reg,
}
} else {
Inst::FpuMove64 {
rd: to_reg,
rn: from_reg,
}
}
}
pub fn mov32(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
Inst::Mov32 {
rd: to_reg,
rm: from_reg,
}
}
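/// Create instructions that load a 64-bit constant, preferring (in order) a single
/// MOVZ, a single MOVN, a logical-immediate ORR from the zero register, and finally
/// a MOVZ/MOVN followed by MOVKs.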
pub fn load_constant(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
if let Some(imm) = MoveWideConst::maybe_from_u64(value) {
smallvec![Inst::MovZ { rd, imm }]
} else if let Some(imm) = MoveWideConst::maybe_from_u64(!value) {
smallvec![Inst::MovN { rd, imm }]
} else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) {
smallvec![Inst::AluRRImmLogic {
alu_op: ALUOp::Orr64,
rd,
rn: zero_reg(),
imml,
}]
} else {
let mut insts = smallvec![];
let first_is_inverted = count_zero_half_words(!value) > count_zero_half_words(value);
let ignored_halfword = if first_is_inverted { 0xffff } else { 0 };
let mut first_mov_emitted = false;
for i in 0..4 {
let imm16 = (value >> (16 * i)) & 0xffff;
if imm16 != ignored_halfword {
if !first_mov_emitted {
first_mov_emitted = true;
if first_is_inverted {
let imm =
MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, i * 16)
.unwrap();
insts.push(Inst::MovN { rd, imm });
} else {
let imm =
MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
insts.push(Inst::MovZ { rd, imm });
}
} else {
let imm = MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
insts.push(Inst::MovK { rd, imm });
}
}
}
assert!(first_mov_emitted);
insts
}
}
pub fn load_fp_constant32(rd: Writable<Reg>, value: f32) -> Inst {
Inst::LoadFpuConst32 {
rd,
const_data: value,
}
}
pub fn load_fp_constant64(rd: Writable<Reg>, value: f64) -> Inst {
Inst::LoadFpuConst64 {
rd,
const_data: value,
}
}
pub fn load_fp_constant128(rd: Writable<Reg>, value: u128) -> Inst {
Inst::LoadFpuConst128 {
rd,
const_data: value,
}
}
}
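/// Collect the registers that a memory-addressing mode reads (and, for pre/post-indexed
/// forms, modifies) into the given `RegUsageCollector`.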
fn memarg_regs(memarg: &MemArg, collector: &mut RegUsageCollector) {
match memarg {
&MemArg::Unscaled(reg, ..) | &MemArg::UnsignedOffset(reg, ..) => {
collector.add_use(reg);
}
&MemArg::RegReg(r1, r2, ..)
| &MemArg::RegScaled(r1, r2, ..)
| &MemArg::RegScaledExtended(r1, r2, ..)
| &MemArg::RegExtended(r1, r2, ..) => {
collector.add_use(r1);
collector.add_use(r2);
}
&MemArg::Label(..) => {}
&MemArg::PreIndexed(reg, ..) | &MemArg::PostIndexed(reg, ..) => {
collector.add_mod(reg);
}
&MemArg::FPOffset(..) => {
collector.add_use(fp_reg());
}
&MemArg::SPOffset(..) | &MemArg::NominalSPOffset(..) => {
collector.add_use(stack_reg());
}
&MemArg::RegOffset(r, ..) => {
collector.add_use(r);
}
}
}
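/// Likewise, collect the register uses of a pair-addressing mode.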
fn pairmemarg_regs(pairmemarg: &PairMemArg, collector: &mut RegUsageCollector) {
match pairmemarg {
&PairMemArg::SignedOffset(reg, ..) => {
collector.add_use(reg);
}
&PairMemArg::PreIndexed(reg, ..) | &PairMemArg::PostIndexed(reg, ..) => {
collector.add_mod(reg);
}
}
}
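/// Collect all register uses, defs, and mods for an instruction; this drives register
/// allocation.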
fn aarch64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
match inst {
&Inst::AluRRR { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::AluRRRR { rd, rn, rm, ra, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
collector.add_use(ra);
}
&Inst::AluRRImm12 { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::AluRRImmLogic { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::AluRRImmShift { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::AluRRRShift { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::AluRRRExtend { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::BitRR { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::ULoad8 { rd, ref mem, .. }
| &Inst::SLoad8 { rd, ref mem, .. }
| &Inst::ULoad16 { rd, ref mem, .. }
| &Inst::SLoad16 { rd, ref mem, .. }
| &Inst::ULoad32 { rd, ref mem, .. }
| &Inst::SLoad32 { rd, ref mem, .. }
| &Inst::ULoad64 { rd, ref mem, .. } => {
collector.add_def(rd);
memarg_regs(mem, collector);
}
&Inst::Store8 { rd, ref mem, .. }
| &Inst::Store16 { rd, ref mem, .. }
| &Inst::Store32 { rd, ref mem, .. }
| &Inst::Store64 { rd, ref mem, .. } => {
collector.add_use(rd);
memarg_regs(mem, collector);
}
&Inst::StoreP64 {
rt, rt2, ref mem, ..
} => {
collector.add_use(rt);
collector.add_use(rt2);
pairmemarg_regs(mem, collector);
}
&Inst::LoadP64 {
rt, rt2, ref mem, ..
} => {
collector.add_def(rt);
collector.add_def(rt2);
pairmemarg_regs(mem, collector);
}
&Inst::Mov { rd, rm } => {
collector.add_def(rd);
collector.add_use(rm);
}
&Inst::Mov32 { rd, rm } => {
collector.add_def(rd);
collector.add_use(rm);
}
&Inst::MovZ { rd, .. } | &Inst::MovN { rd, .. } => {
collector.add_def(rd);
}
&Inst::MovK { rd, .. } => {
collector.add_mod(rd);
}
&Inst::CSel { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::CSet { rd, .. } => {
collector.add_def(rd);
}
&Inst::CCmpImm { rn, .. } => {
collector.add_use(rn);
}
&Inst::FpuMove64 { rd, rn } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuMove128 { rd, rn } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuMoveFromVec { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuRR { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuRRR { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::FpuRRI { fpu_op, rd, rn, .. } => {
match fpu_op {
FPUOpRI::UShr32(..) | FPUOpRI::UShr64(..) => collector.add_def(rd),
FPUOpRI::Sli32(..) | FPUOpRI::Sli64(..) => collector.add_mod(rd),
}
collector.add_use(rn);
}
&Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
collector.add_use(ra);
}
&Inst::VecMisc { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::VecLanes { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuCmp32 { rn, rm } | &Inst::FpuCmp64 { rn, rm } => {
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::FpuLoad32 { rd, ref mem, .. } => {
collector.add_def(rd);
memarg_regs(mem, collector);
}
&Inst::FpuLoad64 { rd, ref mem, .. } => {
collector.add_def(rd);
memarg_regs(mem, collector);
}
&Inst::FpuLoad128 { rd, ref mem, .. } => {
collector.add_def(rd);
memarg_regs(mem, collector);
}
&Inst::FpuStore32 { rd, ref mem, .. } => {
collector.add_use(rd);
memarg_regs(mem, collector);
}
&Inst::FpuStore64 { rd, ref mem, .. } => {
collector.add_use(rd);
memarg_regs(mem, collector);
}
&Inst::FpuStore128 { rd, ref mem, .. } => {
collector.add_use(rd);
memarg_regs(mem, collector);
}
&Inst::LoadFpuConst32 { rd, .. }
| &Inst::LoadFpuConst64 { rd, .. }
| &Inst::LoadFpuConst128 { rd, .. } => {
collector.add_def(rd);
}
&Inst::FpuToInt { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::IntToFpu { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::FpuCSel32 { rd, rn, rm, .. } | &Inst::FpuCSel64 { rd, rn, rm, .. } => {
collector.add_def(rd);
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::FpuRound { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::MovToVec64 { rd, rn } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::MovFromVec { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::VecDup { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::VecDupFromFpu { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::VecExtend { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::VecRRR {
alu_op, rd, rn, rm, ..
} => {
if alu_op == VecALUOp::Bsl {
collector.add_mod(rd);
} else {
collector.add_def(rd);
}
collector.add_use(rn);
collector.add_use(rm);
}
&Inst::MovToNZCV { rn } => {
collector.add_use(rn);
}
&Inst::MovFromNZCV { rd } => {
collector.add_def(rd);
}
&Inst::CondSet { rd, .. } => {
collector.add_def(rd);
}
&Inst::Extend { rd, rn, .. } => {
collector.add_def(rd);
collector.add_use(rn);
}
&Inst::Jump { .. } | &Inst::Ret | &Inst::EpiloguePlaceholder => {}
&Inst::Call { ref info, .. } => {
collector.add_uses(&*info.uses);
collector.add_defs(&*info.defs);
}
&Inst::CallInd { ref info, .. } => {
collector.add_uses(&*info.uses);
collector.add_defs(&*info.defs);
collector.add_use(info.rn);
}
&Inst::CondBr { ref kind, .. } => match kind {
CondBrKind::Zero(rt) | CondBrKind::NotZero(rt) => {
collector.add_use(*rt);
}
CondBrKind::Cond(_) => {}
},
&Inst::IndirectBr { rn, .. } => {
collector.add_use(rn);
}
&Inst::Nop0 | &Inst::Nop4 => {}
&Inst::Brk => {}
&Inst::Udf { .. } => {}
&Inst::TrapIf { ref kind, .. } => match kind {
CondBrKind::Zero(rt) | CondBrKind::NotZero(rt) => {
collector.add_use(*rt);
}
CondBrKind::Cond(_) => {}
},
&Inst::Adr { rd, .. } => {
collector.add_def(rd);
}
&Inst::Word4 { .. } | &Inst::Word8 { .. } => {}
&Inst::JTSequence {
ridx, rtmp1, rtmp2, ..
} => {
collector.add_use(ridx);
collector.add_def(rtmp1);
collector.add_def(rtmp2);
}
&Inst::LoadConst64 { rd, .. } | &Inst::LoadExtName { rd, .. } => {
collector.add_def(rd);
}
&Inst::LoadAddr { rd, ref mem } => {
collector.add_def(rd);
// The address-mode registers are real uses; `aarch64_map_regs` below maps them,
// so they must be collected here as well.
memarg_regs(mem, collector);
}
&Inst::VirtualSPOffsetAdj { .. } => {}
&Inst::EmitIsland { .. } => {}
}
}
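/// Rewrite the virtual registers in an instruction to the allocations chosen by the
/// register allocator, using the same use/def/mod classification as `aarch64_get_regs`
/// above.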
fn aarch64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
fn map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg) {
if r.is_virtual() {
let new = m.get_use(r.to_virtual_reg()).unwrap().to_reg();
*r = new;
}
}
fn map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
if r.to_reg().is_virtual() {
let new = m.get_def(r.to_reg().to_virtual_reg()).unwrap().to_reg();
*r = Writable::from_reg(new);
}
}
fn map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
if r.to_reg().is_virtual() {
let new = m.get_mod(r.to_reg().to_virtual_reg()).unwrap().to_reg();
*r = Writable::from_reg(new);
}
}
fn map_mem<RUM: RegUsageMapper>(m: &RUM, mem: &mut MemArg) {
match mem {
&mut MemArg::Unscaled(ref mut reg, ..) => map_use(m, reg),
&mut MemArg::UnsignedOffset(ref mut reg, ..) => map_use(m, reg),
&mut MemArg::RegReg(ref mut r1, ref mut r2)
| &mut MemArg::RegScaled(ref mut r1, ref mut r2, ..)
| &mut MemArg::RegScaledExtended(ref mut r1, ref mut r2, ..)
| &mut MemArg::RegExtended(ref mut r1, ref mut r2, ..) => {
map_use(m, r1);
map_use(m, r2);
}
&mut MemArg::Label(..) => {}
&mut MemArg::PreIndexed(ref mut r, ..) => map_mod(m, r),
&mut MemArg::PostIndexed(ref mut r, ..) => map_mod(m, r),
&mut MemArg::FPOffset(..)
| &mut MemArg::SPOffset(..)
| &mut MemArg::NominalSPOffset(..) => {}
&mut MemArg::RegOffset(ref mut r, ..) => map_use(m, r),
};
}
fn map_pairmem<RUM: RegUsageMapper>(m: &RUM, mem: &mut PairMemArg) {
match mem {
&mut PairMemArg::SignedOffset(ref mut reg, ..) => map_use(m, reg),
// The base register in pre/post-indexed modes is read and written back, so map
// it as a modified register, matching `pairmemarg_regs` above.
&mut PairMemArg::PreIndexed(ref mut reg, ..) => map_mod(m, reg),
&mut PairMemArg::PostIndexed(ref mut reg, ..) => map_mod(m, reg),
}
}
fn map_br<RUM: RegUsageMapper>(m: &RUM, br: &mut CondBrKind) {
match br {
&mut CondBrKind::Zero(ref mut reg) => map_use(m, reg),
&mut CondBrKind::NotZero(ref mut reg) => map_use(m, reg),
&mut CondBrKind::Cond(..) => {}
};
}
match inst {
&mut Inst::AluRRR {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::AluRRRR {
ref mut rd,
ref mut rn,
ref mut rm,
ref mut ra,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
map_use(mapper, ra);
}
&mut Inst::AluRRImm12 {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::AluRRImmLogic {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::AluRRImmShift {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::AluRRRShift {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::AluRRRExtend {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::BitRR {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::ULoad8 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::SLoad8 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::ULoad16 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::SLoad16 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::ULoad32 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::SLoad32 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::ULoad64 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::Store8 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::Store16 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::Store32 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::Store64 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::StoreP64 {
ref mut rt,
ref mut rt2,
ref mut mem,
} => {
map_use(mapper, rt);
map_use(mapper, rt2);
map_pairmem(mapper, mem);
}
&mut Inst::LoadP64 {
ref mut rt,
ref mut rt2,
ref mut mem,
} => {
map_def(mapper, rt);
map_def(mapper, rt2);
map_pairmem(mapper, mem);
}
&mut Inst::Mov {
ref mut rd,
ref mut rm,
} => {
map_def(mapper, rd);
map_use(mapper, rm);
}
&mut Inst::Mov32 {
ref mut rd,
ref mut rm,
} => {
map_def(mapper, rd);
map_use(mapper, rm);
}
&mut Inst::MovZ { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::MovN { ref mut rd, .. } => {
map_def(mapper, rd);
}
// MOVK keeps the untouched bits of its destination, so `rd` is a modify, not a
// def; this matches the `add_mod` in `aarch64_get_regs` above.
&mut Inst::MovK { ref mut rd, .. } => {
map_mod(mapper, rd);
}
&mut Inst::CSel {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::CSet { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::CCmpImm { ref mut rn, .. } => {
map_use(mapper, rn);
}
&mut Inst::FpuMove64 {
ref mut rd,
ref mut rn,
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuMove128 {
ref mut rd,
ref mut rn,
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuMoveFromVec {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuRR {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuRRR {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::FpuRRI {
fpu_op,
ref mut rd,
ref mut rn,
..
} => {
// SLI inserts into the existing destination bits, so it modifies `rd`; this
// matches the classification in `aarch64_get_regs` above.
match fpu_op {
FPUOpRI::UShr32(..) | FPUOpRI::UShr64(..) => map_def(mapper, rd),
FPUOpRI::Sli32(..) | FPUOpRI::Sli64(..) => map_mod(mapper, rd),
}
map_use(mapper, rn);
}
&mut Inst::FpuRRRR {
ref mut rd,
ref mut rn,
ref mut rm,
ref mut ra,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
map_use(mapper, ra);
}
&mut Inst::VecMisc {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::VecLanes {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuCmp32 {
ref mut rn,
ref mut rm,
} => {
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::FpuCmp64 {
ref mut rn,
ref mut rm,
} => {
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::FpuLoad32 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::FpuLoad64 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::FpuLoad128 {
ref mut rd,
ref mut mem,
..
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::FpuStore32 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::FpuStore64 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::FpuStore128 {
ref mut rd,
ref mut mem,
..
} => {
map_use(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::LoadFpuConst32 { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::LoadFpuConst64 { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::LoadFpuConst128 { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::FpuToInt {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::IntToFpu {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::FpuCSel32 {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::FpuCSel64 {
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::FpuRound {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::MovToVec64 {
ref mut rd,
ref mut rn,
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::MovFromVec {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::VecDup {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::VecDupFromFpu {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::VecExtend {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::VecRRR {
alu_op,
ref mut rd,
ref mut rn,
ref mut rm,
..
} => {
if alu_op == VecALUOp::Bsl {
map_mod(mapper, rd);
} else {
map_def(mapper, rd);
}
map_use(mapper, rn);
map_use(mapper, rm);
}
&mut Inst::MovToNZCV { ref mut rn } => {
map_use(mapper, rn);
}
&mut Inst::MovFromNZCV { ref mut rd } => {
map_def(mapper, rd);
}
&mut Inst::CondSet { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::Extend {
ref mut rd,
ref mut rn,
..
} => {
map_def(mapper, rd);
map_use(mapper, rn);
}
&mut Inst::Jump { .. } => {}
&mut Inst::Call { ref mut info } => {
for r in info.uses.iter_mut() {
map_use(mapper, r);
}
for r in info.defs.iter_mut() {
map_def(mapper, r);
}
}
&mut Inst::Ret | &mut Inst::EpiloguePlaceholder => {}
&mut Inst::CallInd { ref mut info, .. } => {
for r in info.uses.iter_mut() {
map_use(mapper, r);
}
for r in info.defs.iter_mut() {
map_def(mapper, r);
}
map_use(mapper, &mut info.rn);
}
&mut Inst::CondBr { ref mut kind, .. } => {
map_br(mapper, kind);
}
&mut Inst::IndirectBr { ref mut rn, .. } => {
map_use(mapper, rn);
}
&mut Inst::Nop0 | &mut Inst::Nop4 | &mut Inst::Brk | &mut Inst::Udf { .. } => {}
&mut Inst::TrapIf { ref mut kind, .. } => {
map_br(mapper, kind);
}
&mut Inst::Adr { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::Word4 { .. } | &mut Inst::Word8 { .. } => {}
&mut Inst::JTSequence {
ref mut ridx,
ref mut rtmp1,
ref mut rtmp2,
..
} => {
map_use(mapper, ridx);
map_def(mapper, rtmp1);
map_def(mapper, rtmp2);
}
&mut Inst::LoadConst64 { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::LoadExtName { ref mut rd, .. } => {
map_def(mapper, rd);
}
&mut Inst::LoadAddr {
ref mut rd,
ref mut mem,
} => {
map_def(mapper, rd);
map_mem(mapper, mem);
}
&mut Inst::VirtualSPOffsetAdj { .. } => {}
&mut Inst::EmitIsland { .. } => {}
}
}
impl MachInst for Inst {
type LabelUse = LabelUse;
fn get_regs(&self, collector: &mut RegUsageCollector) {
aarch64_get_regs(self, collector)
}
fn map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
aarch64_map_regs(self, mapper);
}
fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
match self {
&Inst::Mov { rd, rm } => Some((rd, rm)),
&Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
&Inst::FpuMove128 { rd, rn } => Some((rd, rn)),
_ => None,
}
}
fn is_epilogue_placeholder(&self) -> bool {
if let Inst::EpiloguePlaceholder = self {
true
} else {
false
}
}
fn is_term<'a>(&'a self) -> MachTerminator<'a> {
match self {
&Inst::Ret | &Inst::EpiloguePlaceholder => MachTerminator::Ret,
&Inst::Jump { dest } => MachTerminator::Uncond(dest.as_label().unwrap()),
&Inst::CondBr {
taken, not_taken, ..
} => MachTerminator::Cond(taken.as_label().unwrap(), not_taken.as_label().unwrap()),
&Inst::IndirectBr { ref targets, .. } => MachTerminator::Indirect(&targets[..]),
&Inst::JTSequence { ref info, .. } => {
MachTerminator::Indirect(&info.targets_for_term[..])
}
_ => MachTerminator::None,
}
}
fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
assert!(ty.bits() <= 128);
Inst::mov(to_reg, from_reg)
}
fn gen_constant(to_reg: Writable<Reg>, value: u64, ty: Type) -> SmallVec<[Inst; 4]> {
if ty == F64 {
let mut ret = SmallVec::new();
ret.push(Inst::load_fp_constant64(to_reg, f64::from_bits(value)));
ret
} else if ty == F32 {
let mut ret = SmallVec::new();
ret.push(Inst::load_fp_constant32(
to_reg,
f32::from_bits(value as u32),
));
ret
} else {
debug_assert!(
ty == B1
|| ty == I8
|| ty == B8
|| ty == I16
|| ty == B16
|| ty == I32
|| ty == B32
|| ty == I64
|| ty == B64
|| ty == R32
|| ty == R64
);
Inst::load_constant(to_reg, value)
}
}
fn gen_zero_len_nop() -> Inst {
Inst::Nop0
}
fn gen_nop(preferred_size: usize) -> Inst {
assert!(preferred_size >= 4);
Inst::Nop4
}
fn maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst> {
None
}
fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
match ty {
I8 | I16 | I32 | I64 | B1 | B8 | B16 | B32 | B64 | R32 | R64 => Ok(RegClass::I64),
F32 | F64 => Ok(RegClass::V128),
IFLAGS | FFLAGS => Ok(RegClass::I64),
B8X16 | I8X16 | B16X8 | I16X8 | B32X4 | I32X4 | B64X2 | I64X2 | F32X4 | F64X2 => {
Ok(RegClass::V128)
}
_ => Err(CodegenError::Unsupported(format!(
"Unexpected SSA-value type: {}",
ty
))),
}
}
fn gen_jump(target: MachLabel) -> Inst {
Inst::Jump {
dest: BranchTarget::Label(target),
}
}
fn reg_universe(flags: &settings::Flags) -> RealRegUniverse {
create_reg_universe(flags)
}
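/// A conservative upper bound, in bytes, on the machine code emitted for any single
/// `Inst`; multi-instruction expansions such as the jump-table sequence fit within it.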
fn worst_case_size() -> CodeOffset {
44
}
fn ref_type_regclass(_: &settings::Flags) -> RegClass {
RegClass::I64
}
}
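/// Run `mem_finalize` on a memory argument purely for pretty-printing: returns the
/// textual form of any helper instructions it expands to, plus the finalized `MemArg`.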
fn mem_finalize_for_show(
mem: &MemArg,
mb_rru: Option<&RealRegUniverse>,
state: &EmitState,
) -> (String, MemArg) {
let (mem_insts, mem) = mem_finalize(0, mem, state);
let mut mem_str = mem_insts
.into_iter()
.map(|inst| inst.show_rru(mb_rru))
.collect::<Vec<_>>()
.join(" ; ");
if !mem_str.is_empty() {
mem_str += " ; ";
}
(mem_str, mem)
}
impl ShowWithRRU for Inst {
fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
self.print_with_state(mb_rru, &mut EmitState::default())
}
}
impl Inst {
fn print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String {
fn op_name_size(alu_op: ALUOp) -> (&'static str, OperandSize) {
match alu_op {
ALUOp::Add32 => ("add", OperandSize::Size32),
ALUOp::Add64 => ("add", OperandSize::Size64),
ALUOp::Sub32 => ("sub", OperandSize::Size32),
ALUOp::Sub64 => ("sub", OperandSize::Size64),
ALUOp::Orr32 => ("orr", OperandSize::Size32),
ALUOp::Orr64 => ("orr", OperandSize::Size64),
ALUOp::And32 => ("and", OperandSize::Size32),
ALUOp::And64 => ("and", OperandSize::Size64),
ALUOp::Eor32 => ("eor", OperandSize::Size32),
ALUOp::Eor64 => ("eor", OperandSize::Size64),
ALUOp::AddS32 => ("adds", OperandSize::Size32),
ALUOp::AddS64 => ("adds", OperandSize::Size64),
ALUOp::SubS32 => ("subs", OperandSize::Size32),
ALUOp::SubS64 => ("subs", OperandSize::Size64),
ALUOp::SubS64XR => ("subs", OperandSize::Size64),
ALUOp::MAdd32 => ("madd", OperandSize::Size32),
ALUOp::MAdd64 => ("madd", OperandSize::Size64),
ALUOp::MSub32 => ("msub", OperandSize::Size32),
ALUOp::MSub64 => ("msub", OperandSize::Size64),
ALUOp::SMulH => ("smulh", OperandSize::Size64),
ALUOp::UMulH => ("umulh", OperandSize::Size64),
ALUOp::SDiv64 => ("sdiv", OperandSize::Size64),
ALUOp::UDiv64 => ("udiv", OperandSize::Size64),
ALUOp::AndNot32 => ("bic", OperandSize::Size32),
ALUOp::AndNot64 => ("bic", OperandSize::Size64),
ALUOp::OrrNot32 => ("orn", OperandSize::Size32),
ALUOp::OrrNot64 => ("orn", OperandSize::Size64),
ALUOp::EorNot32 => ("eon", OperandSize::Size32),
ALUOp::EorNot64 => ("eon", OperandSize::Size64),
ALUOp::RotR32 => ("ror", OperandSize::Size32),
ALUOp::RotR64 => ("ror", OperandSize::Size64),
ALUOp::Lsr32 => ("lsr", OperandSize::Size32),
ALUOp::Lsr64 => ("lsr", OperandSize::Size64),
ALUOp::Asr32 => ("asr", OperandSize::Size32),
ALUOp::Asr64 => ("asr", OperandSize::Size64),
ALUOp::Lsl32 => ("lsl", OperandSize::Size32),
ALUOp::Lsl64 => ("lsl", OperandSize::Size64),
}
}
match self {
&Inst::Nop0 => "nop-zero-len".to_string(),
&Inst::Nop4 => "nop".to_string(),
&Inst::AluRRR { alu_op, rd, rn, rm } => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size);
format!("{} {}, {}, {}", op, rd, rn, rm)
}
&Inst::AluRRRR {
alu_op,
rd,
rn,
rm,
ra,
} => {
let (op, size) = op_name_size(alu_op);
let four_args = alu_op != ALUOp::SMulH && alu_op != ALUOp::UMulH;
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size);
let ra = show_ireg_sized(ra, mb_rru, size);
if four_args {
format!("{} {}, {}, {}, {}", op, rd, rn, rm, ra)
} else {
format!("{} {}, {}, {}", op, rd, rn, rm)
}
}
&Inst::AluRRImm12 {
alu_op,
rd,
rn,
ref imm12,
} => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
if imm12.bits == 0 && alu_op == ALUOp::Add64 {
format!("mov {}, {}", rd, rn)
} else {
let imm12 = imm12.show_rru(mb_rru);
format!("{} {}, {}, {}", op, rd, rn, imm12)
}
}
&Inst::AluRRImmLogic {
alu_op,
rd,
rn,
ref imml,
} => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let imml = imml.show_rru(mb_rru);
format!("{} {}, {}, {}", op, rd, rn, imml)
}
&Inst::AluRRImmShift {
alu_op,
rd,
rn,
ref immshift,
} => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let immshift = immshift.show_rru(mb_rru);
format!("{} {}, {}, {}", op, rd, rn, immshift)
}
&Inst::AluRRRShift {
alu_op,
rd,
rn,
rm,
ref shiftop,
} => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size);
let shiftop = shiftop.show_rru(mb_rru);
format!("{} {}, {}, {}, {}", op, rd, rn, rm, shiftop)
}
&Inst::AluRRRExtend {
alu_op,
rd,
rn,
rm,
ref extendop,
} => {
let (op, size) = op_name_size(alu_op);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
let rm = show_ireg_sized(rm, mb_rru, size);
let extendop = extendop.show_rru(mb_rru);
format!("{} {}, {}, {}, {}", op, rd, rn, rm, extendop)
}
&Inst::BitRR { op, rd, rn } => {
let size = op.operand_size();
let op = op.op_str();
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size);
format!("{} {}, {}", op, rd, rn)
}
&Inst::ULoad8 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::SLoad8 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::ULoad16 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::SLoad16 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::ULoad32 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::SLoad32 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::ULoad64 {
rd,
ref mem,
srcloc: _srcloc,
..
} => {
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let is_unscaled = match &mem {
&MemArg::Unscaled(..) => true,
_ => false,
};
let (op, size) = match (self, is_unscaled) {
(&Inst::ULoad8 { .. }, false) => ("ldrb", OperandSize::Size32),
(&Inst::ULoad8 { .. }, true) => ("ldurb", OperandSize::Size32),
(&Inst::SLoad8 { .. }, false) => ("ldrsb", OperandSize::Size64),
(&Inst::SLoad8 { .. }, true) => ("ldursb", OperandSize::Size64),
(&Inst::ULoad16 { .. }, false) => ("ldrh", OperandSize::Size32),
(&Inst::ULoad16 { .. }, true) => ("ldurh", OperandSize::Size32),
(&Inst::SLoad16 { .. }, false) => ("ldrsh", OperandSize::Size64),
(&Inst::SLoad16 { .. }, true) => ("ldursh", OperandSize::Size64),
(&Inst::ULoad32 { .. }, false) => ("ldr", OperandSize::Size32),
(&Inst::ULoad32 { .. }, true) => ("ldur", OperandSize::Size32),
(&Inst::SLoad32 { .. }, false) => ("ldrsw", OperandSize::Size64),
(&Inst::SLoad32 { .. }, true) => ("ldursw", OperandSize::Size64),
(&Inst::ULoad64 { .. }, false) => ("ldr", OperandSize::Size64),
(&Inst::ULoad64 { .. }, true) => ("ldur", OperandSize::Size64),
_ => unreachable!(),
};
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size);
let mem = mem.show_rru(mb_rru);
format!("{}{} {}, {}", mem_str, op, rd, mem)
}
&Inst::Store8 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::Store16 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::Store32 {
rd,
ref mem,
srcloc: _srcloc,
}
| &Inst::Store64 {
rd,
ref mem,
srcloc: _srcloc,
..
} => {
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let is_unscaled = match &mem {
&MemArg::Unscaled(..) => true,
_ => false,
};
let (op, size) = match (self, is_unscaled) {
(&Inst::Store8 { .. }, false) => ("strb", OperandSize::Size32),
(&Inst::Store8 { .. }, true) => ("sturb", OperandSize::Size32),
(&Inst::Store16 { .. }, false) => ("strh", OperandSize::Size32),
(&Inst::Store16 { .. }, true) => ("sturh", OperandSize::Size32),
(&Inst::Store32 { .. }, false) => ("str", OperandSize::Size32),
(&Inst::Store32 { .. }, true) => ("stur", OperandSize::Size32),
(&Inst::Store64 { .. }, false) => ("str", OperandSize::Size64),
(&Inst::Store64 { .. }, true) => ("stur", OperandSize::Size64),
_ => unreachable!(),
};
let rd = show_ireg_sized(rd, mb_rru, size);
let mem = mem.show_rru(mb_rru);
format!("{}{} {}, {}", mem_str, op, rd, mem)
}
&Inst::StoreP64 { rt, rt2, ref mem } => {
let rt = rt.show_rru(mb_rru);
let rt2 = rt2.show_rru(mb_rru);
let mem = mem.show_rru_sized(mb_rru, 8);
format!("stp {}, {}, {}", rt, rt2, mem)
}
&Inst::LoadP64 { rt, rt2, ref mem } => {
let rt = rt.to_reg().show_rru(mb_rru);
let rt2 = rt2.to_reg().show_rru(mb_rru);
let mem = mem.show_rru_sized(mb_rru, 8);
format!("ldp {}, {}, {}", rt, rt2, mem)
}
&Inst::Mov { rd, rm } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rm = rm.show_rru(mb_rru);
format!("mov {}, {}", rd, rm)
}
&Inst::Mov32 { rd, rm } => {
let rd = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32);
let rm = show_ireg_sized(rm, mb_rru, OperandSize::Size32);
format!("mov {}, {}", rd, rm)
}
&Inst::MovZ { rd, ref imm } => {
let rd = rd.to_reg().show_rru(mb_rru);
let imm = imm.show_rru(mb_rru);
format!("movz {}, {}", rd, imm)
}
&Inst::MovN { rd, ref imm } => {
let rd = rd.to_reg().show_rru(mb_rru);
let imm = imm.show_rru(mb_rru);
format!("movn {}, {}", rd, imm)
}
&Inst::MovK { rd, ref imm } => {
let rd = rd.to_reg().show_rru(mb_rru);
let imm = imm.show_rru(mb_rru);
format!("movk {}, {}", rd, imm)
}
&Inst::CSel { rd, rn, rm, cond } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rn = rn.show_rru(mb_rru);
let rm = rm.show_rru(mb_rru);
let cond = cond.show_rru(mb_rru);
format!("csel {}, {}, {}, {}", rd, rn, rm, cond)
}
&Inst::CSet { rd, cond } => {
let rd = rd.to_reg().show_rru(mb_rru);
let cond = cond.show_rru(mb_rru);
format!("cset {}, {}", rd, cond)
}
&Inst::CCmpImm {
size,
rn,
imm,
nzcv,
cond,
} => {
let rn = show_ireg_sized(rn, mb_rru, size);
let imm = imm.show_rru(mb_rru);
let nzcv = nzcv.show_rru(mb_rru);
let cond = cond.show_rru(mb_rru);
format!("ccmp {}, {}, {}, {}", rn, imm, nzcv, cond)
}
&Inst::FpuMove64 { rd, rn } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rn = rn.show_rru(mb_rru);
format!("mov {}.8b, {}.8b", rd, rn)
}
&Inst::FpuMove128 { rd, rn } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rn = rn.show_rru(mb_rru);
format!("mov {}.16b, {}.16b", rd, rn)
}
&Inst::FpuMoveFromVec { rd, rn, idx, size } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size.lane_size());
let rn = show_vreg_element(rn, mb_rru, idx, size);
format!("mov {}, {}", rd, rn)
}
&Inst::FpuRR { fpu_op, rd, rn } => {
let (op, sizesrc, sizedest) = match fpu_op {
FPUOp1::Abs32 => ("fabs", ScalarSize::Size32, ScalarSize::Size32),
FPUOp1::Abs64 => ("fabs", ScalarSize::Size64, ScalarSize::Size64),
FPUOp1::Neg32 => ("fneg", ScalarSize::Size32, ScalarSize::Size32),
FPUOp1::Neg64 => ("fneg", ScalarSize::Size64, ScalarSize::Size64),
FPUOp1::Sqrt32 => ("fsqrt", ScalarSize::Size32, ScalarSize::Size32),
FPUOp1::Sqrt64 => ("fsqrt", ScalarSize::Size64, ScalarSize::Size64),
FPUOp1::Cvt32To64 => ("fcvt", ScalarSize::Size32, ScalarSize::Size64),
FPUOp1::Cvt64To32 => ("fcvt", ScalarSize::Size64, ScalarSize::Size32),
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, sizedest);
let rn = show_vreg_scalar(rn, mb_rru, sizesrc);
format!("{} {}, {}", op, rd, rn)
}
&Inst::FpuRRR { fpu_op, rd, rn, rm } => {
let (op, size) = match fpu_op {
FPUOp2::Add32 => ("fadd", ScalarSize::Size32),
FPUOp2::Add64 => ("fadd", ScalarSize::Size64),
FPUOp2::Sub32 => ("fsub", ScalarSize::Size32),
FPUOp2::Sub64 => ("fsub", ScalarSize::Size64),
FPUOp2::Mul32 => ("fmul", ScalarSize::Size32),
FPUOp2::Mul64 => ("fmul", ScalarSize::Size64),
FPUOp2::Div32 => ("fdiv", ScalarSize::Size32),
FPUOp2::Div64 => ("fdiv", ScalarSize::Size64),
FPUOp2::Max32 => ("fmax", ScalarSize::Size32),
FPUOp2::Max64 => ("fmax", ScalarSize::Size64),
FPUOp2::Min32 => ("fmin", ScalarSize::Size32),
FPUOp2::Min64 => ("fmin", ScalarSize::Size64),
FPUOp2::Sqadd64 => ("sqadd", ScalarSize::Size64),
FPUOp2::Uqadd64 => ("uqadd", ScalarSize::Size64),
FPUOp2::Sqsub64 => ("sqsub", ScalarSize::Size64),
FPUOp2::Uqsub64 => ("uqsub", ScalarSize::Size64),
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size);
let rn = show_vreg_scalar(rn, mb_rru, size);
let rm = show_vreg_scalar(rm, mb_rru, size);
format!("{} {}, {}, {}", op, rd, rn, rm)
}
&Inst::FpuRRI { fpu_op, rd, rn } => {
let (op, imm, vector) = match fpu_op {
FPUOpRI::UShr32(imm) => ("ushr", imm.show_rru(mb_rru), true),
FPUOpRI::UShr64(imm) => ("ushr", imm.show_rru(mb_rru), false),
FPUOpRI::Sli32(imm) => ("sli", imm.show_rru(mb_rru), true),
FPUOpRI::Sli64(imm) => ("sli", imm.show_rru(mb_rru), false),
};
let show_vreg_fn: fn(Reg, Option<&RealRegUniverse>) -> String = if vector {
|reg, mb_rru| show_vreg_vector(reg, mb_rru, VectorSize::Size32x2)
} else {
|reg, mb_rru| show_vreg_scalar(reg, mb_rru, ScalarSize::Size64)
};
let rd = show_vreg_fn(rd.to_reg(), mb_rru);
let rn = show_vreg_fn(rn, mb_rru);
format!("{} {}, {}, {}", op, rd, rn, imm)
}
&Inst::FpuRRRR {
fpu_op,
rd,
rn,
rm,
ra,
} => {
let (op, size) = match fpu_op {
FPUOp3::MAdd32 => ("fmadd", ScalarSize::Size32),
FPUOp3::MAdd64 => ("fmadd", ScalarSize::Size64),
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size);
let rn = show_vreg_scalar(rn, mb_rru, size);
let rm = show_vreg_scalar(rm, mb_rru, size);
let ra = show_vreg_scalar(ra, mb_rru, size);
format!("{} {}, {}, {}, {}", op, rd, rn, rm, ra)
}
&Inst::FpuCmp32 { rn, rm } => {
let rn = show_vreg_scalar(rn, mb_rru, ScalarSize::Size32);
let rm = show_vreg_scalar(rm, mb_rru, ScalarSize::Size32);
format!("fcmp {}, {}", rn, rm)
}
&Inst::FpuCmp64 { rn, rm } => {
let rn = show_vreg_scalar(rn, mb_rru, ScalarSize::Size64);
let rm = show_vreg_scalar(rm, mb_rru, ScalarSize::Size64);
format!("fcmp {}, {}", rn, rm)
}
&Inst::FpuLoad32 { rd, ref mem, .. } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size32);
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}ldr {}, {}", mem_str, rd, mem)
}
&Inst::FpuLoad64 { rd, ref mem, .. } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size64);
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}ldr {}, {}", mem_str, rd, mem)
}
&Inst::FpuLoad128 { rd, ref mem, .. } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rd = "q".to_string() + &rd[1..];
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}ldr {}, {}", mem_str, rd, mem)
}
&Inst::FpuStore32 { rd, ref mem, .. } => {
let rd = show_vreg_scalar(rd, mb_rru, ScalarSize::Size32);
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}str {}, {}", mem_str, rd, mem)
}
&Inst::FpuStore64 { rd, ref mem, .. } => {
let rd = show_vreg_scalar(rd, mb_rru, ScalarSize::Size64);
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}str {}, {}", mem_str, rd, mem)
}
&Inst::FpuStore128 { rd, ref mem, .. } => {
let rd = rd.show_rru(mb_rru);
let rd = "q".to_string() + &rd[1..];
let (mem_str, mem) = mem_finalize_for_show(mem, mb_rru, state);
let mem = mem.show_rru(mb_rru);
format!("{}str {}, {}", mem_str, rd, mem)
}
&Inst::LoadFpuConst32 { rd, const_data } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size32);
format!("ldr {}, pc+8 ; b 8 ; data.f32 {}", rd, const_data)
}
&Inst::LoadFpuConst64 { rd, const_data } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size64);
format!("ldr {}, pc+8 ; b 12 ; data.f64 {}", rd, const_data)
}
&Inst::LoadFpuConst128 { rd, const_data } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size128);
format!("ldr {}, pc+8 ; b 20 ; data.f128 0x{:032x}", rd, const_data)
}
&Inst::FpuToInt { op, rd, rn } => {
let (op, sizesrc, sizedest) = match op {
FpuToIntOp::F32ToI32 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size32),
FpuToIntOp::F32ToU32 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size32),
FpuToIntOp::F32ToI64 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size64),
FpuToIntOp::F32ToU64 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size64),
FpuToIntOp::F64ToI32 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size32),
FpuToIntOp::F64ToU32 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size32),
FpuToIntOp::F64ToI64 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size64),
FpuToIntOp::F64ToU64 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size64),
};
let rd = show_ireg_sized(rd.to_reg(), mb_rru, sizedest);
let rn = show_vreg_scalar(rn, mb_rru, sizesrc);
format!("{} {}, {}", op, rd, rn)
}
&Inst::IntToFpu { op, rd, rn } => {
let (op, sizesrc, sizedest) = match op {
IntToFpuOp::I32ToF32 => ("scvtf", OperandSize::Size32, ScalarSize::Size32),
IntToFpuOp::U32ToF32 => ("ucvtf", OperandSize::Size32, ScalarSize::Size32),
IntToFpuOp::I64ToF32 => ("scvtf", OperandSize::Size64, ScalarSize::Size32),
IntToFpuOp::U64ToF32 => ("ucvtf", OperandSize::Size64, ScalarSize::Size32),
IntToFpuOp::I32ToF64 => ("scvtf", OperandSize::Size32, ScalarSize::Size64),
IntToFpuOp::U32ToF64 => ("ucvtf", OperandSize::Size32, ScalarSize::Size64),
IntToFpuOp::I64ToF64 => ("scvtf", OperandSize::Size64, ScalarSize::Size64),
IntToFpuOp::U64ToF64 => ("ucvtf", OperandSize::Size64, ScalarSize::Size64),
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, sizedest);
let rn = show_ireg_sized(rn, mb_rru, sizesrc);
format!("{} {}, {}", op, rd, rn)
}
&Inst::FpuCSel32 { rd, rn, rm, cond } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size32);
let rn = show_vreg_scalar(rn, mb_rru, ScalarSize::Size32);
let rm = show_vreg_scalar(rm, mb_rru, ScalarSize::Size32);
let cond = cond.show_rru(mb_rru);
format!("fcsel {}, {}, {}, {}", rd, rn, rm, cond)
}
&Inst::FpuCSel64 { rd, rn, rm, cond } => {
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, ScalarSize::Size64);
let rn = show_vreg_scalar(rn, mb_rru, ScalarSize::Size64);
let rm = show_vreg_scalar(rm, mb_rru, ScalarSize::Size64);
let cond = cond.show_rru(mb_rru);
format!("fcsel {}, {}, {}, {}", rd, rn, rm, cond)
}
&Inst::FpuRound { op, rd, rn } => {
let (inst, size) = match op {
FpuRoundMode::Minus32 => ("frintm", ScalarSize::Size32),
FpuRoundMode::Minus64 => ("frintm", ScalarSize::Size64),
FpuRoundMode::Plus32 => ("frintp", ScalarSize::Size32),
FpuRoundMode::Plus64 => ("frintp", ScalarSize::Size64),
FpuRoundMode::Zero32 => ("frintz", ScalarSize::Size32),
FpuRoundMode::Zero64 => ("frintz", ScalarSize::Size64),
FpuRoundMode::Nearest32 => ("frintn", ScalarSize::Size32),
FpuRoundMode::Nearest64 => ("frintn", ScalarSize::Size64),
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size);
let rn = show_vreg_scalar(rn, mb_rru, size);
format!("{} {}, {}", inst, rd, rn)
}
&Inst::MovToVec64 { rd, rn } => {
let rd = rd.to_reg().show_rru(mb_rru);
let rn = rn.show_rru(mb_rru);
format!("mov {}.d[0], {}", rd, rn)
}
&Inst::MovFromVec { rd, rn, idx, size } => {
let op = match size {
VectorSize::Size8x16 => "umov",
VectorSize::Size16x8 => "umov",
VectorSize::Size32x4 => "mov",
VectorSize::Size64x2 => "mov",
_ => unimplemented!(),
};
let rd = show_ireg_sized(rd.to_reg(), mb_rru, size.operand_size());
let rn = show_vreg_element(rn, mb_rru, idx, size);
format!("{} {}, {}", op, rd, rn)
}
&Inst::VecDup { rd, rn, size } => {
let rd = show_vreg_vector(rd.to_reg(), mb_rru, size);
let rn = show_ireg_sized(rn, mb_rru, size.operand_size());
format!("dup {}, {}", rd, rn)
}
&Inst::VecDupFromFpu { rd, rn, size } => {
let rd = show_vreg_vector(rd.to_reg(), mb_rru, size);
let rn = show_vreg_element(rn, mb_rru, 0, size);
format!("dup {}, {}", rd, rn)
}
&Inst::VecExtend { t, rd, rn } => {
let (op, dest, src) = match t {
VecExtendOp::Sxtl8 => ("sxtl", VectorSize::Size16x8, VectorSize::Size8x8),
VecExtendOp::Sxtl16 => ("sxtl", VectorSize::Size32x4, VectorSize::Size16x4),
VecExtendOp::Sxtl32 => ("sxtl", VectorSize::Size64x2, VectorSize::Size32x2),
VecExtendOp::Uxtl8 => ("uxtl", VectorSize::Size16x8, VectorSize::Size8x8),
VecExtendOp::Uxtl16 => ("uxtl", VectorSize::Size32x4, VectorSize::Size16x4),
VecExtendOp::Uxtl32 => ("uxtl", VectorSize::Size64x2, VectorSize::Size32x2),
};
let rd = show_vreg_vector(rd.to_reg(), mb_rru, dest);
let rn = show_vreg_vector(rn, mb_rru, src);
format!("{} {}, {}", op, rd, rn)
}
&Inst::VecRRR {
rd,
rn,
rm,
alu_op,
size,
} => {
let (op, size) = match alu_op {
VecALUOp::Sqadd => ("sqadd", size),
VecALUOp::Uqadd => ("uqadd", size),
VecALUOp::Sqsub => ("sqsub", size),
VecALUOp::Uqsub => ("uqsub", size),
VecALUOp::Cmeq => ("cmeq", size),
VecALUOp::Cmge => ("cmge", size),
VecALUOp::Cmgt => ("cmgt", size),
VecALUOp::Cmhs => ("cmhs", size),
VecALUOp::Cmhi => ("cmhi", size),
VecALUOp::Fcmeq => ("fcmeq", size),
VecALUOp::Fcmgt => ("fcmgt", size),
VecALUOp::Fcmge => ("fcmge", size),
VecALUOp::And => ("and", VectorSize::Size8x16),
VecALUOp::Bic => ("bic", VectorSize::Size8x16),
VecALUOp::Orr => ("orr", VectorSize::Size8x16),
VecALUOp::Eor => ("eor", VectorSize::Size8x16),
VecALUOp::Bsl => ("bsl", VectorSize::Size8x16),
VecALUOp::Umaxp => ("umaxp", size),
VecALUOp::Add => ("add", size),
VecALUOp::Sub => ("sub", size),
VecALUOp::Mul => ("mul", size),
VecALUOp::Sshl => ("sshl", size),
VecALUOp::Ushl => ("ushl", size),
};
let rd = show_vreg_vector(rd.to_reg(), mb_rru, size);
let rn = show_vreg_vector(rn, mb_rru, size);
let rm = show_vreg_vector(rm, mb_rru, size);
format!("{} {}, {}, {}", op, rd, rn, rm)
}
&Inst::VecMisc { op, rd, rn, size } => {
let (op, size) = match op {
VecMisc2::Not => ("mvn", VectorSize::Size8x16),
VecMisc2::Neg => ("neg", size),
};
let rd = show_vreg_vector(rd.to_reg(), mb_rru, size);
let rn = show_vreg_vector(rn, mb_rru, size);
format!("{} {}, {}", op, rd, rn)
}
&Inst::VecLanes { op, rd, rn, size } => {
let op = match op {
VecLanesOp::Uminv => "uminv",
};
let rd = show_vreg_scalar(rd.to_reg(), mb_rru, size.lane_size());
let rn = show_vreg_vector(rn, mb_rru, size);
format!("{} {}, {}", op, rd, rn)
}
&Inst::MovToNZCV { rn } => {
let rn = rn.show_rru(mb_rru);
format!("msr nzcv, {}", rn)
}
&Inst::MovFromNZCV { rd } => {
let rd = rd.to_reg().show_rru(mb_rru);
format!("mrs {}, nzcv", rd)
}
&Inst::CondSet { rd, cond } => {
let rd = rd.to_reg().show_rru(mb_rru);
let cond = cond.show_rru(mb_rru);
format!("cset {}, {}", rd, cond)
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
to_bits,
} if from_bits >= 8 => {
let dest_size = if !signed && from_bits == 32 && to_bits == 64 {
OperandSize::Size32
} else {
OperandSize::from_bits(to_bits)
};
let rd = show_ireg_sized(rd.to_reg(), mb_rru, dest_size);
let rn = show_ireg_sized(rn, mb_rru, OperandSize::from_bits(from_bits));
let op = match (signed, from_bits, to_bits) {
(false, 8, 32) => "uxtb",
(true, 8, 32) => "sxtb",
(false, 16, 32) => "uxth",
(true, 16, 32) => "sxth",
(false, 8, 64) => "uxtb",
(true, 8, 64) => "sxtb",
(false, 16, 64) => "uxth",
(true, 16, 64) => "sxth",
(false, 32, 64) => "mov",
(true, 32, 64) => "sxtw",
_ => panic!("Unsupported Extend case: {:?}", self),
};
format!("{} {}, {}", op, rd, rn)
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
to_bits,
} if from_bits == 1 && signed => {
let dest_size = OperandSize::from_bits(to_bits);
let zr = if dest_size.is32() { "wzr" } else { "xzr" };
let rd32 = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32);
let rd = show_ireg_sized(rd.to_reg(), mb_rru, dest_size);
let rn = show_ireg_sized(rn, mb_rru, OperandSize::Size32);
format!("and {}, {}, #1 ; sub {}, {}, {}", rd32, rn, rd, zr, rd)
}
&Inst::Extend {
rd,
rn,
signed,
from_bits,
..
} if from_bits == 1 && !signed => {
let rd = show_ireg_sized(rd.to_reg(), mb_rru, OperandSize::Size32);
let rn = show_ireg_sized(rn, mb_rru, OperandSize::Size32);
format!("and {}, {}, #1", rd, rn)
}
&Inst::Extend { .. } => {
panic!("Unsupported Extend case");
}
&Inst::Call { .. } => format!("bl 0"),
&Inst::CallInd { ref info, .. } => {
let rn = info.rn.show_rru(mb_rru);
format!("blr {}", rn)
}
&Inst::Ret => "ret".to_string(),
&Inst::EpiloguePlaceholder => "epilogue placeholder".to_string(),
&Inst::Jump { ref dest } => {
let dest = dest.show_rru(mb_rru);
format!("b {}", dest)
}
&Inst::CondBr {
ref taken,
ref not_taken,
ref kind,
} => {
let taken = taken.show_rru(mb_rru);
let not_taken = not_taken.show_rru(mb_rru);
match kind {
&CondBrKind::Zero(reg) => {
let reg = reg.show_rru(mb_rru);
format!("cbz {}, {} ; b {}", reg, taken, not_taken)
}
&CondBrKind::NotZero(reg) => {
let reg = reg.show_rru(mb_rru);
format!("cbnz {}, {} ; b {}", reg, taken, not_taken)
}
&CondBrKind::Cond(c) => {
let c = c.show_rru(mb_rru);
format!("b.{} {} ; b {}", c, taken, not_taken)
}
}
}
&Inst::IndirectBr { rn, .. } => {
let rn = rn.show_rru(mb_rru);
format!("br {}", rn)
}
&Inst::Brk => "brk #0".to_string(),
&Inst::Udf { .. } => "udf".to_string(),
&Inst::TrapIf { ref kind, .. } => match kind {
&CondBrKind::Zero(reg) => {
let reg = reg.show_rru(mb_rru);
format!("cbnz {}, 8 ; udf", reg)
}
&CondBrKind::NotZero(reg) => {
let reg = reg.show_rru(mb_rru);
format!("cbz {}, 8 ; udf", reg)
}
&CondBrKind::Cond(c) => {
let c = c.invert().show_rru(mb_rru);
format!("b.{} 8 ; udf", c)
}
},
&Inst::Adr { rd, off } => {
let rd = rd.show_rru(mb_rru);
format!("adr {}, pc+{}", rd, off)
}
&Inst::Word4 { data } => format!("data.i32 {}", data),
&Inst::Word8 { data } => format!("data.i64 {}", data),
&Inst::JTSequence {
ref info,
ridx,
rtmp1,
rtmp2,
..
} => {
let ridx = ridx.show_rru(mb_rru);
let rtmp1 = rtmp1.show_rru(mb_rru);
let rtmp2 = rtmp2.show_rru(mb_rru);
let default_target = info.default_target.show_rru(mb_rru);
format!(
concat!(
"b.hs {} ; ",
"adr {}, pc+16 ; ",
"ldrsw {}, [{}, {}, LSL 2] ; ",
"add {}, {}, {} ; ",
"br {} ; ",
"jt_entries {:?}"
),
default_target,
rtmp1,
rtmp2,
rtmp1,
ridx,
rtmp1,
rtmp1,
rtmp2,
rtmp1,
info.targets
)
}
&Inst::LoadConst64 { rd, const_data } => {
let rd = rd.show_rru(mb_rru);
format!("ldr {}, 8 ; b 12 ; data {:?}", rd, const_data)
}
&Inst::LoadExtName {
rd,
ref name,
offset,
srcloc: _srcloc,
} => {
let rd = rd.show_rru(mb_rru);
format!("ldr {}, 8 ; b 12 ; data {:?} + {}", rd, name, offset)
}
&Inst::LoadAddr { rd, ref mem } => {
let (mem_insts, mem) = mem_finalize(0, mem, state);
let mut ret = String::new();
for inst in mem_insts.into_iter() {
ret.push_str(&inst.show_rru(mb_rru));
}
let (reg, offset) = match mem {
MemArg::Unscaled(r, simm9) => (r, simm9.value()),
MemArg::UnsignedOffset(r, uimm12scaled) => (r, uimm12scaled.value() as i32),
_ => panic!("Unsupported case for LoadAddr: {:?}", mem),
};
let abs_offset = if offset < 0 {
-offset as u64
} else {
offset as u64
};
let alu_op = if offset < 0 {
ALUOp::Sub64
} else {
ALUOp::Add64
};
if offset == 0 {
let mov = Inst::mov(rd, reg);
ret.push_str(&mov.show_rru(mb_rru));
} else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
let add = Inst::AluRRImm12 {
alu_op,
rd,
rn: reg,
imm12,
};
ret.push_str(&add.show_rru(mb_rru));
} else {
let tmp = writable_spilltmp_reg();
for inst in Inst::load_constant(tmp, abs_offset).into_iter() {
ret.push_str(&inst.show_rru(mb_rru));
}
let add = Inst::AluRRR {
alu_op,
rd,
rn: reg,
rm: tmp.to_reg(),
};
ret.push_str(&add.show_rru(mb_rru));
}
ret
}
&Inst::VirtualSPOffsetAdj { offset } => {
state.virtual_sp_offset += offset;
format!("virtual_sp_offset_adjust {}", offset)
}
&Inst::EmitIsland { needed_space } => format!("emit_island {}", needed_space),
}
}
}
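/// Different forms of label references for different instruction formats.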
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
Branch19,
Branch26,
Ldr19,
Adr21,
PCRel32,
}
impl MachInstLabelUse for LabelUse {
const ALIGN: CodeOffset = 4;
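/// Maximum distance to a forward target: these forms use signed 21-, 28-, 21-, 21-,
/// and 32-bit PC-relative byte offsets, respectively.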
fn max_pos_range(self) -> CodeOffset {
match self {
LabelUse::Branch19 => (1 << 20) - 1,
LabelUse::Branch26 => (1 << 27) - 1,
LabelUse::Ldr19 => (1 << 20) - 1,
LabelUse::Adr21 => (1 << 20) - 1,
LabelUse::PCRel32 => 0x7fffffff,
}
}
fn max_neg_range(self) -> CodeOffset {
self.max_pos_range() + 1
}
fn patch_size(self) -> CodeOffset {
4
}
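/// Patch a PC-relative reference in place: compute the byte offset, shift it to
/// instruction granularity where the encoding requires it, and insert it into the
/// appropriate bitfield of the existing instruction word.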
fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
let pc_rel = (label_offset as i64) - (use_offset as i64);
debug_assert!(pc_rel <= self.max_pos_range() as i64);
debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
let pc_rel = pc_rel as u32;
let insn_word = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
let mask = match self {
LabelUse::Branch19 => 0x00ffffe0,
LabelUse::Branch26 => 0x03ffffff,
LabelUse::Ldr19 => 0x00ffffe0,
LabelUse::Adr21 => 0x60ffffe0,
LabelUse::PCRel32 => 0xffffffff,
};
let pc_rel_shifted = match self {
LabelUse::Adr21 | LabelUse::PCRel32 => pc_rel,
_ => {
debug_assert!(pc_rel & 3 == 0);
pc_rel >> 2
}
};
let pc_rel_inserted = match self {
LabelUse::Branch19 | LabelUse::Ldr19 => (pc_rel_shifted & 0x7ffff) << 5,
LabelUse::Branch26 => pc_rel_shifted & 0x3ffffff,
LabelUse::Adr21 => (pc_rel_shifted & 0x7ffff) << 5 | (pc_rel_shifted & 0x180000) << 10,
LabelUse::PCRel32 => pc_rel_shifted,
};
let is_add = match self {
LabelUse::PCRel32 => true,
_ => false,
};
let insn_word = if is_add {
insn_word.wrapping_add(pc_rel_inserted)
} else {
(insn_word & !mask) | pc_rel_inserted
};
buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
}
fn supports_veneer(self) -> bool {
match self {
LabelUse::Branch19 => true,
_ => false,
}
}
fn veneer_size(self) -> CodeOffset {
4
}
fn generate_veneer(
self,
buffer: &mut [u8],
veneer_offset: CodeOffset,
) -> (CodeOffset, LabelUse) {
match self {
LabelUse::Branch19 => {
let insn_word = 0b000101 << 26;
buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
(veneer_offset, LabelUse::Branch26)
}
_ => panic!("Unsupported label-reference type for veneer generation!"),
}
}
}