//! Lowering rules for AArch64.

use crate::ir::condcodes::{FloatCC, IntCC};
use crate::ir::types::*;
use crate::ir::Inst as IRInst;
use crate::ir::{Opcode, Type, Value};
use crate::isa::aarch64::inst::*;
use crate::isa::aarch64::AArch64Backend;
use crate::machinst::lower::*;
use crate::machinst::{Reg, Writable};
use crate::{machinst::*, trace};
use smallvec::{smallvec, SmallVec};
pub mod isle;
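
//=============================================================================
// Lowering: convert instruction inputs to forms that we can use.

/// How to handle narrow values loaded into registers; see the note on the
/// `narrow_mode` parameter to `put_input_in_reg` below.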
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum NarrowValueMode {
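    /// Leave the value as-is; no extension.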
None,
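    /// Zero-extend to 64 bits if the original value is narrower than 64 bits.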
ZeroExtend64,
}
impl NarrowValueMode {
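    /// Returns whether this mode extends to only 32 bits rather than 64. No
    /// such mode currently exists, so this is always false.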
fn is_32bit(&self) -> bool {
match self {
NarrowValueMode::None => false,
NarrowValueMode::ZeroExtend64 => false,
}
}
}
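
/// Extends a register according to `narrow_mode`. If extended, the value is
/// always extended to 64 bits, for simplicity.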
fn extend_reg(
ctx: &mut Lower<Inst>,
ty: Type,
in_reg: Reg,
is_const: bool,
narrow_mode: NarrowValueMode,
) -> Reg {
let from_bits = ty_bits(ty) as u8;
match (narrow_mode, from_bits) {
(NarrowValueMode::None, _) => in_reg,
(NarrowValueMode::ZeroExtend64, n) if n < 64 => {
if is_const {
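                // Constants are zero-extended to full 64-bit width when
                // loaded, so no explicit extend is needed.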
in_reg
} else {
let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
ctx.emit(Inst::Extend {
rd: tmp,
rn: in_reg,
signed: false,
from_bits,
to_bits: 64,
});
tmp.to_reg()
}
}
(_, 64) => in_reg,
(_, 128) => in_reg,
_ => panic!(
"Unsupported input width: input ty {} bits {} mode {:?}",
ty, from_bits, narrow_mode
),
}
}
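
/// Lowers a value to one or more registers, materializing a constant into a
/// temporary if needed; returns the registers, the value's type, and whether
/// the value was a constant.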
fn lower_value_to_regs(ctx: &mut Lower<Inst>, value: Value) -> (ValueRegs<Reg>, Type, bool) {
trace!("lower_value_to_regs: value {:?}", value);
let ty = ctx.value_ty(value);
let inputs = ctx.get_value_as_source_or_const(value);
let is_const = inputs.constant.is_some();
let in_regs = if let Some(c) = inputs.constant {
let from_bits = ty_bits(ty);
let c = if from_bits < 64 {
c & ((1u64 << from_bits) - 1)
} else {
c
};
match ty {
I8 | I16 | I32 | I64 | R32 | R64 => {
let cst_copy = ctx.alloc_tmp(ty);
lower_constant_u64(ctx, cst_copy.only_reg().unwrap(), c);
non_writable_value_regs(cst_copy)
}
            _ => unreachable!(),
        }
} else {
ctx.put_value_in_regs(value)
};
(in_regs, ty, is_const)
}
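
/// Lower an instruction input to a register.
///
/// The given register will be extended appropriately, according to
/// `narrow_mode` and the input's type. If extended, the value is always
/// extended to 64 bits, for simplicity.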
pub(crate) fn put_input_in_reg(
ctx: &mut Lower<Inst>,
input: InsnInput,
narrow_mode: NarrowValueMode,
) -> Reg {
let value = ctx.input_as_value(input.insn, input.input);
put_value_in_reg(ctx, value, narrow_mode)
}
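
/// Lower a value to a register. See the comment on `put_input_in_reg` about
/// extension and `narrow_mode`.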
fn put_value_in_reg(ctx: &mut Lower<Inst>, value: Value, narrow_mode: NarrowValueMode) -> Reg {
let (in_regs, ty, is_const) = lower_value_to_regs(ctx, value);
let reg = in_regs
.only_reg()
.expect("Multi-register value not expected");
extend_reg(ctx, ty, reg, is_const, narrow_mode)
}
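
/// If the given value is produced by a sign or zero extension compatible with
/// `narrow_mode`, or if `narrow_mode` itself requires an extension, return
/// the value to use together with the `ExtendOp` that the consuming
/// instruction should apply; otherwise return `None`.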
fn get_as_extended_value(
ctx: &mut Lower<Inst>,
val: Value,
narrow_mode: NarrowValueMode,
) -> Option<(Value, ExtendOp)> {
let inputs = ctx.get_value_as_source_or_const(val);
let (insn, n) = inputs.inst.as_inst()?;
if n != 0 {
return None;
}
let op = ctx.data(insn).opcode();
let out_ty = ctx.output_ty(insn, 0);
let out_bits = ty_bits(out_ty);
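    // If the value is produced by a sign- or zero-extend compatible with the
    // requested narrow mode, return the extend's input together with the
    // matching `ExtendOp`, so the caller can fold the extension into the
    // consuming instruction.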
if op == Opcode::Uextend || op == Opcode::Sextend {
let sign_extend = op == Opcode::Sextend;
let inner_ty = ctx.input_ty(insn, 0);
let inner_bits = ty_bits(inner_ty);
assert!(inner_bits < out_bits);
if match (sign_extend, narrow_mode) {
(_, NarrowValueMode::None) => true,
(false, NarrowValueMode::ZeroExtend64) => true,
(true, NarrowValueMode::ZeroExtend64) => false,
} {
let extendop = match (sign_extend, inner_bits) {
(true, 8) => ExtendOp::SXTB,
(false, 8) => ExtendOp::UXTB,
(true, 16) => ExtendOp::SXTH,
(false, 16) => ExtendOp::UXTH,
(true, 32) => ExtendOp::SXTW,
(false, 32) => ExtendOp::UXTW,
_ => unreachable!(),
};
return Some((ctx.input_as_value(insn, 0), extendop));
}
}
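
    // Otherwise, if the caller requires a zero-extended 64-bit value and the
    // producer's output is narrower than 64 bits, extend the value itself.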
if narrow_mode != NarrowValueMode::None
&& ((narrow_mode.is_32bit() && out_bits < 32) || (!narrow_mode.is_32bit() && out_bits < 64))
{
let extendop = match (narrow_mode, out_bits) {
(NarrowValueMode::ZeroExtend64, 1) => ExtendOp::UXTB,
(NarrowValueMode::ZeroExtend64, 8) => ExtendOp::UXTB,
(NarrowValueMode::ZeroExtend64, 16) => ExtendOp::UXTH,
(NarrowValueMode::ZeroExtend64, 32) => ExtendOp::UXTW,
_ => unreachable!(),
};
return Some((val, extendop));
}
None
}
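
//=============================================================================
// Lowering: addressing mode support. Takes instruction directly, rather
// than an `InsnInput`, to do more introspection.

/// 32-bit addends that make up an address: the input, and an extension mode
/// on that input.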
type AddressAddend32List = SmallVec<[(Reg, ExtendOp); 4]>;
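/// 64-bit addends that make up an address: just the input.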
type AddressAddend64List = SmallVec<[Reg; 4]>;
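
/// Collect all addends that feed into an address computation, with extend
/// modes on each. A load/store address in CLIF may be computed by an
/// arbitrary tree of `iadd`s and 32-to-64-bit `uextend`/`sextend`s; we walk
/// that tree, collecting 64-bit addends, 32-bit addends with their extension
/// ops, and a constant offset.
///
/// We do not descend into the inputs of extensions (except constants),
/// because supporting e.g. a 32-bit add that is later extended would require
/// additional masking of high-order bits, which is too complex. In essence,
/// we descend through any number of adds from the root, collecting all
/// 64-bit address addends, and support extensions only at the leaves.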
fn collect_address_addends(
ctx: &mut Lower<Inst>,
root: Value,
) -> (AddressAddend64List, AddressAddend32List, i64) {
let mut result32: AddressAddend32List = SmallVec::new();
let mut result64: AddressAddend64List = SmallVec::new();
let mut offset: i64 = 0;
let mut workqueue: SmallVec<[Value; 4]> = smallvec![root];
while let Some(value) = workqueue.pop() {
debug_assert_eq!(ty_bits(ctx.value_ty(value)), 64);
if let Some((op, insn)) = maybe_value_multi(
ctx,
value,
&[
Opcode::Uextend,
Opcode::Sextend,
Opcode::Iadd,
Opcode::Iconst,
],
) {
match op {
Opcode::Uextend | Opcode::Sextend if ty_bits(ctx.input_ty(insn, 0)) == 32 => {
let extendop = if op == Opcode::Uextend {
ExtendOp::UXTW
} else {
ExtendOp::SXTW
};
let extendee_input = InsnInput { insn, input: 0 };
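                    // A zero-extended 32-bit constant can be folded directly
                    // into the offset.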
if let (Some(insn), ExtendOp::UXTW) = (
maybe_input_insn(ctx, extendee_input, Opcode::Iconst),
extendop,
) {
let value = (ctx.get_constant(insn).unwrap() & 0xFFFF_FFFF_u64) as i64;
offset += value;
} else {
let reg = put_input_in_reg(ctx, extendee_input, NarrowValueMode::None);
result32.push((reg, extendop));
}
}
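                // An extension from a narrower (non-32-bit) width: just
                // materialize the extended value as a 64-bit addend.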
Opcode::Uextend | Opcode::Sextend => {
let reg = put_value_in_reg(ctx, value, NarrowValueMode::None);
result64.push(reg);
}
Opcode::Iadd => {
for input in 0..ctx.num_inputs(insn) {
let addend = ctx.input_as_value(insn, input);
workqueue.push(addend);
}
}
Opcode::Iconst => {
let value: i64 = ctx.get_constant(insn).unwrap() as i64;
offset += value;
}
                _ => panic!("Unexpected opcode from maybe_value_multi"),
}
} else {
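            // This value is a leaf (not an add, extend, or constant): use it
            // directly as a 64-bit addend.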
let reg = put_value_in_reg(ctx, value, NarrowValueMode::ZeroExtend64);
result64.push(reg);
}
}
(result64, result32, offset)
}
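
/// Lower the address of a pair load or store, which admits only a register
/// base plus a scaled 7-bit signed immediate offset.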
pub(crate) fn lower_pair_address(ctx: &mut Lower<Inst>, addr: Value, offset: i32) -> PairAMode {
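    // Collect addends through an arbitrary tree of 32-to-64-bit sign/zero
    // extends and addition ops. We update these as we consume address
    // components, so they represent the remaining addends not yet handled.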
let (mut addends64, mut addends32, args_offset) = collect_address_addends(ctx, addr);
let offset = args_offset + (offset as i64);
trace!(
"lower_pair_address: addends64 {:?}, addends32 {:?}, offset {}",
addends64,
addends32,
offset
);
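
    // Choose a base register: prefer a 64-bit addend, then fall back to an
    // extended 32-bit addend (extending it into a temp), and finally to the
    // zero register.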
let base_reg = if let Some(reg64) = addends64.pop() {
reg64
} else if let Some((reg32, extendop)) = addends32.pop() {
let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
let signed = match extendop {
ExtendOp::SXTW => true,
ExtendOp::UXTW => false,
_ => unreachable!(),
};
ctx.emit(Inst::Extend {
rd: tmp,
rn: reg32,
signed,
from_bits: 32,
to_bits: 64,
});
tmp.to_reg()
} else {
zero_reg()
};
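
    // Sum the remaining addends into the base, then use the offset as an
    // imm7 if it fits; otherwise fold it into the address register and use a
    // zero immediate.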
let addr = lower_add_addends(ctx, base_reg, addends64, addends32);
let (addr, imm7) = if let Some(imm7) = SImm7Scaled::maybe_from_i64(offset, I64) {
(addr, imm7)
} else {
let res = lower_add_immediate(ctx, addr, offset);
(res, SImm7Scaled::maybe_from_i64(0, I64).unwrap())
};
PairAMode::SignedOffset(addr, imm7)
}
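
/// Lower the address of a load or store.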
pub(crate) fn lower_address(
ctx: &mut Lower<Inst>,
elem_ty: Type,
addr: Value,
offset: i32,
) -> AMode {
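    // TODO: support base_reg + scale * index_reg. For this, we would need to
    // pattern-match shl or mul instructions.

    // Collect addends through an arbitrary tree of 32-to-64-bit sign/zero
    // extends and addition ops. We update these as we consume address
    // components, so they represent the remaining addends not yet handled.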
let (mut addends64, mut addends32, args_offset) = collect_address_addends(ctx, addr);
let mut offset = args_offset + (offset as i64);
trace!(
"lower_address: addends64 {:?}, addends32 {:?}, offset {}",
addends64,
addends32,
offset
);
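
    // First, decide what the `AMode` will be: pick at most two of the
    // collected addends (and possibly the constant offset) that the
    // addressing mode can encode directly.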
let memarg = if addends64.len() > 0 {
if addends32.len() > 0 {
let (reg32, extendop) = addends32.pop().unwrap();
let reg64 = addends64.pop().unwrap();
AMode::RegExtended {
rn: reg64,
rm: reg32,
extendop,
}
} else if offset > 0 && offset < 0x1000 {
let reg64 = addends64.pop().unwrap();
let off = offset;
offset = 0;
AMode::RegOffset {
rn: reg64,
off,
ty: elem_ty,
}
} else if addends64.len() >= 2 {
let reg1 = addends64.pop().unwrap();
let reg2 = addends64.pop().unwrap();
AMode::RegReg { rn: reg1, rm: reg2 }
} else {
let reg1 = addends64.pop().unwrap();
AMode::reg(reg1)
}
    } else {
        // No 64-bit addends: handle the 32-bit addends, or a bare constant
        // address.
if addends32.len() > 0 {
let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
let (reg1, extendop) = addends32.pop().unwrap();
let signed = match extendop {
ExtendOp::SXTW => true,
ExtendOp::UXTW => false,
_ => unreachable!(),
};
ctx.emit(Inst::Extend {
rd: tmp,
rn: reg1,
signed,
from_bits: 32,
to_bits: 64,
});
if let Some((reg2, extendop)) = addends32.pop() {
AMode::RegExtended {
rn: tmp.to_reg(),
rm: reg2,
extendop,
}
} else {
AMode::reg(tmp.to_reg())
}
        } else {
            // No addends at all: the address is just the constant offset;
            // materialize it into a register.
let off_reg = ctx.alloc_tmp(I64).only_reg().unwrap();
lower_constant_u64(ctx, off_reg, offset as u64);
offset = 0;
AMode::reg(off_reg.to_reg())
}
};
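
    // At this point, if we have any remaining components, we need to sum them
    // into a single register, replace one of the registers in the `AMode`
    // with that sum, and rebuild the `AMode`. If not, the `AMode` is already
    // complete.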
if offset == 0 && addends32.len() == 0 && addends64.len() == 0 {
return memarg;
}
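
    // Pull out the register in the `AMode` that we will replace with the
    // fully summed address.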
let addr = match memarg {
AMode::RegExtended { rn, .. } => rn,
AMode::RegOffset { rn, .. } => rn,
AMode::RegReg { rm, .. } => rm,
AMode::UnsignedOffset { rn, .. } => rn,
_ => unreachable!(),
};
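
    // If there is any remaining offset, fold it in first.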
let addr = if offset != 0 {
lower_add_immediate(ctx, addr, offset)
} else {
addr
};
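
    // Sum in the remaining 64-bit and extended 32-bit addends.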
let addr = lower_add_addends(ctx, addr, addends64, addends32);
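
    // Shoehorn the summed register back into the `AMode`.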
match memarg {
AMode::RegExtended { rm, extendop, .. } => AMode::RegExtended {
rn: addr,
rm,
extendop,
},
AMode::RegOffset { off, ty, .. } => AMode::RegOffset { rn: addr, off, ty },
AMode::RegReg { rn, .. } => AMode::RegReg { rn: addr, rm: rn },
AMode::UnsignedOffset { uimm12, .. } => AMode::UnsignedOffset { rn: addr, uimm12 },
_ => unreachable!(),
}
}
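
/// Adds the given 64-bit and extended 32-bit addends to `init`, emitting adds
/// as needed and returning the register holding the final sum.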
fn lower_add_addends(
ctx: &mut Lower<Inst>,
init: Reg,
addends64: AddressAddend64List,
addends32: AddressAddend32List,
) -> Reg {
let init = addends64.into_iter().fold(init, |prev, reg| {
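        // In an addition, the stack register is the zero register, so divert
        // it to another register just before the add.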
let reg = if reg == stack_reg() {
let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp, stack_reg(), I64));
tmp.to_reg()
} else {
reg
};
let rd = ctx.alloc_tmp(I64).only_reg().unwrap();
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd,
rn: prev,
rm: reg,
});
rd.to_reg()
});
addends32.into_iter().fold(init, |prev, (reg, extendop)| {
assert!(reg != stack_reg());
let rd = ctx.alloc_tmp(I64).only_reg().unwrap();
ctx.emit(Inst::AluRRRExtend {
alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd,
rn: prev,
rm: reg,
extendop,
});
rd.to_reg()
})
}
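
/// Adds `imm` to `src`, preferring an add or subtract with a 12-bit immediate
/// and falling back to materializing the constant; returns the register
/// holding the result.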
fn lower_add_immediate(ctx: &mut Lower<Inst>, src: Reg, imm: i64) -> Reg {
let dst = ctx.alloc_tmp(I64).only_reg().unwrap();
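    // Use a single add (or a sub, if the negated immediate fits) when the
    // immediate can be encoded in 12 bits; otherwise, load the constant into
    // a temp and add that.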
if let Some(imm12) = Imm12::maybe_from_u64(imm as u64) {
ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: dst,
rn: src,
imm12,
});
} else if let Some(imm12) = Imm12::maybe_from_u64(imm.wrapping_neg() as u64) {
ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::Sub,
size: OperandSize::Size64,
rd: dst,
rn: src,
imm12,
});
} else {
let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
lower_constant_u64(ctx, tmp, imm as u64);
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Add,
size: OperandSize::Size64,
rd: dst,
rn: tmp.to_reg(),
rm: src,
});
}
dst.to_reg()
}
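
/// Materializes a 64-bit constant into `rd`, emitting as many instructions as
/// the constant requires.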
pub(crate) fn lower_constant_u64(ctx: &mut Lower<Inst>, rd: Writable<Reg>, value: u64) {
for inst in Inst::load_constant(rd, value, &mut |ty| ctx.alloc_tmp(ty).only_reg().unwrap()) {
ctx.emit(inst);
}
}
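
//=============================================================================
// Lowering: comparisons and condition codes.

/// Maps a CLIF integer condition code to an AArch64 condition code.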
pub(crate) fn lower_condcode(cc: IntCC) -> Cond {
match cc {
IntCC::Equal => Cond::Eq,
IntCC::NotEqual => Cond::Ne,
IntCC::SignedGreaterThanOrEqual => Cond::Ge,
IntCC::SignedGreaterThan => Cond::Gt,
IntCC::SignedLessThanOrEqual => Cond::Le,
IntCC::SignedLessThan => Cond::Lt,
IntCC::UnsignedGreaterThanOrEqual => Cond::Hs,
IntCC::UnsignedGreaterThan => Cond::Hi,
IntCC::UnsignedLessThanOrEqual => Cond::Ls,
IntCC::UnsignedLessThan => Cond::Lo,
}
}
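
/// Maps a CLIF floating-point condition code to an AArch64 condition code.
/// Codes that cannot be expressed as a single AArch64 condition are
/// unimplemented.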
pub(crate) fn lower_fp_condcode(cc: FloatCC) -> Cond {
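    // After an AArch64 `fcmp`: equal sets Z; less-than sets N; greater-than
    // sets C; an unordered result (either operand NaN) sets C and V. The V
    // flag thus distinguishes ordered from unordered comparisons.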
match cc {
FloatCC::Ordered => Cond::Vc,
FloatCC::Unordered => Cond::Vs,
FloatCC::Equal => Cond::Eq,
FloatCC::NotEqual => Cond::Ne,
FloatCC::OrderedNotEqual => unimplemented!(),
FloatCC::UnorderedOrEqual => unimplemented!(),
FloatCC::LessThan => Cond::Mi,
FloatCC::LessThanOrEqual => Cond::Ls,
FloatCC::GreaterThan => Cond::Gt,
FloatCC::GreaterThanOrEqual => Cond::Ge,
FloatCC::UnorderedOrLessThan => unimplemented!(),
FloatCC::UnorderedOrLessThanOrEqual => unimplemented!(),
FloatCC::UnorderedOrGreaterThan => unimplemented!(),
FloatCC::UnorderedOrGreaterThanOrEqual => unimplemented!(),
}
}
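
/// Checks for an instance of `op` feeding the given input; returns the
/// matching instruction if so.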
pub(crate) fn maybe_input_insn(
c: &mut Lower<Inst>,
input: InsnInput,
op: Opcode,
) -> Option<IRInst> {
let inputs = c.get_input_as_source_or_const(input.insn, input.input);
trace!(
"maybe_input_insn: input {:?} has options {:?}; looking for op {:?}",
input,
inputs,
op
);
if let Some((src_inst, _)) = inputs.inst.as_inst() {
let data = c.data(src_inst);
trace!(" -> input inst {:?}", data);
if data.opcode() == op {
return Some(src_inst);
}
}
None
}
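
/// Checks for an instance of `op` defining the given value; returns the
/// matching instruction if so.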
pub(crate) fn maybe_value(c: &mut Lower<Inst>, value: Value, op: Opcode) -> Option<IRInst> {
let inputs = c.get_value_as_source_or_const(value);
if let Some((src_inst, _)) = inputs.inst.as_inst() {
let data = c.data(src_inst);
if data.opcode() == op {
return Some(src_inst);
}
}
None
}
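
/// Checks for an instance of any one of `ops` defining the given value;
/// returns the matching opcode and instruction if so.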
pub(crate) fn maybe_value_multi(
c: &mut Lower<Inst>,
value: Value,
ops: &[Opcode],
) -> Option<(Opcode, IRInst)> {
for &op in ops {
if let Some(inst) = maybe_value(c, value, op) {
return Some((op, inst));
}
}
None
}
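
//=============================================================================
// Lowering-backend trait implementation.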
impl LowerBackend for AArch64Backend {
type MInst = Inst;
fn lower(&self, ctx: &mut Lower<Inst>, ir_inst: IRInst) -> Option<InstOutput> {
isle::lower(ctx, self, ir_inst)
}
fn lower_branch(
&self,
ctx: &mut Lower<Inst>,
ir_inst: IRInst,
targets: &[MachLabel],
) -> Option<()> {
isle::lower_branch(ctx, self, ir_inst, targets)
}
fn maybe_pinned_reg(&self) -> Option<Reg> {
Some(regs::pinned_reg())
}
}