pub(super) mod isle;
use crate::ir::pcc::{FactContext, PccResult};
use crate::ir::{
Endianness, ExternalName, Inst as IRInst, InstructionData, LibCall, Opcode, Type, types,
};
use crate::isa::x64::abi::*;
use crate::isa::x64::inst::args::*;
use crate::isa::x64::inst::*;
use crate::isa::x64::pcc;
use crate::isa::{CallConv, x64::X64Backend};
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::Flags;
use alloc::boxed::Box;
use target_lexicon::Triple;
/// Identifies one input operand of a CLIF instruction: the instruction plus
/// the zero-based index of the operand within it. Used as a lightweight handle
/// when walking operand trees during lowering.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct InsnInput {
    // The instruction whose operand is referenced.
    insn: IRInst,
    // Zero-based index of the operand within `insn`.
    input: usize,
}
impl Lower<'_, Inst> {
    /// Allocate a fresh writable virtual GPR (backed by an I64 temp).
    #[inline]
    pub fn temp_writable_gpr(&mut self) -> WritableGpr {
        let tmp = self.alloc_tmp(types::I64).only_reg().unwrap();
        WritableGpr::from_writable_reg(tmp).unwrap()
    }

    /// Allocate a fresh writable virtual XMM register (backed by an F64 temp).
    #[inline]
    pub fn temp_writable_xmm(&mut self) -> WritableXmm {
        let tmp = self.alloc_tmp(types::F64).only_reg().unwrap();
        WritableXmm::from_writable_reg(tmp).unwrap()
    }
}
/// Returns `true` if `ty` is a scalar integer type that fits in a
/// general-purpose register (I8/I16/I32/I64).
///
/// Idiom fix: the original spelled this as a manual `match`-to-bool; the
/// `matches!` macro expresses the same predicate directly (clippy:
/// `match_like_matches_macro`).
fn is_int_or_ref_ty(ty: Type) -> bool {
    matches!(ty, types::I8 | types::I16 | types::I32 | types::I64)
}
/// If the given input is the result of an instruction with opcode `op`,
/// return that producing instruction; otherwise `None`. Inputs that are
/// constants (or otherwise not instruction results) never match.
fn matches_input(ctx: &mut Lower<Inst>, input: InsnInput, op: Opcode) -> Option<IRInst> {
    let src = ctx.get_input_as_source_or_const(input.insn, input.input);
    let (src_inst, _result_idx) = src.inst.as_inst()?;
    if ctx.data(src_inst).opcode() == op {
        Some(src_inst)
    } else {
        None
    }
}
/// Put the given input into one or more registers. A known-constant input is
/// materialized fresh with an immediate load (into a new temp) rather than
/// reusing a previously-emitted value; anything else is deferred to the
/// generic `Lower::put_input_in_regs`.
fn put_input_in_regs(ctx: &mut Lower<Inst>, spec: InsnInput) -> ValueRegs<Reg> {
    let ty = ctx.input_ty(spec.insn, spec.input);
    let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);

    match input.constant {
        Some(c) => {
            // Only integer-class values can be materialized via `Inst::imm`.
            assert!(is_int_or_ref_ty(ty));
            // Narrow constants use the 32-bit immediate form; only full
            // 64-bit values need the 64-bit form.
            let size = if ty_bits(ty) < 64 {
                OperandSize::Size32
            } else {
                OperandSize::Size64
            };
            let cst_copy = ctx.alloc_tmp(ty);
            ctx.emit(Inst::imm(size, c, cst_copy.only_reg().unwrap()));
            non_writable_value_regs(cst_copy)
        }
        None => ctx.put_input_in_regs(spec.insn, spec.input),
    }
}
/// Like `put_input_in_regs`, but for values known to occupy a single
/// register. Panics if the value actually spans multiple registers.
fn put_input_in_reg(ctx: &mut Lower<Inst>, spec: InsnInput) -> Reg {
    let regs = put_input_in_regs(ctx, spec);
    regs.only_reg().expect("Multi-register value not expected")
}
/// How strictly a load's width is constrained when deciding whether it may be
/// merged into a consuming instruction (see `is_mergeable_load`).
enum MergeableLoadSize {
    /// Loads of any width are candidates, including those narrower than
    /// 32 bits.
    Exact,
    /// Loads narrower than 32 bits are rejected; only 32-bit-or-wider loads
    /// are candidates for merging.
    Min32,
}
/// Determines whether `src_insn` is a plain `load` that can be merged into a
/// consuming instruction as a memory operand. On success, returns the input
/// slot holding the load's address together with the load's static offset.
///
/// A load is rejected when it has more than one input, when it is narrower
/// than 32 bits and `size` demands at least 32 bits, or when its memflags
/// explicitly request big-endian byte order.
fn is_mergeable_load(
    ctx: &mut Lower<Inst>,
    src_insn: IRInst,
    size: MergeableLoadSize,
) -> Option<(InsnInput, i32)> {
    let insn_data = ctx.data(src_insn);

    // Only single-input loads (one address operand) qualify.
    if ctx.num_inputs(src_insn) != 1 {
        return None;
    }

    // Sub-32-bit loads are only acceptable in `Exact` mode.
    let load_ty = ctx.output_ty(src_insn, 0);
    if ty_bits(load_ty) < 32 && matches!(size, MergeableLoadSize::Min32) {
        return None;
    }

    // An explicitly big-endian load cannot be folded into an x64 operand.
    if let Some(flags) = ctx.memflags(src_insn) {
        if flags.explicit_endianness() == Some(Endianness::Big) {
            return None;
        }
    }

    // Only a plain `load` opcode (not extending/special loads) is mergeable.
    match insn_data {
        &InstructionData::Load {
            opcode: Opcode::Load,
            offset,
            ..
        } => {
            let addr = InsnInput {
                insn: src_insn,
                input: 0,
            };
            Some((addr, offset.into()))
        }
        _ => None,
    }
}
/// Returns the constant value of the given input if it resolves to a known
/// constant, `None` otherwise.
fn input_to_imm(ctx: &mut Lower<Inst>, spec: InsnInput) -> Option<u64> {
    let src = ctx.get_input_as_source_or_const(spec.insn, spec.input);
    src.constant
}
/// Emits a call to the runtime libcall `libcall`, passing `inputs` as
/// arguments, and returns the output value registers of the call.
///
/// The call is made under the libcall calling convention derived from
/// `flags`/`triple`. Depending on `use_colocated_libcalls`, the callee is
/// either a direct (relocated, "known") call or an indirect call through a
/// temp register loaded with the symbol's far address.
fn emit_vm_call(
    ctx: &mut Lower<Inst>,
    flags: &Flags,
    triple: &Triple,
    libcall: LibCall,
    inputs: &[ValueRegs<Reg>],
) -> CodegenResult<InstOutput> {
    let extname = ExternalName::LibCall(libcall);

    // Libcall signature uses I64 as the pointer type here (x64 target).
    let call_conv = CallConv::for_libcall(flags, CallConv::triple_default(triple));
    let sig = libcall.signature(call_conv, types::I64);
    let outputs = ctx.gen_call_output(&sig);

    // Lazily intern an ABI signature for this IR signature, if not present.
    if !ctx.sigs().have_abi_sig_for_signature(&sig) {
        ctx.sigs_mut()
            .make_abi_sig_from_ir_signature::<X64ABIMachineSpec>(sig.clone(), flags)?;
    }
    let sig = ctx.sigs().abi_sig_for_signature(&sig);

    let uses = ctx.gen_call_args(sig, inputs);
    let defs = ctx.gen_call_rets(sig, &outputs);

    // Reserve outgoing stack space for both arguments and returns of this
    // call in the function's frame accounting.
    let stack_ret_space = ctx.sigs()[sig].sized_stack_ret_space();
    let stack_arg_space = ctx.sigs()[sig].sized_stack_arg_space();
    ctx.abi_mut()
        .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

    if flags.use_colocated_libcalls() {
        // Colocated: the libcall symbol is near enough for a direct call.
        let call_info = ctx.gen_call_info(sig, extname, uses, defs, None, false);
        ctx.emit(Inst::call_known(Box::new(call_info)));
    } else {
        // Otherwise load the (far) symbol address into a temp and call
        // indirectly through it.
        let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap();
        ctx.emit(Inst::LoadExtName {
            dst: tmp.map(Gpr::unwrap_new),
            name: Box::new(extname),
            offset: 0,
            distance: RelocDistance::Far,
        });
        let call_info = ctx.gen_call_info(sig, RegMem::reg(tmp.to_reg()), uses, defs, None, false);
        ctx.emit(Inst::call_unknown(Box::new(call_info)));
    }
    Ok(outputs)
}
/// Matches `ishl x, imm` where `imm <= 3`, returning the shifted operand and
/// the shift amount. A shift of at most 3 corresponds to an x64 address-mode
/// scale factor of 1/2/4/8.
fn matches_small_constant_shift(ctx: &mut Lower<Inst>, spec: InsnInput) -> Option<(InsnInput, u8)> {
    let shift = matches_input(ctx, spec, Opcode::Ishl)?;
    let amt_input = InsnInput {
        insn: shift,
        input: 1,
    };
    let shift_amt = input_to_imm(ctx, amt_input)?;
    if shift_amt > 3 {
        return None;
    }
    let shiftee = InsnInput {
        insn: shift,
        input: 0,
    };
    Some((shiftee, shift_amt as u8))
}
/// Lowers the address input `spec` (plus a static `offset`) to an x64
/// addressing mode (`Amode`), pattern-matching `iadd` and small constant
/// shifts to use base+index*scale+disp forms where possible.
fn lower_to_amode(ctx: &mut Lower<Inst>, spec: InsnInput, offset: i32) -> Amode {
    // The memflags of the consuming memory instruction are propagated onto
    // the resulting Amode.
    let flags = ctx
        .memflags(spec.insn)
        .expect("Instruction with amode should have memflags");

    // If the address is `iadd a, b`, try to fold the add into the amode.
    if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
        let output_ty = ctx.output_ty(add, 0);
        debug_assert_eq!(
            output_ty,
            types::I64,
            "Address width of 64 expected, got {output_ty}"
        );
        let add_inputs = &[
            InsnInput {
                insn: add,
                input: 0,
            },
            InsnInput {
                insn: add,
                input: 1,
            },
        ];

        // Prefer base + (index << shift): check either side of the add for a
        // small constant shift usable as the scale.
        let (base, index, shift) = if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[0])
        {
            (
                put_input_in_reg(ctx, add_inputs[1]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[1])
        {
            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else {
            // No shift: if either add operand (possibly through a `uextend`)
            // is a known constant that fits, fold it into the displacement
            // and use the other operand as the base.
            for input in 0..=1 {
                // Look through a `uextend` to reach a constant operand.
                let (inst, inst_input) = if let Some(uextend) =
                    matches_input(ctx, InsnInput { insn: add, input }, Opcode::Uextend)
                {
                    (uextend, 0)
                } else {
                    (add, input)
                };
                if let Some(cst) = ctx.get_input_as_source_or_const(inst, inst_input).constant {
                    // Fold with wrapping i64 arithmetic, but only use the
                    // result if it still fits in the 32-bit displacement.
                    let final_offset = (offset as i64).wrapping_add(cst as i64);
                    if let Ok(final_offset) = i32::try_from(final_offset) {
                        let base = put_input_in_reg(ctx, add_inputs[1 - input]);
                        return Amode::imm_reg(final_offset, base).with_flags(flags);
                    }
                }
            }
            // Fall back to base + index with no scale.
            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, add_inputs[1]),
                0,
            )
        };
        return Amode::imm_reg_reg_shift(
            offset,
            Gpr::unwrap_new(base),
            Gpr::unwrap_new(index),
            shift,
        )
        .with_flags(flags);
    }

    // Not an add: simple reg + displacement.
    let input = put_input_in_reg(ctx, spec);
    Amode::imm_reg(offset, input).with_flags(flags)
}
impl LowerBackend for X64Backend {
    type MInst = Inst;

    /// Lower a single CLIF instruction by delegating to the ISLE-generated
    /// lowering rules.
    fn lower(&self, ctx: &mut Lower<Inst>, ir_inst: IRInst) -> Option<InstOutput> {
        isle::lower(ctx, self, ir_inst)
    }

    /// Lower a branch-group terminator (with its resolved target labels) via
    /// the ISLE-generated rules.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Inst>,
        ir_inst: IRInst,
        targets: &[MachLabel],
    ) -> Option<()> {
        isle::lower_branch(ctx, self, ir_inst, targets)
    }

    /// x64 supports a pinned register; report it so the lowering framework
    /// can treat it specially.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        Some(regs::pinned_reg())
    }

    /// Proof-carrying-code fact check for one vcode instruction; delegates to
    /// the x64 PCC checker.
    fn check_fact(
        &self,
        ctx: &FactContext<'_>,
        vcode: &mut VCode<Self::MInst>,
        inst: InsnIndex,
        state: &mut pcc::FactFlowState,
    ) -> PccResult<()> {
        pcc::check(ctx, vcode, inst, state)
    }

    type FactFlowState = pcc::FactFlowState;
}