use crate::CodegenError;
use crate::FxHashMap;
use crate::HashMap;
use crate::entity::SecondaryMap;
use crate::ir::{ArgumentExtension, ArgumentPurpose, ExceptionTag, Signature};
use crate::ir::{StackSlotKey, types::*};
use crate::isa::TargetIsa;
use crate::settings::ProbestackStrategy;
use crate::{ir, isa};
use crate::{machinst::*, trace};
use alloc::boxed::Box;
use core::marker::PhantomData;
use regalloc2::{MachineEnv, PReg, PRegSet};
use smallvec::smallvec;
/// A short vector of machine instructions, sized for the common case of
/// small expansion sequences (prologue/epilogue fragments, loads/stores).
pub type SmallInstVec<I> = SmallVec<[I; 4]>;

/// A pairing of a vreg in the function body with the physical register that
/// carries an incoming argument value (consumed by `ABIMachineSpec::gen_args`).
#[derive(Clone, Debug)]
pub struct ArgPair {
    /// The vreg that receives the argument value.
    pub vreg: Writable<Reg>,
    /// The physical register holding the argument on entry.
    pub preg: Reg,
}

/// A pairing of a vreg holding a computed return value with the physical
/// register through which it is returned (consumed by
/// `ABIMachineSpec::gen_rets`).
#[derive(Clone, Debug)]
pub struct RetPair {
    /// The vreg holding the value to be returned.
    pub vreg: Reg,
    /// The physical register into which the value is placed.
    pub preg: Reg,
}
/// One location for (part of) an ABI argument or return value: either a
/// real register or a stack offset.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ABIArgSlot {
    /// The value (or this part of it) is passed in a register.
    Reg {
        /// The real register carrying this slot.
        reg: RealReg,
        /// The value type stored in this slot.
        ty: ir::Type,
        /// Whether and how the value is sign-/zero-extended to fill the slot.
        extension: ir::ArgumentExtension,
    },
    /// The value (or this part of it) is passed on the stack.
    Stack {
        /// Offset of this slot; interpreted relative to an argument area via
        /// `StackAMode` at the use sites.
        offset: i64,
        /// The value type stored in this slot.
        ty: ir::Type,
        /// Whether and how the value is sign-/zero-extended to fill the slot.
        extension: ir::ArgumentExtension,
    },
}
impl ABIArgSlot {
    /// The value type carried by this slot, whether it lives in a register
    /// or on the stack.
    pub fn get_type(&self) -> ir::Type {
        match *self {
            ABIArgSlot::Reg { ty, .. } | ABIArgSlot::Stack { ty, .. } => ty,
        }
    }
}
/// A vector of `ABIArgSlot`s, with inline capacity for the common
/// single-slot case.
pub type ABIArgSlotVec = SmallVec<[ABIArgSlot; 1]>;

/// An ABI-level argument or return value: one or more slots, a struct passed
/// by value in the argument area, or a value passed behind an implicit
/// pointer.
#[derive(Clone, Debug)]
pub enum ABIArg {
    /// A value passed directly in one or more slots.
    Slots {
        /// The register/stack slots that together hold the value.
        slots: ABIArgSlotVec,
        /// The IR-level purpose of this argument.
        purpose: ir::ArgumentPurpose,
    },
    /// A struct argument whose data lives in the stack-argument area.
    StructArg {
        /// Offset of the struct data within the stack-argument area.
        offset: i64,
        /// Size of the struct, in bytes.
        size: u64,
        /// The IR-level purpose of this argument.
        purpose: ir::ArgumentPurpose,
    },
    /// A value passed indirectly: `pointer` carries the address of storage in
    /// the stack-argument area that holds the actual value.
    ImplicitPtrArg {
        /// The slot holding the pointer itself.
        pointer: ABIArgSlot,
        /// Offset of the pointed-to storage within the stack-argument area.
        offset: i64,
        /// Type of the pointed-to value.
        ty: Type,
        /// The IR-level purpose of this argument.
        purpose: ir::ArgumentPurpose,
    },
}
impl ABIArg {
pub fn reg(
reg: RealReg,
ty: ir::Type,
extension: ir::ArgumentExtension,
purpose: ir::ArgumentPurpose,
) -> ABIArg {
ABIArg::Slots {
slots: smallvec![ABIArgSlot::Reg { reg, ty, extension }],
purpose,
}
}
pub fn stack(
offset: i64,
ty: ir::Type,
extension: ir::ArgumentExtension,
purpose: ir::ArgumentPurpose,
) -> ABIArg {
ABIArg::Slots {
slots: smallvec![ABIArgSlot::Stack {
offset,
ty,
extension,
}],
purpose,
}
}
}
/// Are we computing locations for arguments or for return values?
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ArgsOrRets {
    /// Computing locations for formal arguments.
    Args,
    /// Computing locations for return values.
    Rets,
}

/// An abstract stack address, relative to one of the stack regions of the
/// current frame.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StackAMode {
    /// Offset into the incoming-argument area; the second field is the total
    /// size of that area (the signature's sized stack-argument space).
    IncomingArg(i64, u32),
    /// Offset into the stack-slot (local storage) area of the frame.
    Slot(i64),
    /// Offset into the outgoing-argument area used when making calls.
    OutgoingArg(i64),
}
impl StackAMode {
    /// Add `offset` to the address offset, preserving the addressing region.
    ///
    /// Panics on signed-offset overflow (not expected in practice).
    fn offset_by(&self, offset: u32) -> Self {
        let bump = |base: i64| base.checked_add(i64::from(offset)).unwrap();
        match *self {
            StackAMode::IncomingArg(off, size) => StackAMode::IncomingArg(bump(off), size),
            StackAMode::Slot(off) => StackAMode::Slot(bump(off)),
            StackAMode::OutgoingArg(off) => StackAMode::OutgoingArg(bump(off)),
        }
    }
}
/// Trait implemented by machine-specific ISA flag types, exposing the flag
/// queries that the shared ABI code needs.
pub trait IsaFlags: Clone {
    /// Is forward-edge control-flow integrity enabled? (Disabled by default.)
    fn is_forward_edge_cfi_enabled(&self) -> bool {
        false
    }
}
/// Accumulator for `ABIArg`s produced by `ABIMachineSpec::compute_arg_locs`.
/// Wraps the shared backing storage owned by a `SigSet` and exposes only the
/// args pushed for the current sequence.
pub struct ArgsAccumulator<'a> {
    /// Shared backing storage for all signatures' args.
    sig_set_abi_args: &'a mut Vec<ABIArg>,
    /// Index in the backing storage where this sequence begins.
    start: usize,
    /// Set once a non-formal arg (e.g. a hidden return-area pointer) has been
    /// pushed; formal args must not be pushed afterwards.
    non_formal_flag: bool,
}
impl<'a> ArgsAccumulator<'a> {
fn new(sig_set_abi_args: &'a mut Vec<ABIArg>) -> Self {
let start = sig_set_abi_args.len();
ArgsAccumulator {
sig_set_abi_args,
start,
non_formal_flag: false,
}
}
#[inline]
pub fn push(&mut self, arg: ABIArg) {
debug_assert!(!self.non_formal_flag);
self.sig_set_abi_args.push(arg)
}
#[inline]
pub fn push_non_formal(&mut self, arg: ABIArg) {
self.non_formal_flag = true;
self.sig_set_abi_args.push(arg)
}
#[inline]
pub fn args(&self) -> &[ABIArg] {
&self.sig_set_abi_args[self.start..]
}
#[inline]
pub fn args_mut(&mut self) -> &mut [ABIArg] {
&mut self.sig_set_abi_args[self.start..]
}
}
/// Trait implemented by each machine backend to describe its register
/// assignments and to generate the machine-specific instruction sequences
/// for argument handling, stack access, prologues/epilogues, and calls.
pub trait ABIMachineSpec {
    /// The instruction type for this backend.
    type I: VCodeInst;

    /// The ISA-flags type for this backend.
    type F: IsaFlags;

    /// Maximum stack space that arguments or return values may occupy before
    /// compilation fails with `CodegenError::ImplLimitExceeded`.
    const STACK_ARG_RET_SIZE_LIMIT: u32;

    /// Number of bits in a machine word.
    fn word_bits() -> u32;

    /// Number of bytes in a machine word.
    fn word_bytes() -> u32 {
        Self::word_bits() / 8
    }

    /// The integer type of a machine word (`I32` or `I64`).
    fn word_type() -> Type {
        match Self::word_bits() {
            32 => I32,
            64 => I64,
            _ => unreachable!(),
        }
    }

    /// Register class used for machine-word values.
    fn word_reg_class() -> RegClass {
        RegClass::Int
    }

    /// Required stack alignment, in bytes, for the given calling convention.
    fn stack_align(call_conv: isa::CallConv) -> u32;

    /// Assign the given parameters (or returns, per `args_or_rets`) to
    /// registers and stack slots, pushing the resulting `ABIArg`s into
    /// `args`.
    ///
    /// Returns the stack space used by the located values and, if
    /// `add_ret_area_ptr` was set, the index of the hidden return-area
    /// pointer argument that was appended.
    fn compute_arg_locs(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        params: &[ir::AbiParam],
        args_or_rets: ArgsOrRets,
        add_ret_area_ptr: bool,
        args: ArgsAccumulator,
    ) -> CodegenResult<(u32, Option<usize>)>;

    /// Generate a load from the stack address `mem` into `into_reg`.
    fn gen_load_stack(mem: StackAMode, into_reg: Writable<Reg>, ty: Type) -> Self::I;

    /// Generate a store of `from_reg` to the stack address `mem`.
    fn gen_store_stack(mem: StackAMode, from_reg: Reg, ty: Type) -> Self::I;

    /// Generate a register-to-register move.
    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Self::I;

    /// Generate an integer extension (signed if `is_signed`) from
    /// `from_bits` to `to_bits`.
    fn gen_extend(
        to_reg: Writable<Reg>,
        from_reg: Reg,
        is_signed: bool,
        from_bits: u8,
        to_bits: u8,
    ) -> Self::I;

    /// Generate the entry-point pseudo-instruction that binds incoming
    /// argument registers to vregs.
    fn gen_args(args: Vec<ArgPair>) -> Self::I;

    /// Generate the return pseudo-instruction that binds return-value vregs
    /// to their physical registers.
    fn gen_rets(rets: Vec<RetPair>) -> Self::I;

    /// Generate `into_reg = from_reg + imm`; may require a short sequence on
    /// some targets, hence the vector return.
    fn gen_add_imm(
        call_conv: isa::CallConv,
        into_reg: Writable<Reg>,
        from_reg: Reg,
        imm: u32,
    ) -> SmallInstVec<Self::I>;

    /// Generate a sequence that traps if the stack pointer is below
    /// `limit_reg`.
    fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I>;

    /// Generate an instruction computing the address described by `mem` into
    /// `into_reg`.
    fn gen_get_stack_addr(mem: StackAMode, into_reg: Writable<Reg>) -> Self::I;

    /// A fixed scratch register usable for stack-limit computation before
    /// any vregs are available.
    fn get_stacklimit_reg(call_conv: isa::CallConv) -> Reg;

    /// Generate a load from `base + offset` into `into_reg`.
    fn gen_load_base_offset(into_reg: Writable<Reg>, base: Reg, offset: i32, ty: Type) -> Self::I;

    /// Generate a store of `from_reg` to `base + offset`.
    fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Self::I;

    /// Generate a stack-pointer adjustment of `amount` bytes.
    fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I>;

    /// Compute the final frame layout from register-allocation results and
    /// the accumulated area sizes.
    fn compute_frame_layout(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        sig: &Signature,
        regs: &[Writable<RealReg>],
        function_calls: FunctionCalls,
        incoming_args_size: u32,
        tail_args_size: u32,
        stackslots_size: u32,
        fixed_frame_storage_size: u32,
        outgoing_args_size: u32,
    ) -> SmallInstVec<Self::I>;

    /// Generate the frame-setup portion of the prologue.
    fn gen_prologue_frame_setup(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        isa_flags: &Self::F,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I>;

    /// Generate the frame-restore portion of the epilogue.
    fn gen_epilogue_frame_restore(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        isa_flags: &Self::F,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I>;

    /// Generate the return instruction (sequence).
    fn gen_return(
        call_conv: isa::CallConv,
        isa_flags: &Self::F,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I>;

    /// Generate a call to a stack-probe routine for a frame of `frame_size`
    /// bytes.
    fn gen_probestack(insts: &mut SmallInstVec<Self::I>, frame_size: u32);

    /// Generate an inline stack probe for a frame of `frame_size` bytes with
    /// the given guard-page size.
    fn gen_inline_probestack(
        insts: &mut SmallInstVec<Self::I>,
        call_conv: isa::CallConv,
        frame_size: u32,
        guard_size: u32,
    );

    /// Generate the clobbered-register saves for the prologue.
    fn gen_clobber_save(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        frame_layout: &FrameLayout,
    ) -> SmallVec<[Self::I; 16]>;

    /// Generate the clobbered-register restores for the epilogue.
    fn gen_clobber_restore(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        frame_layout: &FrameLayout,
    ) -> SmallVec<[Self::I; 16]>;

    /// Generate a memcpy of `size` bytes from `src` to `dst`; `alloc_tmp`
    /// provides scratch vregs of the requested type.
    fn gen_memcpy<F: FnMut(Type) -> Writable<Reg>>(
        call_conv: isa::CallConv,
        dst: Reg,
        src: Reg,
        size: usize,
        alloc_tmp: F,
    ) -> SmallVec<[Self::I; 8]>;

    /// Number of spillslots required to hold a value of the given register
    /// class on this target.
    fn get_number_of_spillslots_for_value(
        rc: RegClass,
        target_vector_bytes: u32,
        isa_flags: &Self::F,
    ) -> u32;

    /// The register environment handed to the register allocator for the
    /// given calling convention.
    fn get_machine_env(flags: &settings::Flags, call_conv: isa::CallConv) -> &MachineEnv;

    /// The set of registers clobbered by a call with the given calling
    /// convention (optionally an exception-raising call).
    fn get_regs_clobbered_by_call(
        call_conv_of_callee: isa::CallConv,
        is_exception: bool,
    ) -> PRegSet;

    /// The extension mode actually applied for a value, given the calling
    /// convention and the extension specified in the signature.
    fn get_ext_mode(
        call_conv: isa::CallConv,
        specified: ir::ArgumentExtension,
    ) -> ir::ArgumentExtension;

    /// A temporary register usable while moving return values into place.
    fn retval_temp_reg(call_conv_of_callee: isa::CallConv) -> Writable<Reg>;

    /// The registers carrying exception payloads for the given calling
    /// convention; none by default.
    fn exception_payload_regs(callee_conv: isa::CallConv) -> &'static [Reg] {
        let _ = callee_conv;
        &[]
    }
}
/// Out-of-line data for a call: everything needed to emit a call to a
/// destination of type `T` (e.g. a name or a register).
#[derive(Clone, Debug)]
pub struct CallInfo<T> {
    /// Receiver of this call.
    pub dest: T,
    /// Register uses of this call: the argument (vreg, preg) pairs.
    pub uses: CallArgList,
    /// Register/stack definitions of this call: the return-value locations.
    pub defs: CallRetList,
    /// Registers clobbered by this call, per its calling convention.
    pub clobbers: PRegSet,
    /// The calling convention of the callee.
    pub callee_conv: isa::CallConv,
    /// The calling convention of the caller.
    pub caller_conv: isa::CallConv,
    /// Bytes of outgoing-argument space popped by the callee (non-zero for
    /// callee-pop conventions such as `tail`; see `gen_call_info`).
    pub callee_pop_size: u32,
    /// Information for an invoke-style (`try_call`) call with exception
    /// handlers, if any.
    pub try_call_info: Option<TryCallInfo>,
    // NOTE(review): presumably marks calls whose destination may be patched
    // after emission; `CallInfo::empty` sets this to `false`. Confirm at the
    // sites that construct `CallInfo` directly.
    pub patchable: bool,
}

/// Out-of-line data present only on `try_call` instructions: the
/// normal-return continuation and the exception-handler table.
#[derive(Clone, Debug)]
pub struct TryCallInfo {
    /// Label to branch to on a normal (non-exceptional) return.
    pub continuation: MachLabel,
    /// The exception handlers associated with this call site.
    pub exception_handlers: Box<[TryCallHandler]>,
}

/// One entry in a `try_call` handler table.
#[derive(Clone, Debug)]
pub enum TryCallHandler {
    /// Handle exceptions matching the given tag at the given label.
    Tag(ExceptionTag, MachLabel),
    /// Catch-all handler at the given label.
    Default(MachLabel),
    // NOTE(review): appears to name the register providing exception context
    // for subsequent handler entries — confirm against the emission code.
    Context(Reg),
}
impl<T> CallInfo<T> {
pub fn empty(dest: T, call_conv: isa::CallConv) -> CallInfo<T> {
CallInfo {
dest,
uses: smallvec![],
defs: smallvec![],
clobbers: PRegSet::empty(),
caller_conv: call_conv,
callee_conv: call_conv,
callee_pop_size: 0,
try_call_info: None,
patchable: false,
}
}
}
/// An interned reference to an ABI signature (`SigData`) stored in a
/// `SigSet`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Sig(u32);
cranelift_entity::entity_impl!(Sig);

impl Sig {
    /// The signature interned immediately before this one, if any; used to
    /// locate where this signature's entries begin in the shared `abi_args`
    /// storage (see `SigSet::rets`).
    fn prev(self) -> Option<Sig> {
        self.0.checked_sub(1).map(Sig)
    }
}
/// ABI information for one signature, shared between the callee (body) and
/// caller (call-site) views. The actual arg/ret locations live out-of-line
/// in the owning `SigSet`'s `abi_args` vector.
#[derive(Clone, Debug)]
pub struct SigData {
    /// Exclusive end index of this signature's args in `SigSet::abi_args`.
    /// For each signature, rets are stored first, then args.
    args_end: u32,
    /// Exclusive end index of this signature's rets in `SigSet::abi_args`.
    rets_end: u32,
    /// Total stack space, in bytes, used by stack-passed arguments.
    sized_stack_arg_space: u32,
    /// Total stack space, in bytes, used by stack-returned values.
    sized_stack_ret_space: u32,
    /// Index (among the args) of the hidden return-area pointer argument,
    /// if one is needed.
    stack_ret_arg: Option<u16>,
    /// Calling convention of this signature.
    call_conv: isa::CallConv,
}

impl SigData {
    /// Stack space, in bytes, used by stack-passed arguments.
    pub fn sized_stack_arg_space(&self) -> u32 {
        self.sized_stack_arg_space
    }

    /// Stack space, in bytes, used by stack-returned values.
    pub fn sized_stack_ret_space(&self) -> u32 {
        self.sized_stack_ret_space
    }

    /// The calling convention of this signature.
    pub fn call_conv(&self) -> isa::CallConv {
        self.call_conv
    }

    /// Index of the hidden return-area pointer argument, if any.
    pub fn stack_ret_arg(&self) -> Option<u16> {
        self.stack_ret_arg
    }
}
/// A collection of interned ABI signatures, keyed by `ir::Signature` and by
/// `ir::SigRef`, with all `ABIArg`s stored in one shared vector.
pub struct SigSet {
    /// `ir::Signature`s for which we already have an interned ABI signature.
    ir_signature_to_abi_sig: FxHashMap<ir::Signature, Sig>,
    /// Per-`SigRef` cache of interned ABI signatures.
    ir_sig_ref_to_abi_sig: SecondaryMap<ir::SigRef, Option<Sig>>,
    /// Shared storage for every signature's `ABIArg`s; each signature's rets
    /// come first, then its args (see `SigData::rets_end` / `args_end`).
    abi_args: Vec<ABIArg>,
    /// The interned signature data, keyed by `Sig`.
    sigs: PrimaryMap<Sig, SigData>,
}
impl SigSet {
pub fn new<M>(func: &ir::Function, flags: &settings::Flags) -> CodegenResult<Self>
where
M: ABIMachineSpec,
{
let arg_estimate = func.dfg.signatures.len() * 6;
let mut sigs = SigSet {
ir_signature_to_abi_sig: FxHashMap::default(),
ir_sig_ref_to_abi_sig: SecondaryMap::with_capacity(func.dfg.signatures.len()),
abi_args: Vec::with_capacity(arg_estimate),
sigs: PrimaryMap::with_capacity(1 + func.dfg.signatures.len()),
};
sigs.make_abi_sig_from_ir_signature::<M>(func.signature.clone(), flags)?;
for sig_ref in func.dfg.signatures.keys() {
sigs.make_abi_sig_from_ir_sig_ref::<M>(sig_ref, &func.dfg, flags)?;
}
Ok(sigs)
}
pub fn have_abi_sig_for_signature(&self, signature: &ir::Signature) -> bool {
self.ir_signature_to_abi_sig.contains_key(signature)
}
pub fn make_abi_sig_from_ir_signature<M>(
&mut self,
signature: ir::Signature,
flags: &settings::Flags,
) -> CodegenResult<Sig>
where
M: ABIMachineSpec,
{
debug_assert!(!self.have_abi_sig_for_signature(&signature));
let sig_data = self.from_func_sig::<M>(&signature, flags)?;
let sig = self.sigs.push(sig_data);
self.ir_signature_to_abi_sig.insert(signature, sig);
Ok(sig)
}
fn make_abi_sig_from_ir_sig_ref<M>(
&mut self,
sig_ref: ir::SigRef,
dfg: &ir::DataFlowGraph,
flags: &settings::Flags,
) -> CodegenResult<Sig>
where
M: ABIMachineSpec,
{
if let Some(sig) = self.ir_sig_ref_to_abi_sig[sig_ref] {
return Ok(sig);
}
let signature = &dfg.signatures[sig_ref];
let sig_data = self.from_func_sig::<M>(signature, flags)?;
let sig = self.sigs.push(sig_data);
self.ir_sig_ref_to_abi_sig[sig_ref] = Some(sig);
Ok(sig)
}
pub fn abi_sig_for_sig_ref(&self, sig_ref: ir::SigRef) -> Sig {
self.ir_sig_ref_to_abi_sig[sig_ref]
.expect("must call `make_abi_sig_from_ir_sig_ref` before `get_abi_sig_for_sig_ref`")
}
pub fn abi_sig_for_signature(&self, signature: &ir::Signature) -> Sig {
self.ir_signature_to_abi_sig
.get(signature)
.copied()
.expect("must call `make_abi_sig_from_ir_signature` before `get_abi_sig_for_signature`")
}
pub fn from_func_sig<M: ABIMachineSpec>(
&mut self,
sig: &ir::Signature,
flags: &settings::Flags,
) -> CodegenResult<SigData> {
if sig.uses_special_return(ArgumentPurpose::StructReturn) {
panic!("Explicit StructReturn return value not allowed: {sig:?}")
}
let tmp;
let returns = if let Some(struct_ret_index) =
sig.special_param_index(ArgumentPurpose::StructReturn)
{
if !sig.returns.is_empty() {
panic!("No return values are allowed when using StructReturn: {sig:?}");
}
tmp = [sig.params[struct_ret_index]];
&tmp
} else {
sig.returns.as_slice()
};
let (sized_stack_ret_space, _) = M::compute_arg_locs(
sig.call_conv,
flags,
&returns,
ArgsOrRets::Rets,
false,
ArgsAccumulator::new(&mut self.abi_args),
)?;
if !flags.enable_multi_ret_implicit_sret() {
assert_eq!(sized_stack_ret_space, 0);
}
let rets_end = u32::try_from(self.abi_args.len()).unwrap();
if sized_stack_ret_space > M::STACK_ARG_RET_SIZE_LIMIT {
return Err(CodegenError::ImplLimitExceeded);
}
let need_stack_return_area = sized_stack_ret_space > 0;
if need_stack_return_area {
assert!(!sig.uses_special_param(ir::ArgumentPurpose::StructReturn));
}
let (sized_stack_arg_space, stack_ret_arg) = M::compute_arg_locs(
sig.call_conv,
flags,
&sig.params,
ArgsOrRets::Args,
need_stack_return_area,
ArgsAccumulator::new(&mut self.abi_args),
)?;
let args_end = u32::try_from(self.abi_args.len()).unwrap();
if sized_stack_arg_space > M::STACK_ARG_RET_SIZE_LIMIT {
return Err(CodegenError::ImplLimitExceeded);
}
trace!(
"ABISig: sig {:?} => args end = {} rets end = {}
arg stack = {} ret stack = {} stack_ret_arg = {:?}",
sig,
args_end,
rets_end,
sized_stack_arg_space,
sized_stack_ret_space,
need_stack_return_area,
);
let stack_ret_arg = stack_ret_arg.map(|s| u16::try_from(s).unwrap());
Ok(SigData {
args_end,
rets_end,
sized_stack_arg_space,
sized_stack_ret_space,
stack_ret_arg,
call_conv: sig.call_conv,
})
}
pub fn args(&self, sig: Sig) -> &[ABIArg] {
let sig_data = &self.sigs[sig];
let start = usize::try_from(sig_data.rets_end).unwrap();
let end = usize::try_from(sig_data.args_end).unwrap();
&self.abi_args[start..end]
}
pub fn get_ret_arg(&self, sig: Sig) -> Option<ABIArg> {
let sig_data = &self.sigs[sig];
if let Some(i) = sig_data.stack_ret_arg {
Some(self.args(sig)[usize::from(i)].clone())
} else {
None
}
}
pub fn get_arg(&self, sig: Sig, idx: usize) -> ABIArg {
self.args(sig)[idx].clone()
}
pub fn rets(&self, sig: Sig) -> &[ABIArg] {
let sig_data = &self.sigs[sig];
let start = usize::try_from(sig.prev().map_or(0, |prev| self.sigs[prev].args_end)).unwrap();
let end = usize::try_from(sig_data.rets_end).unwrap();
&self.abi_args[start..end]
}
pub fn get_ret(&self, sig: Sig, idx: usize) -> ABIArg {
self.rets(sig)[idx].clone()
}
pub fn num_args(&self, sig: Sig) -> usize {
let len = self.args(sig).len();
if self.sigs[sig].stack_ret_arg.is_some() {
len - 1
} else {
len
}
}
pub fn num_rets(&self, sig: Sig) -> usize {
self.rets(sig).len()
}
}
impl core::ops::Index<Sig> for SigSet {
    type Output = SigData;

    /// Index directly into the interned `SigData` table.
    fn index(&self, sig: Sig) -> &Self::Output {
        &self.sigs[sig]
    }
}
/// Description of a function's stack-frame layout, as computed by
/// `ABIMachineSpec::compute_frame_layout`.
#[derive(Clone, Debug, Default)]
pub struct FrameLayout {
    /// Word size, in bytes, for this target.
    pub word_bytes: u32,
    /// Size, in bytes, of the incoming-argument area.
    pub incoming_args_size: u32,
    /// Size of the incoming-argument area as required across any
    /// `return_call`s this function makes; starts at the function's own
    /// incoming-argument size and only grows (see
    /// `Callee::accumulate_tail_args_size`).
    pub tail_args_size: u32,
    /// Size, in bytes, of the frame-setup area.
    pub setup_area_size: u32,
    /// Size, in bytes, of the clobbered-register save area.
    pub clobber_size: u32,
    /// Size, in bytes, of the fixed frame storage.
    // NOTE(review): fed separately from `stackslots_size` into
    // `compute_frame_layout`; presumably covers spillslots plus stack slots —
    // confirm against the backends' implementations.
    pub fixed_frame_storage_size: u32,
    /// Size, in bytes, of the explicit stack slots alone.
    pub stackslots_size: u32,
    /// Size, in bytes, of the outgoing-argument area.
    pub outgoing_args_size: u32,
    /// Clobbered callee-save registers that must be saved/restored,
    /// partitioned with all integer-class registers first (relied upon by
    /// `clobbered_callee_saves_by_class`).
    pub clobbered_callee_saves: Vec<Writable<RealReg>>,
    /// What kinds of calls this function makes.
    pub function_calls: FunctionCalls,
}
impl FrameLayout {
    /// Split the clobbered callee-saves into the (integer-class,
    /// float-class) sub-slices. Relies on the list being partitioned with
    /// all integer-class registers first, as asserted below.
    pub fn clobbered_callee_saves_by_class(&self) -> (&[Writable<RealReg>], &[Writable<RealReg>]) {
        let split = self
            .clobbered_callee_saves
            .partition_point(|r| r.to_reg().class() == RegClass::Int);
        let (ints, floats) = self.clobbered_callee_saves.split_at(split);
        debug_assert!(floats.iter().all(|r| r.to_reg().class() == RegClass::Float));
        (ints, floats)
    }

    /// Total size of the frame portion below the setup area: outgoing args,
    /// fixed storage, and clobber saves.
    pub fn active_size(&self) -> u32 {
        self.outgoing_args_size + self.fixed_frame_storage_size + self.clobber_size
    }

    /// Offset from SP to the base of the sized stack slots.
    pub fn sp_to_sized_stack_slots(&self) -> u32 {
        self.outgoing_args_size
    }

    /// Byte offset of the given spillslot, relative to the stack-slot area:
    /// spillslots are laid out word-by-word above the stack slots.
    pub fn spillslot_offset(&self, spillslot: SpillSlot) -> i64 {
        let word = i64::from(self.word_bytes);
        self.stackslots_size as i64 + spillslot.index() as i64 * word
    }

    /// Offset from SP to FP; the same distance as `active_size`.
    pub fn sp_to_fp(&self) -> u32 {
        self.active_size()
    }
}
/// ABI object for a function body: the callee-side view of the signature,
/// stack slots, and frame state, accumulated during lowering.
pub struct Callee<M: ABIMachineSpec> {
    /// CLIF-level signature, normalized so that a struct-return pointer
    /// parameter also appears as a return value (see
    /// `ensure_struct_return_ptr_is_returned`).
    ir_sig: ir::Signature,
    /// Interned ABI signature for this function.
    sig: Sig,
    /// Resolved byte sizes of the function's concrete dynamic types.
    dynamic_type_sizes: HashMap<Type, u32>,
    /// Offset of each dynamic stack slot within the stack-slot area.
    dynamic_stackslots: PrimaryMap<DynamicStackSlot, u32>,
    /// Offset of each sized stack slot within the stack-slot area.
    sized_stackslots: PrimaryMap<StackSlot, u32>,
    /// `StackSlotKey`s associated with sized stack slots, if any.
    sized_stackslot_keys: SecondaryMap<StackSlot, Option<StackSlotKey>>,
    /// Total size of all stack slots, rounded up to a word multiple.
    stackslots_size: u32,
    /// Running maximum of outgoing-argument space needed by calls made so
    /// far (grown via `accumulate_outgoing_args_size`).
    outgoing_args_size: u32,
    /// Running maximum of incoming-argument space needed by this function
    /// and its `return_call` targets (grown via
    /// `accumulate_tail_args_size`).
    tail_args_size: u32,
    /// (vreg, preg) pairs for register-carried incoming arguments, emitted
    /// later as one `args` pseudo-instruction (see `take_args`).
    reg_args: Vec<ArgPair>,
    /// Finalized frame layout, once computed.
    frame_layout: Option<FrameLayout>,
    /// The vreg holding the hidden return-area pointer, if one is needed
    /// (set by `init_retval_area`).
    ret_area_ptr: Option<Reg>,
    /// Calling convention of this function.
    call_conv: isa::CallConv,
    /// Shared compiler flags.
    flags: settings::Flags,
    /// ISA-specific flags.
    isa_flags: M::F,
    /// Register holding the computed stack-limit value, plus the
    /// instructions that compute it, if the function declares a
    /// `stack_limit` global value.
    stack_limit: Option<(Reg, SmallInstVec<M::I>)>,
    /// Marker for the machine-backend type parameter.
    _mach: PhantomData<M>,
}
/// Find the physical register carrying the special-purpose parameter
/// `purpose` of function `f`, if that parameter exists and its first slot is
/// a register.
fn get_special_purpose_param_register(
    f: &ir::Function,
    sigs: &SigSet,
    sig: Sig,
    purpose: ir::ArgumentPurpose,
) -> Option<Reg> {
    let idx = f.signature.special_param_index(purpose)?;
    if let ABIArg::Slots { slots, .. } = &sigs.args(sig)[idx] {
        if let ABIArgSlot::Reg { reg, .. } = slots[0] {
            return Some(reg.into());
        }
    }
    None
}
/// Round `val` up to the alignment described by `mask` (which must be
/// `alignment - 1` for a power-of-two alignment), returning `None` on
/// overflow.
fn checked_round_up(val: u32, mask: u32) -> Option<u32> {
    val.checked_add(mask).map(|bumped| bumped & !mask)
}
impl<M: ABIMachineSpec> Callee<M> {
    /// Create a new body ABI instance for the given function, computing
    /// stack-slot layout, dynamic-type sizes, and the optional stack-limit
    /// sequence up front.
    pub fn new(
        f: &ir::Function,
        isa: &dyn TargetIsa,
        isa_flags: &M::F,
        sigs: &SigSet,
    ) -> CodegenResult<Self> {
        trace!("ABI: func signature {:?}", f.signature);

        let flags = isa.flags().clone();
        let sig = sigs.abi_sig_for_signature(&f.signature);

        // Only these calling conventions are supported for function bodies.
        let call_conv = f.signature.call_conv;
        debug_assert!(
            call_conv == isa::CallConv::SystemV
                || call_conv == isa::CallConv::Tail
                || call_conv == isa::CallConv::Fast
                || call_conv == isa::CallConv::WindowsFastcall
                || call_conv == isa::CallConv::AppleAarch64
                || call_conv == isa::CallConv::Winch
                || call_conv == isa::CallConv::PreserveAll,
            "Unsupported calling convention: {call_conv:?}"
        );

        // Lay out sized stack slots first. Each slot is aligned to at least
        // the word size, or to its requested power-of-two alignment if
        // larger; any overflow reports an implementation-limit error.
        let mut end_offset: u32 = 0;
        let mut sized_stackslots = PrimaryMap::new();
        let mut sized_stackslot_keys = SecondaryMap::new();
        for (stackslot, data) in f.sized_stack_slots.iter() {
            let unaligned_start_offset = end_offset;
            debug_assert!(data.align_shift < 32);
            let align = core::cmp::max(M::word_bytes(), 1u32 << data.align_shift);
            let mask = align - 1;
            let start_offset = checked_round_up(unaligned_start_offset, mask)
                .ok_or(CodegenError::ImplLimitExceeded)?;
            end_offset = start_offset
                .checked_add(data.size)
                .ok_or(CodegenError::ImplLimitExceeded)?;
            debug_assert_eq!(stackslot.as_u32() as usize, sized_stackslots.len());
            sized_stackslots.push(start_offset);
            sized_stackslot_keys[stackslot] = data.key;
        }

        // Dynamic stack slots are laid out after the sized slots, each
        // word-aligned and sized by the ISA's dynamic-vector byte width.
        let mut dynamic_stackslots = PrimaryMap::new();
        for (stackslot, data) in f.dynamic_stack_slots.iter() {
            debug_assert_eq!(stackslot.as_u32() as usize, dynamic_stackslots.len());
            let unaligned_start_offset = end_offset;
            let mask = M::word_bytes() - 1;
            let start_offset = checked_round_up(unaligned_start_offset, mask)
                .ok_or(CodegenError::ImplLimitExceeded)?;
            let ty = f.get_concrete_dynamic_ty(data.dyn_ty).ok_or_else(|| {
                CodegenError::Unsupported(format!("invalid dynamic vector type: {}", data.dyn_ty))
            })?;
            end_offset = start_offset
                .checked_add(isa.dynamic_vector_bytes(ty))
                .ok_or(CodegenError::ImplLimitExceeded)?;
            dynamic_stackslots.push(start_offset);
        }

        // Total stack-slot area, rounded up to a word multiple.
        let stackslots_size = checked_round_up(end_offset, M::word_bytes() - 1)
            .ok_or(CodegenError::ImplLimitExceeded)?;

        // Resolve each dynamic type to its concrete byte size.
        let mut dynamic_type_sizes = HashMap::with_capacity(f.dfg.dynamic_types.len());
        for (dyn_ty, _data) in f.dfg.dynamic_types.iter() {
            let ty = f
                .get_concrete_dynamic_ty(dyn_ty)
                .unwrap_or_else(|| panic!("invalid dynamic vector type: {dyn_ty}"));
            let size = isa.dynamic_vector_bytes(ty);
            dynamic_type_sizes.insert(ty, size);
        }

        // Figure out what instructions, if any, will be needed to compute
        // the stack-limit value into a register.
        let stack_limit = f
            .stack_limit
            .map(|gv| gen_stack_limit::<M>(f, sigs, sig, gv));

        // The tail-args area starts out large enough for this function's own
        // incoming stack arguments; `return_call` lowering may grow it via
        // `accumulate_tail_args_size`.
        let tail_args_size = sigs[sig].sized_stack_arg_space;

        Ok(Self {
            ir_sig: ensure_struct_return_ptr_is_returned(&f.signature),
            sig,
            dynamic_stackslots,
            dynamic_type_sizes,
            sized_stackslots,
            sized_stackslot_keys,
            stackslots_size,
            outgoing_args_size: 0,
            tail_args_size,
            reg_args: vec![],
            frame_layout: None,
            ret_area_ptr: None,
            call_conv,
            flags,
            isa_flags: isa_flags.clone(),
            stack_limit,
            _mach: PhantomData,
        })
    }

    /// Insert the prologue's stack-limit check: traps (via
    /// `gen_stack_lower_bound_trap`) if allocating a frame of `stack_size`
    /// bytes would cross the limit held in `stack_limit`.
    fn insert_stack_check(
        &self,
        stack_limit: Reg,
        stack_size: u32,
        insts: &mut SmallInstVec<M::I>,
    ) {
        // With a zero-sized frame, compare SP directly against the limit.
        if stack_size == 0 {
            insts.extend(M::gen_stack_lower_bound_trap(stack_limit));
            return;
        }
        // For large frames, also check against the raw limit first.
        // NOTE(review): the 32 KiB threshold appears to guard the
        // `limit + stack_size` addition below — confirm the rationale.
        if stack_size >= 32 * 1024 {
            insts.extend(M::gen_stack_lower_bound_trap(stack_limit));
        }

        // Compute `stack_limit + stack_size` into a fixed scratch register
        // and trap if SP is below that (the frame would cross the limit).
        let scratch = Writable::from_reg(M::get_stacklimit_reg(self.call_conv));
        insts.extend(M::gen_add_imm(
            self.call_conv,
            scratch,
            stack_limit,
            stack_size,
        ));
        insts.extend(M::gen_stack_lower_bound_trap(scratch.to_reg()));
    }
}
/// Generate the stack-limit value for this function into a register, along
/// with any instructions required to compute it, by walking the chain of
/// global values rooted at `gv`.
fn gen_stack_limit<M: ABIMachineSpec>(
    f: &ir::Function,
    sigs: &SigSet,
    sig: Sig,
    gv: ir::GlobalValue,
) -> (Reg, SmallInstVec<M::I>) {
    let mut insts = smallvec![];
    let reg = generate_gv::<M>(f, sigs, sig, gv, &mut insts);
    (reg, insts)
}
/// Recursively materialize the global value `gv` into a register, appending
/// any instructions needed to `insts`.
///
/// Supports the `VMContext` parameter and (possibly chained) `Load` global
/// values; panics on anything else.
fn generate_gv<M: ABIMachineSpec>(
    f: &ir::Function,
    sigs: &SigSet,
    sig: Sig,
    gv: ir::GlobalValue,
    insts: &mut SmallInstVec<M::I>,
) -> Reg {
    match f.global_values[gv] {
        // The VMContext is a function parameter with a special purpose.
        ir::GlobalValueData::VMContext => {
            get_special_purpose_param_register(f, sigs, sig, ir::ArgumentPurpose::VMContext)
                .expect("no vmcontext parameter found")
        }
        // A load from `base + offset`: materialize the base first, then load
        // through it into the fixed stack-limit scratch register.
        ir::GlobalValueData::Load {
            base,
            offset,
            global_type: _,
            flags: _,
        } => {
            let base = generate_gv::<M>(f, sigs, sig, base, insts);
            let into_reg = Writable::from_reg(M::get_stacklimit_reg(f.stencil.signature.call_conv));
            insts.push(M::gen_load_base_offset(
                into_reg,
                base,
                offset.into(),
                M::word_type(),
            ));
            into_reg.to_reg()
        }
        ref other => panic!("global value for stack limit not supported: {other}"),
    }
}
/// Does this signature take a StructReturn parameter without also listing
/// the struct-return pointer as a return value (i.e. is it un-legalized)?
fn missing_struct_return(sig: &ir::Signature) -> bool {
    let takes_sret = sig.uses_special_param(ArgumentPurpose::StructReturn);
    let returns_sret = sig.uses_special_return(ArgumentPurpose::StructReturn);
    takes_sret && !returns_sret
}
/// Return a copy of `sig` legalized so that, if it takes a StructReturn
/// pointer parameter, that pointer also appears as the (sole) return value.
///
/// Panics if the signature already declares a StructReturn return value, or
/// if it mixes a StructReturn parameter with other return values.
fn ensure_struct_return_ptr_is_returned(sig: &ir::Signature) -> ir::Signature {
    let mut sig = sig.clone();
    if sig.uses_special_return(ArgumentPurpose::StructReturn) {
        panic!("Explicit StructReturn return value not allowed: {sig:?}")
    }
    match sig.special_param_index(ArgumentPurpose::StructReturn) {
        Some(struct_ret_index) => {
            if !sig.returns.is_empty() {
                panic!("No return values are allowed when using StructReturn: {sig:?}");
            }
            let sret = sig.params[struct_ret_index];
            sig.returns.insert(0, sret);
        }
        None => {}
    }
    sig
}
impl<M: ABIMachineSpec> Callee<M> {
pub fn signature(&self) -> &ir::Signature {
debug_assert!(
!missing_struct_return(&self.ir_sig),
"`Callee::ir_sig` is always legalized"
);
&self.ir_sig
}
pub fn init_retval_area(
&mut self,
sigs: &SigSet,
vregs: &mut VRegAllocator<M::I>,
) -> CodegenResult<()> {
if sigs[self.sig].stack_ret_arg.is_some() {
let ret_area_ptr = vregs.alloc(M::word_type())?;
self.ret_area_ptr = Some(ret_area_ptr.only_reg().unwrap());
}
Ok(())
}
pub fn ret_area_ptr(&self) -> Option<Reg> {
self.ret_area_ptr
}
pub fn accumulate_outgoing_args_size(&mut self, size: u32) {
if size > self.outgoing_args_size {
self.outgoing_args_size = size;
}
}
pub fn accumulate_tail_args_size(&mut self, size: u32) {
if size > self.tail_args_size {
self.tail_args_size = size;
}
}
pub fn is_forward_edge_cfi_enabled(&self) -> bool {
self.isa_flags.is_forward_edge_cfi_enabled()
}
pub fn call_conv(&self) -> isa::CallConv {
self.call_conv
}
pub fn machine_env(&self) -> &MachineEnv {
M::get_machine_env(&self.flags, self.call_conv)
}
pub fn sized_stackslot_offsets(&self) -> &PrimaryMap<StackSlot, u32> {
&self.sized_stackslots
}
pub fn dynamic_stackslot_offsets(&self) -> &PrimaryMap<DynamicStackSlot, u32> {
&self.dynamic_stackslots
}
pub fn gen_copy_arg_to_regs(
&mut self,
sigs: &SigSet,
idx: usize,
into_regs: ValueRegs<Writable<Reg>>,
vregs: &mut VRegAllocator<M::I>,
) -> SmallInstVec<M::I> {
let mut insts = smallvec![];
let mut copy_arg_slot_to_reg = |slot: &ABIArgSlot, into_reg: &Writable<Reg>| {
match slot {
&ABIArgSlot::Reg { reg, .. } => {
let arg = ArgPair {
vreg: *into_reg,
preg: reg.into(),
};
self.reg_args.push(arg);
}
&ABIArgSlot::Stack {
offset,
ty,
extension,
..
} => {
let ext = M::get_ext_mode(sigs[self.sig].call_conv, extension);
let ty =
if ext != ArgumentExtension::None && M::word_bits() > ty_bits(ty) as u32 {
M::word_type()
} else {
ty
};
insts.push(M::gen_load_stack(
StackAMode::IncomingArg(offset, sigs[self.sig].sized_stack_arg_space),
*into_reg,
ty,
));
}
}
};
match &sigs.args(self.sig)[idx] {
&ABIArg::Slots { ref slots, .. } => {
assert_eq!(into_regs.len(), slots.len());
for (slot, into_reg) in slots.iter().zip(into_regs.regs().iter()) {
copy_arg_slot_to_reg(&slot, &into_reg);
}
}
&ABIArg::StructArg { offset, .. } => {
let into_reg = into_regs.only_reg().unwrap();
insts.push(M::gen_get_stack_addr(
StackAMode::IncomingArg(offset, sigs[self.sig].sized_stack_arg_space),
into_reg,
));
}
&ABIArg::ImplicitPtrArg { pointer, ty, .. } => {
let into_reg = into_regs.only_reg().unwrap();
let base = match &pointer {
&ABIArgSlot::Reg { reg, ty, .. } => {
let tmp = vregs.alloc_with_deferred_error(ty).only_reg().unwrap();
self.reg_args.push(ArgPair {
vreg: Writable::from_reg(tmp),
preg: reg.into(),
});
tmp
}
&ABIArgSlot::Stack { offset, ty, .. } => {
let addr_reg = writable_value_regs(vregs.alloc_with_deferred_error(ty))
.only_reg()
.unwrap();
insts.push(M::gen_load_stack(
StackAMode::IncomingArg(offset, sigs[self.sig].sized_stack_arg_space),
addr_reg,
ty,
));
addr_reg.to_reg()
}
};
insts.push(M::gen_load_base_offset(into_reg, base, 0, ty));
}
}
insts
}
pub fn gen_copy_regs_to_retval(
&self,
sigs: &SigSet,
idx: usize,
from_regs: ValueRegs<Reg>,
vregs: &mut VRegAllocator<M::I>,
) -> (SmallVec<[RetPair; 2]>, SmallInstVec<M::I>) {
let mut reg_pairs = smallvec![];
let mut ret = smallvec![];
let word_bits = M::word_bits() as u8;
match &sigs.rets(self.sig)[idx] {
&ABIArg::Slots { ref slots, .. } => {
assert_eq!(from_regs.len(), slots.len());
for (slot, &from_reg) in slots.iter().zip(from_regs.regs().iter()) {
match slot {
&ABIArgSlot::Reg {
reg, ty, extension, ..
} => {
let from_bits = ty_bits(ty) as u8;
let ext = M::get_ext_mode(sigs[self.sig].call_conv, extension);
let vreg = match (ext, from_bits) {
(ir::ArgumentExtension::Uext, n)
| (ir::ArgumentExtension::Sext, n)
if n < word_bits =>
{
let signed = ext == ir::ArgumentExtension::Sext;
let dst =
writable_value_regs(vregs.alloc_with_deferred_error(ty))
.only_reg()
.unwrap();
ret.push(M::gen_extend(
dst, from_reg, signed, from_bits,
word_bits,
));
dst.to_reg()
}
_ => {
from_reg
}
};
reg_pairs.push(RetPair {
vreg,
preg: Reg::from(reg),
});
}
&ABIArgSlot::Stack {
offset,
ty,
extension,
..
} => {
let mut ty = ty;
let from_bits = ty_bits(ty) as u8;
let off = i32::try_from(offset).expect(
"Argument stack offset greater than 2GB; should hit impl limit first",
);
let ext = M::get_ext_mode(sigs[self.sig].call_conv, extension);
match (ext, from_bits) {
(ir::ArgumentExtension::Uext, n)
| (ir::ArgumentExtension::Sext, n)
if n < word_bits =>
{
assert_eq!(M::word_reg_class(), from_reg.class());
let signed = ext == ir::ArgumentExtension::Sext;
let dst =
writable_value_regs(vregs.alloc_with_deferred_error(ty))
.only_reg()
.unwrap();
ret.push(M::gen_extend(
dst, from_reg, signed, from_bits,
word_bits,
));
ty = M::word_type();
}
_ => {}
};
ret.push(M::gen_store_base_offset(
self.ret_area_ptr.unwrap(),
off,
from_reg,
ty,
));
}
}
}
}
ABIArg::StructArg { .. } => {
panic!("StructArg in return position is unsupported");
}
ABIArg::ImplicitPtrArg { .. } => {
panic!("ImplicitPtrArg in return position is unsupported");
}
}
(reg_pairs, ret)
}
pub fn gen_retval_area_setup(
&mut self,
sigs: &SigSet,
vregs: &mut VRegAllocator<M::I>,
) -> Option<M::I> {
if let Some(i) = sigs[self.sig].stack_ret_arg {
let ret_area_ptr = Writable::from_reg(self.ret_area_ptr.unwrap());
let insts =
self.gen_copy_arg_to_regs(sigs, i.into(), ValueRegs::one(ret_area_ptr), vregs);
insts.into_iter().next().map(|inst| {
trace!(
"gen_retval_area_setup: inst {:?}; ptr reg is {:?}",
inst,
ret_area_ptr.to_reg()
);
inst
})
} else {
trace!("gen_retval_area_setup: not needed");
None
}
}
pub fn gen_rets(&self, rets: Vec<RetPair>) -> M::I {
M::gen_rets(rets)
}
pub fn gen_call_args(
&self,
sigs: &SigSet,
sig: Sig,
args: &[ValueRegs<Reg>],
is_tail_call: bool,
flags: &settings::Flags,
vregs: &mut VRegAllocator<M::I>,
) -> (CallArgList, SmallInstVec<M::I>) {
let mut uses: CallArgList = smallvec![];
let mut insts = smallvec![];
assert_eq!(args.len(), sigs.num_args(sig));
let call_conv = sigs[sig].call_conv;
let stack_arg_space = sigs[sig].sized_stack_arg_space;
let stack_arg = |offset| {
if is_tail_call {
StackAMode::IncomingArg(offset, stack_arg_space)
} else {
StackAMode::OutgoingArg(offset)
}
};
let word_ty = M::word_type();
let word_rc = M::word_reg_class();
let word_bits = M::word_bits() as usize;
if is_tail_call {
debug_assert_eq!(
self.call_conv,
isa::CallConv::Tail,
"Can only do `return_call`s from within a `tail` calling convention function"
);
}
let mut process_arg_slot = |insts: &mut SmallInstVec<M::I>, slot, vreg, ty| {
match &slot {
&ABIArgSlot::Reg { reg, .. } => {
uses.push(CallArgPair {
vreg,
preg: reg.into(),
});
}
&ABIArgSlot::Stack { offset, .. } => {
insts.push(M::gen_store_stack(stack_arg(offset), vreg, ty));
}
};
};
for (idx, from_regs) in args.iter().enumerate() {
match &sigs.args(sig)[idx] {
&ABIArg::Slots { .. } | &ABIArg::ImplicitPtrArg { .. } => {}
&ABIArg::StructArg { offset, size, .. } => {
let tmp = vregs.alloc_with_deferred_error(word_ty).only_reg().unwrap();
insts.push(M::gen_get_stack_addr(
stack_arg(offset),
Writable::from_reg(tmp),
));
insts.extend(M::gen_memcpy(
isa::CallConv::for_libcall(flags, call_conv),
tmp,
from_regs.only_reg().unwrap(),
size as usize,
|ty| {
Writable::from_reg(
vregs.alloc_with_deferred_error(ty).only_reg().unwrap(),
)
},
));
}
}
}
for (idx, from_regs) in args.iter().enumerate() {
match sigs.args(sig)[idx] {
ABIArg::Slots { ref slots, .. } => {
assert_eq!(from_regs.len(), slots.len());
for (slot, from_reg) in slots.iter().zip(from_regs.regs().iter()) {
let (ty, extension) = match *slot {
ABIArgSlot::Reg { ty, extension, .. } => (ty, extension),
ABIArgSlot::Stack { ty, extension, .. } => (ty, extension),
};
let ext = M::get_ext_mode(call_conv, extension);
let (vreg, ty) = if ext != ir::ArgumentExtension::None
&& ty_bits(ty) < word_bits
{
assert_eq!(word_rc, from_reg.class());
let signed = match ext {
ir::ArgumentExtension::Uext => false,
ir::ArgumentExtension::Sext => true,
_ => unreachable!(),
};
let tmp = vregs.alloc_with_deferred_error(word_ty).only_reg().unwrap();
insts.push(M::gen_extend(
Writable::from_reg(tmp),
*from_reg,
signed,
ty_bits(ty) as u8,
word_bits as u8,
));
(tmp, word_ty)
} else {
(*from_reg, ty)
};
process_arg_slot(&mut insts, *slot, vreg, ty);
}
}
ABIArg::ImplicitPtrArg {
offset,
pointer,
ty,
..
} => {
let vreg = from_regs.only_reg().unwrap();
let tmp = vregs.alloc_with_deferred_error(word_ty).only_reg().unwrap();
insts.push(M::gen_get_stack_addr(
stack_arg(offset),
Writable::from_reg(tmp),
));
insts.push(M::gen_store_base_offset(tmp, 0, vreg, ty));
process_arg_slot(&mut insts, pointer, tmp, word_ty);
}
ABIArg::StructArg { .. } => {}
}
}
if let Some(ret_arg) = sigs.get_ret_arg(sig) {
let ret_area = if is_tail_call {
self.ret_area_ptr.expect(
"if the tail callee has a return pointer, then the tail caller must as well",
)
} else {
let tmp = vregs.alloc_with_deferred_error(word_ty).only_reg().unwrap();
let amode = StackAMode::OutgoingArg(stack_arg_space.into());
insts.push(M::gen_get_stack_addr(amode, Writable::from_reg(tmp)));
tmp
};
match ret_arg {
ABIArg::Slots { slots, .. } => {
assert_eq!(slots.len(), 1);
process_arg_slot(&mut insts, slots[0], ret_area, word_ty);
}
_ => unreachable!(),
}
}
(uses, insts)
}
/// Build the `CallRetList` for a call with signature `sig`, pairing each
/// ABI return slot with the caller-side vregs supplied in `outputs`.
///
/// For try-calls, `try_call_payloads` supplies the vregs that receive the
/// exception payload registers; a payload register that already carries a
/// normal return value is aliased to that def rather than defined twice.
pub fn gen_call_rets(
    &self,
    sigs: &SigSet,
    sig: Sig,
    outputs: &[ValueRegs<Reg>],
    try_call_payloads: Option<&[Writable<Reg>]>,
    vregs: &mut VRegAllocator<M::I>,
) -> CallRetList {
    let callee_conv = sigs[sig].call_conv;
    let stack_arg_space = sigs[sig].sized_stack_arg_space;
    let word_ty = M::word_type();
    let word_bits = M::word_bits() as usize;
    let mut defs: CallRetList = smallvec![];
    let mut outputs = outputs.into_iter();
    let num_rets = sigs.num_rets(sig);
    for idx in 0..num_rets {
        let ret = sigs.rets(sig)[idx].clone();
        match ret {
            ABIArg::Slots {
                ref slots, purpose, ..
            } => {
                // A StructReturn entry consumes no output vreg here: the
                // sret pointer is handled on the argument side.
                if purpose == ArgumentPurpose::StructReturn {
                    continue;
                }
                let retval_regs = outputs.next().unwrap();
                assert_eq!(retval_regs.len(), slots.len());
                for (slot, retval_reg) in slots.iter().zip(retval_regs.regs().iter()) {
                    let (ty, extension) = match *slot {
                        ABIArgSlot::Reg { ty, extension, .. } => (ty, extension),
                        ABIArgSlot::Stack { ty, extension, .. } => (ty, extension),
                    };
                    // If the ABI sign/zero-extends narrow return values,
                    // read back a full word so the extended bits are kept.
                    let ext = M::get_ext_mode(callee_conv, extension);
                    let ty = if ext != ir::ArgumentExtension::None && ty_bits(ty) < word_bits {
                        word_ty
                    } else {
                        ty
                    };
                    match slot {
                        &ABIArgSlot::Reg { reg, .. } => {
                            defs.push(CallRetPair {
                                vreg: Writable::from_reg(*retval_reg),
                                location: RetLocation::Reg(reg.into(), ty),
                            });
                        }
                        &ABIArgSlot::Stack { offset, .. } => {
                            // Stack return slots sit just above the
                            // outgoing stack-argument area.
                            let amode =
                                StackAMode::OutgoingArg(offset + i64::from(stack_arg_space));
                            defs.push(CallRetPair {
                                vreg: Writable::from_reg(*retval_reg),
                                location: RetLocation::Stack(amode, ty),
                            });
                        }
                    }
                }
            }
            ABIArg::StructArg { .. } => {
                panic!("StructArg not supported in return position");
            }
            ABIArg::ImplicitPtrArg { .. } => {
                panic!("ImplicitPtrArg not supported in return position");
            }
        }
    }
    // Every caller-provided output must have been consumed.
    assert!(outputs.next().is_none());
    if let Some(try_call_payloads) = try_call_payloads {
        let pregs = M::exception_payload_regs(callee_conv);
        assert_eq!(
            callee_conv.exception_payload_types(M::word_type()).len(),
            pregs.len()
        );
        for (i, &preg) in pregs.iter().enumerate() {
            let vreg = try_call_payloads[i];
            // If this payload register doubles as a normal return
            // register, alias the payload vreg to the existing def
            // instead of defining the same preg twice.
            if let Some(existing) = defs.iter().find(|def| match def.location {
                RetLocation::Reg(r, _) => r == preg,
                _ => false,
            }) {
                vregs.set_vreg_alias(vreg.to_reg(), existing.vreg.to_reg());
            } else {
                defs.push(CallRetPair {
                    vreg,
                    location: RetLocation::Reg(preg, word_ty),
                });
            }
        }
    }
    defs
}
/// Assemble a `CallInfo` for a call to `dest` with signature `sig`.
///
/// The clobber set starts from the full convention-defined set and drops
/// every register that is also a return-value def (it is written with a
/// meaningful value, so it must not be treated as a plain clobber). For
/// the `tail` convention the callee pops its own stack arguments, so the
/// pop size is recorded; all other conventions leave it at zero.
pub fn gen_call_info<T>(
    &self,
    sigs: &SigSet,
    sig: Sig,
    dest: T,
    uses: CallArgList,
    defs: CallRetList,
    try_call_info: Option<TryCallInfo>,
    patchable: bool,
) -> CallInfo<T> {
    let callee_conv = sigs[sig].call_conv;
    let stack_arg_space = sigs[sig].sized_stack_arg_space;

    // Remove return-value registers from the raw clobber set.
    let mut clobbers = <M>::get_regs_clobbered_by_call(callee_conv, try_call_info.is_some());
    for def in &defs {
        if let RetLocation::Reg(preg, _) = def.location {
            clobbers.remove(PReg::from(preg.to_real_reg().unwrap()));
        }
    }

    // Only the tail convention has the callee pop its stack args.
    let callee_pop_size = match callee_conv {
        isa::CallConv::Tail => stack_arg_space,
        _ => 0,
    };

    CallInfo {
        dest,
        uses,
        defs,
        clobbers,
        callee_conv,
        caller_conv: self.call_conv,
        callee_pop_size,
        try_call_info,
        patchable,
    }
}
/// Byte offset of the given sized stack slot within the slot-storage area.
pub fn sized_stackslot_offset(&self, slot: StackSlot) -> u32 {
    self.sized_stackslots[slot]
}
/// Produce an instruction that computes the address of `slot` plus a
/// byte `offset` within it, writing the result to `into_reg`.
pub fn sized_stackslot_addr(
    &self,
    slot: StackSlot,
    offset: u32,
    into_reg: Writable<Reg>,
) -> M::I {
    // Address = slot base offset + byte offset within the slot.
    let base = i64::from(self.sized_stackslots[slot]);
    M::gen_get_stack_addr(StackAMode::Slot(base + i64::from(offset)), into_reg)
}
/// Produce an instruction that computes the address of the given dynamic
/// stack slot, writing the result to `into_reg`.
pub fn dynamic_stackslot_addr(&self, slot: DynamicStackSlot, into_reg: Writable<Reg>) -> M::I {
    let slot_off = i64::from(self.dynamic_stackslots[slot]);
    M::gen_get_stack_addr(StackAMode::Slot(slot_off), into_reg)
}
/// Take the pending register-argument moves, if any, and wrap them in a
/// single args pseudo-instruction. Returns `None` when there are no
/// register arguments. The pending list is left empty afterwards.
pub fn take_args(&mut self) -> Option<M::I> {
    // Idiomatic emptiness check (`!is_empty()` rather than `len() > 0`,
    // per clippy::len_zero); behavior is unchanged.
    if self.reg_args.is_empty() {
        None
    } else {
        Some(M::gen_args(core::mem::take(&mut self.reg_args)))
    }
}
}
impl<M: ABIMachineSpec> Callee<M> {
/// Compute and cache the frame layout for this function.
///
/// The fixed-frame size is the sized/dynamic stack-slot storage plus one
/// machine word per regalloc spillslot, rounded up to the convention's
/// stack alignment; the ISA-specific `compute_frame_layout` fills in the
/// remaining regions.
pub fn compute_frame_layout(
    &mut self,
    sigs: &SigSet,
    spillslots: usize,
    clobbered: Vec<Writable<RealReg>>,
    function_calls: FunctionCalls,
) {
    let word_bytes = M::word_bytes();
    let unaligned = self.stackslots_size + word_bytes * spillslots as u32;
    // Round up to the ABI-required stack alignment (a power of two).
    let align = M::stack_align(self.call_conv);
    let total_stacksize = (unaligned + align - 1) & !(align - 1);
    self.frame_layout = Some(M::compute_frame_layout(
        self.call_conv,
        &self.flags,
        self.signature(),
        &clobbered,
        function_calls,
        self.stack_args_size(sigs),
        self.tail_args_size,
        self.stackslots_size,
        total_stacksize,
        self.outgoing_args_size,
    ));
}
/// Generate the function prologue: frame setup, then optional stack-limit
/// and stack-probe checks, then clobber saves. Ordering matters — the
/// checks run before the clobber-save area is allocated.
pub fn gen_prologue(&self) -> SmallInstVec<M::I> {
    let frame_layout = self.frame_layout();
    let mut insts = smallvec![];
    insts.extend(M::gen_prologue_frame_setup(
        self.call_conv,
        &self.flags,
        &self.isa_flags,
        &frame_layout,
    ));
    // Total stack this function allocates beyond the incoming-arg area.
    // The setup area is counted only when this function may make calls
    // (NOTE(review): presumably leaf functions skip the FP/LR setup area
    // — confirm against the per-ISA `compute_frame_layout` impls).
    let total_stacksize = (frame_layout.tail_args_size - frame_layout.incoming_args_size)
        + frame_layout.clobber_size
        + frame_layout.fixed_frame_storage_size
        + frame_layout.outgoing_args_size
        + if frame_layout.function_calls == FunctionCalls::None {
            0
        } else {
            frame_layout.setup_area_size
        };
    if total_stacksize > 0 || frame_layout.function_calls != FunctionCalls::None {
        // Optional explicit stack-limit check against a loaded limit reg.
        if let Some((reg, stack_limit_load)) = &self.stack_limit {
            insts.extend(stack_limit_load.clone());
            self.insert_stack_check(*reg, total_stacksize, &mut insts);
        }
        // Optional stack probing, inline or via an outlined helper.
        if self.flags.enable_probestack() {
            let guard_size = 1 << self.flags.probestack_size_log2();
            match self.flags.probestack_strategy() {
                ProbestackStrategy::Inline => M::gen_inline_probestack(
                    &mut insts,
                    self.call_conv,
                    total_stacksize,
                    guard_size,
                ),
                ProbestackStrategy::Outline => {
                    // Only call the outlined probe when the frame could
                    // reach beyond a single guard page.
                    if total_stacksize >= guard_size {
                        M::gen_probestack(&mut insts, total_stacksize);
                    }
                }
            }
        }
    }
    insts.extend(M::gen_clobber_save(
        self.call_conv,
        &self.flags,
        &frame_layout,
    ));
    insts
}
/// Generate the function epilogue: restore clobbered registers, tear the
/// frame down, then return. The mirror image of `gen_prologue`.
pub fn gen_epilogue(&self) -> SmallInstVec<M::I> {
    let layout = self.frame_layout();
    let mut insts: SmallInstVec<M::I> = smallvec![];

    // 1. Restore clobbered callee-saved registers.
    insts.extend(M::gen_clobber_restore(self.call_conv, &self.flags, layout));

    // 2. Undo the frame setup performed by the prologue.
    insts.extend(M::gen_epilogue_frame_restore(
        self.call_conv,
        &self.flags,
        &self.isa_flags,
        layout,
    ));

    // 3. Return to the caller.
    insts.extend(M::gen_return(self.call_conv, &self.isa_flags, layout));

    trace!("Epilogue: {:?}", insts);
    insts
}
/// Accessor for the frame layout produced by `compute_frame_layout`.
///
/// Panics if the layout has not been computed yet; callers must only use
/// this after regalloc-time frame computation has run.
pub fn frame_layout(&self) -> &FrameLayout {
    self.frame_layout
        .as_ref()
        .expect("frame layout not computed before prologue generation")
}
/// Byte distance from the post-prologue SP up to the FP: the sum of the
/// frame regions (clobber saves, fixed storage, outgoing args) that sit
/// between the two.
pub fn sp_to_fp_offset(&self) -> u32 {
    let frame_layout = self.frame_layout();
    frame_layout.clobber_size
        + frame_layout.fixed_frame_storage_size
        + frame_layout.outgoing_args_size
}
/// Byte distance from the base of the stack-slot storage area up to the
/// caller's SP at the time of the call, including the setup area and any
/// extra tail-argument space beyond the incoming-argument area.
pub fn slot_base_to_caller_sp_offset(&self) -> u32 {
    let frame_layout = self.frame_layout();
    frame_layout.clobber_size
        + frame_layout.fixed_frame_storage_size
        + frame_layout.setup_area_size
        + (frame_layout.tail_args_size - frame_layout.incoming_args_size)
}
/// Size, in bytes, of the incoming stack-argument area for this
/// function's own signature.
pub fn stack_args_size(&self, sigs: &SigSet) -> u32 {
    sigs[self.sig].sized_stack_arg_space
}
/// Number of spillslots needed for a value of register class `rc`.
///
/// Slot sizing is driven by the largest dynamic-type size present in
/// this function, defaulting to 16 bytes when there are none.
pub fn get_spillslot_size(&self, rc: RegClass) -> u32 {
    // `values().max()` replaces the hand-rolled `len() == 0` check plus
    // `iter().max_by(..).map(..)` chain: `max()` on the values compares
    // the same keys (the sizes) and `unwrap_or(16)` supplies the same
    // default for the empty case.
    let max = self.dynamic_type_sizes.values().copied().max().unwrap_or(16);
    M::get_number_of_spillslots_for_value(rc, max, &self.isa_flags)
}
/// SP-relative byte offset of the given regalloc spill slot, as defined
/// by the computed frame layout.
pub fn get_spillslot_offset(&self, slot: SpillSlot) -> i64 {
    self.frame_layout().spillslot_offset(slot)
}
/// Generate a spill of `from_reg` into spill slot `to_slot`.
///
/// The store uses the canonical type for the register's class; the
/// debug assertion checks that the class maps back to exactly that one
/// type, so the canonical type fully covers the register.
pub fn gen_spill(&self, to_slot: SpillSlot, from_reg: RealReg) -> M::I {
    let ty = M::I::canonical_type_for_rc(from_reg.class());
    debug_assert_eq!(<M>::I::rc_for_type(ty).unwrap().1, &[ty]);
    let sp_off = self.get_spillslot_offset(to_slot);
    trace!("gen_spill: {from_reg:?} into slot {to_slot:?} at offset {sp_off}");
    // Renamed from the misleading `from`: this amode is the store
    // *destination* (the spill slot), not the source.
    let to = StackAMode::Slot(sp_off);
    <M>::gen_store_stack(to, Reg::from(from_reg), ty)
}
/// Generate a reload of spill slot `from_slot` into register `to_reg`.
///
/// The load uses the canonical type for the destination register's
/// class; the debug assertion checks the class round-trips to exactly
/// that single type.
pub fn gen_reload(&self, to_reg: Writable<RealReg>, from_slot: SpillSlot) -> M::I {
    let ty = M::I::canonical_type_for_rc(to_reg.to_reg().class());
    debug_assert_eq!(<M>::I::rc_for_type(ty).unwrap().1, &[ty]);
    let sp_off = self.get_spillslot_offset(from_slot);
    trace!("gen_reload: {to_reg:?} from slot {from_slot:?} at offset {sp_off}");
    <M>::gen_load_stack(StackAMode::Slot(sp_off), to_reg.map(Reg::from), ty)
}
/// Produce the frame-slot metadata recorded into the machine buffer:
/// each sized stack slot's SP-relative offset and its key, plus the
/// SP-to-FP distance.
pub fn frame_slot_metadata(&self) -> MachBufferFrameLayout {
    let frame_to_fp_offset = self.sp_to_fp_offset();
    let mut stackslots = SecondaryMap::with_capacity(self.sized_stackslots.len());
    // Slot storage begins just above the outgoing-argument area, so the
    // SP-relative offset is the storage-area offset plus that base.
    let storage_area_base = self.frame_layout().outgoing_args_size;
    for (slot, storage_area_offset) in &self.sized_stackslots {
        stackslots[slot] = MachBufferStackSlot {
            offset: storage_area_base.checked_add(*storage_area_offset).unwrap(),
            key: self.sized_stackslot_keys[slot],
        };
    }
    MachBufferFrameLayout {
        frame_to_fp_offset,
        stackslots,
    }
}
}
/// A move of one argument value into its ABI register before a call:
/// `vreg` holds the value, `preg` is the fixed register it must reach.
#[derive(Clone, Debug)]
pub struct CallArgPair {
    pub vreg: Reg,
    pub preg: Reg,
}
/// A single return value of a call: the vreg that is defined, and the
/// ABI location (register or stack) the value arrives in.
#[derive(Clone, Debug)]
pub struct CallRetPair {
    pub vreg: Writable<Reg>,
    pub location: RetLocation,
}
/// Where a call produces a return value: in a fixed register, or at a
/// stack address; in both cases paired with the type to move.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RetLocation {
    Reg(Reg, Type),
    Stack(StackAMode, Type),
}
/// Argument moves for one call site (inline storage for the common case).
pub type CallArgList = SmallVec<[CallArgPair; 8]>;
/// Return-value locations for one call site.
pub type CallRetList = SmallVec<[CallRetPair; 8]>;
impl<T> CallInfo<T> {
    /// Emit the loads that move stack-located return values to their
    /// destinations after a call.
    ///
    /// `stackslots_size` is the size of the stack-slot storage area, used
    /// to address regalloc spillslots. `emit_island` is offered the
    /// worst-case size of the upcoming load sequence so the emission
    /// buffer can place a pending island first if needed.
    pub fn emit_retval_loads<
        M: ABIMachineSpec,
        EmitFn: FnMut(M::I),
        IslandFn: Fn(u32) -> Option<M::I>,
    >(
        &self,
        stackslots_size: u32,
        mut emit: EmitFn,
        emit_island: IslandFn,
    ) {
        // Conservative bound: up to 10 worst-case instructions per
        // stack-located return value.
        let mut space_needed = 0;
        for CallRetPair { location, .. } in &self.defs {
            if let RetLocation::Stack(..) = location {
                space_needed += 10 * M::I::worst_case_size();
            }
        }
        if space_needed > 0 {
            if let Some(island_inst) = emit_island(space_needed) {
                emit(island_inst);
            }
        }
        // Scratch register for spillslot destinations. It must be
        // call-clobbered (checked below) so that using it here cannot
        // overwrite a value live across the call.
        let temp = M::retval_temp_reg(self.callee_conv);
        debug_assert!(
            self.defs.is_empty()
                || M::get_regs_clobbered_by_call(self.callee_conv, self.try_call_info.is_some())
                    .contains(PReg::from(temp.to_reg().to_real_reg().unwrap()))
        );
        for CallRetPair { vreg, location } in &self.defs {
            match location {
                RetLocation::Reg(preg, ..) => {
                    // Register returns need no load; just make sure the
                    // scratch register is not also a return register.
                    debug_assert!(*preg != temp.to_reg());
                }
                RetLocation::Stack(amode, ty) => {
                    if let Some(spillslot) = vreg.to_reg().to_spillslot() {
                        // The destination itself was spilled by regalloc:
                        // copy word-by-word through `temp` into the slot.
                        let parts = (ty.bytes() + M::word_bytes() - 1) / M::word_bytes();
                        // Load type: a full word, or smaller if the whole
                        // value is narrower than a word.
                        let one_part_load_ty =
                            Type::int_with_byte_size(M::word_bytes().min(ty.bytes()) as u16)
                                .unwrap();
                        for part in 0..parts {
                            emit(M::gen_load_stack(
                                amode.offset_by(part * M::word_bytes()),
                                temp,
                                one_part_load_ty,
                            ));
                            emit(M::gen_store_stack(
                                StackAMode::Slot(
                                    i64::from(stackslots_size)
                                        + i64::from(M::word_bytes())
                                            * ((spillslot.index() as i64) + (part as i64)),
                                ),
                                temp.to_reg(),
                                M::word_type(),
                            ));
                        }
                    } else {
                        // Destination is a real register: load directly.
                        assert_ne!(*vreg, temp);
                        emit(M::gen_load_stack(*amode, *vreg, *ty));
                    }
                }
            }
        }
    }
}
impl TryCallInfo {
    /// Translate this call's exception-handler clauses into the
    /// `MachExceptionHandler` form recorded in the machine buffer,
    /// resolving context registers to concrete post-regalloc locations.
    pub(crate) fn exception_handlers(
        &self,
        layout: &FrameLayout,
    ) -> impl Iterator<Item = MachExceptionHandler> {
        self.exception_handlers.iter().map(|handler| match handler {
            TryCallHandler::Tag(tag, label) => MachExceptionHandler::Tag(*tag, *label),
            TryCallHandler::Default(label) => MachExceptionHandler::Default(*label),
            TryCallHandler::Context(reg) => {
                // After regalloc the context lives either in a spillslot
                // (reported as an SP offset, including the outgoing-args
                // area below the slots) or in a real register.
                let loc = if let Some(spillslot) = reg.to_spillslot() {
                    let offset = layout.spillslot_offset(spillslot) + i64::from(layout.outgoing_args_size);
                    ExceptionContextLoc::SPOffset(u32::try_from(offset).expect("SP offset cannot be negative or larger than 4GiB"))
                } else if let Some(realreg) = reg.to_real_reg() {
                    ExceptionContextLoc::GPR(realreg.hw_enc())
                } else {
                    panic!("Virtual register present in try-call handler clause after register allocation");
                };
                MachExceptionHandler::Context(loc)
            }
        })
    }
    /// Render the handler list for pretty-printed disassembly.
    pub(crate) fn pretty_print_dests(&self) -> String {
        self.exception_handlers
            .iter()
            .map(|handler| match handler {
                TryCallHandler::Tag(tag, label) => format!("{tag:?}: {label:?}"),
                TryCallHandler::Default(label) => format!("default: {label:?}"),
                TryCallHandler::Context(loc) => format!("context {loc:?}"),
            })
            .collect::<Vec<_>>()
            .join(", ")
    }
    /// Report register operands to the regalloc operand collector:
    /// registers referenced by `Context` clauses are reported as late
    /// uses; tag and default clauses reference no registers.
    pub(crate) fn collect_operands(&mut self, collector: &mut impl OperandVisitor) {
        for handler in &mut self.exception_handlers {
            match handler {
                TryCallHandler::Context(ctx) => {
                    collector.any_late_use(ctx);
                }
                TryCallHandler::Tag(_, _) | TryCallHandler::Default(_) => {}
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::SigData;
    // Guard against accidental growth of `SigData`: one is stored per
    // signature, so its size directly affects compiler memory use. Update
    // the expected size deliberately if fields change.
    #[test]
    fn sig_data_size() {
        assert_eq!(core::mem::size_of::<SigData>(), 24);
    }
}