use super::{PulleyFlags, PulleyTargetKind, inst::*};
use crate::isa::pulley_shared::PointerWidth;
use crate::{
CodegenResult,
ir::{self, MemFlags, Signature, types::*},
isa,
machinst::*,
settings,
};
use alloc::borrow::ToOwned;
use alloc::vec::Vec;
use core::marker::PhantomData;
use cranelift_bitset::ScalarBitSet;
use regalloc2::{MachineEnv, PRegSet};
use smallvec::{SmallVec, smallvec};
/// Shorthand for a `Callee` parameterized over the Pulley machine backend
/// for a particular target kind.
pub(crate) type PulleyCallee<P> = Callee<PulleyMachineDeps<P>>;

/// Marker type implementing `ABIMachineSpec` for Pulley.
///
/// Carries no runtime data; the `P: PulleyTargetKind` parameter selects the
/// simulated pointer width (see `P::pointer_width()` uses below).
pub struct PulleyMachineDeps<P>
where
    P: PulleyTargetKind,
{
    // Zero-sized marker tying this type to the target kind `P`.
    _phantom: PhantomData<P>,
}
impl<P> ABIMachineSpec for PulleyMachineDeps<P>
where
    P: PulleyTargetKind,
{
    type I = InstAndKind<P>;
    type F = PulleyFlags;

    /// Maximum total size of stack-passed arguments/returns: 128 MiB.
    const STACK_ARG_RET_SIZE_LIMIT: u32 = 128 * 1024 * 1024;

    /// Word size follows the simulated pointer width (32 or 64 bits).
    fn word_bits() -> u32 {
        P::pointer_width().bits().into()
    }

    /// Stack alignment is 16 bytes for every calling convention.
    fn stack_align(_call_conv: isa::CallConv) -> u32 {
        16
    }

    /// Assign argument/return locations for `params`.
    ///
    /// Integer values go in `x0..=x14`, floats in `f0..=f15`, vectors in
    /// `v0..=v15`; anything that does not fit spills to the stack in slots of
    /// at least 8 bytes, aligned to their own size. Returns the total stack
    /// space consumed and, when `add_ret_area_ptr` is set, the index of the
    /// synthetic return-area-pointer argument.
    fn compute_arg_locs(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        params: &[ir::AbiParam],
        args_or_rets: ArgsOrRets,
        add_ret_area_ptr: bool,
        mut args: ArgsAccumulator,
    ) -> CodegenResult<(u32, Option<usize>)> {
        // x15 is deliberately excluded from the argument registers; it is
        // handed out as `retval_temp_reg` below.
        let x_end = 14;
        let f_end = 15;
        let v_end = 15;

        let mut next_x_reg = 0;
        let mut next_f_reg = 0;
        let mut next_v_reg = 0;
        let mut next_stack: u32 = 0;

        // The return-area pointer, when requested, consumes the first integer
        // argument register.
        let ret_area_ptr = if add_ret_area_ptr {
            debug_assert_eq!(args_or_rets, ArgsOrRets::Args);
            next_x_reg += 1;
            Some(ABIArg::reg(
                x_reg(next_x_reg - 1).to_real_reg().unwrap(),
                // NOTE(review): typed as I64 regardless of pointer width —
                // confirm this is intended for 32-bit targets as well.
                I64,
                ir::ArgumentExtension::None,
                ir::ArgumentPurpose::Normal,
            ))
        } else {
            None
        };

        for param in params {
            // A single CLIF value may need several register slots (one per
            // (register class, type) pair returned here).
            let (rcs, reg_tys) = Self::I::rc_for_type(param.value_type)?;

            let mut slots = ABIArgSlotVec::new();
            for (rc, reg_ty) in rcs.iter().zip(reg_tys.iter()) {
                // Try to grab the next free register of the matching class.
                let next_reg = if (next_x_reg <= x_end) && *rc == RegClass::Int {
                    let x = Some(x_reg(next_x_reg));
                    next_x_reg += 1;
                    x
                } else if (next_f_reg <= f_end) && *rc == RegClass::Float {
                    let f = Some(f_reg(next_f_reg));
                    next_f_reg += 1;
                    f
                } else if (next_v_reg <= v_end) && *rc == RegClass::Vector {
                    let v = Some(v_reg(next_v_reg));
                    next_v_reg += 1;
                    v
                } else {
                    None
                };

                if let Some(reg) = next_reg {
                    slots.push(ABIArgSlot::Reg {
                        reg: reg.to_real_reg().unwrap(),
                        ty: *reg_ty,
                        extension: param.extension,
                    });
                } else {
                    // Out of registers: spill to the stack. Stack-passed
                    // returns require explicit opt-in (StructReturn or the
                    // multi-ret flag).
                    if args_or_rets == ArgsOrRets::Rets && !flags.enable_multi_ret_implicit_sret() {
                        return Err(crate::CodegenError::Unsupported(
                            "Too many return values to fit in registers. \
                            Use a StructReturn argument instead. (#9510)"
                                .to_owned(),
                        ));
                    }

                    // Every stack slot is at least 8 bytes and aligned to its
                    // own (power-of-two) size.
                    let size = reg_ty.bits() / 8;
                    let size = core::cmp::max(size, 8);
                    debug_assert!(size.is_power_of_two());
                    next_stack = align_to(next_stack, size);
                    slots.push(ABIArgSlot::Stack {
                        offset: i64::from(next_stack),
                        ty: *reg_ty,
                        extension: param.extension,
                    });
                    next_stack += size;
                }
            }

            args.push(ABIArg::Slots {
                slots,
                purpose: param.purpose,
            });
        }

        // The return-area pointer is appended after the formal arguments.
        let pos = if let Some(ret_area_ptr) = ret_area_ptr {
            args.push_non_formal(ret_area_ptr);
            Some(args.args().len() - 1)
        } else {
            None
        };

        next_stack = align_to(next_stack, Self::stack_align(call_conv));

        Ok((next_stack, pos))
    }

    /// Load `ty` from a stack slot into `into_reg`.
    fn gen_load_stack(mem: StackAMode, into_reg: Writable<Reg>, ty: Type) -> Self::I {
        let mut flags = MemFlags::trusted();
        // Vector accesses are forced little-endian (presumably Pulley's
        // canonical in-memory vector byte order — confirm).
        if ty.is_vector() {
            flags.set_endianness(ir::Endianness::Little);
        }
        Inst::gen_load(into_reg, mem.into(), ty, flags).into()
    }

    /// Store `from_reg` of type `ty` to a stack slot.
    fn gen_store_stack(mem: StackAMode, from_reg: Reg, ty: Type) -> Self::I {
        let mut flags = MemFlags::trusted();
        // Match `gen_load_stack`: vector stores are forced little-endian.
        if ty.is_vector() {
            flags.set_endianness(ir::Endianness::Little);
        }
        Inst::gen_store(mem.into(), from_reg, ty, flags).into()
    }

    /// Register-to-register move; delegates to the instruction type.
    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Self::I {
        Self::I::gen_move(to_reg, from_reg, ty)
    }

    /// Sign- or zero-extend an integer register.
    ///
    /// Only 8-, 16-, and 32-bit source widths are supported; any other width
    /// panics via `unimplemented!`.
    fn gen_extend(
        dst: Writable<Reg>,
        src: Reg,
        signed: bool,
        from_bits: u8,
        to_bits: u8,
    ) -> Self::I {
        assert!(from_bits < to_bits);
        let src = XReg::new(src).unwrap();
        let dst = dst.try_into().unwrap();
        match (signed, from_bits) {
            (true, 8) => RawInst::Sext8 { dst, src }.into(),
            (true, 16) => RawInst::Sext16 { dst, src }.into(),
            (true, 32) => RawInst::Sext32 { dst, src }.into(),
            (false, 8) => RawInst::Zext8 { dst, src }.into(),
            (false, 16) => RawInst::Zext16 { dst, src }.into(),
            (false, 32) => RawInst::Zext32 { dst, src }.into(),
            _ => unimplemented!("extend {from_bits} to {to_bits} as signed? {signed}"),
        }
    }

    /// Pulley honors exactly the extension the signature specifies.
    fn get_ext_mode(
        _call_conv: isa::CallConv,
        specified: ir::ArgumentExtension,
    ) -> ir::ArgumentExtension {
        specified
    }

    fn gen_args(args: Vec<ArgPair>) -> Self::I {
        Inst::Args { args }.into()
    }

    fn gen_rets(rets: Vec<RetPair>) -> Self::I {
        Inst::Rets { rets }.into()
    }

    /// The spill-temp register doubles as the stack-limit scratch register.
    fn get_stacklimit_reg(_call_conv: isa::CallConv) -> Reg {
        spilltmp_reg()
    }

    /// Emit `into_reg = from_reg + imm` as a 32-bit constant load followed by
    /// a 32-bit add.
    ///
    /// NOTE(review): `dst` is used as the temporary holding `imm` before
    /// `src1` (`from_reg`) is read — this is wrong if `into_reg` aliases
    /// `from_reg`; confirm callers never pass aliasing registers.
    fn gen_add_imm(
        _call_conv: isa::CallConv,
        into_reg: Writable<Reg>,
        from_reg: Reg,
        imm: u32,
    ) -> SmallInstVec<Self::I> {
        let dst = into_reg.try_into().unwrap();
        let imm = imm as i32;
        smallvec![
            RawInst::Xconst32 { dst, imm }.into(),
            RawInst::Xadd32 {
                dst,
                src1: from_reg.try_into().unwrap(),
                src2: dst.to_reg(),
            }
            .into()
        ]
    }

    /// Pulley performs its own stack-bound checking, so explicit limit traps
    /// are never requested.
    fn gen_stack_lower_bound_trap(_limit_reg: Reg) -> SmallInstVec<Self::I> {
        unimplemented!("pulley shouldn't need stack bound checks")
    }

    /// Materialize the address of a stack slot into `dst`.
    fn gen_get_stack_addr(mem: StackAMode, dst: Writable<Reg>) -> Self::I {
        let dst = dst.to_reg();
        let dst = XReg::new(dst).unwrap();
        let dst = WritableXReg::from_reg(dst);
        let mem = mem.into();
        Inst::LoadAddr { dst, mem }.into()
    }

    /// Load `ty` from `base + offset` (trusted access, no trap metadata).
    fn gen_load_base_offset(into_reg: Writable<Reg>, base: Reg, offset: i32, ty: Type) -> Self::I {
        let base = XReg::try_from(base).unwrap();
        let mem = Amode::RegOffset { base, offset };
        Inst::gen_load(into_reg, mem, ty, MemFlags::trusted()).into()
    }

    /// Store `from_reg` of type `ty` to `base + offset` (trusted access).
    fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Self::I {
        let base = XReg::try_from(base).unwrap();
        let mem = Amode::RegOffset { base, offset };
        Inst::gen_store(mem, from_reg, ty, MemFlags::trusted()).into()
    }

    /// Adjust SP by `amount`: negative amounts allocate (`StackAlloc32`),
    /// positive amounts free (`StackFree32`), zero emits nothing.
    fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I> {
        if amount == 0 {
            return smallvec![];
        }
        let inst = if amount < 0 {
            // Negation of a negative i32 cannot overflow unless
            // amount == i32::MIN, which frame sizes never reach.
            let amount = amount.checked_neg().unwrap();
            if let Ok(amt) = u32::try_from(amount) {
                RawInst::StackAlloc32 { amt }
            } else {
                unreachable!()
            }
        } else {
            if let Ok(amt) = u32::try_from(amount) {
                RawInst::StackFree32 { amt }
            } else {
                unreachable!()
            }
        };
        smallvec![inst.into()]
    }

    /// Emit the prologue: optional tail-arg area growth, frame setup per the
    /// chosen `FrameStyle`, then manual stores of any clobbers Pulley's
    /// frame instructions don't save themselves.
    fn gen_prologue_frame_setup(
        _call_conv: isa::CallConv,
        _flags: &settings::Flags,
        _isa_flags: &PulleyFlags,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I> {
        let mut insts = SmallVec::new();

        // Grow the incoming-argument area if tail calls need more space than
        // the caller provided.
        let incoming_args_diff = frame_layout.tail_args_size - frame_layout.incoming_args_size;
        if incoming_args_diff > 0 {
            insts.extend(Self::gen_sp_reg_adjust(-(incoming_args_diff as i32)));
        }

        let style = frame_layout.pulley_frame_style();

        match &style {
            FrameStyle::None => {}
            FrameStyle::PulleyBasicSetup { frame_size } => {
                // push_frame, then allocate the frame manually.
                insts.push(RawInst::PushFrame.into());
                insts.extend(Self::gen_sp_reg_adjust(
                    -i32::try_from(*frame_size).unwrap(),
                ));
            }
            FrameStyle::PulleySetupAndSaveClobbers {
                frame_size,
                saved_by_pulley,
            } => insts.push(
                // Single instruction: push frame, allocate, and save the
                // upper x-registers in `saved_by_pulley`.
                RawInst::PushFrameSave {
                    amt: *frame_size,
                    regs: pulley_interpreter::UpperRegSet::from_bitset(*saved_by_pulley),
                }
                .into(),
            ),
            FrameStyle::Manual { frame_size } => insts.extend(Self::gen_sp_reg_adjust(
                -i32::try_from(*frame_size).unwrap(),
            )),
        }

        // Store any clobbers not covered by the frame instruction above.
        for (offset, ty, reg) in frame_layout.manually_managed_clobbers(&style) {
            let mut flags = MemFlags::trusted();
            if ty.is_vector() {
                flags.set_endianness(ir::Endianness::Little);
            }
            insts.push(Inst::gen_store(Amode::SpOffset { offset }, reg, ty, flags).into());
        }

        insts
    }

    /// Emit the epilogue: restore manually-managed clobbers, then tear the
    /// frame down with the inverse of the prologue's `FrameStyle` sequence.
    fn gen_epilogue_frame_restore(
        _call_conv: isa::CallConv,
        _flags: &settings::Flags,
        _isa_flags: &PulleyFlags,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I> {
        let mut insts = SmallVec::new();
        let style = frame_layout.pulley_frame_style();

        // Reload clobbers from the same offsets the prologue stored them at.
        for (offset, ty, reg) in frame_layout.manually_managed_clobbers(&style) {
            let mut flags = MemFlags::trusted();
            if ty.is_vector() {
                flags.set_endianness(ir::Endianness::Little);
            }
            insts.push(
                Inst::gen_load(
                    Writable::from_reg(reg),
                    Amode::SpOffset { offset },
                    ty,
                    flags,
                )
                .into(),
            );
        }

        match &style {
            FrameStyle::None => {}
            FrameStyle::PulleyBasicSetup { frame_size } => {
                insts.extend(Self::gen_sp_reg_adjust(i32::try_from(*frame_size).unwrap()));
                insts.push(RawInst::PopFrame.into());
            }
            FrameStyle::PulleySetupAndSaveClobbers {
                frame_size,
                saved_by_pulley,
            } => insts.push(
                RawInst::PopFrameRestore {
                    amt: *frame_size,
                    regs: pulley_interpreter::UpperRegSet::from_bitset(*saved_by_pulley),
                }
                .into(),
            ),
            FrameStyle::Manual { frame_size } => {
                insts.extend(Self::gen_sp_reg_adjust(i32::try_from(*frame_size).unwrap()))
            }
        }

        insts
    }

    /// Emit the return, first popping the tail-argument area for `tail`
    /// calling-convention functions.
    fn gen_return(
        call_conv: isa::CallConv,
        _isa_flags: &PulleyFlags,
        frame_layout: &FrameLayout,
    ) -> SmallInstVec<Self::I> {
        let mut insts = SmallVec::new();
        if call_conv == isa::CallConv::Tail && frame_layout.tail_args_size > 0 {
            insts.extend(Self::gen_sp_reg_adjust(
                frame_layout.tail_args_size.try_into().unwrap(),
            ));
        }
        insts.push(RawInst::Ret {}.into());
        insts
    }

    /// No stack probes are emitted; Pulley bounds-checks its own stack (see
    /// `gen_stack_lower_bound_trap`).
    fn gen_probestack(_insts: &mut SmallInstVec<Self::I>, _frame_size: u32) {
    }

    /// Clobber saves are handled entirely in `gen_prologue_frame_setup`.
    fn gen_clobber_save(
        _call_conv: isa::CallConv,
        _flags: &settings::Flags,
        _frame_layout: &FrameLayout,
    ) -> SmallVec<[Self::I; 16]> {
        SmallVec::new()
    }

    /// Clobber restores are handled entirely in `gen_epilogue_frame_restore`.
    fn gen_clobber_restore(
        _call_conv: isa::CallConv,
        _flags: &settings::Flags,
        _frame_layout: &FrameLayout,
    ) -> SmallVec<[Self::I; 16]> {
        SmallVec::new()
    }

    /// Inline memcpy lowering is not implemented for Pulley yet.
    fn gen_memcpy<F: FnMut(Type) -> Writable<Reg>>(
        _call_conv: isa::CallConv,
        _dst: Reg,
        _src: Reg,
        _size: usize,
        _alloc_tmp: F,
    ) -> SmallVec<[Self::I; 8]> {
        todo!()
    }

    /// Spill slots per value: 8 bytes for int/float, 16 for vectors, scaled
    /// by how many pointer-width words fit in 8 bytes.
    fn get_number_of_spillslots_for_value(
        rc: RegClass,
        _target_vector_bytes: u32,
        _isa_flags: &PulleyFlags,
    ) -> u32 {
        // A spill slot is one pointer-width word, so 8 bytes is 2 slots on
        // 32-bit targets and 1 slot on 64-bit targets.
        let slots_for_8bytes = match P::pointer_width() {
            PointerWidth::PointerWidth32 => 2,
            PointerWidth::PointerWidth64 => 1,
        };
        match rc {
            RegClass::Int | RegClass::Float => slots_for_8bytes,
            RegClass::Vector => 2 * slots_for_8bytes,
        }
    }

    /// One shared, statically-built register environment for all functions.
    fn get_machine_env(_flags: &settings::Flags, _call_conv: isa::CallConv) -> &MachineEnv {
        static MACHINE_ENV: MachineEnv = create_reg_environment();
        &MACHINE_ENV
    }

    /// Clobber set seen by a call site: everything for exception edges,
    /// nothing for `preserve_all` callees, the default set otherwise.
    fn get_regs_clobbered_by_call(
        call_conv_of_callee: isa::CallConv,
        is_exception: bool,
    ) -> PRegSet {
        if is_exception {
            ALL_CLOBBERS
        } else if call_conv_of_callee == isa::CallConv::PreserveAll {
            NO_CLOBBERS
        } else {
            DEFAULT_CLOBBERS
        }
    }

    /// Compute the frame layout: filter and sort clobbered callee-saves,
    /// size the clobber area, and decide whether an FP/RA setup area is
    /// needed.
    fn compute_frame_layout(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        _sig: &Signature,
        regs: &[Writable<RealReg>],
        function_calls: FunctionCalls,
        incoming_args_size: u32,
        tail_args_size: u32,
        stackslots_size: u32,
        fixed_frame_storage_size: u32,
        outgoing_args_size: u32,
    ) -> FrameLayout {
        // `preserve_all` saves every clobbered register; otherwise only the
        // default callee-saved set matters.
        let is_callee_save = |reg: &Writable<RealReg>| match call_conv {
            isa::CallConv::PreserveAll => true,
            _ => DEFAULT_CALLEE_SAVES.contains(reg.to_reg().into()),
        };
        // Sorted for deterministic save/restore order.
        let mut regs: Vec<Writable<RealReg>> =
            regs.iter().cloned().filter(is_callee_save).collect();
        regs.sort_unstable();
        let clobber_size = compute_clobber_size(&regs);

        let setup_area_size = if flags.preserve_frame_pointers()
            || function_calls != FunctionCalls::None
            || incoming_args_size > 0
            || clobber_size > 0
            || fixed_frame_storage_size > 0
        {
            // Frame pointer + return address: one pointer-width word each.
            P::pointer_width().bytes() * 2
        } else {
            0
        };

        FrameLayout {
            word_bytes: u32::from(P::pointer_width().bytes()),
            incoming_args_size,
            tail_args_size,
            setup_area_size: setup_area_size.into(),
            clobber_size,
            fixed_frame_storage_size,
            stackslots_size,
            outgoing_args_size,
            clobbered_callee_saves: regs,
            function_calls,
        }
    }

    /// No inline stack probes; see `gen_probestack`.
    fn gen_inline_probestack(
        _insts: &mut SmallInstVec<Self::I>,
        _call_conv: isa::CallConv,
        _frame_size: u32,
        _guard_size: u32,
    ) {
    }

    /// x15, which is kept out of the argument registers in
    /// `compute_arg_locs`, serves as the return-value temporary.
    fn retval_temp_reg(_call_conv_of_callee: isa::CallConv) -> Writable<Reg> {
        Writable::from_reg(regs::x_reg(15))
    }

    /// Exception payloads live in x0/x1 for the conventions Pulley supports;
    /// all other conventions carry no payload registers.
    fn exception_payload_regs(call_conv: isa::CallConv) -> &'static [Reg] {
        const PAYLOAD_REGS: &'static [Reg] = &[
            Reg::from_real_reg(regs::px_reg(0)),
            Reg::from_real_reg(regs::px_reg(1)),
        ];
        match call_conv {
            isa::CallConv::SystemV | isa::CallConv::Tail | isa::CallConv::PreserveAll => {
                PAYLOAD_REGS
            }
            isa::CallConv::Fast
            | isa::CallConv::WindowsFastcall
            | isa::CallConv::AppleAarch64
            | isa::CallConv::Probestack
            | isa::CallConv::Winch => &[],
        }
    }
}
/// The strategy used by a function's prologue/epilogue to set up and tear
/// down its frame (chosen by `FrameLayout::pulley_frame_style`).
enum FrameStyle {
    /// No frame at all: no stack space, no frame pointer, no clobbers.
    None,
    /// `PushFrame`/`PopFrame` instructions plus a separate manual SP
    /// adjustment of `frame_size` bytes.
    PulleyBasicSetup { frame_size: u32 },
    /// A single `PushFrameSave`/`PopFrameRestore` instruction that sets up
    /// the frame, adjusts SP, and saves/restores upper x-registers in one go.
    PulleySetupAndSaveClobbers {
        // Frame size in bytes; u16 because the combined instruction's `amt`
        // field is 16-bit (larger frames fall back to `PulleyBasicSetup`).
        frame_size: u16,
        // Integer callee-saves handled by the instruction itself; bit N
        // corresponds to register x(N+16) (see
        // `clobbered_xregs_saved_by_pulley`).
        saved_by_pulley: ScalarBitSet<u16>,
    },
    /// Manual SP adjustment only — no frame-pointer/return-address setup.
    Manual {
        frame_size: u32,
    },
}
impl FrameLayout {
    /// Whether this function needs the FP/return-address setup area.
    fn setup_frame(&self) -> bool {
        self.setup_area_size > 0
    }

    /// Total frame size below the setup area: clobbers + fixed storage +
    /// outgoing arguments.
    fn stack_size(&self) -> u32 {
        self.clobber_size + self.fixed_frame_storage_size + self.outgoing_args_size
    }

    /// Pick the cheapest `FrameStyle` that covers this layout.
    fn pulley_frame_style(&self) -> FrameStyle {
        let saved_by_pulley = self.clobbered_xregs_saved_by_pulley();
        match (
            self.stack_size(),
            self.setup_frame(),
            saved_by_pulley.is_empty(),
        ) {
            // Nothing to do at all.
            (0, false, true) => FrameStyle::None,
            // Setup area only, no stack space and no pulley-saved regs.
            (0, true, true) => FrameStyle::PulleyBasicSetup { frame_size: 0 },
            // Prefer the combined push/save instruction when the frame size
            // fits its u16 field; otherwise fall back to the basic setup
            // (clobbers then stored manually).
            (frame_size, true, _) => match frame_size.try_into() {
                Ok(frame_size) => FrameStyle::PulleySetupAndSaveClobbers {
                    frame_size,
                    saved_by_pulley,
                },
                Err(_) => FrameStyle::PulleyBasicSetup { frame_size },
            },
            // Stack space but no setup area: bare SP adjustment.
            (frame_size, false, true) => FrameStyle::Manual { frame_size },
            // Saved-by-pulley regs require a setup frame (enforced in
            // `clobbered_xregs_saved_by_pulley`), so this can't happen.
            (_, false, false) => unreachable!(),
        }
    }

    /// Bitset of clobbered upper x-registers (x16..) eligible for saving by
    /// the combined `PushFrameSave` instruction; bit N means x(N+16).
    fn clobbered_xregs_saved_by_pulley(&self) -> ScalarBitSet<u16> {
        let mut clobbered: ScalarBitSet<u16> = ScalarBitSet::new();
        // Without a setup frame the combined instruction isn't used at all.
        if !self.setup_frame() {
            return clobbered;
        }
        let mut found_manual_clobber = false;
        for reg in self.clobbered_callee_saves.iter() {
            let r_reg = reg.to_reg();
            if r_reg.class() == RegClass::Int {
                // Int regs must all precede float/vector regs in the sorted
                // clobber list; `manually_managed_clobbers` relies on the
                // resulting offset layout.
                assert!(!found_manual_clobber);
                if let Some(offset) = r_reg.hw_enc().checked_sub(16) {
                    clobbered.insert(offset);
                }
            } else {
                found_manual_clobber = true;
            }
        }
        clobbered
    }

    /// Iterate `(sp_offset, type, reg)` for clobbers that the prologue and
    /// epilogue must store/load manually — i.e. everything except the
    /// x-registers already covered by `PushFrameSave` in the
    /// `PulleySetupAndSaveClobbers` style.
    fn manually_managed_clobbers<'a>(
        &'a self,
        style: &'a FrameStyle,
    ) -> impl Iterator<Item = (i32, Type, Reg)> + 'a {
        // Slots are assigned top-down from the end of the frame.
        let mut offset = self.stack_size();
        self.clobbered_callee_saves.iter().filter_map(move |reg| {
            // NOTE(review): every slot is assumed 8 bytes here, yet
            // `compute_clobber_size` charges 16 bytes per vector clobber —
            // confirm vector callee-saves cannot overlap adjacent slots.
            offset -= 8;
            let r_reg = reg.to_reg();
            let ty = match r_reg.class() {
                RegClass::Int => {
                    // Skip x-regs the pulley frame instruction saves itself.
                    if let FrameStyle::PulleySetupAndSaveClobbers {
                        saved_by_pulley, ..
                    } = style
                    {
                        if let Some(reg) = r_reg.hw_enc().checked_sub(16) {
                            if saved_by_pulley.contains(reg) {
                                return None;
                            }
                        }
                    }
                    I64
                }
                RegClass::Float => F64,
                RegClass::Vector => I8X16,
            };
            let offset = i32::try_from(offset).unwrap();
            Some((offset, ty, Reg::from(reg.to_reg())))
        })
    }
}
/// Registers a callee must preserve under the default conventions: the upper
/// integer registers x16..=x31. No float or vector registers are
/// callee-saved (they all appear in `DEFAULT_CLOBBERS`).
const DEFAULT_CALLEE_SAVES: PRegSet = PRegSet::empty()
    .with(px_reg(16))
    .with(px_reg(17))
    .with(px_reg(18))
    .with(px_reg(19))
    .with(px_reg(20))
    .with(px_reg(21))
    .with(px_reg(22))
    .with(px_reg(23))
    .with(px_reg(24))
    .with(px_reg(25))
    .with(px_reg(26))
    .with(px_reg(27))
    .with(px_reg(28))
    .with(px_reg(29))
    .with(px_reg(30))
    .with(px_reg(31));
/// Bytes of frame space needed to save `clobbers`: 8 bytes per int/float
/// register and 16 per vector register, rounded up to 16-byte alignment.
fn compute_clobber_size(clobbers: &[Writable<RealReg>]) -> u32 {
    let total: u32 = clobbers
        .iter()
        .map(|reg| match reg.to_reg().class() {
            RegClass::Int | RegClass::Float => 8,
            RegClass::Vector => 16,
        })
        .sum();
    align_to(total, 16)
}
/// Caller-saved registers under the default conventions: x0..=x15 plus every
/// float and vector register. Complement of `DEFAULT_CALLEE_SAVES` for the
/// integer class.
const DEFAULT_CLOBBERS: PRegSet = PRegSet::empty()
    .with(px_reg(0))
    .with(px_reg(1))
    .with(px_reg(2))
    .with(px_reg(3))
    .with(px_reg(4))
    .with(px_reg(5))
    .with(px_reg(6))
    .with(px_reg(7))
    .with(px_reg(8))
    .with(px_reg(9))
    .with(px_reg(10))
    .with(px_reg(11))
    .with(px_reg(12))
    .with(px_reg(13))
    .with(px_reg(14))
    .with(px_reg(15))
    .with(pf_reg(0))
    .with(pf_reg(1))
    .with(pf_reg(2))
    .with(pf_reg(3))
    .with(pf_reg(4))
    .with(pf_reg(5))
    .with(pf_reg(6))
    .with(pf_reg(7))
    .with(pf_reg(8))
    .with(pf_reg(9))
    .with(pf_reg(10))
    .with(pf_reg(11))
    .with(pf_reg(12))
    .with(pf_reg(13))
    .with(pf_reg(14))
    .with(pf_reg(15))
    .with(pf_reg(16))
    .with(pf_reg(17))
    .with(pf_reg(18))
    .with(pf_reg(19))
    .with(pf_reg(20))
    .with(pf_reg(21))
    .with(pf_reg(22))
    .with(pf_reg(23))
    .with(pf_reg(24))
    .with(pf_reg(25))
    .with(pf_reg(26))
    .with(pf_reg(27))
    .with(pf_reg(28))
    .with(pf_reg(29))
    .with(pf_reg(30))
    .with(pf_reg(31))
    .with(pv_reg(0))
    .with(pv_reg(1))
    .with(pv_reg(2))
    .with(pv_reg(3))
    .with(pv_reg(4))
    .with(pv_reg(5))
    .with(pv_reg(6))
    .with(pv_reg(7))
    .with(pv_reg(8))
    .with(pv_reg(9))
    .with(pv_reg(10))
    .with(pv_reg(11))
    .with(pv_reg(12))
    .with(pv_reg(13))
    .with(pv_reg(14))
    .with(pv_reg(15))
    .with(pv_reg(16))
    .with(pv_reg(17))
    .with(pv_reg(18))
    .with(pv_reg(19))
    .with(pv_reg(20))
    .with(pv_reg(21))
    .with(pv_reg(22))
    .with(pv_reg(23))
    .with(pv_reg(24))
    .with(pv_reg(25))
    .with(pv_reg(26))
    .with(pv_reg(27))
    .with(pv_reg(28))
    .with(pv_reg(29))
    .with(pv_reg(30))
    .with(pv_reg(31));
/// Every register in all three classes (x/f/v 0..=31). Used as the clobber
/// set for exception edges in `get_regs_clobbered_by_call`.
const ALL_CLOBBERS: PRegSet = PRegSet::empty()
    .with(px_reg(0))
    .with(px_reg(1))
    .with(px_reg(2))
    .with(px_reg(3))
    .with(px_reg(4))
    .with(px_reg(5))
    .with(px_reg(6))
    .with(px_reg(7))
    .with(px_reg(8))
    .with(px_reg(9))
    .with(px_reg(10))
    .with(px_reg(11))
    .with(px_reg(12))
    .with(px_reg(13))
    .with(px_reg(14))
    .with(px_reg(15))
    .with(px_reg(16))
    .with(px_reg(17))
    .with(px_reg(18))
    .with(px_reg(19))
    .with(px_reg(20))
    .with(px_reg(21))
    .with(px_reg(22))
    .with(px_reg(23))
    .with(px_reg(24))
    .with(px_reg(25))
    .with(px_reg(26))
    .with(px_reg(27))
    .with(px_reg(28))
    .with(px_reg(29))
    .with(px_reg(30))
    .with(px_reg(31))
    .with(pf_reg(0))
    .with(pf_reg(1))
    .with(pf_reg(2))
    .with(pf_reg(3))
    .with(pf_reg(4))
    .with(pf_reg(5))
    .with(pf_reg(6))
    .with(pf_reg(7))
    .with(pf_reg(8))
    .with(pf_reg(9))
    .with(pf_reg(10))
    .with(pf_reg(11))
    .with(pf_reg(12))
    .with(pf_reg(13))
    .with(pf_reg(14))
    .with(pf_reg(15))
    .with(pf_reg(16))
    .with(pf_reg(17))
    .with(pf_reg(18))
    .with(pf_reg(19))
    .with(pf_reg(20))
    .with(pf_reg(21))
    .with(pf_reg(22))
    .with(pf_reg(23))
    .with(pf_reg(24))
    .with(pf_reg(25))
    .with(pf_reg(26))
    .with(pf_reg(27))
    .with(pf_reg(28))
    .with(pf_reg(29))
    .with(pf_reg(30))
    .with(pf_reg(31))
    .with(pv_reg(0))
    .with(pv_reg(1))
    .with(pv_reg(2))
    .with(pv_reg(3))
    .with(pv_reg(4))
    .with(pv_reg(5))
    .with(pv_reg(6))
    .with(pv_reg(7))
    .with(pv_reg(8))
    .with(pv_reg(9))
    .with(pv_reg(10))
    .with(pv_reg(11))
    .with(pv_reg(12))
    .with(pv_reg(13))
    .with(pv_reg(14))
    .with(pv_reg(15))
    .with(pv_reg(16))
    .with(pv_reg(17))
    .with(pv_reg(18))
    .with(pv_reg(19))
    .with(pv_reg(20))
    .with(pv_reg(21))
    .with(pv_reg(22))
    .with(pv_reg(23))
    .with(pv_reg(24))
    .with(pv_reg(25))
    .with(pv_reg(26))
    .with(pv_reg(27))
    .with(pv_reg(28))
    .with(pv_reg(29))
    .with(pv_reg(30))
    .with(pv_reg(31));
const NO_CLOBBERS: PRegSet = PRegSet::empty();
/// Build the regalloc2 `MachineEnv` for Pulley (const, evaluated once into
/// the static in `get_machine_env`).
///
/// Preferred (caller-saved, cheap to use) registers: x0..=x15 and all 32
/// float/vector registers. Non-preferred: the callee-saved x16..=x29 only —
/// x30/x31 are excluded because they are special registers (see the
/// `XReg::SPECIAL_START == 30` assertion below).
const fn create_reg_environment() -> MachineEnv {
    let preferred_regs_by_class: [PRegSet; 3] = [
        // Int class: the caller-saved lower half.
        PRegSet::empty()
            .with(px_reg(0))
            .with(px_reg(1))
            .with(px_reg(2))
            .with(px_reg(3))
            .with(px_reg(4))
            .with(px_reg(5))
            .with(px_reg(6))
            .with(px_reg(7))
            .with(px_reg(8))
            .with(px_reg(9))
            .with(px_reg(10))
            .with(px_reg(11))
            .with(px_reg(12))
            .with(px_reg(13))
            .with(px_reg(14))
            .with(px_reg(15)),
        // Float class: all 32 registers (none are callee-saved).
        PRegSet::empty()
            .with(pf_reg(0))
            .with(pf_reg(1))
            .with(pf_reg(2))
            .with(pf_reg(3))
            .with(pf_reg(4))
            .with(pf_reg(5))
            .with(pf_reg(6))
            .with(pf_reg(7))
            .with(pf_reg(8))
            .with(pf_reg(9))
            .with(pf_reg(10))
            .with(pf_reg(11))
            .with(pf_reg(12))
            .with(pf_reg(13))
            .with(pf_reg(14))
            .with(pf_reg(15))
            .with(pf_reg(16))
            .with(pf_reg(17))
            .with(pf_reg(18))
            .with(pf_reg(19))
            .with(pf_reg(20))
            .with(pf_reg(21))
            .with(pf_reg(22))
            .with(pf_reg(23))
            .with(pf_reg(24))
            .with(pf_reg(25))
            .with(pf_reg(26))
            .with(pf_reg(27))
            .with(pf_reg(28))
            .with(pf_reg(29))
            .with(pf_reg(30))
            .with(pf_reg(31)),
        // Vector class: all 32 registers (none are callee-saved).
        PRegSet::empty()
            .with(pv_reg(0))
            .with(pv_reg(1))
            .with(pv_reg(2))
            .with(pv_reg(3))
            .with(pv_reg(4))
            .with(pv_reg(5))
            .with(pv_reg(6))
            .with(pv_reg(7))
            .with(pv_reg(8))
            .with(pv_reg(9))
            .with(pv_reg(10))
            .with(pv_reg(11))
            .with(pv_reg(12))
            .with(pv_reg(13))
            .with(pv_reg(14))
            .with(pv_reg(15))
            .with(pv_reg(16))
            .with(pv_reg(17))
            .with(pv_reg(18))
            .with(pv_reg(19))
            .with(pv_reg(20))
            .with(pv_reg(21))
            .with(pv_reg(22))
            .with(pv_reg(23))
            .with(pv_reg(24))
            .with(pv_reg(25))
            .with(pv_reg(26))
            .with(pv_reg(27))
            .with(pv_reg(28))
            .with(pv_reg(29))
            .with(pv_reg(30))
            .with(pv_reg(31)),
    ];
    let non_preferred_regs_by_class: [PRegSet; 3] = [
        // Int class: callee-saved x16..=x29; x30+ are special registers.
        PRegSet::empty()
            .with(px_reg(16))
            .with(px_reg(17))
            .with(px_reg(18))
            .with(px_reg(19))
            .with(px_reg(20))
            .with(px_reg(21))
            .with(px_reg(22))
            .with(px_reg(23))
            .with(px_reg(24))
            .with(px_reg(25))
            .with(px_reg(26))
            .with(px_reg(27))
            .with(px_reg(28))
            .with(px_reg(29)),
        // Float/vector classes have no second-tier registers.
        PRegSet::empty(),
        PRegSet::empty(),
    ];
    // Guard: the allocatable sets above stop right where the special
    // registers begin.
    debug_assert!(XReg::SPECIAL_START == 30);
    MachineEnv {
        preferred_regs_by_class,
        non_preferred_regs_by_class,
        fixed_stack_slots: vec![],
        scratch_by_class: [None, None, None],
    }
}