#![allow(dead_code)]
#![allow(clippy::all)]
#![no_std]
#[cfg(feature = "fuzzing")]
extern crate std;
extern crate alloc;
/// Log at `trace` level, but only when the "trace-log" feature is enabled;
/// with the feature off, `cfg!` is `false` and the call compiles away.
macro_rules! trace {
    ($($tt:tt)*) => {
        if cfg!(feature = "trace-log") {
            ::log::trace!($($tt)*);
        }
    };
}

/// True only when the "trace-log" feature is compiled in *and* the `log`
/// backend currently has trace-level logging enabled.
macro_rules! trace_enabled {
    () => {
        cfg!(feature = "trace-log") && ::log::log_enabled!(::log::Level::Trace)
    };
}
use alloc::rc::Rc;
use allocator_api2::vec::Vec as Vec2;
use core::ops::Deref as _;
use core::{hash::BuildHasherDefault, iter::FromIterator};
use rustc_hash::FxHasher;
type FxHashMap<K, V> = hashbrown::HashMap<K, V, BuildHasherDefault<FxHasher>>;
type FxHashSet<V> = hashbrown::HashSet<V, BuildHasherDefault<FxHasher>>;
pub(crate) mod cfg;
pub(crate) mod domtree;
pub(crate) mod fastalloc;
pub mod indexset;
pub(crate) mod ion;
pub(crate) mod moves;
pub(crate) mod postorder;
pub mod ssa;
#[macro_use]
mod index;
pub use self::ion::data_structures::Ctx;
use alloc::vec::Vec;
pub use index::{Block, Inst, InstRange};
pub mod checker;
#[cfg(feature = "fuzzing")]
pub mod fuzzing;
#[cfg(feature = "enable-serde")]
pub mod serialize;
#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};
/// A register class. Every virtual and physical register belongs to exactly
/// one class; the discriminants are used directly as 2-bit fields in the
/// packed `PReg`/`VReg`/`Operand` representations below.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum RegClass {
    Int = 0,
    Float = 1,
    Vector = 2,
}

/// A physical register, packed into one byte: the low `MAX_BITS` (6) bits
/// hold the hardware encoding and the two bits above them hold the
/// `RegClass` discriminant.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct PReg {
    bits: u8,
}
impl PReg {
    /// Number of bits used for the hardware encoding.
    pub const MAX_BITS: usize = 6;
    /// Largest representable hardware encoding (63).
    pub const MAX: usize = (1 << Self::MAX_BITS) - 1;
    /// Size of the packed index space: 2 class bits above the hw-enc bits.
    pub const NUM_INDEX: usize = 1 << (Self::MAX_BITS + 2);
    /// Sentinel bit pattern used by `invalid()`: class `Int`, hw enc `MAX`.
    pub const INVALID: u8 = ((RegClass::Int as u8) << Self::MAX_BITS) | (Self::MAX as u8);

    /// Create a physical register from its hardware encoding and class.
    #[inline(always)]
    pub const fn new(hw_enc: usize, class: RegClass) -> Self {
        debug_assert!(hw_enc <= PReg::MAX);
        PReg {
            bits: ((class as u8) << Self::MAX_BITS) | (hw_enc as u8),
        }
    }

    /// The hardware encoding (low `MAX_BITS` bits).
    #[inline(always)]
    pub const fn hw_enc(self) -> usize {
        self.bits as usize & Self::MAX
    }

    /// The register class, stored in the two bits above the hw encoding.
    #[inline(always)]
    pub const fn class(self) -> RegClass {
        match (self.bits >> Self::MAX_BITS) & 0b11 {
            0 => RegClass::Int,
            1 => RegClass::Float,
            2 => RegClass::Vector,
            // Class field 3 is never produced by `new()`.
            _ => unreachable!(),
        }
    }

    /// Dense packed index combining class and hw encoding; suitable as an
    /// array key over all physical registers.
    #[inline(always)]
    pub const fn index(self) -> usize {
        self.bits as usize
    }

    /// Inverse of `index()`; masks the input to the valid index range.
    #[inline(always)]
    pub const fn from_index(index: usize) -> Self {
        PReg {
            bits: (index & (Self::NUM_INDEX - 1)) as u8,
        }
    }

    /// The sentinel "no register" value (see `INVALID`).
    #[inline(always)]
    pub const fn invalid() -> Self {
        PReg {
            bits: Self::INVALID,
        }
    }

    /// `None` if this is the invalid sentinel, otherwise `Some(self)`.
    #[inline(always)]
    pub const fn as_valid(self) -> Option<Self> {
        if self.bits == Self::INVALID {
            None
        } else {
            Some(self)
        }
    }
}
impl Default for PReg {
fn default() -> Self {
Self::invalid()
}
}
impl core::fmt::Debug for PReg {
    /// Verbose form, e.g. `PReg(hw = 3, class = Int, index = 3)`.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let (hw, class, index) = (self.hw_enc(), self.class(), self.index());
        write!(f, "PReg(hw = {}, class = {:?}, index = {})", hw, class, index)
    }
}
impl core::fmt::Display for PReg {
    /// Compact form, e.g. `p3i`: `p`, the hardware encoding, then a
    /// one-letter class suffix (`i`/`f`/`v`).
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let suffix = match self.class() {
            RegClass::Int => "i",
            RegClass::Float => "f",
            RegClass::Vector => "v",
        };
        write!(f, "p{}", self.hw_enc())?;
        f.write_str(suffix)
    }
}
/// Storage word for `PRegSet` bitmaps.
type Bits = u64;

/// A set of `PReg`s, represented as a fixed-size bitmap indexed by
/// `PReg::index()`. With 64-bit words and 6-bit hardware encodings, each
/// register class occupies exactly one word of the array.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct PRegSet {
    bits: [Bits; Self::LEN],
}
impl PRegSet {
    /// Bits per storage word.
    const BITS: usize = core::mem::size_of::<Bits>() * 8;
    /// Number of words needed to cover the whole `PReg` index space.
    const LEN: usize = (PReg::NUM_INDEX + Self::BITS - 1) / Self::BITS;

    /// The empty set.
    pub const fn empty() -> Self {
        Self {
            bits: [0; Self::LEN],
        }
    }

    /// Map a register to its (word index, bit-within-word) position.
    const fn split_index(reg: PReg) -> (usize, usize) {
        let index = reg.index();
        (index >> Self::BITS.ilog2(), index & (Self::BITS - 1))
    }

    /// Whether `reg` is in the set.
    pub fn contains(&self, reg: PReg) -> bool {
        let (index, bit) = Self::split_index(reg);
        self.bits[index] & (1 << bit) != 0
    }

    /// A copy of this set with `reg` added (const-friendly builder form).
    pub const fn with(self, reg: PReg) -> Self {
        let (index, bit) = Self::split_index(reg);
        let mut out = self;
        out.bits[index] |= 1 << bit;
        out
    }

    /// Insert `reg` into the set in place.
    pub const fn add(&mut self, reg: PReg) {
        let (index, bit) = Self::split_index(reg);
        self.bits[index] |= 1 << bit;
    }

    /// Remove `reg` from the set in place (no-op if absent).
    pub fn remove(&mut self, reg: PReg) {
        let (index, bit) = Self::split_index(reg);
        self.bits[index] &= !(1 << bit);
    }

    /// In-place set union: `self |= other`.
    pub fn union_from(&mut self, other: PRegSet) {
        for i in 0..self.bits.len() {
            self.bits[i] |= other.bits[i];
        }
    }

    /// In-place set intersection: `self &= other`.
    pub fn intersect_from(&mut self, other: PRegSet) {
        for i in 0..self.bits.len() {
            self.bits[i] &= other.bits[i];
        }
    }

    /// The bitwise complement of this set (over the full index space).
    pub fn invert(&self) -> PRegSet {
        let mut set = self.bits;
        for i in 0..self.bits.len() {
            set[i] = !self.bits[i];
        }
        PRegSet { bits: set }
    }

    /// Whether the set contains no registers of the given class.
    /// A packed index is `(class << 6) | hw_enc` with `hw_enc <= 63`, so
    /// word `regclass` of the bitmap holds exactly that class's registers.
    pub fn is_empty(&self, regclass: RegClass) -> bool {
        self.bits[regclass as usize] == 0
    }

    /// Total number of registers in the set (all classes).
    pub fn len(&self) -> u32 {
        self.bits.iter().map(|s| s.count_ones()).sum()
    }

    /// The member with the largest packed index, if any.
    pub fn max_preg(&self) -> Option<PReg> {
        self.into_iter().last()
    }

    /// Insert every register whose packed index is strictly less than
    /// `reg`'s (exclusive of `reg` itself): all lower words are filled,
    /// then the bits below `reg` in its own word.
    pub fn add_up_to(&mut self, reg: PReg) {
        let (index, bit) = Self::split_index(reg);
        for i in 0..index {
            self.bits[i] = !0;
        }
        self.bits[index] = (1 << bit) - 1;
    }
}
impl core::ops::BitAnd<PRegSet> for PRegSet {
    type Output = PRegSet;

    /// Set intersection via `&`.
    fn bitand(mut self, rhs: PRegSet) -> Self::Output {
        self.intersect_from(rhs);
        self
    }
}

impl core::ops::BitOr<PRegSet> for PRegSet {
    type Output = PRegSet;

    /// Set union via `|`.
    fn bitor(mut self, rhs: PRegSet) -> Self::Output {
        self.union_from(rhs);
        self
    }
}
impl IntoIterator for &PRegSet {
    type Item = PReg;
    type IntoIter = PRegSetIter;

    /// Iterating a borrowed set copies its bitmap (`PRegSet` is `Copy`),
    /// so the set itself is left untouched.
    fn into_iter(self) -> PRegSetIter {
        PRegSetIter {
            bits: self.bits,
            cur: 0,
        }
    }
}

impl IntoIterator for PRegSet {
    type Item = PReg;
    type IntoIter = PRegSetIter;

    /// Iterate over the set's registers in ascending packed-index order.
    fn into_iter(self) -> PRegSetIter {
        PRegSetIter {
            bits: self.bits,
            cur: 0,
        }
    }
}
/// Iterator over a `PRegSet`, yielding registers in ascending packed-index
/// order. Works destructively on its own copy of the bitmap.
pub struct PRegSetIter {
    bits: [Bits; PRegSet::LEN],
    cur: usize,
}

impl Iterator for PRegSetIter {
    type Item = PReg;
    fn next(&mut self) -> Option<PReg> {
        loop {
            // `None` from `get_mut` means we ran off the last word: done.
            let bits = self.bits.get_mut(self.cur)?;
            if *bits != 0 {
                // Pop the lowest set bit and translate it back to a
                // packed register index.
                let bit = bits.trailing_zeros();
                *bits &= !(1 << bit);
                let index = bit as usize + self.cur * PRegSet::BITS;
                return Some(PReg::from_index(index));
            }
            self.cur += 1;
        }
    }
}
impl From<&MachineEnv> for PRegSet {
    /// Collect every allocatable register — preferred and non-preferred,
    /// across all classes — from the machine environment into one set.
    fn from(env: &MachineEnv) -> Self {
        let mut set = Self::default();
        env.preferred_regs_by_class
            .iter()
            .chain(env.non_preferred_regs_by_class.iter())
            .for_each(|class_set| set.union_from(*class_set));
        set
    }
}
impl FromIterator<PReg> for PRegSet {
    /// Build a set from any iterator of registers (duplicates are fine).
    fn from_iter<T: IntoIterator<Item = PReg>>(iter: T) -> Self {
        let mut out = Self::default();
        iter.into_iter().for_each(|preg| out.add(preg));
        out
    }
}
impl core::fmt::Display for PRegSet {
    /// Renders as `{p0i, p1i, }` — each member followed by `, `, in
    /// ascending index order.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("{")?;
        for preg in *self {
            write!(f, "{preg}, ")?;
        }
        f.write_str("}")
    }
}
/// A virtual register, packed into 32 bits: the class in the low two bits
/// and the vreg number in the 30 bits above.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct VReg {
    bits: u32,
}

impl VReg {
    /// Number of bits available for the vreg number.
    pub const MAX_BITS: usize = 30;
    /// Largest representable vreg number.
    pub const MAX: usize = (1 << Self::MAX_BITS) - 1;

    /// Create a virtual register from its number and class.
    #[inline(always)]
    pub const fn new(virt_reg: usize, class: RegClass) -> Self {
        debug_assert!(virt_reg <= VReg::MAX);
        VReg {
            bits: ((virt_reg as u32) << 2) | (class as u8 as u32),
        }
    }

    /// The vreg number (bits above the 2-bit class field).
    #[inline(always)]
    pub const fn vreg(self) -> usize {
        let vreg = (self.bits >> 2) as usize;
        vreg
    }

    /// The register class stored in the low two bits.
    #[inline(always)]
    pub const fn class(self) -> RegClass {
        match self.bits & 0b11 {
            0 => RegClass::Int,
            1 => RegClass::Float,
            2 => RegClass::Vector,
            // Class field 3 is never produced by `new()`.
            _ => unreachable!(),
        }
    }

    /// The sentinel "no vreg" value: vreg number `MAX`, class `Int`.
    #[inline(always)]
    pub const fn invalid() -> Self {
        VReg::new(Self::MAX, RegClass::Int)
    }

    /// Raw packed representation.
    #[inline(always)]
    pub const fn bits(self) -> usize {
        self.bits as usize
    }

    /// Reconstruct from the raw packed representation (inverse of `bits`).
    #[inline(always)]
    pub const fn from_bits(bits: u32) -> VReg {
        Self { bits }
    }
}
impl From<u32> for VReg {
fn from(value: u32) -> Self {
Self { bits: value }
}
}
impl core::fmt::Debug for VReg {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(
f,
"VReg(vreg = {}, class = {:?})",
self.vreg(),
self.class()
)
}
}
impl core::fmt::Display for VReg {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "v{}", self.vreg())
}
}
/// A spill slot in the stack frame, identified by a 24-bit index.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct SpillSlot {
    bits: u32,
}

impl SpillSlot {
    /// Largest representable slot index.
    pub const MAX: usize = (1 << 24) - 1;

    /// Create a spill slot with the given index.
    #[inline(always)]
    pub fn new(slot: usize) -> Self {
        debug_assert!(slot <= Self::MAX);
        SpillSlot { bits: slot as u32 }
    }

    /// The slot index (low 24 bits).
    #[inline(always)]
    pub fn index(self) -> usize {
        (self.bits & 0x00ffffff) as usize
    }

    /// The slot `offset` slots above this one.
    #[inline(always)]
    pub fn plus(self, offset: usize) -> Self {
        SpillSlot::new(self.index() + offset)
    }

    /// The sentinel invalid slot (all bits set).
    #[inline(always)]
    pub fn invalid() -> Self {
        SpillSlot { bits: 0xffff_ffff }
    }

    /// Whether this is the invalid sentinel.
    #[inline(always)]
    pub fn is_invalid(self) -> bool {
        self == Self::invalid()
    }

    /// Whether this is a real slot (not the sentinel).
    #[inline(always)]
    pub fn is_valid(self) -> bool {
        self != Self::invalid()
    }
}
impl core::fmt::Display for SpillSlot {
    /// Renders as `stackN`, where `N` is the slot index.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(f, "stack{}", self.index())
    }
}
/// A constraint on where an operand may be allocated.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum OperandConstraint {
    /// Any register or stack slot.
    Any,
    /// Any register of the operand's class.
    Reg,
    /// Any stack slot.
    Stack,
    /// This specific physical register.
    FixedReg(PReg),
    /// Reuse the allocation of the input operand at the given index.
    Reuse(usize),
    /// Restrict to hardware encodings below this (power-of-two) limit;
    /// see the encoding in `Operand::new`.
    Limit(usize),
}

impl core::fmt::Display for OperandConstraint {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        match self {
            Self::Any => write!(f, "any"),
            Self::Reg => write!(f, "reg"),
            Self::Stack => write!(f, "stack"),
            Self::FixedReg(preg) => write!(f, "fixed({preg})"),
            Self::Reuse(idx) => write!(f, "reuse({idx})"),
            // `max` is exclusive (and always >= 1, since it is a power of
            // two), so display the inclusive upper bound.
            Self::Limit(max) => write!(f, "limit(0..={})", max - 1),
        }
    }
}

/// Whether an operand defines or uses its vreg.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum OperandKind {
    Def = 0,
    Use = 1,
}

/// The point within an instruction at which an operand's allocation is
/// live: before (Early) or after (Late) the instruction executes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum OperandPos {
    Early = 0,
    Late = 1,
}

/// An instruction operand: a vreg, a constraint, a kind (def/use) and a
/// position (early/late), packed into 64 bits. See `Operand::new` for the
/// field layout.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct Operand {
    bits: u64,
}
impl Operand {
    // Packed 64-bit layout, low to high:
    //   vreg number (32) | class (2) | pos (1) | kind (1) | constraint (7)
    // for a total of 43 bits; the rest are zero.
    const VREG_BITS: usize = 32;
    const VREG_SHIFT: usize = 0;
    const VREG_MASK: u64 = (1 << Self::VREG_BITS) - 1;
    const CLASS_BITS: usize = 2;
    const CLASS_SHIFT: usize = Self::VREG_SHIFT + Self::VREG_BITS;
    const CLASS_MASK: u64 = (1 << Self::CLASS_BITS) - 1;
    const POS_BITS: usize = 1;
    const POS_SHIFT: usize = Self::CLASS_SHIFT + Self::CLASS_BITS;
    const POS_MASK: u64 = (1 << Self::POS_BITS) - 1;
    const KIND_BITS: usize = 1;
    const KIND_SHIFT: usize = Self::POS_SHIFT + Self::POS_BITS;
    const KIND_MASK: u64 = (1 << Self::KIND_BITS) - 1;
    const CONSTRAINT_BITS: usize = 7;
    const CONSTRAINT_SHIFT: usize = Self::KIND_SHIFT + Self::KIND_BITS;
    const CONSTRAINT_MASK: u64 = (1 << Self::CONSTRAINT_BITS) - 1;
    const TOTAL_BITS: usize = Self::CONSTRAINT_SHIFT + Self::CONSTRAINT_BITS;

    /// Build an operand from its four components.
    #[inline(always)]
    pub fn new(
        vreg: VReg,
        constraint: OperandConstraint,
        kind: OperandKind,
        pos: OperandPos,
    ) -> Self {
        // Constraint-field encoding (7 bits), decoded in `constraint()`:
        //   1xxxxxx => FixedReg with hw encoding xxxxxx
        //   01xxxxx => Reuse(xxxxx)
        //   001xxxx => Limit(1 << xxxx)
        //   0000000 / 0000001 / 0000010 => Any / Reg / Stack
        let constraint_field = match constraint {
            OperandConstraint::Any => 0,
            OperandConstraint::Reg => 1,
            OperandConstraint::Stack => 2,
            OperandConstraint::FixedReg(preg) => {
                debug_assert_eq!(preg.class(), vreg.class());
                0b1000000 | preg.hw_enc() as u64
            }
            OperandConstraint::Reuse(which) => {
                debug_assert!(which <= 0b11111);
                0b0100000 | which as u64
            }
            OperandConstraint::Limit(max) => {
                // Only the log2 of the limit is stored, so it must be an
                // exact power of two within the hw-encoding range.
                assert!(max.is_power_of_two());
                assert!(
                    max <= PReg::MAX + 1,
                    "limit is larger than the allowed register encoding"
                );
                let log2 = max.ilog2();
                debug_assert!(log2 <= 0b1111);
                0b0010000 | log2 as u64
            }
        };
        let class_field = vreg.class() as u8 as u64;
        let pos_field = pos as u8 as u64;
        let kind_field = kind as u8 as u64;
        Operand {
            bits: ((vreg.vreg() as u64) << Self::VREG_SHIFT)
                | (class_field << Self::CLASS_SHIFT)
                | (pos_field << Self::POS_SHIFT)
                | (kind_field << Self::KIND_SHIFT)
                | (constraint_field << Self::CONSTRAINT_SHIFT),
        }
    }

    /// A use of `vreg` in any register, live at the early point.
    #[inline(always)]
    pub fn reg_use(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reg,
            OperandKind::Use,
            OperandPos::Early,
        )
    }

    /// A use of `vreg` in any register, live through the late point.
    #[inline(always)]
    pub fn reg_use_at_end(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reg,
            OperandKind::Use,
            OperandPos::Late,
        )
    }

    /// A def of `vreg` into any register, live from the late point.
    #[inline(always)]
    pub fn reg_def(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reg,
            OperandKind::Def,
            OperandPos::Late,
        )
    }

    /// A def of `vreg` into any register, live from the early point.
    #[inline(always)]
    pub fn reg_def_at_start(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reg,
            OperandKind::Def,
            OperandPos::Early,
        )
    }

    /// A temporary register: a def that is live from the early point, so
    /// it cannot share a register with any input.
    #[inline(always)]
    pub fn reg_temp(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reg,
            OperandKind::Def,
            OperandPos::Early,
        )
    }

    /// A def that reuses the allocation of input operand `idx`.
    #[inline(always)]
    pub fn reg_reuse_def(vreg: VReg, idx: usize) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Reuse(idx),
            OperandKind::Def,
            OperandPos::Late,
        )
    }

    /// A use of `vreg` pinned to the specific register `preg` (early).
    #[inline(always)]
    pub fn reg_fixed_use(vreg: VReg, preg: PReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::FixedReg(preg),
            OperandKind::Use,
            OperandPos::Early,
        )
    }

    /// A def of `vreg` pinned to the specific register `preg` (late).
    #[inline(always)]
    pub fn reg_fixed_def(vreg: VReg, preg: PReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::FixedReg(preg),
            OperandKind::Def,
            OperandPos::Late,
        )
    }

    /// A fixed-register use that stays live through the late point.
    #[inline(always)]
    pub fn reg_fixed_use_at_end(vreg: VReg, preg: PReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::FixedReg(preg),
            OperandKind::Use,
            OperandPos::Late,
        )
    }

    /// A fixed-register def that is live from the early point.
    #[inline(always)]
    pub fn reg_fixed_def_at_start(vreg: VReg, preg: PReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::FixedReg(preg),
            OperandKind::Def,
            OperandPos::Early,
        )
    }

    /// A use of `vreg` anywhere (register or stack), early.
    #[inline(always)]
    pub fn any_use(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Any,
            OperandKind::Use,
            OperandPos::Early,
        )
    }

    /// A def of `vreg` anywhere (register or stack), late.
    #[inline(always)]
    pub fn any_def(vreg: VReg) -> Self {
        Operand::new(
            vreg,
            OperandConstraint::Any,
            OperandKind::Def,
            OperandPos::Late,
        )
    }

    /// Reference a fixed physical register with no vreg attached; encoded
    /// with the sentinel vreg number `VReg::MAX` (see
    /// `as_fixed_nonallocatable`).
    #[inline(always)]
    pub fn fixed_nonallocatable(preg: PReg) -> Self {
        Operand::new(
            VReg::new(VReg::MAX, preg.class()),
            OperandConstraint::FixedReg(preg),
            OperandKind::Use,
            OperandPos::Early,
        )
    }

    /// The operand's virtual register (reassembled with its class).
    #[inline(always)]
    pub fn vreg(self) -> VReg {
        let vreg_idx = ((self.bits >> Self::VREG_SHIFT) & Self::VREG_MASK) as usize;
        VReg::new(vreg_idx, self.class())
    }

    /// The operand's register class.
    #[inline(always)]
    pub fn class(self) -> RegClass {
        let class_field = (self.bits >> Self::CLASS_SHIFT) & Self::CLASS_MASK;
        match class_field {
            0 => RegClass::Int,
            1 => RegClass::Float,
            2 => RegClass::Vector,
            _ => unreachable!(),
        }
    }

    /// Whether this operand is a def or a use.
    #[inline(always)]
    pub fn kind(self) -> OperandKind {
        let kind_field = (self.bits >> Self::KIND_SHIFT) & Self::KIND_MASK;
        match kind_field {
            0 => OperandKind::Def,
            1 => OperandKind::Use,
            _ => unreachable!(),
        }
    }

    /// The early/late position of this operand.
    #[inline(always)]
    pub fn pos(self) -> OperandPos {
        let pos_field = (self.bits >> Self::POS_SHIFT) & Self::POS_MASK;
        match pos_field {
            0 => OperandPos::Early,
            1 => OperandPos::Late,
            _ => unreachable!(),
        }
    }

    /// Decode the constraint field; inverse of the encoding in `new()`.
    #[inline(always)]
    pub fn constraint(self) -> OperandConstraint {
        let constraint_field =
            ((self.bits >> Self::CONSTRAINT_SHIFT) & Self::CONSTRAINT_MASK) as usize;
        if constraint_field & 0b1000000 != 0 {
            OperandConstraint::FixedReg(PReg::new(constraint_field & 0b0111111, self.class()))
        } else if constraint_field & 0b0100000 != 0 {
            OperandConstraint::Reuse(constraint_field & 0b0011111)
        } else if constraint_field & 0b0010000 != 0 {
            OperandConstraint::Limit(1 << (constraint_field & 0b0001111))
        } else {
            match constraint_field {
                0 => OperandConstraint::Any,
                1 => OperandConstraint::Reg,
                2 => OperandConstraint::Stack,
                _ => unreachable!(),
            }
        }
    }

    /// If this is a `fixed_nonallocatable` operand (fixed constraint with
    /// the sentinel vreg number), return its register.
    #[inline(always)]
    pub fn as_fixed_nonallocatable(self) -> Option<PReg> {
        match self.constraint() {
            OperandConstraint::FixedReg(preg) if self.vreg().vreg() == VReg::MAX => Some(preg),
            _ => None,
        }
    }

    /// Raw packed representation.
    #[inline(always)]
    pub fn bits(self) -> u64 {
        self.bits
    }

    /// Reconstruct from `bits()`; bits above `TOTAL_BITS` must be zero.
    #[inline(always)]
    pub fn from_bits(bits: u64) -> Self {
        debug_assert_eq!(bits >> Self::TOTAL_BITS, 0);
        Operand { bits }
    }
}
impl core::fmt::Debug for Operand {
    /// Debug output is identical to `Display`.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        core::fmt::Display::fmt(self, f)
    }
}
impl core::fmt::Display for Operand {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Fixed non-allocatable operands get a dedicated short form.
        if let Some(preg) = self.as_fixed_nonallocatable() {
            return write!(f, "Fixed: {preg}");
        }
        match (self.kind(), self.pos()) {
            // The default position for each kind (late defs, early uses)
            // is left implicit; anything else is printed explicitly.
            (OperandKind::Def, OperandPos::Late) | (OperandKind::Use, OperandPos::Early) => {
                write!(f, "{:?}", self.kind())?;
            }
            _ => {
                write!(f, "{:?}@{:?}", self.kind(), self.pos())?;
            }
        }
        write!(
            f,
            ": {}{} {}",
            self.vreg(),
            match self.class() {
                RegClass::Int => "i",
                RegClass::Float => "f",
                RegClass::Vector => "v",
            },
            self.constraint()
        )
    }
}
/// The result of allocating an operand: nothing yet, a physical register,
/// or a spill slot, packed into 32 bits (kind in bits 29.., index below).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct Allocation {
    bits: u32,
}

impl core::fmt::Debug for Allocation {
    /// Debug output is identical to `Display`.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        core::fmt::Display::fmt(self, f)
    }
}

impl core::fmt::Display for Allocation {
    /// Renders as `none`, the register (e.g. `p3i`), or the slot (`stackN`).
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        match self.kind() {
            AllocationKind::None => write!(f, "none"),
            AllocationKind::Reg => write!(f, "{}", self.as_reg().unwrap()),
            AllocationKind::Stack => write!(f, "{}", self.as_stack().unwrap()),
        }
    }
}
impl Allocation {
    /// Pack a kind (bits 29..) and a 28-bit index into one word.
    #[inline(always)]
    pub(crate) fn new(kind: AllocationKind, index: usize) -> Self {
        debug_assert!(index < (1 << 28));
        Self {
            bits: ((kind as u8 as u32) << 29) | (index as u32),
        }
    }

    /// The "no allocation" value.
    #[inline(always)]
    pub fn none() -> Allocation {
        Allocation::new(AllocationKind::None, 0)
    }

    /// An allocation to the given physical register.
    #[inline(always)]
    pub fn reg(preg: PReg) -> Allocation {
        Allocation::new(AllocationKind::Reg, preg.index())
    }

    /// An allocation to the given spill slot.
    #[inline(always)]
    pub fn stack(slot: SpillSlot) -> Allocation {
        Allocation::new(AllocationKind::Stack, slot.bits as usize)
    }

    /// The allocation's kind, from bits 29..32.
    #[inline(always)]
    pub fn kind(self) -> AllocationKind {
        match (self.bits >> 29) & 7 {
            0 => AllocationKind::None,
            1 => AllocationKind::Reg,
            2 => AllocationKind::Stack,
            _ => unreachable!(),
        }
    }

    /// Whether no location has been assigned.
    #[inline(always)]
    pub fn is_none(self) -> bool {
        self.kind() == AllocationKind::None
    }

    /// Whether a location (register or stack) has been assigned.
    #[inline(always)]
    pub fn is_some(self) -> bool {
        self.kind() != AllocationKind::None
    }

    /// Whether this is a register allocation.
    #[inline(always)]
    pub fn is_reg(self) -> bool {
        self.kind() == AllocationKind::Reg
    }

    /// Whether this is a stack-slot allocation.
    #[inline(always)]
    pub fn is_stack(self) -> bool {
        self.kind() == AllocationKind::Stack
    }

    /// The kind-specific index payload (low 28 bits).
    #[inline(always)]
    pub fn index(self) -> usize {
        (self.bits & ((1 << 28) - 1)) as usize
    }

    /// The physical register, if this is a register allocation.
    #[inline(always)]
    pub fn as_reg(self) -> Option<PReg> {
        if self.kind() == AllocationKind::Reg {
            Some(PReg::from_index(self.index()))
        } else {
            None
        }
    }

    /// The spill slot, if this is a stack allocation.
    #[inline(always)]
    pub fn as_stack(self) -> Option<SpillSlot> {
        if self.kind() == AllocationKind::Stack {
            Some(SpillSlot {
                bits: self.index() as u32,
            })
        } else {
            None
        }
    }

    /// Raw packed representation.
    #[inline(always)]
    pub fn bits(self) -> u32 {
        self.bits
    }

    /// Reconstruct from a raw packed representation.
    #[inline(always)]
    pub fn from_bits(bits: u32) -> Self {
        // NOTE(review): this assert accepts only patterns whose top three
        // bits are >= 5, yet `kind()` treats any (bits >> 29) value other
        // than 0..=2 as unreachable. The callers (outside this view)
        // presumably pass values with an extra high bit set -- confirm
        // against the call sites before relying on this invariant.
        debug_assert!(bits >> 29 >= 5);
        Self { bits }
    }
}
/// Discriminant for the kind of location an `Allocation` refers to.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u8)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum AllocationKind {
    None = 0,
    Reg = 1,
    Stack = 2,
}

/// The interface a client's IR must implement for the allocator to query
/// instructions, blocks, and operands.
pub trait Function {
    /// Total number of instructions.
    fn num_insts(&self) -> usize;
    /// Total number of basic blocks.
    fn num_blocks(&self) -> usize;
    /// The function's entry block.
    fn entry_block(&self) -> Block;
    /// The contiguous instruction range of a block.
    fn block_insns(&self, block: Block) -> InstRange;
    /// Successor blocks of `block`.
    fn block_succs(&self, block: Block) -> &[Block];
    /// Predecessor blocks of `block`.
    fn block_preds(&self, block: Block) -> &[Block];
    /// Block parameters (SSA) of `block`.
    fn block_params(&self, block: Block) -> &[VReg];
    /// Whether `insn` is a return.
    fn is_ret(&self, insn: Inst) -> bool;
    /// Whether `insn` is a branch.
    fn is_branch(&self, insn: Inst) -> bool;
    /// The vregs passed as block parameters to successor `succ_idx` of
    /// `block` by the branch `insn`.
    fn branch_blockparams(&self, block: Block, insn: Inst, succ_idx: usize) -> &[VReg];
    /// The operands of `insn`.
    fn inst_operands(&self, insn: Inst) -> &[Operand];
    /// Physical registers clobbered by `insn`.
    fn inst_clobbers(&self, insn: Inst) -> PRegSet;
    /// Total number of virtual registers.
    fn num_vregs(&self) -> usize;
    /// Debug-value label ranges; defaults to none.
    fn debug_value_labels(&self) -> &[(VReg, Inst, Inst, u32)] {
        &[]
    }
    /// Number of spill slots needed for one value of `regclass`.
    fn spillslot_size(&self, regclass: RegClass) -> usize;
    /// Whether multi-slot spills are named by their last slot; defaults
    /// to `false` (named by the first slot).
    fn multi_spillslot_named_by_last_slot(&self) -> bool {
        false
    }
    /// Whether a vreg may be defined more than once; defaults to `false`.
    fn allow_multiple_vreg_defs(&self) -> bool {
        false
    }
}
/// A position relative to an instruction: just before or just after it.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u8)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum InstPosition {
    Before = 0,
    After = 1,
}

/// A program point: an instruction index plus a before/after position,
/// packed as `(inst << 1) | pos` so that points order correctly as plain
/// integers.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct ProgPoint {
    bits: u32,
}

impl core::fmt::Debug for ProgPoint {
    /// Renders as e.g. `progpoint3-pre` or `progpoint3-post`.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(
            f,
            "progpoint{}{}",
            self.inst().index(),
            match self.pos() {
                InstPosition::Before => "-pre",
                InstPosition::After => "-post",
            }
        )
    }
}
impl ProgPoint {
    /// Pack an instruction and position into a program point.
    #[inline(always)]
    pub fn new(inst: Inst, pos: InstPosition) -> Self {
        let bits = ((inst.0 as u32) << 1) | (pos as u8 as u32);
        Self { bits }
    }

    /// The point just before `inst`.
    #[inline(always)]
    pub fn before(inst: Inst) -> Self {
        Self::new(inst, InstPosition::Before)
    }

    /// The point just after `inst`.
    #[inline(always)]
    pub fn after(inst: Inst) -> Self {
        Self::new(inst, InstPosition::After)
    }

    /// The instruction this point refers to. The arithmetic (signed)
    /// shift sign-extends the top bit, so the `invalid()` pattern
    /// (built from `Inst::new(usize::MAX)`) round-trips to `usize::MAX`.
    #[inline(always)]
    pub fn inst(self) -> Inst {
        Inst::new(((self.bits as i32) >> 1) as usize)
    }

    /// The before/after position (low bit).
    #[inline(always)]
    pub fn pos(self) -> InstPosition {
        match self.bits & 1 {
            0 => InstPosition::Before,
            1 => InstPosition::After,
            _ => unreachable!(),
        }
    }

    /// The next point in program order: Before -> After -> next inst's
    /// Before (the position is the low bit, so +1 steps through both).
    #[inline(always)]
    pub fn next(self) -> ProgPoint {
        Self {
            bits: self.bits + 1,
        }
    }

    /// The previous point in program order.
    #[inline(always)]
    pub fn prev(self) -> ProgPoint {
        Self {
            bits: self.bits - 1,
        }
    }

    /// Raw packed representation, usable as a dense index.
    #[inline(always)]
    pub fn to_index(self) -> u32 {
        self.bits
    }

    /// Inverse of `to_index`.
    #[inline(always)]
    pub fn from_index(index: u32) -> Self {
        Self { bits: index }
    }

    /// A sentinel invalid point.
    #[inline(always)]
    pub fn invalid() -> Self {
        Self::before(Inst::new(usize::MAX))
    }
}
/// An edit the allocator inserts between instructions: a move between two
/// allocations (register or stack).
#[derive(Clone, Debug)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum Edit {
    Move { from: Allocation, to: Allocation },
}

/// One item of final program order: an original instruction or an
/// allocator-inserted edit.
#[derive(Clone, Debug)]
pub enum InstOrEdit<'a> {
    Inst(Inst),
    Edit(&'a Edit),
}

/// Iterator merging a block's instructions with its edits in program
/// order; produced by `Output::block_insts_and_edits`.
pub struct OutputIter<'a> {
    edits: &'a [(ProgPoint, Edit)],
    inst_range: InstRange,
}
impl<'a> Iterator for OutputIter<'a> {
    type Item = InstOrEdit<'a>;
    fn next(&mut self) -> Option<InstOrEdit<'a>> {
        // NOTE(review): iteration stops when the instruction range is
        // exhausted, so an edit positioned after the block's final
        // instruction would be dropped; presumably the allocator never
        // schedules edits there -- confirm against the edit producers.
        if self.inst_range.len() == 0 {
            return None;
        }
        // Emit every edit scheduled at or before the next instruction's
        // "before" point, then the instruction itself.
        let next_inst = self.inst_range.first();
        if let Some((edit, remaining_edits)) = self.edits.split_first() {
            if edit.0 <= ProgPoint::before(next_inst) {
                self.edits = remaining_edits;
                return Some(InstOrEdit::Edit(&edit.1));
            }
        }
        self.inst_range = self.inst_range.rest();
        Some(InstOrEdit::Inst(next_inst))
    }
}
/// Description of the target machine's register file: which registers are
/// allocatable, per class, and which live at fixed stack locations.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct MachineEnv {
    /// Registers to prefer when allocating, indexed by `RegClass`.
    pub preferred_regs_by_class: [PRegSet; 3],
    /// Registers to use only when the preferred set is exhausted.
    pub non_preferred_regs_by_class: [PRegSet; 3],
    /// Optional dedicated scratch register per class.
    pub scratch_by_class: [Option<PReg>; 3],
    /// "Registers" that actually name fixed stack slots.
    pub fixed_stack_slots: Vec<PReg>,
}

/// The result of register allocation.
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct Output {
    /// Number of spill slots the frame must provide.
    pub num_spillslots: usize,
    /// Inserted edits, sorted by program point.
    pub edits: Vec<(ProgPoint, Edit)>,
    /// All operand allocations, densely packed across instructions.
    pub allocs: Vec<Allocation>,
    /// Start offset into `allocs` for each instruction.
    pub inst_alloc_offsets: Vec<u32>,
    /// Debug label locations: (label, start, end, allocation).
    pub debug_locations: Vec<(u32, ProgPoint, ProgPoint, Allocation)>,
    /// Allocator statistics.
    pub stats: ion::Stats,
}
impl Output {
    /// The allocations for `inst`'s operands, in operand order. The slice
    /// runs from this instruction's offset to the next one's (or to the
    /// end of `allocs` for the last instruction).
    pub fn inst_allocs(&self, inst: Inst) -> &[Allocation] {
        let start = self.inst_alloc_offsets[inst.index()] as usize;
        let end = if inst.index() + 1 == self.inst_alloc_offsets.len() {
            self.allocs.len()
        } else {
            self.inst_alloc_offsets[inst.index() + 1] as usize
        };
        &self.allocs[start..end]
    }

    /// Iterate `block`'s instructions interleaved with its edits, in
    /// final program order.
    pub fn block_insts_and_edits(&self, func: &impl Function, block: Block) -> OutputIter<'_> {
        let inst_range = func.block_insns(block);
        // The comparator never returns Equal, so `binary_search_by`
        // always yields Err(i) where `i` is the first edit at or after
        // the block's starting program point.
        let edit_idx = self
            .edits
            .binary_search_by(|&(pos, _)| {
                if pos < ProgPoint::before(inst_range.first()) {
                    core::cmp::Ordering::Less
                } else {
                    core::cmp::Ordering::Greater
                }
            })
            .unwrap_err();
        let edits = &self.edits[edit_idx..];
        OutputIter { inst_range, edits }
    }
}
/// Errors reported by the allocator, mostly from input validation.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum RegAllocError {
    /// A critical edge (from, to) was not split by the client.
    CritEdge(Block, Block),
    /// An SSA violation involving this vreg at this instruction.
    SSA(VReg, Inst),
    /// A malformed basic block.
    BB(Block),
    /// A malformed branch instruction.
    Branch(Inst),
    /// The entry block has live-in values.
    EntryLivein,
    /// A branch argument was used where not allowed.
    DisallowedBranchArg(Inst),
    /// Too many values were live at once to allocate.
    TooManyLiveRegs,
    /// An instruction had more operands than supported.
    TooManyOperands,
}

impl core::fmt::Display for RegAllocError {
    /// Display output is the same as Debug.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl core::error::Error for RegAllocError {}
/// Run register allocation on `func` for the machine described by `env`,
/// using the algorithm selected in `options`, with fresh internal state.
///
/// Returns the allocation `Output` or a validation/allocation error.
pub fn run<F: Function>(
    func: &F,
    env: &MachineEnv,
    options: &RegallocOptions,
) -> Result<Output, RegAllocError> {
    match options.algorithm {
        Algorithm::Ion => {
            // Allocate a one-shot context and hand its output back by value.
            let mut ctx = Ctx::default();
            run_with_ctx(func, env, options, &mut ctx)?;
            Ok(ctx.output)
        }
        Algorithm::Fastalloc => {
            fastalloc::run(func, env, options.verbose_log, options.validate_ssa)
        }
    }
}
/// Run register allocation reusing the caller-provided context `ctx`, so
/// repeated runs can recycle internal allocations. The result is stored
/// in (and borrowed from) `ctx.output`.
pub fn run_with_ctx<'a, F: Function>(
    func: &F,
    env: &MachineEnv,
    options: &RegallocOptions,
    ctx: &'a mut Ctx,
) -> Result<&'a Output, RegAllocError> {
    match options.algorithm {
        Algorithm::Ion => ion::run(func, env, ctx, options.verbose_log, options.validate_ssa)?,
        // Fastalloc does not use the context internally; its result is
        // copied into `ctx.output` so both paths return the same way.
        Algorithm::Fastalloc => {
            ctx.output = fastalloc::run(func, env, options.verbose_log, options.validate_ssa)?
        }
    }
    Ok(&ctx.output)
}
/// Which allocation algorithm to run; `Ion` (backtracking) is the default.
#[derive(Clone, Copy, Debug, Default)]
pub enum Algorithm {
    #[default]
    Ion,
    Fastalloc,
}

/// Tunable options for a register-allocation run.
#[derive(Clone, Copy, Debug, Default)]
pub struct RegallocOptions {
    /// Emit verbose logging during allocation.
    pub verbose_log: bool,
    /// Validate that the input is in valid SSA form before allocating.
    pub validate_ssa: bool,
    /// The algorithm to use.
    pub algorithm: Algorithm,
}
/// Helpers for recycling a `Vec`'s allocation across allocator runs.
pub(crate) trait VecExt<T> {
    /// Clear and refill with `len` clones of `value`; returns the slice.
    fn repopulate(&mut self, len: usize, value: T) -> &mut [T]
    where
        T: Clone;
    /// Clear (keeping capacity) and return `self` for chaining.
    fn cleared(&mut self) -> &mut Self;
    /// Clear and ensure capacity for at least `cap` elements.
    fn preallocate(&mut self, cap: usize) -> &mut Self;
}

impl<T> VecExt<T> for Vec<T> {
    fn repopulate(&mut self, len: usize, value: T) -> &mut [T]
    where
        T: Clone,
    {
        self.truncate(0);
        self.resize(len, value);
        &mut self[..]
    }

    fn cleared(&mut self) -> &mut Self {
        self.truncate(0);
        self
    }

    fn preallocate(&mut self, cap: usize) -> &mut Self {
        self.truncate(0);
        self.reserve(cap);
        self
    }
}
/// A reference-counted handle to a bump arena; clones share one arena.
#[derive(Debug, Clone, Default)]
pub(crate) struct Bump(Rc<bumpalo::Bump>);

impl Bump {
    /// Mutable access to the arena, available only while this is the
    /// sole handle (standard `Rc::get_mut` semantics).
    pub(crate) fn get_mut(&mut self) -> Option<&mut bumpalo::Bump> {
        Rc::get_mut(&mut self.0)
    }
}
// SAFETY: every method delegates directly to the shared `bumpalo::Bump`
// arena, which implements `allocator_api2::alloc::Allocator`; this wrapper
// adds no behavior of its own.
unsafe impl allocator_api2::alloc::Allocator for Bump {
    fn allocate(
        &self,
        layout: core::alloc::Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
        self.0.deref().allocate(layout)
    }
    unsafe fn deallocate(&self, ptr: core::ptr::NonNull<u8>, layout: core::alloc::Layout) {
        self.0.deref().deallocate(ptr, layout);
    }
    fn allocate_zeroed(
        &self,
        layout: core::alloc::Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
        self.0.deref().allocate_zeroed(layout)
    }
    unsafe fn grow(
        &self,
        ptr: core::ptr::NonNull<u8>,
        old_layout: core::alloc::Layout,
        new_layout: core::alloc::Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
        self.0.deref().grow(ptr, old_layout, new_layout)
    }
    unsafe fn grow_zeroed(
        &self,
        ptr: core::ptr::NonNull<u8>,
        old_layout: core::alloc::Layout,
        new_layout: core::alloc::Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
        self.0.deref().grow_zeroed(ptr, old_layout, new_layout)
    }
    unsafe fn shrink(
        &self,
        ptr: core::ptr::NonNull<u8>,
        old_layout: core::alloc::Layout,
        new_layout: core::alloc::Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
        self.0.deref().shrink(ptr, old_layout, new_layout)
    }
}
#[cfg(test)]
mod tests {
    use super::{PReg, PRegSet, RegClass::*};

    /// `len` counts distinct members; inserting a duplicate is a no-op.
    #[test]
    fn preg_set_len() {
        let mut set = PRegSet::empty();
        assert_eq!(set.len(), 0);
        set.add(PReg::new(3, Int));
        assert_eq!(set.len(), 1);
        set.add(PReg::new(3, Int));
        assert_eq!(set.len(), 1);
        set.add(PReg::new(4, Int));
        assert_eq!(set.len(), 2);
    }

    /// `max_preg` tracks the member with the largest packed index.
    #[test]
    fn preg_set_max_preg() {
        let mut set = PRegSet::empty();
        assert_eq!(set.max_preg(), None);
        set.add(PReg::new(3, Int));
        assert_eq!(set.max_preg(), Some(PReg::new(3, Int)));
        set.add(PReg::new(4, Int));
        assert_eq!(set.max_preg(), Some(PReg::new(4, Int)));
        set.add(PReg::new(2, Int));
        assert_eq!(set.max_preg(), Some(PReg::new(4, Int)));
    }

    /// `add_up_to(reg)` inserts everything below `reg`, exclusive of
    /// `reg` itself, for every register class.
    #[test]
    fn preg_set_new_up_to() {
        for class in [Int, Float, Vector] {
            let p0 = PReg::new(0, class);
            let p1 = PReg::new(1, class);
            let p2 = PReg::new(2, class);
            let p3 = PReg::new(3, class);
            {
                let mut set = PRegSet::empty();
                set.add_up_to(p1);
                assert!(set.contains(p0));
                assert!(!set.contains(p1));
            }
            {
                // Upper bound 0 yields an empty set for this class.
                let mut set = PRegSet::empty();
                set.add_up_to(p0);
                assert!(!set.contains(p0));
            }
            {
                let mut set = PRegSet::empty();
                set.add_up_to(p3);
                assert!(set.contains(p0));
                assert!(set.contains(p1));
                assert!(set.contains(p2));
                assert!(!set.contains(p3));
            }
            // Exhaustively check the exclusive-bound behavior over the
            // whole hardware-encoding range.
            for i in 1..64 {
                let mut set = PRegSet::empty();
                set.add_up_to(PReg::new(i, class));
                assert!(set.contains(p0));
                assert!(set.contains(PReg::new(i - 1, class)));
                assert!(!set.contains(PReg::new(i, class)));
            }
        }
    }
}