use crate::ir::types::*;
use crate::isa::aarch64::inst::*;
/// A shift operator applied to a register operand (e.g. in a
/// shifted-register ALU form). The discriminant of each variant is its
/// 2-bit machine encoding.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum ShiftOp {
    /// Logical shift left.
    LSL = 0b00,
    /// Logical shift right.
    LSR = 0b01,
    /// Arithmetic shift right.
    ASR = 0b10,
    /// Rotate right.
    ROR = 0b11,
}
impl ShiftOp {
    /// Get the machine encoding of this shift operator: the 2-bit
    /// `repr(u8)` discriminant.
    pub fn bits(self) -> u8 {
        self as u8
    }
}
/// A shift amount for a [`ShiftOp`], restricted at construction time to
/// at most [`Self::MAX_SHIFT`] (63, the largest valid amount for a
/// 64-bit shift).
#[derive(Clone, Copy, Debug)]
pub struct ShiftOpShiftImm(u8);

impl ShiftOpShiftImm {
    /// Largest representable shift amount.
    pub const MAX_SHIFT: u64 = 63;

    /// Build a shift amount, returning `None` if `shift` exceeds
    /// [`Self::MAX_SHIFT`].
    pub fn maybe_from_shift(shift: u64) -> Option<ShiftOpShiftImm> {
        (shift <= Self::MAX_SHIFT).then_some(ShiftOpShiftImm(shift as u8))
    }

    /// Return the raw shift amount.
    pub fn value(self) -> u8 {
        self.0
    }

    /// Reduce the amount modulo `bits` (assumes `bits` is a power of
    /// two, so `bits - 1` forms a valid bitmask).
    pub fn mask(self, bits: u8) -> ShiftOpShiftImm {
        let masked = self.0 & (bits - 1);
        ShiftOpShiftImm(masked)
    }
}
/// A shift operator together with its shift amount.
#[derive(Copy, Clone, Debug)]
pub struct ShiftOpAndAmt {
    /// The shift operator.
    op: ShiftOp,
    /// The shift amount (already validated to be in range).
    shift: ShiftOpShiftImm,
}
impl ShiftOpAndAmt {
    /// Create a new operator/amount pair.
    pub fn new(op: ShiftOp, shift: ShiftOpShiftImm) -> ShiftOpAndAmt {
        ShiftOpAndAmt { op, shift }
    }

    /// Get the shift operator.
    pub fn op(&self) -> ShiftOp {
        self.op
    }

    /// Get the shift amount.
    pub fn amt(&self) -> ShiftOpShiftImm {
        self.shift
    }
}
/// An extend operator for a register operand. The discriminant is the
/// 3-bit machine encoding: the high bit selects signed (`SXT*`) vs.
/// unsigned (`UXT*`), the low two bits the source width.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum ExtendOp {
    /// Unsigned extend from byte (8 bits).
    UXTB = 0b000,
    /// Unsigned extend from halfword (16 bits).
    UXTH = 0b001,
    /// Unsigned extend from word (32 bits).
    UXTW = 0b010,
    /// Unsigned extend from doubleword (64 bits).
    UXTX = 0b011,
    /// Signed extend from byte (8 bits).
    SXTB = 0b100,
    /// Signed extend from halfword (16 bits).
    SXTH = 0b101,
    /// Signed extend from word (32 bits).
    SXTW = 0b110,
    /// Signed extend from doubleword (64 bits).
    SXTX = 0b111,
}
impl ExtendOp {
    /// Get the machine encoding of this extend operator: the 3-bit
    /// `repr(u8)` discriminant.
    pub fn bits(self) -> u8 {
        self as u8
    }
}
/// A label-based memory reference (for a load/store or address
/// computation), resolved either ahead of time or during emission.
#[derive(Clone, Debug)]
pub enum MemLabel {
    /// A PC-relative distance, in bytes (resolved by the time this is
    /// constructed; see emission code for the exact reference point).
    PCRel(i32),
    /// A machine label, to be resolved during branch/label fixup.
    Mach(MachLabel),
}
impl AMode {
    /// Memory reference using `reg` as a base address, with no offset.
    pub fn reg(reg: Reg) -> AMode {
        let uimm12 = UImm12Scaled::zero(I64);
        AMode::UnsignedOffset { rn: reg, uimm12 }
    }

    /// Memory reference using `reg1` as a base, indexed by `reg2`
    /// extended with `op` and scaled by the access size (see the
    /// `PrettyPrint` impl for the printed form).
    pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, op: ExtendOp) -> AMode {
        AMode::RegScaledExtended {
            extendop: op,
            rn: reg1,
            rm: reg2,
        }
    }
}
pub use crate::isa::aarch64::lower::isle::generated_code::PairAMode;
/// Condition for a conditional branch / conditional operation. The
/// discriminant of each variant is its 4-bit machine encoding; note
/// that each condition and its inverse differ only in the low bit.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum Cond {
    Eq = 0,
    Ne = 1,
    Hs = 2,
    Lo = 3,
    Mi = 4,
    Pl = 5,
    Vs = 6,
    Vc = 7,
    Hi = 8,
    Ls = 9,
    Ge = 10,
    Lt = 11,
    Gt = 12,
    Le = 13,
    Al = 14,
    Nv = 15,
}

impl Cond {
    /// Return the inverted condition — the one whose encoding has the
    /// low bit flipped (Eq <-> Ne, Hs <-> Lo, ..., Al <-> Nv).
    pub fn invert(self) -> Cond {
        use Cond::*;
        match self {
            Eq => Ne,
            Ne => Eq,
            Hs => Lo,
            Lo => Hs,
            Mi => Pl,
            Pl => Mi,
            Vs => Vc,
            Vc => Vs,
            Hi => Ls,
            Ls => Hi,
            Ge => Lt,
            Lt => Ge,
            Gt => Le,
            Le => Gt,
            Al => Nv,
            Nv => Al,
        }
    }

    /// Return the 4-bit machine encoding of this condition.
    pub fn bits(self) -> u32 {
        self as u32
    }
}
/// The kind of conditional branch: test of a register against zero
/// (presumably lowered to cbz/cbnz — confirm at the emission site), or
/// a test of the condition codes.
#[derive(Clone, Copy, Debug)]
pub enum CondBrKind {
    /// Branch if the register (viewed at the given size) is zero.
    Zero(Reg, OperandSize),
    /// Branch if the register (viewed at the given size) is nonzero.
    NotZero(Reg, OperandSize),
    /// Branch if the given condition-code test holds.
    Cond(Cond),
}
impl CondBrKind {
    /// Return the inverted branch condition: zero-tests become
    /// nonzero-tests (and vice versa), and a condition-code test
    /// inverts its condition.
    pub fn invert(self) -> CondBrKind {
        match self {
            CondBrKind::Zero(reg, size) => CondBrKind::NotZero(reg, size),
            CondBrKind::NotZero(reg, size) => CondBrKind::Zero(reg, size),
            CondBrKind::Cond(c) => CondBrKind::Cond(c.invert()),
        }
    }
}
/// A branch target: either an as-yet-unresolved machine label or an
/// already-resolved offset.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BranchTarget {
    /// An unresolved reference to a label.
    Label(MachLabel),
    /// A resolved offset, in bytes (converted to 4-byte units when
    /// packed into an immediate field; see `as_offset_bounded`).
    ResolvedOffset(i32),
}
impl BranchTarget {
    /// Return the target's label, if it is a label-based target.
    pub fn as_label(self) -> Option<MachLabel> {
        match self {
            BranchTarget::Label(l) => Some(l),
            _ => None,
        }
    }

    /// Return the resolved offset (or zero if label-based), packed for
    /// a 14-bit branch-offset immediate field.
    pub fn as_offset14_or_zero(self) -> u32 {
        self.as_offset_bounded(14)
    }

    /// Return the resolved offset (or zero if label-based), packed for
    /// a 19-bit branch-offset immediate field.
    pub fn as_offset19_or_zero(self) -> u32 {
        self.as_offset_bounded(19)
    }

    /// Return the resolved offset (or zero if label-based), packed for
    /// a 26-bit branch-offset immediate field.
    pub fn as_offset26_or_zero(self) -> u32 {
        self.as_offset_bounded(26)
    }

    /// Convert the resolved byte offset to 4-byte units, assert that it
    /// fits in a signed `bits`-wide field, and mask it to that width.
    /// Label-based targets yield zero.
    fn as_offset_bounded(self, bits: u32) -> u32 {
        // Branch offsets are encoded in units of 4-byte instructions.
        let off = match self {
            BranchTarget::ResolvedOffset(off) => off >> 2,
            _ => 0,
        };
        // Inclusive bounds of a signed two's-complement `bits`-wide field.
        // Parenthesize `bits - 1` explicitly: the original relied on `-`
        // binding tighter than `<<`, which clippy flags (precedence).
        let hi = (1 << (bits - 1)) - 1;
        let lo = -(1 << (bits - 1));
        assert!(off <= hi);
        assert!(off >= lo);
        (off as u32) & ((1 << bits) - 1)
    }
}
impl PrettyPrint for ShiftOpAndAmt {
    fn pretty_print(&self, _: u8) -> String {
        // e.g. "LSL 3": the operator's Debug name plus the raw amount.
        format!("{:?} {}", self.op(), self.amt().value())
    }
}
impl PrettyPrint for ExtendOp {
    fn pretty_print(&self, _: u8) -> String {
        // The Debug name (e.g. "UXTB") is used directly as the mnemonic.
        format!("{self:?}")
    }
}
impl PrettyPrint for MemLabel {
    fn pretty_print(&self, _: u8) -> String {
        match self {
            // Resolved PC-relative offset, e.g. "pc+8".
            MemLabel::PCRel(off) => format!("pc+{off}"),
            // Unresolved label, shown by its numeric label index.
            MemLabel::Mach(off) => format!("label({})", off.as_u32()),
        }
    }
}
/// Map an access size in bytes (1/2/4/8/16) to the left-shift amount
/// (log2 of the size) used by scaled addressing modes. Panics on any
/// other size.
fn shift_for_type(size_bytes: u8) -> usize {
    if matches!(size_bytes, 1 | 2 | 4 | 8 | 16) {
        // All valid sizes are powers of two, so the shift is just log2.
        size_bytes.trailing_zeros() as usize
    } else {
        panic!("unknown type size: {size_bytes}")
    }
}
impl PrettyPrint for AMode {
    /// Print this addressing mode in assembly-like syntax. `size_bytes`
    /// is the access size, used to compute the scale shift for the
    /// scaled register forms.
    fn pretty_print(&self, size_bytes: u8) -> String {
        debug_assert!(size_bytes != 0);
        match self {
            // Base register plus unscaled signed 9-bit offset; the
            // offset is elided when zero.
            &AMode::Unscaled { rn, simm9 } => {
                let reg = pretty_print_reg(rn);
                if simm9.value != 0 {
                    let simm9 = simm9.pretty_print(8);
                    format!("[{reg}, {simm9}]")
                } else {
                    format!("[{reg}]")
                }
            }
            // Base register plus scaled unsigned 12-bit offset; the
            // offset is elided when zero.
            &AMode::UnsignedOffset { rn, uimm12 } => {
                let reg = pretty_print_reg(rn);
                if uimm12.value() != 0 {
                    let uimm12 = uimm12.pretty_print(8);
                    format!("[{reg}, {uimm12}]")
                } else {
                    format!("[{reg}]")
                }
            }
            // Base register plus index register, unscaled.
            &AMode::RegReg { rn, rm } => {
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_reg(rm);
                format!("[{r1}, {r2}]")
            }
            // Base register plus index register shifted left by the
            // access size's log2.
            &AMode::RegScaled { rn, rm } => {
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_reg(rm);
                let shift = shift_for_type(size_bytes);
                format!("[{r1}, {r2}, LSL #{shift}]")
            }
            // Base register plus extended-then-scaled index register.
            // A 32-bit extend source means the index prints as a 32-bit
            // register.
            &AMode::RegScaledExtended { rn, rm, extendop } => {
                let shift = shift_for_type(size_bytes);
                let size = match extendop {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_ireg(rm, size);
                let op = extendop.pretty_print(0);
                format!("[{r1}, {r2}, {op} #{shift}]")
            }
            // Base register plus extended index register (no scaling).
            &AMode::RegExtended { rn, rm, extendop } => {
                let size = match extendop {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_ireg(rm, size);
                let op = extendop.pretty_print(0);
                format!("[{r1}, {r2}, {op}]")
            }
            // Label reference: delegate to the label's printer.
            &AMode::Label { ref label } => label.pretty_print(0),
            // Pre-indexed SP update: "[sp, #off]!".
            &AMode::SPPreIndexed { simm9 } => {
                let simm9 = simm9.pretty_print(8);
                format!("[{simm9}]")
                    .replace(simm9.as_str(), simm9.as_str()) // no-op guard removed below
            }
            // (see below)
            &AMode::SPPostIndexed { simm9 } => {
                let simm9 = simm9.pretty_print(8);
                format!("[sp], {simm9}")
            }
            // Constant-pool reference, shown by constant index.
            AMode::Const { addr } => format!("[const({})]", addr.as_u32()),
            // The remaining variants are pseudo-modes that must be
            // legalized away before emission/printing.
            &AMode::SPOffset { .. }
            | &AMode::FPOffset { .. }
            | &AMode::IncomingArg { .. }
            | &AMode::SlotOffset { .. }
            | &AMode::RegOffset { .. } => {
                panic!("Unexpected pseudo mem-arg mode: {self:?}")
            }
        }
    }
}
impl PrettyPrint for PairAMode {
    /// Print this load/store-pair addressing mode in assembly-like
    /// syntax.
    fn pretty_print(&self, _: u8) -> String {
        match self {
            // Base register plus signed 7-bit scaled offset; the offset
            // is elided when zero.
            &PairAMode::SignedOffset { reg, simm7 } => {
                let reg = pretty_print_reg(reg);
                if simm7.value != 0 {
                    let simm7 = simm7.pretty_print(8);
                    format!("[{reg}, {simm7}]")
                } else {
                    format!("[{reg}]")
                }
            }
            // Pre-indexed SP update: "[sp, #off]!".
            &PairAMode::SPPreIndexed { simm7 } => {
                let simm7 = simm7.pretty_print(8);
                format!("[sp, {simm7}]!")
            }
            // Post-indexed SP update: "[sp], #off".
            &PairAMode::SPPostIndexed { simm7 } => {
                let simm7 = simm7.pretty_print(8);
                format!("[sp], {simm7}")
            }
        }
    }
}
impl PrettyPrint for Cond {
    fn pretty_print(&self, _: u8) -> String {
        // Condition mnemonics are the lowercased Debug names, e.g.
        // "eq", "ne", "hs".
        format!("{self:?}").to_ascii_lowercase()
    }
}
impl PrettyPrint for BranchTarget {
    fn pretty_print(&self, _: u8) -> String {
        match self {
            // Unresolved label, e.g. "label12".
            &BranchTarget::Label(label) => format!("label{:?}", label.as_u32()),
            // Resolved target: printed as its raw byte offset.
            &BranchTarget::ResolvedOffset(off) => format!("{off}"),
        }
    }
}
/// The size of an operand for an integer (general-purpose-register)
/// operation: the 32-bit or 64-bit view of a register.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandSize {
    /// 32-bit operand.
    Size32,
    /// 64-bit operand.
    Size64,
}
impl OperandSize {
    /// True if this is the 32-bit operand size.
    pub fn is32(self) -> bool {
        matches!(self, OperandSize::Size32)
    }

    /// True if this is the 64-bit operand size.
    pub fn is64(self) -> bool {
        matches!(self, OperandSize::Size64)
    }

    /// Convert a bit-width (at most 64) into an operand size; widths of
    /// 32 bits or fewer map to `Size32`.
    pub fn from_bits<I: Into<usize>>(bits: I) -> OperandSize {
        let bits: usize = bits.into();
        assert!(bits <= 64);
        match bits {
            0..=32 => OperandSize::Size32,
            _ => OperandSize::Size64,
        }
    }

    /// The number of bits in this operand size.
    pub fn bits(&self) -> u8 {
        if self.is32() { 32 } else { 64 }
    }

    /// Convert a non-vector type into an operand size covering its
    /// width.
    pub fn from_ty(ty: Type) -> OperandSize {
        debug_assert!(!ty.is_vector());
        Self::from_bits(ty_bits(ty))
    }

    /// The integer type of exactly this width.
    pub fn to_ty(self) -> Type {
        if self.is32() { I32 } else { I64 }
    }

    /// The value of the `sf` instruction field for this size:
    /// 0 for 32-bit, 1 for 64-bit.
    pub fn sf_bit(&self) -> u32 {
        if self.is32() { 0 } else { 1 }
    }

    /// The largest unsigned value representable at this size.
    pub fn max_value(&self) -> u64 {
        match self {
            OperandSize::Size32 => u64::from(u32::MAX),
            OperandSize::Size64 => u64::MAX,
        }
    }
}
/// The size of a scalar (including scalar FP / SIMD-lane) value.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ScalarSize {
    /// 8-bit scalar.
    Size8,
    /// 16-bit scalar.
    Size16,
    /// 32-bit scalar.
    Size32,
    /// 64-bit scalar.
    Size64,
    /// 128-bit scalar.
    Size128,
}
impl ScalarSize {
    /// Convert to the general-register operand size covering a scalar
    /// of this width; panics for `Size128`, which has no single-GPR
    /// equivalent.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            ScalarSize::Size8 | ScalarSize::Size16 | ScalarSize::Size32 => OperandSize::Size32,
            ScalarSize::Size64 => OperandSize::Size64,
            _ => panic!("Unexpected operand_size request for: {self:?}"),
        }
    }

    /// Return the 2-bit "ftype" field encoding used by scalar
    /// floating-point instructions for this size (half/single/double
    /// precision); panics for sizes with no FP form.
    pub fn ftype(&self) -> u32 {
        match self {
            ScalarSize::Size16 => 0b11,
            ScalarSize::Size32 => 0b00,
            ScalarSize::Size64 => 0b01,
            _ => panic!("Unexpected scalar FP operand size: {self:?}"),
        }
    }

    /// Return the next-larger size; panics for `Size128`.
    pub fn widen(&self) -> ScalarSize {
        match self {
            ScalarSize::Size8 => ScalarSize::Size16,
            ScalarSize::Size16 => ScalarSize::Size32,
            ScalarSize::Size32 => ScalarSize::Size64,
            ScalarSize::Size64 => ScalarSize::Size128,
            ScalarSize::Size128 => panic!("can't widen 128-bits"),
        }
    }

    /// Return the next-smaller size; panics for `Size8`.
    pub fn narrow(&self) -> ScalarSize {
        match self {
            ScalarSize::Size8 => panic!("can't narrow 8-bits"),
            ScalarSize::Size16 => ScalarSize::Size8,
            ScalarSize::Size32 => ScalarSize::Size16,
            ScalarSize::Size64 => ScalarSize::Size32,
            ScalarSize::Size128 => ScalarSize::Size64,
        }
    }

    /// Return the integer IR type of exactly this width.
    pub fn ty(&self) -> Type {
        match self {
            ScalarSize::Size8 => I8,
            ScalarSize::Size16 => I16,
            ScalarSize::Size32 => I32,
            ScalarSize::Size64 => I64,
            ScalarSize::Size128 => I128,
        }
    }
}
/// The shape of a vector operand: lane count x lane width. Variants
/// with 8 total bytes are 64-bit vectors; those with 16 total bytes are
/// 128-bit vectors.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VectorSize {
    /// 8 lanes of 8 bits (64-bit vector).
    Size8x8,
    /// 16 lanes of 8 bits (128-bit vector).
    Size8x16,
    /// 4 lanes of 16 bits (64-bit vector).
    Size16x4,
    /// 8 lanes of 16 bits (128-bit vector).
    Size16x8,
    /// 2 lanes of 32 bits (64-bit vector).
    Size32x2,
    /// 4 lanes of 32 bits (128-bit vector).
    Size32x4,
    /// 2 lanes of 64 bits (128-bit vector).
    Size64x2,
}
impl VectorSize {
    /// Return the 8-bit-lane vector shape with the same total width as
    /// `self` (64-bit shapes map to `Size8x8`, 128-bit to `Size8x16`).
    pub fn as_scalar8_vector(&self) -> VectorSize {
        match self {
            VectorSize::Size8x8 | VectorSize::Size16x4 | VectorSize::Size32x2 => {
                VectorSize::Size8x8
            }
            VectorSize::Size8x16
            | VectorSize::Size16x8
            | VectorSize::Size32x4
            | VectorSize::Size64x2 => VectorSize::Size8x16,
        }
    }

    /// Combine a lane size with a total-width flag to get a vector
    /// shape; panics for combinations with no corresponding variant
    /// (e.g. 64-bit lanes in a 64-bit vector, or 128-bit lanes).
    pub fn from_lane_size(size: ScalarSize, is_128bit: bool) -> VectorSize {
        match (size, is_128bit) {
            (ScalarSize::Size8, false) => VectorSize::Size8x8,
            (ScalarSize::Size8, true) => VectorSize::Size8x16,
            (ScalarSize::Size16, false) => VectorSize::Size16x4,
            (ScalarSize::Size16, true) => VectorSize::Size16x8,
            (ScalarSize::Size32, false) => VectorSize::Size32x2,
            (ScalarSize::Size32, true) => VectorSize::Size32x4,
            (ScalarSize::Size64, true) => VectorSize::Size64x2,
            _ => panic!("Unexpected scalar FP operand size: {size:?}"),
        }
    }

    /// The general-register operand size matching this shape's lane
    /// width: 64-bit only for 64-bit lanes, otherwise 32-bit.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            VectorSize::Size64x2 => OperandSize::Size64,
            _ => OperandSize::Size32,
        }
    }

    /// The scalar size of a single lane.
    pub fn lane_size(&self) -> ScalarSize {
        match self {
            VectorSize::Size8x8 | VectorSize::Size8x16 => ScalarSize::Size8,
            VectorSize::Size16x4 | VectorSize::Size16x8 => ScalarSize::Size16,
            VectorSize::Size32x2 | VectorSize::Size32x4 => ScalarSize::Size32,
            VectorSize::Size64x2 => ScalarSize::Size64,
        }
    }

    /// True if this shape is a full 128-bit vector.
    pub fn is_128bits(&self) -> bool {
        match self {
            VectorSize::Size8x8 => false,
            VectorSize::Size8x16 => true,
            VectorSize::Size16x4 => false,
            VectorSize::Size16x8 => true,
            VectorSize::Size32x2 => false,
            VectorSize::Size32x4 => true,
            VectorSize::Size64x2 => true,
        }
    }

    /// Return the `(Q, size)` instruction-field pair for this shape:
    /// the Q (full-width) bit and the 2-bit element-size encoding.
    pub fn enc_size(&self) -> (u32, u32) {
        let q = self.is_128bits() as u32;
        let size = match self.lane_size() {
            ScalarSize::Size8 => 0b00,
            ScalarSize::Size16 => 0b01,
            ScalarSize::Size32 => 0b10,
            ScalarSize::Size64 => 0b11,
            // lane_size() above never returns Size128 for these shapes.
            _ => unreachable!(),
        };
        (q, size)
    }

    /// Return the 1-bit size-field encoding for floating-point vector
    /// ops (0 for 32-bit lanes, 1 for 64-bit); panics for lane widths
    /// with no FP vector form.
    pub fn enc_float_size(&self) -> u32 {
        match self.lane_size() {
            ScalarSize::Size32 => 0b0,
            ScalarSize::Size64 => 0b1,
            size => panic!("Unsupported floating-point size for vector op: {size:?}"),
        }
    }
}
impl APIKey {
    /// Return the 32-bit encoding of the `auti*` pointer-authentication
    /// instruction for this key, built from the base pattern
    /// `0xd503201f` with the CRm field (bits 11:8) and op2 field
    /// (bits 7:5) selecting the variant.
    /// NOTE(review): the base pattern appears to be the hint space, so
    /// these presumably execute as no-ops on cores without pointer
    /// authentication — confirm against the Arm ARM.
    pub fn enc_auti_hint(&self) -> u32 {
        let (crm, op2) = match self {
            APIKey::AZ => (0b0011, 0b100),
            APIKey::ASP => (0b0011, 0b101),
            APIKey::BZ => (0b0011, 0b110),
            APIKey::BSP => (0b0011, 0b111),
        };
        0xd503201f | (crm << 8) | (op2 << 5)
    }
}
pub use crate::isa::aarch64::lower::isle::generated_code::TestBitAndBranchKind;
impl TestBitAndBranchKind {
    /// Return the complement of this test: zero tests become nonzero
    /// tests and vice versa.
    pub fn complement(&self) -> TestBitAndBranchKind {
        match self {
            TestBitAndBranchKind::Z => TestBitAndBranchKind::NZ,
            TestBitAndBranchKind::NZ => TestBitAndBranchKind::Z,
        }
    }
}