cranelift_codegen/isa/aarch64/inst/
mod.rs

1//! This module defines aarch64-specific machine instruction types.
2
3use crate::binemit::{Addend, CodeOffset, Reloc};
4use crate::ir::types::{F16, F32, F64, F128, I8, I8X16, I16, I32, I64, I128};
5use crate::ir::{MemFlags, Type, types};
6use crate::isa::{CallConv, FunctionAlignment};
7use crate::machinst::*;
8use crate::{CodegenError, CodegenResult, settings};
9
10use crate::machinst::{PrettyPrint, Reg, RegClass, Writable};
11
12use alloc::vec::Vec;
13use core::slice;
14use smallvec::{SmallVec, smallvec};
15use std::fmt::Write;
16use std::string::{String, ToString};
17
18pub(crate) mod regs;
19pub(crate) use self::regs::*;
20pub mod imms;
21pub use self::imms::*;
22pub mod args;
23pub use self::args::*;
24pub mod emit;
25pub(crate) use self::emit::*;
26use crate::isa::aarch64::abi::AArch64MachineDeps;
27
28pub(crate) mod unwind;
29
30#[cfg(test)]
31mod emit_tests;
32
33//=============================================================================
34// Instructions (top level): definition
35
36pub use crate::isa::aarch64::lower::isle::generated_code::{
37    ALUOp, ALUOp3, AMode, APIKey, AtomicRMWLoopOp, AtomicRMWOp, BitOp, BranchTargetType, FPUOp1,
38    FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, MoveWideOp, VecALUModOp,
39    VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp, VecRRNarrowOp,
40    VecRRPairLongOp, VecRRRLongModOp, VecRRRLongOp, VecShiftImmModOp, VecShiftImmOp,
41};
42
/// A floating-point unit (FPU) operation with two args, a register and an immediate.
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRI {
    /// Unsigned right shift, 32-bit lane. Rd = Rn >> #imm
    UShr32(FPURightShiftImm),
    /// Unsigned right shift, 64-bit lane. Rd = Rn >> #imm
    UShr64(FPURightShiftImm),
}
51
/// A floating-point unit (FPU) operation with two args, a register and
/// an immediate that modifies its dest (so takes that input value as a
/// separate virtual register).
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRIMod {
    /// Shift left and insert, 32-bit lane. Rd |= Rn << #imm
    Sli32(FPULeftShiftImm),
    /// Shift left and insert, 64-bit lane. Rd |= Rn << #imm
    Sli64(FPULeftShiftImm),
}
62
63impl BitOp {
64    /// Get the assembly mnemonic for this opcode.
65    pub fn op_str(&self) -> &'static str {
66        match self {
67            BitOp::RBit => "rbit",
68            BitOp::Clz => "clz",
69            BitOp::Cls => "cls",
70            BitOp::Rev16 => "rev16",
71            BitOp::Rev32 => "rev32",
72            BitOp::Rev64 => "rev64",
73        }
74    }
75}
76
/// Additional information for `return_call[_ind]` instructions, left out of
/// line to lower the size of the `Inst` enum.
///
/// `T` is the type of the call destination held in `dest` (e.g. a symbolic
/// name for direct calls or a register for indirect calls).
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// Where this call is going to
    pub dest: T,
    /// Arguments to the call instruction.
    pub uses: CallArgList,
    /// The size of the new stack frame's stack arguments. This is necessary
    /// for copying the frame over our current frame. It must already be
    /// allocated on the stack.
    pub new_stack_arg_size: u32,
    /// API key to use to restore the return address, if any.
    pub key: Option<APIKey>,
}
92
/// Count how many of the low `num_half_words` 16-bit half-words of `value`
/// are zero. Used to pick the cheaper of MOVZ- vs MOVN-based constant
/// materialization sequences.
fn count_zero_half_words(value: u64, num_half_words: u8) -> usize {
    // Walk the half-words from least to most significant, shifting the
    // remaining bits down by 16 at each step, and count the zero ones.
    (0..num_half_words)
        .scan(value, |rest, _| {
            let half = *rest & 0xffff;
            *rest >>= 16;
            Some(half)
        })
        .filter(|&half| half == 0)
        .count()
}
104
impl Inst {
    /// Create an instruction that loads a constant, using one of several options (MOVZ, MOVN,
    /// logical immediate, or constant pool).
    ///
    /// Returns a sequence of at most four instructions that materializes
    /// `value` into `rd`.
    pub fn load_constant(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
        // NB: this is duplicated in `lower/isle.rs` and `inst.isle` right now,
        // if modifications are made here before this is deleted after moving to
        // ISLE then those locations should be updated as well.

        if let Some(imm) = MoveWideConst::maybe_from_u64(value) {
            // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVZ
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovZ,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imm) = MoveWideConst::maybe_from_u64(!value) {
            // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVN
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovN,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) {
            // Weird logical-instruction immediate in ORI using zero register
            smallvec![Inst::AluRRImmLogic {
                alu_op: ALUOp::Orr,
                size: OperandSize::Size64,
                rd,
                rn: zero_reg(),
                imml,
            }]
        } else {
            // General case: build the constant 16 bits at a time with an
            // initial MOVZ or MOVN followed by MOVK patches.
            let mut insts = smallvec![];

            // If the top 32 bits are zero, use 32-bit `mov` operations.
            // `(!value << 32) >> 32` is the bitwise inversion of `value`
            // restricted to the low 32 bits (upper 32 bits cleared).
            let (num_half_words, size, negated) = if value >> 32 == 0 {
                (2, OperandSize::Size32, (!value << 32) >> 32)
            } else {
                (4, OperandSize::Size64, !value)
            };

            // If the number of 0xffff half words is greater than the number of 0x0000 half words
            // it is more efficient to use `movn` for the first instruction.
            let first_is_inverted = count_zero_half_words(negated, num_half_words)
                > count_zero_half_words(value, num_half_words);

            // Either 0xffff or 0x0000 half words can be skipped, depending on the first
            // instruction used.
            let ignored_halfword = if first_is_inverted { 0xffff } else { 0 };

            // Collect (index, half-word) pairs that must be explicitly
            // materialized; half-words equal to `ignored_halfword` are
            // produced implicitly by the first MOVZ/MOVN.
            let halfwords: SmallVec<[_; 4]> = (0..num_half_words)
                .filter_map(|i| {
                    let imm16 = (value >> (16 * i)) & 0xffff;
                    if imm16 == ignored_halfword {
                        None
                    } else {
                        Some((i, imm16))
                    }
                })
                .collect();

            // The first emitted instruction defines `rd` outright (MOVZ or
            // MOVN); every subsequent one is a MOVK that patches 16 bits
            // into the value produced so far (hence the `rn` chaining).
            let mut prev_result = None;
            for (i, imm16) in halfwords {
                let shift = i * 16;

                if let Some(rn) = prev_result {
                    let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                    insts.push(Inst::MovK { rd, rn, imm, size });
                } else {
                    if first_is_inverted {
                        // MOVN writes the bitwise NOT of its (shifted)
                        // immediate, so pre-invert the half-word here.
                        let imm =
                            MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, shift)
                                .unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovN,
                            rd,
                            imm,
                            size,
                        });
                    } else {
                        let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovZ,
                            rd,
                            imm,
                            size,
                        });
                    }
                }

                prev_result = Some(rd.to_reg());
            }

            // The earlier single-instruction cases handle any value whose
            // non-ignored half-word set is empty, so at least one
            // instruction must have been emitted here.
            assert!(prev_result.is_some());

            insts
        }
    }

    /// Generic constructor for a load (zero-extending where appropriate).
    ///
    /// Integer types map to the U-prefixed (zero-extending) loads; float and
    /// vector types map to the FPU loads selected by bit width.
    pub fn gen_load(into_reg: Writable<Reg>, mem: AMode, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::ULoad8 {
                rd: into_reg,
                mem,
                flags,
            },
            I16 => Inst::ULoad16 {
                rd: into_reg,
                mem,
                flags,
            },
            I32 => Inst::ULoad32 {
                rd: into_reg,
                mem,
                flags,
            },
            I64 => Inst::ULoad64 {
                rd: into_reg,
                mem,
                flags,
            },
            _ => {
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = into_reg;

                    match bits {
                        128 => Inst::FpuLoad128 { rd, mem, flags },
                        64 => Inst::FpuLoad64 { rd, mem, flags },
                        32 => Inst::FpuLoad32 { rd, mem, flags },
                        16 => Inst::FpuLoad16 { rd, mem, flags },
                        _ => unimplemented!("gen_load({})", ty),
                    }
                } else {
                    unimplemented!("gen_load({})", ty);
                }
            }
        }
    }

    /// Generic constructor for a store.
    ///
    /// Mirrors `gen_load`: integer types map to the integer stores, float and
    /// vector types to the FPU stores selected by bit width.
    pub fn gen_store(mem: AMode, from_reg: Reg, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::Store8 {
                rd: from_reg,
                mem,
                flags,
            },
            I16 => Inst::Store16 {
                rd: from_reg,
                mem,
                flags,
            },
            I32 => Inst::Store32 {
                rd: from_reg,
                mem,
                flags,
            },
            I64 => Inst::Store64 {
                rd: from_reg,
                mem,
                flags,
            },
            _ => {
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = from_reg;

                    match bits {
                        128 => Inst::FpuStore128 { rd, mem, flags },
                        64 => Inst::FpuStore64 { rd, mem, flags },
                        32 => Inst::FpuStore32 { rd, mem, flags },
                        16 => Inst::FpuStore16 { rd, mem, flags },
                        _ => unimplemented!("gen_store({})", ty),
                    }
                } else {
                    unimplemented!("gen_store({})", ty);
                }
            }
        }
    }

    /// What type does this load or store instruction access in memory? When
    /// uimm12 encoding is used, the size of this type is the amount that
    /// immediate offsets are scaled by.
    ///
    /// Returns `None` for instructions that do not access memory. Note that
    /// 128-bit accesses report `I8X16` regardless of the lane interpretation.
    pub fn mem_type(&self) -> Option<Type> {
        match self {
            Inst::ULoad8 { .. } => Some(I8),
            Inst::SLoad8 { .. } => Some(I8),
            Inst::ULoad16 { .. } => Some(I16),
            Inst::SLoad16 { .. } => Some(I16),
            Inst::ULoad32 { .. } => Some(I32),
            Inst::SLoad32 { .. } => Some(I32),
            Inst::ULoad64 { .. } => Some(I64),
            Inst::FpuLoad16 { .. } => Some(F16),
            Inst::FpuLoad32 { .. } => Some(F32),
            Inst::FpuLoad64 { .. } => Some(F64),
            Inst::FpuLoad128 { .. } => Some(I8X16),
            Inst::Store8 { .. } => Some(I8),
            Inst::Store16 { .. } => Some(I16),
            Inst::Store32 { .. } => Some(I32),
            Inst::Store64 { .. } => Some(I64),
            Inst::FpuStore16 { .. } => Some(F16),
            Inst::FpuStore32 { .. } => Some(F32),
            Inst::FpuStore64 { .. } => Some(F64),
            Inst::FpuStore128 { .. } => Some(I8X16),
            _ => None,
        }
    }
}
318
319//=============================================================================
320// Instructions: get_regs
321
322fn memarg_operands(memarg: &mut AMode, collector: &mut impl OperandVisitor) {
323    match memarg {
324        AMode::Unscaled { rn, .. } | AMode::UnsignedOffset { rn, .. } => {
325            collector.reg_use(rn);
326        }
327        AMode::RegReg { rn, rm, .. }
328        | AMode::RegScaled { rn, rm, .. }
329        | AMode::RegScaledExtended { rn, rm, .. }
330        | AMode::RegExtended { rn, rm, .. } => {
331            collector.reg_use(rn);
332            collector.reg_use(rm);
333        }
334        AMode::Label { .. } => {}
335        AMode::SPPreIndexed { .. } | AMode::SPPostIndexed { .. } => {}
336        AMode::FPOffset { .. } | AMode::IncomingArg { .. } => {}
337        AMode::SPOffset { .. } | AMode::SlotOffset { .. } => {}
338        AMode::RegOffset { rn, .. } => {
339            collector.reg_use(rn);
340        }
341        AMode::Const { .. } => {}
342    }
343}
344
345fn pairmemarg_operands(pairmemarg: &mut PairAMode, collector: &mut impl OperandVisitor) {
346    match pairmemarg {
347        PairAMode::SignedOffset { reg, .. } => {
348            collector.reg_use(reg);
349        }
350        PairAMode::SPPreIndexed { .. } | PairAMode::SPPostIndexed { .. } => {}
351    }
352}
353
354fn aarch64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
355    match inst {
356        Inst::AluRRR { rd, rn, rm, .. } => {
357            collector.reg_def(rd);
358            collector.reg_use(rn);
359            collector.reg_use(rm);
360        }
361        Inst::AluRRRR { rd, rn, rm, ra, .. } => {
362            collector.reg_def(rd);
363            collector.reg_use(rn);
364            collector.reg_use(rm);
365            collector.reg_use(ra);
366        }
367        Inst::AluRRImm12 { rd, rn, .. } => {
368            collector.reg_def(rd);
369            collector.reg_use(rn);
370        }
371        Inst::AluRRImmLogic { rd, rn, .. } => {
372            collector.reg_def(rd);
373            collector.reg_use(rn);
374        }
375        Inst::AluRRImmShift { rd, rn, .. } => {
376            collector.reg_def(rd);
377            collector.reg_use(rn);
378        }
379        Inst::AluRRRShift { rd, rn, rm, .. } => {
380            collector.reg_def(rd);
381            collector.reg_use(rn);
382            collector.reg_use(rm);
383        }
384        Inst::AluRRRExtend { rd, rn, rm, .. } => {
385            collector.reg_def(rd);
386            collector.reg_use(rn);
387            collector.reg_use(rm);
388        }
389        Inst::BitRR { rd, rn, .. } => {
390            collector.reg_def(rd);
391            collector.reg_use(rn);
392        }
393        Inst::ULoad8 { rd, mem, .. }
394        | Inst::SLoad8 { rd, mem, .. }
395        | Inst::ULoad16 { rd, mem, .. }
396        | Inst::SLoad16 { rd, mem, .. }
397        | Inst::ULoad32 { rd, mem, .. }
398        | Inst::SLoad32 { rd, mem, .. }
399        | Inst::ULoad64 { rd, mem, .. } => {
400            collector.reg_def(rd);
401            memarg_operands(mem, collector);
402        }
403        Inst::Store8 { rd, mem, .. }
404        | Inst::Store16 { rd, mem, .. }
405        | Inst::Store32 { rd, mem, .. }
406        | Inst::Store64 { rd, mem, .. } => {
407            collector.reg_use(rd);
408            memarg_operands(mem, collector);
409        }
410        Inst::StoreP64 { rt, rt2, mem, .. } => {
411            collector.reg_use(rt);
412            collector.reg_use(rt2);
413            pairmemarg_operands(mem, collector);
414        }
415        Inst::LoadP64 { rt, rt2, mem, .. } => {
416            collector.reg_def(rt);
417            collector.reg_def(rt2);
418            pairmemarg_operands(mem, collector);
419        }
420        Inst::Mov { rd, rm, .. } => {
421            collector.reg_def(rd);
422            collector.reg_use(rm);
423        }
424        Inst::MovFromPReg { rd, rm } => {
425            debug_assert!(rd.to_reg().is_virtual());
426            collector.reg_def(rd);
427            collector.reg_fixed_nonallocatable(*rm);
428        }
429        Inst::MovToPReg { rd, rm } => {
430            debug_assert!(rm.is_virtual());
431            collector.reg_fixed_nonallocatable(*rd);
432            collector.reg_use(rm);
433        }
434        Inst::MovK { rd, rn, .. } => {
435            collector.reg_use(rn);
436            collector.reg_reuse_def(rd, 0); // `rn` == `rd`.
437        }
438        Inst::MovWide { rd, .. } => {
439            collector.reg_def(rd);
440        }
441        Inst::CSel { rd, rn, rm, .. } => {
442            collector.reg_def(rd);
443            collector.reg_use(rn);
444            collector.reg_use(rm);
445        }
446        Inst::CSNeg { rd, rn, rm, .. } => {
447            collector.reg_def(rd);
448            collector.reg_use(rn);
449            collector.reg_use(rm);
450        }
451        Inst::CSet { rd, .. } | Inst::CSetm { rd, .. } => {
452            collector.reg_def(rd);
453        }
454        Inst::CCmp { rn, rm, .. } => {
455            collector.reg_use(rn);
456            collector.reg_use(rm);
457        }
458        Inst::CCmpImm { rn, .. } => {
459            collector.reg_use(rn);
460        }
461        Inst::AtomicRMWLoop {
462            op,
463            addr,
464            operand,
465            oldval,
466            scratch1,
467            scratch2,
468            ..
469        } => {
470            collector.reg_fixed_use(addr, xreg(25));
471            collector.reg_fixed_use(operand, xreg(26));
472            collector.reg_fixed_def(oldval, xreg(27));
473            collector.reg_fixed_def(scratch1, xreg(24));
474            if *op != AtomicRMWLoopOp::Xchg {
475                collector.reg_fixed_def(scratch2, xreg(28));
476            }
477        }
478        Inst::AtomicRMW { rs, rt, rn, .. } => {
479            collector.reg_use(rs);
480            collector.reg_def(rt);
481            collector.reg_use(rn);
482        }
483        Inst::AtomicCAS { rd, rs, rt, rn, .. } => {
484            collector.reg_reuse_def(rd, 1); // reuse `rs`.
485            collector.reg_use(rs);
486            collector.reg_use(rt);
487            collector.reg_use(rn);
488        }
489        Inst::AtomicCASLoop {
490            addr,
491            expected,
492            replacement,
493            oldval,
494            scratch,
495            ..
496        } => {
497            collector.reg_fixed_use(addr, xreg(25));
498            collector.reg_fixed_use(expected, xreg(26));
499            collector.reg_fixed_use(replacement, xreg(28));
500            collector.reg_fixed_def(oldval, xreg(27));
501            collector.reg_fixed_def(scratch, xreg(24));
502        }
503        Inst::LoadAcquire { rt, rn, .. } => {
504            collector.reg_use(rn);
505            collector.reg_def(rt);
506        }
507        Inst::StoreRelease { rt, rn, .. } => {
508            collector.reg_use(rn);
509            collector.reg_use(rt);
510        }
511        Inst::Fence {} | Inst::Csdb {} => {}
512        Inst::FpuMove32 { rd, rn } => {
513            collector.reg_def(rd);
514            collector.reg_use(rn);
515        }
516        Inst::FpuMove64 { rd, rn } => {
517            collector.reg_def(rd);
518            collector.reg_use(rn);
519        }
520        Inst::FpuMove128 { rd, rn } => {
521            collector.reg_def(rd);
522            collector.reg_use(rn);
523        }
524        Inst::FpuMoveFromVec { rd, rn, .. } => {
525            collector.reg_def(rd);
526            collector.reg_use(rn);
527        }
528        Inst::FpuExtend { rd, rn, .. } => {
529            collector.reg_def(rd);
530            collector.reg_use(rn);
531        }
532        Inst::FpuRR { rd, rn, .. } => {
533            collector.reg_def(rd);
534            collector.reg_use(rn);
535        }
536        Inst::FpuRRR { rd, rn, rm, .. } => {
537            collector.reg_def(rd);
538            collector.reg_use(rn);
539            collector.reg_use(rm);
540        }
541        Inst::FpuRRI { rd, rn, .. } => {
542            collector.reg_def(rd);
543            collector.reg_use(rn);
544        }
545        Inst::FpuRRIMod { rd, ri, rn, .. } => {
546            collector.reg_reuse_def(rd, 1); // reuse `ri`.
547            collector.reg_use(ri);
548            collector.reg_use(rn);
549        }
550        Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
551            collector.reg_def(rd);
552            collector.reg_use(rn);
553            collector.reg_use(rm);
554            collector.reg_use(ra);
555        }
556        Inst::VecMisc { rd, rn, .. } => {
557            collector.reg_def(rd);
558            collector.reg_use(rn);
559        }
560
561        Inst::VecLanes { rd, rn, .. } => {
562            collector.reg_def(rd);
563            collector.reg_use(rn);
564        }
565        Inst::VecShiftImm { rd, rn, .. } => {
566            collector.reg_def(rd);
567            collector.reg_use(rn);
568        }
569        Inst::VecShiftImmMod { rd, ri, rn, .. } => {
570            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
571            collector.reg_use(ri);
572            collector.reg_use(rn);
573        }
574        Inst::VecExtract { rd, rn, rm, .. } => {
575            collector.reg_def(rd);
576            collector.reg_use(rn);
577            collector.reg_use(rm);
578        }
579        Inst::VecTbl { rd, rn, rm } => {
580            collector.reg_use(rn);
581            collector.reg_use(rm);
582            collector.reg_def(rd);
583        }
584        Inst::VecTblExt { rd, ri, rn, rm } => {
585            collector.reg_use(rn);
586            collector.reg_use(rm);
587            collector.reg_reuse_def(rd, 3); // `rd` == `ri`.
588            collector.reg_use(ri);
589        }
590
591        Inst::VecTbl2 { rd, rn, rn2, rm } => {
592            // Constrain to v30 / v31 so that we satisfy the "adjacent
593            // registers" constraint without use of pinned vregs in
594            // lowering.
595            collector.reg_fixed_use(rn, vreg(30));
596            collector.reg_fixed_use(rn2, vreg(31));
597            collector.reg_use(rm);
598            collector.reg_def(rd);
599        }
600        Inst::VecTbl2Ext {
601            rd,
602            ri,
603            rn,
604            rn2,
605            rm,
606        } => {
607            // Constrain to v30 / v31 so that we satisfy the "adjacent
608            // registers" constraint without use of pinned vregs in
609            // lowering.
610            collector.reg_fixed_use(rn, vreg(30));
611            collector.reg_fixed_use(rn2, vreg(31));
612            collector.reg_use(rm);
613            collector.reg_reuse_def(rd, 4); // `rd` == `ri`.
614            collector.reg_use(ri);
615        }
616        Inst::VecLoadReplicate { rd, rn, .. } => {
617            collector.reg_def(rd);
618            collector.reg_use(rn);
619        }
620        Inst::VecCSel { rd, rn, rm, .. } => {
621            collector.reg_def(rd);
622            collector.reg_use(rn);
623            collector.reg_use(rm);
624        }
625        Inst::FpuCmp { rn, rm, .. } => {
626            collector.reg_use(rn);
627            collector.reg_use(rm);
628        }
629        Inst::FpuLoad16 { rd, mem, .. } => {
630            collector.reg_def(rd);
631            memarg_operands(mem, collector);
632        }
633        Inst::FpuLoad32 { rd, mem, .. } => {
634            collector.reg_def(rd);
635            memarg_operands(mem, collector);
636        }
637        Inst::FpuLoad64 { rd, mem, .. } => {
638            collector.reg_def(rd);
639            memarg_operands(mem, collector);
640        }
641        Inst::FpuLoad128 { rd, mem, .. } => {
642            collector.reg_def(rd);
643            memarg_operands(mem, collector);
644        }
645        Inst::FpuStore16 { rd, mem, .. } => {
646            collector.reg_use(rd);
647            memarg_operands(mem, collector);
648        }
649        Inst::FpuStore32 { rd, mem, .. } => {
650            collector.reg_use(rd);
651            memarg_operands(mem, collector);
652        }
653        Inst::FpuStore64 { rd, mem, .. } => {
654            collector.reg_use(rd);
655            memarg_operands(mem, collector);
656        }
657        Inst::FpuStore128 { rd, mem, .. } => {
658            collector.reg_use(rd);
659            memarg_operands(mem, collector);
660        }
661        Inst::FpuLoadP64 { rt, rt2, mem, .. } => {
662            collector.reg_def(rt);
663            collector.reg_def(rt2);
664            pairmemarg_operands(mem, collector);
665        }
666        Inst::FpuStoreP64 { rt, rt2, mem, .. } => {
667            collector.reg_use(rt);
668            collector.reg_use(rt2);
669            pairmemarg_operands(mem, collector);
670        }
671        Inst::FpuLoadP128 { rt, rt2, mem, .. } => {
672            collector.reg_def(rt);
673            collector.reg_def(rt2);
674            pairmemarg_operands(mem, collector);
675        }
676        Inst::FpuStoreP128 { rt, rt2, mem, .. } => {
677            collector.reg_use(rt);
678            collector.reg_use(rt2);
679            pairmemarg_operands(mem, collector);
680        }
681        Inst::FpuToInt { rd, rn, .. } => {
682            collector.reg_def(rd);
683            collector.reg_use(rn);
684        }
685        Inst::IntToFpu { rd, rn, .. } => {
686            collector.reg_def(rd);
687            collector.reg_use(rn);
688        }
689        Inst::FpuCSel16 { rd, rn, rm, .. }
690        | Inst::FpuCSel32 { rd, rn, rm, .. }
691        | Inst::FpuCSel64 { rd, rn, rm, .. } => {
692            collector.reg_def(rd);
693            collector.reg_use(rn);
694            collector.reg_use(rm);
695        }
696        Inst::FpuRound { rd, rn, .. } => {
697            collector.reg_def(rd);
698            collector.reg_use(rn);
699        }
700        Inst::MovToFpu { rd, rn, .. } => {
701            collector.reg_def(rd);
702            collector.reg_use(rn);
703        }
704        Inst::FpuMoveFPImm { rd, .. } => {
705            collector.reg_def(rd);
706        }
707        Inst::MovToVec { rd, ri, rn, .. } => {
708            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
709            collector.reg_use(ri);
710            collector.reg_use(rn);
711        }
712        Inst::MovFromVec { rd, rn, .. } | Inst::MovFromVecSigned { rd, rn, .. } => {
713            collector.reg_def(rd);
714            collector.reg_use(rn);
715        }
716        Inst::VecDup { rd, rn, .. } => {
717            collector.reg_def(rd);
718            collector.reg_use(rn);
719        }
720        Inst::VecDupFromFpu { rd, rn, .. } => {
721            collector.reg_def(rd);
722            collector.reg_use(rn);
723        }
724        Inst::VecDupFPImm { rd, .. } => {
725            collector.reg_def(rd);
726        }
727        Inst::VecDupImm { rd, .. } => {
728            collector.reg_def(rd);
729        }
730        Inst::VecExtend { rd, rn, .. } => {
731            collector.reg_def(rd);
732            collector.reg_use(rn);
733        }
734        Inst::VecMovElement { rd, ri, rn, .. } => {
735            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
736            collector.reg_use(ri);
737            collector.reg_use(rn);
738        }
739        Inst::VecRRLong { rd, rn, .. } => {
740            collector.reg_def(rd);
741            collector.reg_use(rn);
742        }
743        Inst::VecRRNarrowLow { rd, rn, .. } => {
744            collector.reg_use(rn);
745            collector.reg_def(rd);
746        }
747        Inst::VecRRNarrowHigh { rd, ri, rn, .. } => {
748            collector.reg_use(rn);
749            collector.reg_reuse_def(rd, 2); // `rd` == `ri`.
750            collector.reg_use(ri);
751        }
752        Inst::VecRRPair { rd, rn, .. } => {
753            collector.reg_def(rd);
754            collector.reg_use(rn);
755        }
756        Inst::VecRRRLong { rd, rn, rm, .. } => {
757            collector.reg_def(rd);
758            collector.reg_use(rn);
759            collector.reg_use(rm);
760        }
761        Inst::VecRRRLongMod { rd, ri, rn, rm, .. } => {
762            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
763            collector.reg_use(ri);
764            collector.reg_use(rn);
765            collector.reg_use(rm);
766        }
767        Inst::VecRRPairLong { rd, rn, .. } => {
768            collector.reg_def(rd);
769            collector.reg_use(rn);
770        }
771        Inst::VecRRR { rd, rn, rm, .. } => {
772            collector.reg_def(rd);
773            collector.reg_use(rn);
774            collector.reg_use(rm);
775        }
776        Inst::VecRRRMod { rd, ri, rn, rm, .. } | Inst::VecFmlaElem { rd, ri, rn, rm, .. } => {
777            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
778            collector.reg_use(ri);
779            collector.reg_use(rn);
780            collector.reg_use(rm);
781        }
782        Inst::MovToNZCV { rn } => {
783            collector.reg_use(rn);
784        }
785        Inst::MovFromNZCV { rd } => {
786            collector.reg_def(rd);
787        }
788        Inst::Extend { rd, rn, .. } => {
789            collector.reg_def(rd);
790            collector.reg_use(rn);
791        }
792        Inst::Args { args } => {
793            for ArgPair { vreg, preg } in args {
794                collector.reg_fixed_def(vreg, *preg);
795            }
796        }
797        Inst::Rets { rets } => {
798            for RetPair { vreg, preg } in rets {
799                collector.reg_fixed_use(vreg, *preg);
800            }
801        }
802        Inst::Ret { .. } | Inst::AuthenticatedRet { .. } => {}
803        Inst::Jump { .. } => {}
804        Inst::Call { info, .. } => {
805            let CallInfo { uses, defs, .. } = &mut **info;
806            for CallArgPair { vreg, preg } in uses {
807                collector.reg_fixed_use(vreg, *preg);
808            }
809            for CallRetPair { vreg, location } in defs {
810                match location {
811                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
812                    RetLocation::Stack(..) => collector.any_def(vreg),
813                }
814            }
815            collector.reg_clobbers(info.clobbers);
816            if let Some(try_call_info) = &mut info.try_call_info {
817                try_call_info.collect_operands(collector);
818            }
819        }
820        Inst::CallInd { info, .. } => {
821            let CallInfo {
822                dest, uses, defs, ..
823            } = &mut **info;
824            collector.reg_use(dest);
825            for CallArgPair { vreg, preg } in uses {
826                collector.reg_fixed_use(vreg, *preg);
827            }
828            for CallRetPair { vreg, location } in defs {
829                match location {
830                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
831                    RetLocation::Stack(..) => collector.any_def(vreg),
832                }
833            }
834            collector.reg_clobbers(info.clobbers);
835            if let Some(try_call_info) = &mut info.try_call_info {
836                try_call_info.collect_operands(collector);
837            }
838        }
839        Inst::ReturnCall { info } => {
840            for CallArgPair { vreg, preg } in &mut info.uses {
841                collector.reg_fixed_use(vreg, *preg);
842            }
843        }
844        Inst::ReturnCallInd { info } => {
845            // TODO(https://github.com/bytecodealliance/regalloc2/issues/145):
846            // This shouldn't be a fixed register constraint, but it's not clear how to pick a
847            // register that won't be clobbered by the callee-save restore code emitted with a
848            // return_call_indirect.
849            collector.reg_fixed_use(&mut info.dest, xreg(1));
850            for CallArgPair { vreg, preg } in &mut info.uses {
851                collector.reg_fixed_use(vreg, *preg);
852            }
853        }
854        Inst::CondBr { kind, .. } => match kind {
855            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
856            CondBrKind::Cond(_) => {}
857        },
858        Inst::TestBitAndBranch { rn, .. } => {
859            collector.reg_use(rn);
860        }
861        Inst::IndirectBr { rn, .. } => {
862            collector.reg_use(rn);
863        }
864        Inst::Nop0 | Inst::Nop4 => {}
865        Inst::Brk => {}
866        Inst::Udf { .. } => {}
867        Inst::TrapIf { kind, .. } => match kind {
868            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
869            CondBrKind::Cond(_) => {}
870        },
871        Inst::Adr { rd, .. } | Inst::Adrp { rd, .. } => {
872            collector.reg_def(rd);
873        }
874        Inst::Word4 { .. } | Inst::Word8 { .. } => {}
875        Inst::JTSequence {
876            ridx, rtmp1, rtmp2, ..
877        } => {
878            collector.reg_use(ridx);
879            collector.reg_early_def(rtmp1);
880            collector.reg_early_def(rtmp2);
881        }
882        Inst::LoadExtNameGot { rd, .. }
883        | Inst::LoadExtNameNear { rd, .. }
884        | Inst::LoadExtNameFar { rd, .. } => {
885            collector.reg_def(rd);
886        }
887        Inst::LoadAddr { rd, mem } => {
888            collector.reg_def(rd);
889            memarg_operands(mem, collector);
890        }
891        Inst::Paci { .. } | Inst::Xpaclri => {
892            // Neither LR nor SP is an allocatable register, so there is no need
893            // to do anything.
894        }
895        Inst::Bti { .. } => {}
896
897        Inst::ElfTlsGetAddr { rd, tmp, .. } => {
898            // TLSDESC has a very neat calling convention. It is required to preserve
899            // all registers except x0 and x30. X30 is non allocatable in cranelift since
900            // its the link register.
901            //
902            // Additionally we need a second register as a temporary register for the
903            // TLSDESC sequence. This register can be any register other than x0 (and x30).
904            collector.reg_fixed_def(rd, regs::xreg(0));
905            collector.reg_early_def(tmp);
906        }
907        Inst::MachOTlsGetAddr { rd, .. } => {
908            collector.reg_fixed_def(rd, regs::xreg(0));
909            let mut clobbers =
910                AArch64MachineDeps::get_regs_clobbered_by_call(CallConv::AppleAarch64, false);
911            clobbers.remove(regs::xreg_preg(0));
912            collector.reg_clobbers(clobbers);
913        }
914        Inst::Unwind { .. } => {}
915        Inst::EmitIsland { .. } => {}
916        Inst::DummyUse { reg } => {
917            collector.reg_use(reg);
918        }
919        Inst::StackProbeLoop { start, end, .. } => {
920            collector.reg_early_def(start);
921            collector.reg_use(end);
922        }
923    }
924}
925
926//=============================================================================
927// Instructions: misc functions and external interface
928
929impl MachInst for Inst {
930    type ABIMachineSpec = AArch64MachineDeps;
931    type LabelUse = LabelUse;
932
933    // "CLIF" in hex, to make the trap recognizable during
934    // debugging.
935    const TRAP_OPCODE: &'static [u8] = &0xc11f_u32.to_le_bytes();
936
937    fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
938        aarch64_get_operands(self, collector);
939    }
940
941    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
942        match self {
943            &Inst::Mov {
944                size: OperandSize::Size64,
945                rd,
946                rm,
947            } => Some((rd, rm)),
948            &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
949            &Inst::FpuMove128 { rd, rn } => Some((rd, rn)),
950            _ => None,
951        }
952    }
953
954    fn is_included_in_clobbers(&self) -> bool {
955        let (caller, callee, is_exception) = match self {
956            Inst::Args { .. } => return false,
957            Inst::Call { info } => (
958                info.caller_conv,
959                info.callee_conv,
960                info.try_call_info.is_some(),
961            ),
962            Inst::CallInd { info } => (
963                info.caller_conv,
964                info.callee_conv,
965                info.try_call_info.is_some(),
966            ),
967            _ => return true,
968        };
969
970        // We exclude call instructions from the clobber-set when they are calls
971        // from caller to callee that both clobber the same register (such as
972        // using the same or similar ABIs). Such calls cannot possibly force any
973        // new registers to be saved in the prologue, because anything that the
974        // callee clobbers, the caller is also allowed to clobber. This both
975        // saves work and enables us to more precisely follow the
976        // half-caller-save, half-callee-save SysV ABI for some vector
977        // registers.
978        //
979        // See the note in [crate::isa::aarch64::abi::is_caller_save_reg] for
980        // more information on this ABI-implementation hack.
981        let caller_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(caller, false);
982        let callee_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(callee, is_exception);
983
984        let mut all_clobbers = caller_clobbers;
985        all_clobbers.union_from(callee_clobbers);
986        all_clobbers != caller_clobbers
987    }
988
989    fn is_trap(&self) -> bool {
990        match self {
991            Self::Udf { .. } => true,
992            _ => false,
993        }
994    }
995
996    fn is_args(&self) -> bool {
997        match self {
998            Self::Args { .. } => true,
999            _ => false,
1000        }
1001    }
1002
1003    fn call_type(&self) -> CallType {
1004        match self {
1005            Inst::Call { .. }
1006            | Inst::CallInd { .. }
1007            | Inst::ElfTlsGetAddr { .. }
1008            | Inst::MachOTlsGetAddr { .. } => CallType::Regular,
1009
1010            Inst::ReturnCall { .. } | Inst::ReturnCallInd { .. } => CallType::TailCall,
1011
1012            _ => CallType::None,
1013        }
1014    }
1015
1016    fn is_term(&self) -> MachTerminator {
1017        match self {
1018            &Inst::Rets { .. } => MachTerminator::Ret,
1019            &Inst::ReturnCall { .. } | &Inst::ReturnCallInd { .. } => MachTerminator::RetCall,
1020            &Inst::Jump { .. } => MachTerminator::Branch,
1021            &Inst::CondBr { .. } => MachTerminator::Branch,
1022            &Inst::TestBitAndBranch { .. } => MachTerminator::Branch,
1023            &Inst::IndirectBr { .. } => MachTerminator::Branch,
1024            &Inst::JTSequence { .. } => MachTerminator::Branch,
1025            &Inst::Call { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
1026            &Inst::CallInd { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
1027            _ => MachTerminator::None,
1028        }
1029    }
1030
1031    fn is_mem_access(&self) -> bool {
1032        match self {
1033            &Inst::ULoad8 { .. }
1034            | &Inst::SLoad8 { .. }
1035            | &Inst::ULoad16 { .. }
1036            | &Inst::SLoad16 { .. }
1037            | &Inst::ULoad32 { .. }
1038            | &Inst::SLoad32 { .. }
1039            | &Inst::ULoad64 { .. }
1040            | &Inst::LoadP64 { .. }
1041            | &Inst::FpuLoad16 { .. }
1042            | &Inst::FpuLoad32 { .. }
1043            | &Inst::FpuLoad64 { .. }
1044            | &Inst::FpuLoad128 { .. }
1045            | &Inst::FpuLoadP64 { .. }
1046            | &Inst::FpuLoadP128 { .. }
1047            | &Inst::Store8 { .. }
1048            | &Inst::Store16 { .. }
1049            | &Inst::Store32 { .. }
1050            | &Inst::Store64 { .. }
1051            | &Inst::StoreP64 { .. }
1052            | &Inst::FpuStore16 { .. }
1053            | &Inst::FpuStore32 { .. }
1054            | &Inst::FpuStore64 { .. }
1055            | &Inst::FpuStore128 { .. } => true,
1056            // TODO: verify this carefully
1057            _ => false,
1058        }
1059    }
1060
1061    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
1062        let bits = ty.bits();
1063
1064        assert!(bits <= 128);
1065        assert!(to_reg.to_reg().class() == from_reg.class());
1066        match from_reg.class() {
1067            RegClass::Int => Inst::Mov {
1068                size: OperandSize::Size64,
1069                rd: to_reg,
1070                rm: from_reg,
1071            },
1072            RegClass::Float => {
1073                if bits > 64 {
1074                    Inst::FpuMove128 {
1075                        rd: to_reg,
1076                        rn: from_reg,
1077                    }
1078                } else {
1079                    Inst::FpuMove64 {
1080                        rd: to_reg,
1081                        rn: from_reg,
1082                    }
1083                }
1084            }
1085            RegClass::Vector => unreachable!(),
1086        }
1087    }
1088
1089    fn is_safepoint(&self) -> bool {
1090        match self {
1091            Inst::Call { .. } | Inst::CallInd { .. } => true,
1092            _ => false,
1093        }
1094    }
1095
1096    fn gen_dummy_use(reg: Reg) -> Inst {
1097        Inst::DummyUse { reg }
1098    }
1099
1100    fn gen_nop(preferred_size: usize) -> Inst {
1101        if preferred_size == 0 {
1102            return Inst::Nop0;
1103        }
1104        // We can't give a NOP (or any insn) < 4 bytes.
1105        assert!(preferred_size >= 4);
1106        Inst::Nop4
1107    }
1108
1109    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
1110        match ty {
1111            I8 => Ok((&[RegClass::Int], &[I8])),
1112            I16 => Ok((&[RegClass::Int], &[I16])),
1113            I32 => Ok((&[RegClass::Int], &[I32])),
1114            I64 => Ok((&[RegClass::Int], &[I64])),
1115            F16 => Ok((&[RegClass::Float], &[F16])),
1116            F32 => Ok((&[RegClass::Float], &[F32])),
1117            F64 => Ok((&[RegClass::Float], &[F64])),
1118            F128 => Ok((&[RegClass::Float], &[F128])),
1119            I128 => Ok((&[RegClass::Int, RegClass::Int], &[I64, I64])),
1120            _ if ty.is_vector() && ty.bits() <= 128 => {
1121                let types = &[types::I8X2, types::I8X4, types::I8X8, types::I8X16];
1122                Ok((
1123                    &[RegClass::Float],
1124                    slice::from_ref(&types[ty.bytes().ilog2() as usize - 1]),
1125                ))
1126            }
1127            _ if ty.is_dynamic_vector() => Ok((&[RegClass::Float], &[I8X16])),
1128            _ => Err(CodegenError::Unsupported(format!(
1129                "Unexpected SSA-value type: {ty}"
1130            ))),
1131        }
1132    }
1133
1134    fn canonical_type_for_rc(rc: RegClass) -> Type {
1135        match rc {
1136            RegClass::Float => types::I8X16,
1137            RegClass::Int => types::I64,
1138            RegClass::Vector => unreachable!(),
1139        }
1140    }
1141
1142    fn gen_jump(target: MachLabel) -> Inst {
1143        Inst::Jump {
1144            dest: BranchTarget::Label(target),
1145        }
1146    }
1147
1148    fn worst_case_size() -> CodeOffset {
1149        // The maximum size, in bytes, of any `Inst`'s emitted code. We have at least one case of
1150        // an 8-instruction sequence (saturating int-to-float conversions) with three embedded
1151        // 64-bit f64 constants.
1152        //
1153        // Note that inline jump-tables handle island/pool insertion separately, so we do not need
1154        // to account for them here (otherwise the worst case would be 2^31 * 4, clearly not
1155        // feasible for other reasons).
1156        44
1157    }
1158
1159    fn ref_type_regclass(_: &settings::Flags) -> RegClass {
1160        RegClass::Int
1161    }
1162
1163    fn gen_block_start(
1164        is_indirect_branch_target: bool,
1165        is_forward_edge_cfi_enabled: bool,
1166    ) -> Option<Self> {
1167        if is_indirect_branch_target && is_forward_edge_cfi_enabled {
1168            Some(Inst::Bti {
1169                targets: BranchTargetType::J,
1170            })
1171        } else {
1172            None
1173        }
1174    }
1175
1176    fn function_alignment() -> FunctionAlignment {
1177        // We use 32-byte alignment for performance reasons, but for correctness
1178        // we would only need 4-byte alignment.
1179        FunctionAlignment {
1180            minimum: 4,
1181            preferred: 32,
1182        }
1183    }
1184}
1185
1186//=============================================================================
1187// Pretty-printing of instructions.
1188
1189fn mem_finalize_for_show(mem: &AMode, access_ty: Type, state: &EmitState) -> (String, String) {
1190    let (mem_insts, mem) = mem_finalize(None, mem, access_ty, state);
1191    let mut mem_str = mem_insts
1192        .into_iter()
1193        .map(|inst| inst.print_with_state(&mut EmitState::default()))
1194        .collect::<Vec<_>>()
1195        .join(" ; ");
1196    if !mem_str.is_empty() {
1197        mem_str += " ; ";
1198    }
1199
1200    let mem = mem.pretty_print(access_ty.bytes() as u8);
1201    (mem_str, mem)
1202}
1203
1204fn pretty_print_try_call(info: &TryCallInfo) -> String {
1205    format!(
1206        "; b {:?}; catch [{}]",
1207        info.continuation,
1208        info.pretty_print_dests()
1209    )
1210}
1211
1212impl Inst {
1213    fn print_with_state(&self, state: &mut EmitState) -> String {
1214        fn op_name(alu_op: ALUOp) -> &'static str {
1215            match alu_op {
1216                ALUOp::Add => "add",
1217                ALUOp::Sub => "sub",
1218                ALUOp::Orr => "orr",
1219                ALUOp::And => "and",
1220                ALUOp::AndS => "ands",
1221                ALUOp::Eor => "eor",
1222                ALUOp::AddS => "adds",
1223                ALUOp::SubS => "subs",
1224                ALUOp::SMulH => "smulh",
1225                ALUOp::UMulH => "umulh",
1226                ALUOp::SDiv => "sdiv",
1227                ALUOp::UDiv => "udiv",
1228                ALUOp::AndNot => "bic",
1229                ALUOp::OrrNot => "orn",
1230                ALUOp::EorNot => "eon",
1231                ALUOp::Extr => "extr",
1232                ALUOp::Lsr => "lsr",
1233                ALUOp::Asr => "asr",
1234                ALUOp::Lsl => "lsl",
1235                ALUOp::Adc => "adc",
1236                ALUOp::AdcS => "adcs",
1237                ALUOp::Sbc => "sbc",
1238                ALUOp::SbcS => "sbcs",
1239            }
1240        }
1241
1242        match self {
1243            &Inst::Nop0 => "nop-zero-len".to_string(),
1244            &Inst::Nop4 => "nop".to_string(),
1245            &Inst::AluRRR {
1246                alu_op,
1247                size,
1248                rd,
1249                rn,
1250                rm,
1251            } => {
1252                let op = op_name(alu_op);
1253                let rd = pretty_print_ireg(rd.to_reg(), size);
1254                let rn = pretty_print_ireg(rn, size);
1255                let rm = pretty_print_ireg(rm, size);
1256                format!("{op} {rd}, {rn}, {rm}")
1257            }
1258            &Inst::AluRRRR {
1259                alu_op,
1260                size,
1261                rd,
1262                rn,
1263                rm,
1264                ra,
1265            } => {
1266                let (op, da_size) = match alu_op {
1267                    ALUOp3::MAdd => ("madd", size),
1268                    ALUOp3::MSub => ("msub", size),
1269                    ALUOp3::UMAddL => ("umaddl", OperandSize::Size64),
1270                    ALUOp3::SMAddL => ("smaddl", OperandSize::Size64),
1271                };
1272                let rd = pretty_print_ireg(rd.to_reg(), da_size);
1273                let rn = pretty_print_ireg(rn, size);
1274                let rm = pretty_print_ireg(rm, size);
1275                let ra = pretty_print_ireg(ra, da_size);
1276
1277                format!("{op} {rd}, {rn}, {rm}, {ra}")
1278            }
1279            &Inst::AluRRImm12 {
1280                alu_op,
1281                size,
1282                rd,
1283                rn,
1284                ref imm12,
1285            } => {
1286                let op = op_name(alu_op);
1287                let rd = pretty_print_ireg(rd.to_reg(), size);
1288                let rn = pretty_print_ireg(rn, size);
1289
1290                if imm12.bits == 0 && alu_op == ALUOp::Add && size.is64() {
1291                    // special-case MOV (used for moving into SP).
1292                    format!("mov {rd}, {rn}")
1293                } else {
1294                    let imm12 = imm12.pretty_print(0);
1295                    format!("{op} {rd}, {rn}, {imm12}")
1296                }
1297            }
1298            &Inst::AluRRImmLogic {
1299                alu_op,
1300                size,
1301                rd,
1302                rn,
1303                ref imml,
1304            } => {
1305                let op = op_name(alu_op);
1306                let rd = pretty_print_ireg(rd.to_reg(), size);
1307                let rn = pretty_print_ireg(rn, size);
1308                let imml = imml.pretty_print(0);
1309                format!("{op} {rd}, {rn}, {imml}")
1310            }
1311            &Inst::AluRRImmShift {
1312                alu_op,
1313                size,
1314                rd,
1315                rn,
1316                ref immshift,
1317            } => {
1318                let op = op_name(alu_op);
1319                let rd = pretty_print_ireg(rd.to_reg(), size);
1320                let rn = pretty_print_ireg(rn, size);
1321                let immshift = immshift.pretty_print(0);
1322                format!("{op} {rd}, {rn}, {immshift}")
1323            }
1324            &Inst::AluRRRShift {
1325                alu_op,
1326                size,
1327                rd,
1328                rn,
1329                rm,
1330                ref shiftop,
1331            } => {
1332                let op = op_name(alu_op);
1333                let rd = pretty_print_ireg(rd.to_reg(), size);
1334                let rn = pretty_print_ireg(rn, size);
1335                let rm = pretty_print_ireg(rm, size);
1336                let shiftop = shiftop.pretty_print(0);
1337                format!("{op} {rd}, {rn}, {rm}, {shiftop}")
1338            }
1339            &Inst::AluRRRExtend {
1340                alu_op,
1341                size,
1342                rd,
1343                rn,
1344                rm,
1345                ref extendop,
1346            } => {
1347                let op = op_name(alu_op);
1348                let rd = pretty_print_ireg(rd.to_reg(), size);
1349                let rn = pretty_print_ireg(rn, size);
1350                let rm = pretty_print_ireg(rm, size);
1351                let extendop = extendop.pretty_print(0);
1352                format!("{op} {rd}, {rn}, {rm}, {extendop}")
1353            }
1354            &Inst::BitRR { op, size, rd, rn } => {
1355                let op = op.op_str();
1356                let rd = pretty_print_ireg(rd.to_reg(), size);
1357                let rn = pretty_print_ireg(rn, size);
1358                format!("{op} {rd}, {rn}")
1359            }
1360            &Inst::ULoad8 { rd, ref mem, .. }
1361            | &Inst::SLoad8 { rd, ref mem, .. }
1362            | &Inst::ULoad16 { rd, ref mem, .. }
1363            | &Inst::SLoad16 { rd, ref mem, .. }
1364            | &Inst::ULoad32 { rd, ref mem, .. }
1365            | &Inst::SLoad32 { rd, ref mem, .. }
1366            | &Inst::ULoad64 { rd, ref mem, .. } => {
1367                let is_unscaled = match &mem {
1368                    &AMode::Unscaled { .. } => true,
1369                    _ => false,
1370                };
1371                let (op, size) = match (self, is_unscaled) {
1372                    (&Inst::ULoad8 { .. }, false) => ("ldrb", OperandSize::Size32),
1373                    (&Inst::ULoad8 { .. }, true) => ("ldurb", OperandSize::Size32),
1374                    (&Inst::SLoad8 { .. }, false) => ("ldrsb", OperandSize::Size64),
1375                    (&Inst::SLoad8 { .. }, true) => ("ldursb", OperandSize::Size64),
1376                    (&Inst::ULoad16 { .. }, false) => ("ldrh", OperandSize::Size32),
1377                    (&Inst::ULoad16 { .. }, true) => ("ldurh", OperandSize::Size32),
1378                    (&Inst::SLoad16 { .. }, false) => ("ldrsh", OperandSize::Size64),
1379                    (&Inst::SLoad16 { .. }, true) => ("ldursh", OperandSize::Size64),
1380                    (&Inst::ULoad32 { .. }, false) => ("ldr", OperandSize::Size32),
1381                    (&Inst::ULoad32 { .. }, true) => ("ldur", OperandSize::Size32),
1382                    (&Inst::SLoad32 { .. }, false) => ("ldrsw", OperandSize::Size64),
1383                    (&Inst::SLoad32 { .. }, true) => ("ldursw", OperandSize::Size64),
1384                    (&Inst::ULoad64 { .. }, false) => ("ldr", OperandSize::Size64),
1385                    (&Inst::ULoad64 { .. }, true) => ("ldur", OperandSize::Size64),
1386                    _ => unreachable!(),
1387                };
1388
1389                let rd = pretty_print_ireg(rd.to_reg(), size);
1390                let mem = mem.clone();
1391                let access_ty = self.mem_type().unwrap();
1392                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1393
1394                format!("{mem_str}{op} {rd}, {mem}")
1395            }
1396            &Inst::Store8 { rd, ref mem, .. }
1397            | &Inst::Store16 { rd, ref mem, .. }
1398            | &Inst::Store32 { rd, ref mem, .. }
1399            | &Inst::Store64 { rd, ref mem, .. } => {
1400                let is_unscaled = match &mem {
1401                    &AMode::Unscaled { .. } => true,
1402                    _ => false,
1403                };
1404                let (op, size) = match (self, is_unscaled) {
1405                    (&Inst::Store8 { .. }, false) => ("strb", OperandSize::Size32),
1406                    (&Inst::Store8 { .. }, true) => ("sturb", OperandSize::Size32),
1407                    (&Inst::Store16 { .. }, false) => ("strh", OperandSize::Size32),
1408                    (&Inst::Store16 { .. }, true) => ("sturh", OperandSize::Size32),
1409                    (&Inst::Store32 { .. }, false) => ("str", OperandSize::Size32),
1410                    (&Inst::Store32 { .. }, true) => ("stur", OperandSize::Size32),
1411                    (&Inst::Store64 { .. }, false) => ("str", OperandSize::Size64),
1412                    (&Inst::Store64 { .. }, true) => ("stur", OperandSize::Size64),
1413                    _ => unreachable!(),
1414                };
1415
1416                let rd = pretty_print_ireg(rd, size);
1417                let mem = mem.clone();
1418                let access_ty = self.mem_type().unwrap();
1419                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1420
1421                format!("{mem_str}{op} {rd}, {mem}")
1422            }
1423            &Inst::StoreP64 {
1424                rt, rt2, ref mem, ..
1425            } => {
1426                let rt = pretty_print_ireg(rt, OperandSize::Size64);
1427                let rt2 = pretty_print_ireg(rt2, OperandSize::Size64);
1428                let mem = mem.clone();
1429                let mem = mem.pretty_print_default();
1430                format!("stp {rt}, {rt2}, {mem}")
1431            }
1432            &Inst::LoadP64 {
1433                rt, rt2, ref mem, ..
1434            } => {
1435                let rt = pretty_print_ireg(rt.to_reg(), OperandSize::Size64);
1436                let rt2 = pretty_print_ireg(rt2.to_reg(), OperandSize::Size64);
1437                let mem = mem.clone();
1438                let mem = mem.pretty_print_default();
1439                format!("ldp {rt}, {rt2}, {mem}")
1440            }
1441            &Inst::Mov { size, rd, rm } => {
1442                let rd = pretty_print_ireg(rd.to_reg(), size);
1443                let rm = pretty_print_ireg(rm, size);
1444                format!("mov {rd}, {rm}")
1445            }
1446            &Inst::MovFromPReg { rd, rm } => {
1447                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1448                let rm = show_ireg_sized(rm.into(), OperandSize::Size64);
1449                format!("mov {rd}, {rm}")
1450            }
1451            &Inst::MovToPReg { rd, rm } => {
1452                let rd = show_ireg_sized(rd.into(), OperandSize::Size64);
1453                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1454                format!("mov {rd}, {rm}")
1455            }
1456            &Inst::MovWide {
1457                op,
1458                rd,
1459                ref imm,
1460                size,
1461            } => {
1462                let op_str = match op {
1463                    MoveWideOp::MovZ => "movz",
1464                    MoveWideOp::MovN => "movn",
1465                };
1466                let rd = pretty_print_ireg(rd.to_reg(), size);
1467                let imm = imm.pretty_print(0);
1468                format!("{op_str} {rd}, {imm}")
1469            }
1470            &Inst::MovK {
1471                rd,
1472                rn,
1473                ref imm,
1474                size,
1475            } => {
1476                let rn = pretty_print_ireg(rn, size);
1477                let rd = pretty_print_ireg(rd.to_reg(), size);
1478                let imm = imm.pretty_print(0);
1479                format!("movk {rd}, {rn}, {imm}")
1480            }
1481            &Inst::CSel { rd, rn, rm, cond } => {
1482                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1483                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1484                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1485                let cond = cond.pretty_print(0);
1486                format!("csel {rd}, {rn}, {rm}, {cond}")
1487            }
1488            &Inst::CSNeg { rd, rn, rm, cond } => {
1489                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1490                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1491                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1492                let cond = cond.pretty_print(0);
1493                format!("csneg {rd}, {rn}, {rm}, {cond}")
1494            }
1495            &Inst::CSet { rd, cond } => {
1496                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1497                let cond = cond.pretty_print(0);
1498                format!("cset {rd}, {cond}")
1499            }
1500            &Inst::CSetm { rd, cond } => {
1501                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1502                let cond = cond.pretty_print(0);
1503                format!("csetm {rd}, {cond}")
1504            }
1505            &Inst::CCmp {
1506                size,
1507                rn,
1508                rm,
1509                nzcv,
1510                cond,
1511            } => {
1512                let rn = pretty_print_ireg(rn, size);
1513                let rm = pretty_print_ireg(rm, size);
1514                let nzcv = nzcv.pretty_print(0);
1515                let cond = cond.pretty_print(0);
1516                format!("ccmp {rn}, {rm}, {nzcv}, {cond}")
1517            }
1518            &Inst::CCmpImm {
1519                size,
1520                rn,
1521                imm,
1522                nzcv,
1523                cond,
1524            } => {
1525                let rn = pretty_print_ireg(rn, size);
1526                let imm = imm.pretty_print(0);
1527                let nzcv = nzcv.pretty_print(0);
1528                let cond = cond.pretty_print(0);
1529                format!("ccmp {rn}, {imm}, {nzcv}, {cond}")
1530            }
1531            &Inst::AtomicRMW {
1532                rs, rt, rn, ty, op, ..
1533            } => {
1534                let op = match op {
1535                    AtomicRMWOp::Add => "ldaddal",
1536                    AtomicRMWOp::Clr => "ldclral",
1537                    AtomicRMWOp::Eor => "ldeoral",
1538                    AtomicRMWOp::Set => "ldsetal",
1539                    AtomicRMWOp::Smax => "ldsmaxal",
1540                    AtomicRMWOp::Umax => "ldumaxal",
1541                    AtomicRMWOp::Smin => "ldsminal",
1542                    AtomicRMWOp::Umin => "lduminal",
1543                    AtomicRMWOp::Swp => "swpal",
1544                };
1545
1546                let size = OperandSize::from_ty(ty);
1547                let rs = pretty_print_ireg(rs, size);
1548                let rt = pretty_print_ireg(rt.to_reg(), size);
1549                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1550
1551                let ty_suffix = match ty {
1552                    I8 => "b",
1553                    I16 => "h",
1554                    _ => "",
1555                };
1556                format!("{op}{ty_suffix} {rs}, {rt}, [{rn}]")
1557            }
1558            &Inst::AtomicRMWLoop {
1559                ty,
1560                op,
1561                addr,
1562                operand,
1563                oldval,
1564                scratch1,
1565                scratch2,
1566                ..
1567            } => {
1568                let op = match op {
1569                    AtomicRMWLoopOp::Add => "add",
1570                    AtomicRMWLoopOp::Sub => "sub",
1571                    AtomicRMWLoopOp::Eor => "eor",
1572                    AtomicRMWLoopOp::Orr => "orr",
1573                    AtomicRMWLoopOp::And => "and",
1574                    AtomicRMWLoopOp::Nand => "nand",
1575                    AtomicRMWLoopOp::Smin => "smin",
1576                    AtomicRMWLoopOp::Smax => "smax",
1577                    AtomicRMWLoopOp::Umin => "umin",
1578                    AtomicRMWLoopOp::Umax => "umax",
1579                    AtomicRMWLoopOp::Xchg => "xchg",
1580                };
1581                let addr = pretty_print_ireg(addr, OperandSize::Size64);
1582                let operand = pretty_print_ireg(operand, OperandSize::Size64);
1583                let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
1584                let scratch1 = pretty_print_ireg(scratch1.to_reg(), OperandSize::Size64);
1585                let scratch2 = pretty_print_ireg(scratch2.to_reg(), OperandSize::Size64);
1586                format!(
1587                    "atomic_rmw_loop_{}_{} addr={} operand={} oldval={} scratch1={} scratch2={}",
1588                    op,
1589                    ty.bits(),
1590                    addr,
1591                    operand,
1592                    oldval,
1593                    scratch1,
1594                    scratch2,
1595                )
1596            }
1597            &Inst::AtomicCAS {
1598                rd, rs, rt, rn, ty, ..
1599            } => {
1600                let op = match ty {
1601                    I8 => "casalb",
1602                    I16 => "casalh",
1603                    I32 | I64 => "casal",
1604                    _ => panic!("Unsupported type: {ty}"),
1605                };
1606                let size = OperandSize::from_ty(ty);
1607                let rd = pretty_print_ireg(rd.to_reg(), size);
1608                let rs = pretty_print_ireg(rs, size);
1609                let rt = pretty_print_ireg(rt, size);
1610                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1611
1612                format!("{op} {rd}, {rs}, {rt}, [{rn}]")
1613            }
1614            &Inst::AtomicCASLoop {
1615                ty,
1616                addr,
1617                expected,
1618                replacement,
1619                oldval,
1620                scratch,
1621                ..
1622            } => {
1623                let addr = pretty_print_ireg(addr, OperandSize::Size64);
1624                let expected = pretty_print_ireg(expected, OperandSize::Size64);
1625                let replacement = pretty_print_ireg(replacement, OperandSize::Size64);
1626                let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
1627                let scratch = pretty_print_ireg(scratch.to_reg(), OperandSize::Size64);
1628                format!(
1629                    "atomic_cas_loop_{} addr={}, expect={}, replacement={}, oldval={}, scratch={}",
1630                    ty.bits(),
1631                    addr,
1632                    expected,
1633                    replacement,
1634                    oldval,
1635                    scratch,
1636                )
1637            }
1638            &Inst::LoadAcquire {
1639                access_ty, rt, rn, ..
1640            } => {
1641                let (op, ty) = match access_ty {
1642                    I8 => ("ldarb", I32),
1643                    I16 => ("ldarh", I32),
1644                    I32 => ("ldar", I32),
1645                    I64 => ("ldar", I64),
1646                    _ => panic!("Unsupported type: {access_ty}"),
1647                };
1648                let size = OperandSize::from_ty(ty);
1649                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1650                let rt = pretty_print_ireg(rt.to_reg(), size);
1651                format!("{op} {rt}, [{rn}]")
1652            }
1653            &Inst::StoreRelease {
1654                access_ty, rt, rn, ..
1655            } => {
1656                let (op, ty) = match access_ty {
1657                    I8 => ("stlrb", I32),
1658                    I16 => ("stlrh", I32),
1659                    I32 => ("stlr", I32),
1660                    I64 => ("stlr", I64),
1661                    _ => panic!("Unsupported type: {access_ty}"),
1662                };
1663                let size = OperandSize::from_ty(ty);
1664                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1665                let rt = pretty_print_ireg(rt, size);
1666                format!("{op} {rt}, [{rn}]")
1667            }
1668            &Inst::Fence {} => {
1669                format!("dmb ish")
1670            }
1671            &Inst::Csdb {} => {
1672                format!("csdb")
1673            }
1674            &Inst::FpuMove32 { rd, rn } => {
1675                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1676                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
1677                format!("fmov {rd}, {rn}")
1678            }
1679            &Inst::FpuMove64 { rd, rn } => {
1680                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1681                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
1682                format!("fmov {rd}, {rn}")
1683            }
1684            &Inst::FpuMove128 { rd, rn } => {
1685                let rd = pretty_print_reg(rd.to_reg());
1686                let rn = pretty_print_reg(rn);
1687                format!("mov {rd}.16b, {rn}.16b")
1688            }
1689            &Inst::FpuMoveFromVec { rd, rn, idx, size } => {
1690                let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
1691                let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
1692                format!("mov {rd}, {rn}")
1693            }
1694            &Inst::FpuExtend { rd, rn, size } => {
1695                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1696                let rn = pretty_print_vreg_scalar(rn, size);
1697                format!("fmov {rd}, {rn}")
1698            }
1699            &Inst::FpuRR {
1700                fpu_op,
1701                size,
1702                rd,
1703                rn,
1704            } => {
1705                let op = match fpu_op {
1706                    FPUOp1::Abs => "fabs",
1707                    FPUOp1::Neg => "fneg",
1708                    FPUOp1::Sqrt => "fsqrt",
1709                    FPUOp1::Cvt32To64 | FPUOp1::Cvt64To32 => "fcvt",
1710                };
1711                let dst_size = match fpu_op {
1712                    FPUOp1::Cvt32To64 => ScalarSize::Size64,
1713                    FPUOp1::Cvt64To32 => ScalarSize::Size32,
1714                    _ => size,
1715                };
1716                let rd = pretty_print_vreg_scalar(rd.to_reg(), dst_size);
1717                let rn = pretty_print_vreg_scalar(rn, size);
1718                format!("{op} {rd}, {rn}")
1719            }
1720            &Inst::FpuRRR {
1721                fpu_op,
1722                size,
1723                rd,
1724                rn,
1725                rm,
1726            } => {
1727                let op = match fpu_op {
1728                    FPUOp2::Add => "fadd",
1729                    FPUOp2::Sub => "fsub",
1730                    FPUOp2::Mul => "fmul",
1731                    FPUOp2::Div => "fdiv",
1732                    FPUOp2::Max => "fmax",
1733                    FPUOp2::Min => "fmin",
1734                };
1735                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1736                let rn = pretty_print_vreg_scalar(rn, size);
1737                let rm = pretty_print_vreg_scalar(rm, size);
1738                format!("{op} {rd}, {rn}, {rm}")
1739            }
1740            &Inst::FpuRRI { fpu_op, rd, rn } => {
1741                let (op, imm, vector) = match fpu_op {
1742                    FPUOpRI::UShr32(imm) => ("ushr", imm.pretty_print(0), true),
1743                    FPUOpRI::UShr64(imm) => ("ushr", imm.pretty_print(0), false),
1744                };
1745
1746                let (rd, rn) = if vector {
1747                    (
1748                        pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
1749                        pretty_print_vreg_vector(rn, VectorSize::Size32x2),
1750                    )
1751                } else {
1752                    (
1753                        pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
1754                        pretty_print_vreg_scalar(rn, ScalarSize::Size64),
1755                    )
1756                };
1757                format!("{op} {rd}, {rn}, {imm}")
1758            }
1759            &Inst::FpuRRIMod { fpu_op, rd, ri, rn } => {
1760                let (op, imm, vector) = match fpu_op {
1761                    FPUOpRIMod::Sli32(imm) => ("sli", imm.pretty_print(0), true),
1762                    FPUOpRIMod::Sli64(imm) => ("sli", imm.pretty_print(0), false),
1763                };
1764
1765                let (rd, ri, rn) = if vector {
1766                    (
1767                        pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
1768                        pretty_print_vreg_vector(ri, VectorSize::Size32x2),
1769                        pretty_print_vreg_vector(rn, VectorSize::Size32x2),
1770                    )
1771                } else {
1772                    (
1773                        pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
1774                        pretty_print_vreg_scalar(ri, ScalarSize::Size64),
1775                        pretty_print_vreg_scalar(rn, ScalarSize::Size64),
1776                    )
1777                };
1778                format!("{op} {rd}, {ri}, {rn}, {imm}")
1779            }
1780            &Inst::FpuRRRR {
1781                fpu_op,
1782                size,
1783                rd,
1784                rn,
1785                rm,
1786                ra,
1787            } => {
1788                let op = match fpu_op {
1789                    FPUOp3::MAdd => "fmadd",
1790                    FPUOp3::MSub => "fmsub",
1791                    FPUOp3::NMAdd => "fnmadd",
1792                    FPUOp3::NMSub => "fnmsub",
1793                };
1794                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1795                let rn = pretty_print_vreg_scalar(rn, size);
1796                let rm = pretty_print_vreg_scalar(rm, size);
1797                let ra = pretty_print_vreg_scalar(ra, size);
1798                format!("{op} {rd}, {rn}, {rm}, {ra}")
1799            }
1800            &Inst::FpuCmp { size, rn, rm } => {
1801                let rn = pretty_print_vreg_scalar(rn, size);
1802                let rm = pretty_print_vreg_scalar(rm, size);
1803                format!("fcmp {rn}, {rm}")
1804            }
1805            &Inst::FpuLoad16 { rd, ref mem, .. } => {
1806                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
1807                let mem = mem.clone();
1808                let access_ty = self.mem_type().unwrap();
1809                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1810                format!("{mem_str}ldr {rd}, {mem}")
1811            }
1812            &Inst::FpuLoad32 { rd, ref mem, .. } => {
1813                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1814                let mem = mem.clone();
1815                let access_ty = self.mem_type().unwrap();
1816                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1817                format!("{mem_str}ldr {rd}, {mem}")
1818            }
1819            &Inst::FpuLoad64 { rd, ref mem, .. } => {
1820                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1821                let mem = mem.clone();
1822                let access_ty = self.mem_type().unwrap();
1823                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1824                format!("{mem_str}ldr {rd}, {mem}")
1825            }
1826            &Inst::FpuLoad128 { rd, ref mem, .. } => {
1827                let rd = pretty_print_reg(rd.to_reg());
1828                let rd = "q".to_string() + &rd[1..];
1829                let mem = mem.clone();
1830                let access_ty = self.mem_type().unwrap();
1831                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1832                format!("{mem_str}ldr {rd}, {mem}")
1833            }
1834            &Inst::FpuStore16 { rd, ref mem, .. } => {
1835                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size16);
1836                let mem = mem.clone();
1837                let access_ty = self.mem_type().unwrap();
1838                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1839                format!("{mem_str}str {rd}, {mem}")
1840            }
1841            &Inst::FpuStore32 { rd, ref mem, .. } => {
1842                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size32);
1843                let mem = mem.clone();
1844                let access_ty = self.mem_type().unwrap();
1845                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1846                format!("{mem_str}str {rd}, {mem}")
1847            }
1848            &Inst::FpuStore64 { rd, ref mem, .. } => {
1849                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size64);
1850                let mem = mem.clone();
1851                let access_ty = self.mem_type().unwrap();
1852                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1853                format!("{mem_str}str {rd}, {mem}")
1854            }
1855            &Inst::FpuStore128 { rd, ref mem, .. } => {
1856                let rd = pretty_print_reg(rd);
1857                let rd = "q".to_string() + &rd[1..];
1858                let mem = mem.clone();
1859                let access_ty = self.mem_type().unwrap();
1860                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1861                format!("{mem_str}str {rd}, {mem}")
1862            }
1863            &Inst::FpuLoadP64 {
1864                rt, rt2, ref mem, ..
1865            } => {
1866                let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size64);
1867                let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size64);
1868                let mem = mem.clone();
1869                let mem = mem.pretty_print_default();
1870
1871                format!("ldp {rt}, {rt2}, {mem}")
1872            }
1873            &Inst::FpuStoreP64 {
1874                rt, rt2, ref mem, ..
1875            } => {
1876                let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size64);
1877                let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size64);
1878                let mem = mem.clone();
1879                let mem = mem.pretty_print_default();
1880
1881                format!("stp {rt}, {rt2}, {mem}")
1882            }
1883            &Inst::FpuLoadP128 {
1884                rt, rt2, ref mem, ..
1885            } => {
1886                let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size128);
1887                let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size128);
1888                let mem = mem.clone();
1889                let mem = mem.pretty_print_default();
1890
1891                format!("ldp {rt}, {rt2}, {mem}")
1892            }
1893            &Inst::FpuStoreP128 {
1894                rt, rt2, ref mem, ..
1895            } => {
1896                let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size128);
1897                let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size128);
1898                let mem = mem.clone();
1899                let mem = mem.pretty_print_default();
1900
1901                format!("stp {rt}, {rt2}, {mem}")
1902            }
1903            &Inst::FpuToInt { op, rd, rn } => {
1904                let (op, sizesrc, sizedest) = match op {
1905                    FpuToIntOp::F32ToI32 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size32),
1906                    FpuToIntOp::F32ToU32 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size32),
1907                    FpuToIntOp::F32ToI64 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size64),
1908                    FpuToIntOp::F32ToU64 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size64),
1909                    FpuToIntOp::F64ToI32 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size32),
1910                    FpuToIntOp::F64ToU32 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size32),
1911                    FpuToIntOp::F64ToI64 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size64),
1912                    FpuToIntOp::F64ToU64 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size64),
1913                };
1914                let rd = pretty_print_ireg(rd.to_reg(), sizedest);
1915                let rn = pretty_print_vreg_scalar(rn, sizesrc);
1916                format!("{op} {rd}, {rn}")
1917            }
1918            &Inst::IntToFpu { op, rd, rn } => {
1919                let (op, sizesrc, sizedest) = match op {
1920                    IntToFpuOp::I32ToF32 => ("scvtf", OperandSize::Size32, ScalarSize::Size32),
1921                    IntToFpuOp::U32ToF32 => ("ucvtf", OperandSize::Size32, ScalarSize::Size32),
1922                    IntToFpuOp::I64ToF32 => ("scvtf", OperandSize::Size64, ScalarSize::Size32),
1923                    IntToFpuOp::U64ToF32 => ("ucvtf", OperandSize::Size64, ScalarSize::Size32),
1924                    IntToFpuOp::I32ToF64 => ("scvtf", OperandSize::Size32, ScalarSize::Size64),
1925                    IntToFpuOp::U32ToF64 => ("ucvtf", OperandSize::Size32, ScalarSize::Size64),
1926                    IntToFpuOp::I64ToF64 => ("scvtf", OperandSize::Size64, ScalarSize::Size64),
1927                    IntToFpuOp::U64ToF64 => ("ucvtf", OperandSize::Size64, ScalarSize::Size64),
1928                };
1929                let rd = pretty_print_vreg_scalar(rd.to_reg(), sizedest);
1930                let rn = pretty_print_ireg(rn, sizesrc);
1931                format!("{op} {rd}, {rn}")
1932            }
1933            &Inst::FpuCSel16 { rd, rn, rm, cond } => {
1934                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
1935                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size16);
1936                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size16);
1937                let cond = cond.pretty_print(0);
1938                format!("fcsel {rd}, {rn}, {rm}, {cond}")
1939            }
1940            &Inst::FpuCSel32 { rd, rn, rm, cond } => {
1941                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1942                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
1943                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size32);
1944                let cond = cond.pretty_print(0);
1945                format!("fcsel {rd}, {rn}, {rm}, {cond}")
1946            }
1947            &Inst::FpuCSel64 { rd, rn, rm, cond } => {
1948                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1949                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
1950                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size64);
1951                let cond = cond.pretty_print(0);
1952                format!("fcsel {rd}, {rn}, {rm}, {cond}")
1953            }
1954            &Inst::FpuRound { op, rd, rn } => {
1955                let (inst, size) = match op {
1956                    FpuRoundMode::Minus32 => ("frintm", ScalarSize::Size32),
1957                    FpuRoundMode::Minus64 => ("frintm", ScalarSize::Size64),
1958                    FpuRoundMode::Plus32 => ("frintp", ScalarSize::Size32),
1959                    FpuRoundMode::Plus64 => ("frintp", ScalarSize::Size64),
1960                    FpuRoundMode::Zero32 => ("frintz", ScalarSize::Size32),
1961                    FpuRoundMode::Zero64 => ("frintz", ScalarSize::Size64),
1962                    FpuRoundMode::Nearest32 => ("frintn", ScalarSize::Size32),
1963                    FpuRoundMode::Nearest64 => ("frintn", ScalarSize::Size64),
1964                };
1965                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1966                let rn = pretty_print_vreg_scalar(rn, size);
1967                format!("{inst} {rd}, {rn}")
1968            }
1969            &Inst::MovToFpu { rd, rn, size } => {
1970                let operand_size = size.operand_size();
1971                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1972                let rn = pretty_print_ireg(rn, operand_size);
1973                format!("fmov {rd}, {rn}")
1974            }
1975            &Inst::FpuMoveFPImm { rd, imm, size } => {
1976                let imm = imm.pretty_print(0);
1977                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1978
1979                format!("fmov {rd}, {imm}")
1980            }
1981            &Inst::MovToVec {
1982                rd,
1983                ri,
1984                rn,
1985                idx,
1986                size,
1987            } => {
1988                let rd = pretty_print_vreg_element(rd.to_reg(), idx as usize, size.lane_size());
1989                let ri = pretty_print_vreg_element(ri, idx as usize, size.lane_size());
1990                let rn = pretty_print_ireg(rn, size.operand_size());
1991                format!("mov {rd}, {ri}, {rn}")
1992            }
1993            &Inst::MovFromVec { rd, rn, idx, size } => {
1994                let op = match size {
1995                    ScalarSize::Size8 => "umov",
1996                    ScalarSize::Size16 => "umov",
1997                    ScalarSize::Size32 => "mov",
1998                    ScalarSize::Size64 => "mov",
1999                    _ => unimplemented!(),
2000                };
2001                let rd = pretty_print_ireg(rd.to_reg(), size.operand_size());
2002                let rn = pretty_print_vreg_element(rn, idx as usize, size);
2003                format!("{op} {rd}, {rn}")
2004            }
2005            &Inst::MovFromVecSigned {
2006                rd,
2007                rn,
2008                idx,
2009                size,
2010                scalar_size,
2011            } => {
2012                let rd = pretty_print_ireg(rd.to_reg(), scalar_size);
2013                let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
2014                format!("smov {rd}, {rn}")
2015            }
2016            &Inst::VecDup { rd, rn, size } => {
2017                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2018                let rn = pretty_print_ireg(rn, size.operand_size());
2019                format!("dup {rd}, {rn}")
2020            }
2021            &Inst::VecDupFromFpu { rd, rn, size, lane } => {
2022                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2023                let rn = pretty_print_vreg_element(rn, lane.into(), size.lane_size());
2024                format!("dup {rd}, {rn}")
2025            }
2026            &Inst::VecDupFPImm { rd, imm, size } => {
2027                let imm = imm.pretty_print(0);
2028                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2029
2030                format!("fmov {rd}, {imm}")
2031            }
2032            &Inst::VecDupImm {
2033                rd,
2034                imm,
2035                invert,
2036                size,
2037            } => {
2038                let imm = imm.pretty_print(0);
2039                let op = if invert { "mvni" } else { "movi" };
2040                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2041
2042                format!("{op} {rd}, {imm}")
2043            }
2044            &Inst::VecExtend {
2045                t,
2046                rd,
2047                rn,
2048                high_half,
2049                lane_size,
2050            } => {
2051                let vec64 = VectorSize::from_lane_size(lane_size.narrow(), false);
2052                let vec128 = VectorSize::from_lane_size(lane_size.narrow(), true);
2053                let rd_size = VectorSize::from_lane_size(lane_size, true);
2054                let (op, rn_size) = match (t, high_half) {
2055                    (VecExtendOp::Sxtl, false) => ("sxtl", vec64),
2056                    (VecExtendOp::Sxtl, true) => ("sxtl2", vec128),
2057                    (VecExtendOp::Uxtl, false) => ("uxtl", vec64),
2058                    (VecExtendOp::Uxtl, true) => ("uxtl2", vec128),
2059                };
2060                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2061                let rn = pretty_print_vreg_vector(rn, rn_size);
2062                format!("{op} {rd}, {rn}")
2063            }
2064            &Inst::VecMovElement {
2065                rd,
2066                ri,
2067                rn,
2068                dest_idx,
2069                src_idx,
2070                size,
2071            } => {
2072                let rd =
2073                    pretty_print_vreg_element(rd.to_reg(), dest_idx as usize, size.lane_size());
2074                let ri = pretty_print_vreg_element(ri, dest_idx as usize, size.lane_size());
2075                let rn = pretty_print_vreg_element(rn, src_idx as usize, size.lane_size());
2076                format!("mov {rd}, {ri}, {rn}")
2077            }
2078            &Inst::VecRRLong {
2079                op,
2080                rd,
2081                rn,
2082                high_half,
2083            } => {
2084                let (op, rd_size, size, suffix) = match (op, high_half) {
2085                    (VecRRLongOp::Fcvtl16, false) => {
2086                        ("fcvtl", VectorSize::Size32x4, VectorSize::Size16x4, "")
2087                    }
2088                    (VecRRLongOp::Fcvtl16, true) => {
2089                        ("fcvtl2", VectorSize::Size32x4, VectorSize::Size16x8, "")
2090                    }
2091                    (VecRRLongOp::Fcvtl32, false) => {
2092                        ("fcvtl", VectorSize::Size64x2, VectorSize::Size32x2, "")
2093                    }
2094                    (VecRRLongOp::Fcvtl32, true) => {
2095                        ("fcvtl2", VectorSize::Size64x2, VectorSize::Size32x4, "")
2096                    }
2097                    (VecRRLongOp::Shll8, false) => {
2098                        ("shll", VectorSize::Size16x8, VectorSize::Size8x8, ", #8")
2099                    }
2100                    (VecRRLongOp::Shll8, true) => {
2101                        ("shll2", VectorSize::Size16x8, VectorSize::Size8x16, ", #8")
2102                    }
2103                    (VecRRLongOp::Shll16, false) => {
2104                        ("shll", VectorSize::Size32x4, VectorSize::Size16x4, ", #16")
2105                    }
2106                    (VecRRLongOp::Shll16, true) => {
2107                        ("shll2", VectorSize::Size32x4, VectorSize::Size16x8, ", #16")
2108                    }
2109                    (VecRRLongOp::Shll32, false) => {
2110                        ("shll", VectorSize::Size64x2, VectorSize::Size32x2, ", #32")
2111                    }
2112                    (VecRRLongOp::Shll32, true) => {
2113                        ("shll2", VectorSize::Size64x2, VectorSize::Size32x4, ", #32")
2114                    }
2115                };
2116                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2117                let rn = pretty_print_vreg_vector(rn, size);
2118
2119                format!("{op} {rd}, {rn}{suffix}")
2120            }
2121            &Inst::VecRRNarrowLow {
2122                op,
2123                rd,
2124                rn,
2125                lane_size,
2126                ..
2127            }
2128            | &Inst::VecRRNarrowHigh {
2129                op,
2130                rd,
2131                rn,
2132                lane_size,
2133                ..
2134            } => {
2135                let vec64 = VectorSize::from_lane_size(lane_size, false);
2136                let vec128 = VectorSize::from_lane_size(lane_size, true);
2137                let rn_size = VectorSize::from_lane_size(lane_size.widen(), true);
2138                let high_half = match self {
2139                    &Inst::VecRRNarrowLow { .. } => false,
2140                    &Inst::VecRRNarrowHigh { .. } => true,
2141                    _ => unreachable!(),
2142                };
2143                let (op, rd_size) = match (op, high_half) {
2144                    (VecRRNarrowOp::Xtn, false) => ("xtn", vec64),
2145                    (VecRRNarrowOp::Xtn, true) => ("xtn2", vec128),
2146                    (VecRRNarrowOp::Sqxtn, false) => ("sqxtn", vec64),
2147                    (VecRRNarrowOp::Sqxtn, true) => ("sqxtn2", vec128),
2148                    (VecRRNarrowOp::Sqxtun, false) => ("sqxtun", vec64),
2149                    (VecRRNarrowOp::Sqxtun, true) => ("sqxtun2", vec128),
2150                    (VecRRNarrowOp::Uqxtn, false) => ("uqxtn", vec64),
2151                    (VecRRNarrowOp::Uqxtn, true) => ("uqxtn2", vec128),
2152                    (VecRRNarrowOp::Fcvtn, false) => ("fcvtn", vec64),
2153                    (VecRRNarrowOp::Fcvtn, true) => ("fcvtn2", vec128),
2154                };
2155                let rn = pretty_print_vreg_vector(rn, rn_size);
2156                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2157                let ri = match self {
2158                    &Inst::VecRRNarrowLow { .. } => "".to_string(),
2159                    &Inst::VecRRNarrowHigh { ri, .. } => {
2160                        format!("{}, ", pretty_print_vreg_vector(ri, rd_size))
2161                    }
2162                    _ => unreachable!(),
2163                };
2164
2165                format!("{op} {rd}, {ri}{rn}")
2166            }
2167            &Inst::VecRRPair { op, rd, rn } => {
2168                let op = match op {
2169                    VecPairOp::Addp => "addp",
2170                };
2171                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
2172                let rn = pretty_print_vreg_vector(rn, VectorSize::Size64x2);
2173
2174                format!("{op} {rd}, {rn}")
2175            }
2176            &Inst::VecRRPairLong { op, rd, rn } => {
2177                let (op, dest, src) = match op {
2178                    VecRRPairLongOp::Saddlp8 => {
2179                        ("saddlp", VectorSize::Size16x8, VectorSize::Size8x16)
2180                    }
2181                    VecRRPairLongOp::Saddlp16 => {
2182                        ("saddlp", VectorSize::Size32x4, VectorSize::Size16x8)
2183                    }
2184                    VecRRPairLongOp::Uaddlp8 => {
2185                        ("uaddlp", VectorSize::Size16x8, VectorSize::Size8x16)
2186                    }
2187                    VecRRPairLongOp::Uaddlp16 => {
2188                        ("uaddlp", VectorSize::Size32x4, VectorSize::Size16x8)
2189                    }
2190                };
2191                let rd = pretty_print_vreg_vector(rd.to_reg(), dest);
2192                let rn = pretty_print_vreg_vector(rn, src);
2193
2194                format!("{op} {rd}, {rn}")
2195            }
2196            &Inst::VecRRR {
2197                rd,
2198                rn,
2199                rm,
2200                alu_op,
2201                size,
2202            } => {
2203                let (op, size) = match alu_op {
2204                    VecALUOp::Sqadd => ("sqadd", size),
2205                    VecALUOp::Uqadd => ("uqadd", size),
2206                    VecALUOp::Sqsub => ("sqsub", size),
2207                    VecALUOp::Uqsub => ("uqsub", size),
2208                    VecALUOp::Cmeq => ("cmeq", size),
2209                    VecALUOp::Cmge => ("cmge", size),
2210                    VecALUOp::Cmgt => ("cmgt", size),
2211                    VecALUOp::Cmhs => ("cmhs", size),
2212                    VecALUOp::Cmhi => ("cmhi", size),
2213                    VecALUOp::Fcmeq => ("fcmeq", size),
2214                    VecALUOp::Fcmgt => ("fcmgt", size),
2215                    VecALUOp::Fcmge => ("fcmge", size),
2216                    VecALUOp::And => ("and", VectorSize::Size8x16),
2217                    VecALUOp::Bic => ("bic", VectorSize::Size8x16),
2218                    VecALUOp::Orr => ("orr", VectorSize::Size8x16),
2219                    VecALUOp::Eor => ("eor", VectorSize::Size8x16),
2220                    VecALUOp::Umaxp => ("umaxp", size),
2221                    VecALUOp::Add => ("add", size),
2222                    VecALUOp::Sub => ("sub", size),
2223                    VecALUOp::Mul => ("mul", size),
2224                    VecALUOp::Sshl => ("sshl", size),
2225                    VecALUOp::Ushl => ("ushl", size),
2226                    VecALUOp::Umin => ("umin", size),
2227                    VecALUOp::Smin => ("smin", size),
2228                    VecALUOp::Umax => ("umax", size),
2229                    VecALUOp::Smax => ("smax", size),
2230                    VecALUOp::Urhadd => ("urhadd", size),
2231                    VecALUOp::Fadd => ("fadd", size),
2232                    VecALUOp::Fsub => ("fsub", size),
2233                    VecALUOp::Fdiv => ("fdiv", size),
2234                    VecALUOp::Fmax => ("fmax", size),
2235                    VecALUOp::Fmin => ("fmin", size),
2236                    VecALUOp::Fmul => ("fmul", size),
2237                    VecALUOp::Addp => ("addp", size),
2238                    VecALUOp::Zip1 => ("zip1", size),
2239                    VecALUOp::Zip2 => ("zip2", size),
2240                    VecALUOp::Sqrdmulh => ("sqrdmulh", size),
2241                    VecALUOp::Uzp1 => ("uzp1", size),
2242                    VecALUOp::Uzp2 => ("uzp2", size),
2243                    VecALUOp::Trn1 => ("trn1", size),
2244                    VecALUOp::Trn2 => ("trn2", size),
2245                };
2246                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2247                let rn = pretty_print_vreg_vector(rn, size);
2248                let rm = pretty_print_vreg_vector(rm, size);
2249                format!("{op} {rd}, {rn}, {rm}")
2250            }
2251            &Inst::VecRRRMod {
2252                rd,
2253                ri,
2254                rn,
2255                rm,
2256                alu_op,
2257                size,
2258            } => {
2259                let (op, size) = match alu_op {
2260                    VecALUModOp::Bsl => ("bsl", VectorSize::Size8x16),
2261                    VecALUModOp::Fmla => ("fmla", size),
2262                    VecALUModOp::Fmls => ("fmls", size),
2263                };
2264                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2265                let ri = pretty_print_vreg_vector(ri, size);
2266                let rn = pretty_print_vreg_vector(rn, size);
2267                let rm = pretty_print_vreg_vector(rm, size);
2268                format!("{op} {rd}, {ri}, {rn}, {rm}")
2269            }
2270            &Inst::VecFmlaElem {
2271                rd,
2272                ri,
2273                rn,
2274                rm,
2275                alu_op,
2276                size,
2277                idx,
2278            } => {
2279                let (op, size) = match alu_op {
2280                    VecALUModOp::Fmla => ("fmla", size),
2281                    VecALUModOp::Fmls => ("fmls", size),
2282                    _ => unreachable!(),
2283                };
2284                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2285                let ri = pretty_print_vreg_vector(ri, size);
2286                let rn = pretty_print_vreg_vector(rn, size);
2287                let rm = pretty_print_vreg_element(rm, idx.into(), size.lane_size());
2288                format!("{op} {rd}, {ri}, {rn}, {rm}")
2289            }
2290            &Inst::VecRRRLong {
2291                rd,
2292                rn,
2293                rm,
2294                alu_op,
2295                high_half,
2296            } => {
2297                let (op, dest_size, src_size) = match (alu_op, high_half) {
2298                    (VecRRRLongOp::Smull8, false) => {
2299                        ("smull", VectorSize::Size16x8, VectorSize::Size8x8)
2300                    }
2301                    (VecRRRLongOp::Smull8, true) => {
2302                        ("smull2", VectorSize::Size16x8, VectorSize::Size8x16)
2303                    }
2304                    (VecRRRLongOp::Smull16, false) => {
2305                        ("smull", VectorSize::Size32x4, VectorSize::Size16x4)
2306                    }
2307                    (VecRRRLongOp::Smull16, true) => {
2308                        ("smull2", VectorSize::Size32x4, VectorSize::Size16x8)
2309                    }
2310                    (VecRRRLongOp::Smull32, false) => {
2311                        ("smull", VectorSize::Size64x2, VectorSize::Size32x2)
2312                    }
2313                    (VecRRRLongOp::Smull32, true) => {
2314                        ("smull2", VectorSize::Size64x2, VectorSize::Size32x4)
2315                    }
2316                    (VecRRRLongOp::Umull8, false) => {
2317                        ("umull", VectorSize::Size16x8, VectorSize::Size8x8)
2318                    }
2319                    (VecRRRLongOp::Umull8, true) => {
2320                        ("umull2", VectorSize::Size16x8, VectorSize::Size8x16)
2321                    }
2322                    (VecRRRLongOp::Umull16, false) => {
2323                        ("umull", VectorSize::Size32x4, VectorSize::Size16x4)
2324                    }
2325                    (VecRRRLongOp::Umull16, true) => {
2326                        ("umull2", VectorSize::Size32x4, VectorSize::Size16x8)
2327                    }
2328                    (VecRRRLongOp::Umull32, false) => {
2329                        ("umull", VectorSize::Size64x2, VectorSize::Size32x2)
2330                    }
2331                    (VecRRRLongOp::Umull32, true) => {
2332                        ("umull2", VectorSize::Size64x2, VectorSize::Size32x4)
2333                    }
2334                };
2335                let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2336                let rn = pretty_print_vreg_vector(rn, src_size);
2337                let rm = pretty_print_vreg_vector(rm, src_size);
2338                format!("{op} {rd}, {rn}, {rm}")
2339            }
2340            &Inst::VecRRRLongMod {
2341                rd,
2342                ri,
2343                rn,
2344                rm,
2345                alu_op,
2346                high_half,
2347            } => {
2348                let (op, dest_size, src_size) = match (alu_op, high_half) {
2349                    (VecRRRLongModOp::Umlal8, false) => {
2350                        ("umlal", VectorSize::Size16x8, VectorSize::Size8x8)
2351                    }
2352                    (VecRRRLongModOp::Umlal8, true) => {
2353                        ("umlal2", VectorSize::Size16x8, VectorSize::Size8x16)
2354                    }
2355                    (VecRRRLongModOp::Umlal16, false) => {
2356                        ("umlal", VectorSize::Size32x4, VectorSize::Size16x4)
2357                    }
2358                    (VecRRRLongModOp::Umlal16, true) => {
2359                        ("umlal2", VectorSize::Size32x4, VectorSize::Size16x8)
2360                    }
2361                    (VecRRRLongModOp::Umlal32, false) => {
2362                        ("umlal", VectorSize::Size64x2, VectorSize::Size32x2)
2363                    }
2364                    (VecRRRLongModOp::Umlal32, true) => {
2365                        ("umlal2", VectorSize::Size64x2, VectorSize::Size32x4)
2366                    }
2367                };
2368                let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2369                let ri = pretty_print_vreg_vector(ri, dest_size);
2370                let rn = pretty_print_vreg_vector(rn, src_size);
2371                let rm = pretty_print_vreg_vector(rm, src_size);
2372                format!("{op} {rd}, {ri}, {rn}, {rm}")
2373            }
2374            &Inst::VecMisc { op, rd, rn, size } => {
2375                let (op, size, suffix) = match op {
2376                    VecMisc2::Not => (
2377                        "mvn",
2378                        if size.is_128bits() {
2379                            VectorSize::Size8x16
2380                        } else {
2381                            VectorSize::Size8x8
2382                        },
2383                        "",
2384                    ),
2385                    VecMisc2::Neg => ("neg", size, ""),
2386                    VecMisc2::Abs => ("abs", size, ""),
2387                    VecMisc2::Fabs => ("fabs", size, ""),
2388                    VecMisc2::Fneg => ("fneg", size, ""),
2389                    VecMisc2::Fsqrt => ("fsqrt", size, ""),
2390                    VecMisc2::Rev16 => ("rev16", size, ""),
2391                    VecMisc2::Rev32 => ("rev32", size, ""),
2392                    VecMisc2::Rev64 => ("rev64", size, ""),
2393                    VecMisc2::Fcvtzs => ("fcvtzs", size, ""),
2394                    VecMisc2::Fcvtzu => ("fcvtzu", size, ""),
2395                    VecMisc2::Scvtf => ("scvtf", size, ""),
2396                    VecMisc2::Ucvtf => ("ucvtf", size, ""),
2397                    VecMisc2::Frintn => ("frintn", size, ""),
2398                    VecMisc2::Frintz => ("frintz", size, ""),
2399                    VecMisc2::Frintm => ("frintm", size, ""),
2400                    VecMisc2::Frintp => ("frintp", size, ""),
2401                    VecMisc2::Cnt => ("cnt", size, ""),
2402                    VecMisc2::Cmeq0 => ("cmeq", size, ", #0"),
2403                    VecMisc2::Cmge0 => ("cmge", size, ", #0"),
2404                    VecMisc2::Cmgt0 => ("cmgt", size, ", #0"),
2405                    VecMisc2::Cmle0 => ("cmle", size, ", #0"),
2406                    VecMisc2::Cmlt0 => ("cmlt", size, ", #0"),
2407                    VecMisc2::Fcmeq0 => ("fcmeq", size, ", #0.0"),
2408                    VecMisc2::Fcmge0 => ("fcmge", size, ", #0.0"),
2409                    VecMisc2::Fcmgt0 => ("fcmgt", size, ", #0.0"),
2410                    VecMisc2::Fcmle0 => ("fcmle", size, ", #0.0"),
2411                    VecMisc2::Fcmlt0 => ("fcmlt", size, ", #0.0"),
2412                };
2413                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2414                let rn = pretty_print_vreg_vector(rn, size);
2415                format!("{op} {rd}, {rn}{suffix}")
2416            }
2417            &Inst::VecLanes { op, rd, rn, size } => {
2418                let op = match op {
2419                    VecLanesOp::Uminv => "uminv",
2420                    VecLanesOp::Addv => "addv",
2421                };
2422                let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
2423                let rn = pretty_print_vreg_vector(rn, size);
2424                format!("{op} {rd}, {rn}")
2425            }
2426            &Inst::VecShiftImm {
2427                op,
2428                rd,
2429                rn,
2430                size,
2431                imm,
2432            } => {
2433                let op = match op {
2434                    VecShiftImmOp::Shl => "shl",
2435                    VecShiftImmOp::Ushr => "ushr",
2436                    VecShiftImmOp::Sshr => "sshr",
2437                };
2438                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2439                let rn = pretty_print_vreg_vector(rn, size);
2440                format!("{op} {rd}, {rn}, #{imm}")
2441            }
2442            &Inst::VecShiftImmMod {
2443                op,
2444                rd,
2445                ri,
2446                rn,
2447                size,
2448                imm,
2449            } => {
2450                let op = match op {
2451                    VecShiftImmModOp::Sli => "sli",
2452                };
2453                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2454                let ri = pretty_print_vreg_vector(ri, size);
2455                let rn = pretty_print_vreg_vector(rn, size);
2456                format!("{op} {rd}, {ri}, {rn}, #{imm}")
2457            }
2458            &Inst::VecExtract { rd, rn, rm, imm4 } => {
2459                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2460                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2461                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2462                format!("ext {rd}, {rn}, {rm}, #{imm4}")
2463            }
2464            &Inst::VecTbl { rd, rn, rm } => {
2465                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2466                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2467                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2468                format!("tbl {rd}, {{ {rn} }}, {rm}")
2469            }
2470            &Inst::VecTblExt { rd, ri, rn, rm } => {
2471                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2472                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2473                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2474                let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2475                format!("tbx {rd}, {ri}, {{ {rn} }}, {rm}")
2476            }
2477            &Inst::VecTbl2 { rd, rn, rn2, rm } => {
2478                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2479                let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2480                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2481                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2482                format!("tbl {rd}, {{ {rn}, {rn2} }}, {rm}")
2483            }
2484            &Inst::VecTbl2Ext {
2485                rd,
2486                ri,
2487                rn,
2488                rn2,
2489                rm,
2490            } => {
2491                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2492                let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2493                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2494                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2495                let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2496                format!("tbx {rd}, {ri}, {{ {rn}, {rn2} }}, {rm}")
2497            }
2498            &Inst::VecLoadReplicate { rd, rn, size, .. } => {
2499                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2500                let rn = pretty_print_reg(rn);
2501
2502                format!("ld1r {{ {rd} }}, [{rn}]")
2503            }
2504            &Inst::VecCSel { rd, rn, rm, cond } => {
2505                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2506                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2507                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2508                let cond = cond.pretty_print(0);
2509                format!("vcsel {rd}, {rn}, {rm}, {cond} (if-then-else diamond)")
2510            }
2511            &Inst::MovToNZCV { rn } => {
2512                let rn = pretty_print_reg(rn);
2513                format!("msr nzcv, {rn}")
2514            }
2515            &Inst::MovFromNZCV { rd } => {
2516                let rd = pretty_print_reg(rd.to_reg());
2517                format!("mrs {rd}, nzcv")
2518            }
2519            &Inst::Extend {
2520                rd,
2521                rn,
2522                signed: false,
2523                from_bits: 1,
2524                ..
2525            } => {
2526                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2527                let rn = pretty_print_ireg(rn, OperandSize::Size32);
2528                format!("and {rd}, {rn}, #1")
2529            }
2530            &Inst::Extend {
2531                rd,
2532                rn,
2533                signed: false,
2534                from_bits: 32,
2535                to_bits: 64,
2536            } => {
2537                // The case of a zero extension from 32 to 64 bits, is implemented
2538                // with a "mov" to a 32-bit (W-reg) dest, because this zeroes
2539                // the top 32 bits.
2540                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2541                let rn = pretty_print_ireg(rn, OperandSize::Size32);
2542                format!("mov {rd}, {rn}")
2543            }
2544            &Inst::Extend {
2545                rd,
2546                rn,
2547                signed,
2548                from_bits,
2549                to_bits,
2550            } => {
2551                assert!(from_bits <= to_bits);
2552                let op = match (signed, from_bits) {
2553                    (false, 8) => "uxtb",
2554                    (true, 8) => "sxtb",
2555                    (false, 16) => "uxth",
2556                    (true, 16) => "sxth",
2557                    (true, 32) => "sxtw",
2558                    (true, _) => "sbfx",
2559                    (false, _) => "ubfx",
2560                };
2561                if op == "sbfx" || op == "ubfx" {
2562                    let dest_size = OperandSize::from_bits(to_bits);
2563                    let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2564                    let rn = pretty_print_ireg(rn, dest_size);
2565                    format!("{op} {rd}, {rn}, #0, #{from_bits}")
2566                } else {
2567                    let dest_size = if signed {
2568                        OperandSize::from_bits(to_bits)
2569                    } else {
2570                        OperandSize::Size32
2571                    };
2572                    let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2573                    let rn = pretty_print_ireg(rn, OperandSize::from_bits(from_bits));
2574                    format!("{op} {rd}, {rn}")
2575                }
2576            }
2577            &Inst::Call { ref info } => {
2578                let try_call = info
2579                    .try_call_info
2580                    .as_ref()
2581                    .map(|tci| pretty_print_try_call(tci))
2582                    .unwrap_or_default();
2583                format!("bl 0{try_call}")
2584            }
2585            &Inst::CallInd { ref info } => {
2586                let rn = pretty_print_reg(info.dest);
2587                let try_call = info
2588                    .try_call_info
2589                    .as_ref()
2590                    .map(|tci| pretty_print_try_call(tci))
2591                    .unwrap_or_default();
2592                format!("blr {rn}{try_call}")
2593            }
2594            &Inst::ReturnCall { ref info } => {
2595                let mut s = format!(
2596                    "return_call {:?} new_stack_arg_size:{}",
2597                    info.dest, info.new_stack_arg_size
2598                );
2599                for ret in &info.uses {
2600                    let preg = pretty_print_reg(ret.preg);
2601                    let vreg = pretty_print_reg(ret.vreg);
2602                    write!(&mut s, " {vreg}={preg}").unwrap();
2603                }
2604                s
2605            }
2606            &Inst::ReturnCallInd { ref info } => {
2607                let callee = pretty_print_reg(info.dest);
2608                let mut s = format!(
2609                    "return_call_ind {callee} new_stack_arg_size:{}",
2610                    info.new_stack_arg_size
2611                );
2612                for ret in &info.uses {
2613                    let preg = pretty_print_reg(ret.preg);
2614                    let vreg = pretty_print_reg(ret.vreg);
2615                    write!(&mut s, " {vreg}={preg}").unwrap();
2616                }
2617                s
2618            }
2619            &Inst::Args { ref args } => {
2620                let mut s = "args".to_string();
2621                for arg in args {
2622                    let preg = pretty_print_reg(arg.preg);
2623                    let def = pretty_print_reg(arg.vreg.to_reg());
2624                    write!(&mut s, " {def}={preg}").unwrap();
2625                }
2626                s
2627            }
2628            &Inst::Rets { ref rets } => {
2629                let mut s = "rets".to_string();
2630                for ret in rets {
2631                    let preg = pretty_print_reg(ret.preg);
2632                    let vreg = pretty_print_reg(ret.vreg);
2633                    write!(&mut s, " {vreg}={preg}").unwrap();
2634                }
2635                s
2636            }
2637            &Inst::Ret {} => "ret".to_string(),
2638            &Inst::AuthenticatedRet { key, is_hint } => {
2639                let key = match key {
2640                    APIKey::AZ => "az",
2641                    APIKey::BZ => "bz",
2642                    APIKey::ASP => "asp",
2643                    APIKey::BSP => "bsp",
2644                };
2645                match is_hint {
2646                    false => format!("reta{key}"),
2647                    true => format!("auti{key} ; ret"),
2648                }
2649            }
2650            &Inst::Jump { ref dest } => {
2651                let dest = dest.pretty_print(0);
2652                format!("b {dest}")
2653            }
2654            &Inst::CondBr {
2655                ref taken,
2656                ref not_taken,
2657                ref kind,
2658            } => {
2659                let taken = taken.pretty_print(0);
2660                let not_taken = not_taken.pretty_print(0);
2661                match kind {
2662                    &CondBrKind::Zero(reg, size) => {
2663                        let reg = pretty_print_reg_sized(reg, size);
2664                        format!("cbz {reg}, {taken} ; b {not_taken}")
2665                    }
2666                    &CondBrKind::NotZero(reg, size) => {
2667                        let reg = pretty_print_reg_sized(reg, size);
2668                        format!("cbnz {reg}, {taken} ; b {not_taken}")
2669                    }
2670                    &CondBrKind::Cond(c) => {
2671                        let c = c.pretty_print(0);
2672                        format!("b.{c} {taken} ; b {not_taken}")
2673                    }
2674                }
2675            }
2676            &Inst::TestBitAndBranch {
2677                kind,
2678                ref taken,
2679                ref not_taken,
2680                rn,
2681                bit,
2682            } => {
2683                let cond = match kind {
2684                    TestBitAndBranchKind::Z => "z",
2685                    TestBitAndBranchKind::NZ => "nz",
2686                };
2687                let taken = taken.pretty_print(0);
2688                let not_taken = not_taken.pretty_print(0);
2689                let rn = pretty_print_reg(rn);
2690                format!("tb{cond} {rn}, #{bit}, {taken} ; b {not_taken}")
2691            }
2692            &Inst::IndirectBr { rn, .. } => {
2693                let rn = pretty_print_reg(rn);
2694                format!("br {rn}")
2695            }
2696            &Inst::Brk => "brk #0xf000".to_string(),
2697            &Inst::Udf { .. } => "udf #0xc11f".to_string(),
2698            &Inst::TrapIf {
2699                ref kind,
2700                trap_code,
2701            } => match kind {
2702                &CondBrKind::Zero(reg, size) => {
2703                    let reg = pretty_print_reg_sized(reg, size);
2704                    format!("cbz {reg}, #trap={trap_code}")
2705                }
2706                &CondBrKind::NotZero(reg, size) => {
2707                    let reg = pretty_print_reg_sized(reg, size);
2708                    format!("cbnz {reg}, #trap={trap_code}")
2709                }
2710                &CondBrKind::Cond(c) => {
2711                    let c = c.pretty_print(0);
2712                    format!("b.{c} #trap={trap_code}")
2713                }
2714            },
2715            &Inst::Adr { rd, off } => {
2716                let rd = pretty_print_reg(rd.to_reg());
2717                format!("adr {rd}, pc+{off}")
2718            }
2719            &Inst::Adrp { rd, off } => {
2720                let rd = pretty_print_reg(rd.to_reg());
2721                // This instruction addresses 4KiB pages, so multiply it by the page size.
2722                let byte_offset = off * 4096;
2723                format!("adrp {rd}, pc+{byte_offset}")
2724            }
2725            &Inst::Word4 { data } => format!("data.i32 {data}"),
2726            &Inst::Word8 { data } => format!("data.i64 {data}"),
2727            &Inst::JTSequence {
2728                default,
2729                ref targets,
2730                ridx,
2731                rtmp1,
2732                rtmp2,
2733                ..
2734            } => {
2735                let ridx = pretty_print_reg(ridx);
2736                let rtmp1 = pretty_print_reg(rtmp1.to_reg());
2737                let rtmp2 = pretty_print_reg(rtmp2.to_reg());
2738                let default_target = BranchTarget::Label(default).pretty_print(0);
2739                format!(
2740                    concat!(
2741                        "b.hs {} ; ",
2742                        "csel {}, xzr, {}, hs ; ",
2743                        "csdb ; ",
2744                        "adr {}, pc+16 ; ",
2745                        "ldrsw {}, [{}, {}, uxtw #2] ; ",
2746                        "add {}, {}, {} ; ",
2747                        "br {} ; ",
2748                        "jt_entries {:?}"
2749                    ),
2750                    default_target,
2751                    rtmp2,
2752                    ridx,
2753                    rtmp1,
2754                    rtmp2,
2755                    rtmp1,
2756                    rtmp2,
2757                    rtmp1,
2758                    rtmp1,
2759                    rtmp2,
2760                    rtmp1,
2761                    targets
2762                )
2763            }
2764            &Inst::LoadExtNameGot { rd, ref name } => {
2765                let rd = pretty_print_reg(rd.to_reg());
2766                format!("load_ext_name_got {rd}, {name:?}")
2767            }
2768            &Inst::LoadExtNameNear {
2769                rd,
2770                ref name,
2771                offset,
2772            } => {
2773                let rd = pretty_print_reg(rd.to_reg());
2774                format!("load_ext_name_near {rd}, {name:?}+{offset}")
2775            }
2776            &Inst::LoadExtNameFar {
2777                rd,
2778                ref name,
2779                offset,
2780            } => {
2781                let rd = pretty_print_reg(rd.to_reg());
2782                format!("load_ext_name_far {rd}, {name:?}+{offset}")
2783            }
2784            &Inst::LoadAddr { rd, ref mem } => {
2785                // TODO: we really should find a better way to avoid duplication of
2786                // this logic between `emit()` and `show_rru()` -- a separate 1-to-N
2787                // expansion stage (i.e., legalization, but without the slow edit-in-place
2788                // of the existing legalization framework).
2789                let mem = mem.clone();
2790                let (mem_insts, mem) = mem_finalize(None, &mem, I8, state);
2791                let mut ret = String::new();
2792                for inst in mem_insts.into_iter() {
2793                    ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2794                }
2795                let (reg, index_reg, offset) = match mem {
2796                    AMode::RegExtended { rn, rm, extendop } => (rn, Some((rm, extendop)), 0),
2797                    AMode::Unscaled { rn, simm9 } => (rn, None, simm9.value()),
2798                    AMode::UnsignedOffset { rn, uimm12 } => (rn, None, uimm12.value() as i32),
2799                    _ => panic!("Unsupported case for LoadAddr: {mem:?}"),
2800                };
2801                let abs_offset = if offset < 0 {
2802                    -offset as u64
2803                } else {
2804                    offset as u64
2805                };
2806                let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };
2807
2808                if let Some((idx, extendop)) = index_reg {
2809                    let add = Inst::AluRRRExtend {
2810                        alu_op: ALUOp::Add,
2811                        size: OperandSize::Size64,
2812                        rd,
2813                        rn: reg,
2814                        rm: idx,
2815                        extendop,
2816                    };
2817
2818                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2819                } else if offset == 0 {
2820                    let mov = Inst::gen_move(rd, reg, I64);
2821                    ret.push_str(&mov.print_with_state(&mut EmitState::default()));
2822                } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
2823                    let add = Inst::AluRRImm12 {
2824                        alu_op,
2825                        size: OperandSize::Size64,
2826                        rd,
2827                        rn: reg,
2828                        imm12,
2829                    };
2830                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2831                } else {
2832                    let tmp = writable_spilltmp_reg();
2833                    for inst in Inst::load_constant(tmp, abs_offset).into_iter() {
2834                        ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2835                    }
2836                    let add = Inst::AluRRR {
2837                        alu_op,
2838                        size: OperandSize::Size64,
2839                        rd,
2840                        rn: reg,
2841                        rm: tmp.to_reg(),
2842                    };
2843                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2844                }
2845                ret
2846            }
2847            &Inst::Paci { key } => {
2848                let key = match key {
2849                    APIKey::AZ => "az",
2850                    APIKey::BZ => "bz",
2851                    APIKey::ASP => "asp",
2852                    APIKey::BSP => "bsp",
2853                };
2854
2855                "paci".to_string() + key
2856            }
2857            &Inst::Xpaclri => "xpaclri".to_string(),
2858            &Inst::Bti { targets } => {
2859                let targets = match targets {
2860                    BranchTargetType::None => "",
2861                    BranchTargetType::C => " c",
2862                    BranchTargetType::J => " j",
2863                    BranchTargetType::JC => " jc",
2864                };
2865
2866                "bti".to_string() + targets
2867            }
2868            &Inst::EmitIsland { needed_space } => format!("emit_island {needed_space}"),
2869
2870            &Inst::ElfTlsGetAddr {
2871                ref symbol,
2872                rd,
2873                tmp,
2874            } => {
2875                let rd = pretty_print_reg(rd.to_reg());
2876                let tmp = pretty_print_reg(tmp.to_reg());
2877                format!("elf_tls_get_addr {}, {}, {}", rd, tmp, symbol.display(None))
2878            }
2879            &Inst::MachOTlsGetAddr { ref symbol, rd } => {
2880                let rd = pretty_print_reg(rd.to_reg());
2881                format!("macho_tls_get_addr {}, {}", rd, symbol.display(None))
2882            }
2883            &Inst::Unwind { ref inst } => {
2884                format!("unwind {inst:?}")
2885            }
2886            &Inst::DummyUse { reg } => {
2887                let reg = pretty_print_reg(reg);
2888                format!("dummy_use {reg}")
2889            }
2890            &Inst::StackProbeLoop { start, end, step } => {
2891                let start = pretty_print_reg(start.to_reg());
2892                let end = pretty_print_reg(end);
2893                let step = step.pretty_print(0);
2894                format!("stack_probe_loop {start}, {end}, {step}")
2895            }
2896        }
2897    }
2898}
2899
2900//=============================================================================
2901// Label fixups and jump veneers.
2902
/// Different forms of label references for different instruction formats.
///
/// Each variant records how a PC-relative immediate is encoded in the 32-bit
/// instruction word (or, for `PCRel32`, a raw data word): how many signed bits
/// it has, where those bits live, and whether the hardware scales the
/// immediate (left-shift by 2) before use.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// 14-bit branch offset (conditional branches). PC-rel, offset is imm <<
    /// 2. Immediate is 14 signed bits, in bits 18:5. Used by tbz and tbnz.
    Branch14,
    /// 19-bit branch offset (conditional branches). PC-rel, offset is imm << 2. Immediate is 19
    /// signed bits, in bits 23:5. Used by cbz, cbnz, b.cond.
    Branch19,
    /// 26-bit branch offset (unconditional branches). PC-rel, offset is imm << 2. Immediate is 26
    /// signed bits, in bits 25:0. Used by b, bl.
    Branch26,
    /// 19-bit offset for LDR (load literal). PC-rel, offset is imm << 2. Immediate is 19 signed bits,
    /// in bits 23:5.
    Ldr19,
    /// 21-bit offset for ADR (get address of label). PC-rel, offset is not shifted. Immediate is
    /// 21 signed bits, with high 19 bits in bits 23:5 and low 2 bits in bits 30:29.
    Adr21,
    /// 32-bit PC relative constant offset (from address of constant itself),
    /// signed. Used in jump tables.
    PCRel32,
}
2925
2926impl MachInstLabelUse for LabelUse {
2927    /// Alignment for veneer code. Every AArch64 instruction must be 4-byte-aligned.
2928    const ALIGN: CodeOffset = 4;
2929
2930    /// Maximum PC-relative range (positive), inclusive.
2931    fn max_pos_range(self) -> CodeOffset {
2932        match self {
2933            // N-bit immediate, left-shifted by 2, for (N+2) bits of total
2934            // range. Signed, so +2^(N+1) from zero. Likewise for two other
2935            // shifted cases below.
2936            LabelUse::Branch14 => (1 << 15) - 1,
2937            LabelUse::Branch19 => (1 << 20) - 1,
2938            LabelUse::Branch26 => (1 << 27) - 1,
2939            LabelUse::Ldr19 => (1 << 20) - 1,
2940            // Adr does not shift its immediate, so the 21-bit immediate gives 21 bits of total
2941            // range.
2942            LabelUse::Adr21 => (1 << 20) - 1,
2943            LabelUse::PCRel32 => 0x7fffffff,
2944        }
2945    }
2946
2947    /// Maximum PC-relative range (negative).
2948    fn max_neg_range(self) -> CodeOffset {
2949        // All forms are twos-complement signed offsets, so negative limit is one more than
2950        // positive limit.
2951        self.max_pos_range() + 1
2952    }
2953
2954    /// Size of window into code needed to do the patch.
2955    fn patch_size(self) -> CodeOffset {
2956        // Patch is on one instruction only for all of these label reference types.
2957        4
2958    }
2959
2960    /// Perform the patch.
2961    fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
2962        let pc_rel = (label_offset as i64) - (use_offset as i64);
2963        debug_assert!(pc_rel <= self.max_pos_range() as i64);
2964        debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
2965        let pc_rel = pc_rel as u32;
2966        let insn_word = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
2967        let mask = match self {
2968            LabelUse::Branch14 => 0x0007ffe0, // bits 18..5 inclusive
2969            LabelUse::Branch19 => 0x00ffffe0, // bits 23..5 inclusive
2970            LabelUse::Branch26 => 0x03ffffff, // bits 25..0 inclusive
2971            LabelUse::Ldr19 => 0x00ffffe0,    // bits 23..5 inclusive
2972            LabelUse::Adr21 => 0x60ffffe0,    // bits 30..29, 25..5 inclusive
2973            LabelUse::PCRel32 => 0xffffffff,
2974        };
2975        let pc_rel_shifted = match self {
2976            LabelUse::Adr21 | LabelUse::PCRel32 => pc_rel,
2977            _ => {
2978                debug_assert!(pc_rel & 3 == 0);
2979                pc_rel >> 2
2980            }
2981        };
2982        let pc_rel_inserted = match self {
2983            LabelUse::Branch14 => (pc_rel_shifted & 0x3fff) << 5,
2984            LabelUse::Branch19 | LabelUse::Ldr19 => (pc_rel_shifted & 0x7ffff) << 5,
2985            LabelUse::Branch26 => pc_rel_shifted & 0x3ffffff,
2986            LabelUse::Adr21 => (pc_rel_shifted & 0x7ffff) << 5 | (pc_rel_shifted & 0x180000) << 10,
2987            LabelUse::PCRel32 => pc_rel_shifted,
2988        };
2989        let is_add = match self {
2990            LabelUse::PCRel32 => true,
2991            _ => false,
2992        };
2993        let insn_word = if is_add {
2994            insn_word.wrapping_add(pc_rel_inserted)
2995        } else {
2996            (insn_word & !mask) | pc_rel_inserted
2997        };
2998        buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
2999    }
3000
3001    /// Is a veneer supported for this label reference type?
3002    fn supports_veneer(self) -> bool {
3003        match self {
3004            LabelUse::Branch14 | LabelUse::Branch19 => true, // veneer is a Branch26
3005            LabelUse::Branch26 => true,                      // veneer is a PCRel32
3006            _ => false,
3007        }
3008    }
3009
3010    /// How large is the veneer, if supported?
3011    fn veneer_size(self) -> CodeOffset {
3012        match self {
3013            LabelUse::Branch14 | LabelUse::Branch19 => 4,
3014            LabelUse::Branch26 => 20,
3015            _ => unreachable!(),
3016        }
3017    }
3018
3019    fn worst_case_veneer_size() -> CodeOffset {
3020        20
3021    }
3022
3023    /// Generate a veneer into the buffer, given that this veneer is at `veneer_offset`, and return
3024    /// an offset and label-use for the veneer's use of the original label.
3025    fn generate_veneer(
3026        self,
3027        buffer: &mut [u8],
3028        veneer_offset: CodeOffset,
3029    ) -> (CodeOffset, LabelUse) {
3030        match self {
3031            LabelUse::Branch14 | LabelUse::Branch19 => {
3032                // veneer is a Branch26 (unconditional branch). Just encode directly here -- don't
3033                // bother with constructing an Inst.
3034                let insn_word = 0b000101 << 26;
3035                buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
3036                (veneer_offset, LabelUse::Branch26)
3037            }
3038
3039            // This is promoting a 26-bit call/jump to a 32-bit call/jump to
3040            // get a further range. This jump translates to a jump to a
3041            // relative location based on the address of the constant loaded
3042            // from here.
3043            //
3044            // If this path is taken from a call instruction then caller-saved
3045            // registers are available (minus arguments), so x16/x17 are
3046            // available. Otherwise for intra-function jumps we also reserve
3047            // x16/x17 as spill-style registers. In both cases these are
3048            // available for us to use.
3049            LabelUse::Branch26 => {
3050                let tmp1 = regs::spilltmp_reg();
3051                let tmp1_w = regs::writable_spilltmp_reg();
3052                let tmp2 = regs::tmp2_reg();
3053                let tmp2_w = regs::writable_tmp2_reg();
3054                // ldrsw x16, 16
3055                let ldr = emit::enc_ldst_imm19(0b1001_1000, 16 / 4, tmp1);
3056                // adr x17, 12
3057                let adr = emit::enc_adr(12, tmp2_w);
3058                // add x16, x16, x17
3059                let add = emit::enc_arith_rrr(0b10001011_000, 0, tmp1_w, tmp1, tmp2);
3060                // br x16
3061                let br = emit::enc_br(tmp1);
3062                buffer[0..4].clone_from_slice(&u32::to_le_bytes(ldr));
3063                buffer[4..8].clone_from_slice(&u32::to_le_bytes(adr));
3064                buffer[8..12].clone_from_slice(&u32::to_le_bytes(add));
3065                buffer[12..16].clone_from_slice(&u32::to_le_bytes(br));
3066                // the 4-byte signed immediate we'll load is after these
3067                // instructions, 16-bytes in.
3068                (veneer_offset + 16, LabelUse::PCRel32)
3069            }
3070
3071            _ => panic!("Unsupported label-reference type for veneer generation!"),
3072        }
3073    }
3074
3075    fn from_reloc(reloc: Reloc, addend: Addend) -> Option<LabelUse> {
3076        match (reloc, addend) {
3077            (Reloc::Arm64Call, 0) => Some(LabelUse::Branch26),
3078            _ => None,
3079        }
3080    }
3081}
3082
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn inst_size_test() {
        // Guard against unintentionally growing the `Inst` enum: a larger
        // `Inst` inflates every instruction buffer during compilation.
        let expected = match (cfg!(target_pointer_width = "32"), cfg!(target_arch = "arm")) {
            (true, false) => 28,
            _ => 32,
        };
        assert_eq!(expected, std::mem::size_of::<Inst>());
    }
}